Merge V8 at 3.9.24.13

Bug: 5688872
Change-Id: Id0aa8d23375030494d3189c31774059c0f5398fc
diff --git a/.gitignore b/.gitignore
index b61faef..088daea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,10 +23,10 @@
 /build/gyp
 /obj/
 /out/
-/test/es5conform/data/
-/test/mozilla/data/
-/test/sputnik/sputniktests/
-/test/test262/data/
+/test/es5conform/data
+/test/mozilla/data
+/test/sputnik/sputniktests
+/test/test262/data
 /tools/oom_dump/oom_dump
 /tools/oom_dump/oom_dump.o
 /tools/visual_studio/Debug
diff --git a/AUTHORS b/AUTHORS
index fcb5c20..dfefad1 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -8,6 +8,7 @@
 ARM Ltd.
 Hewlett-Packard Development Company, LP
 Igalia, S.L.
+Joyent, Inc.
 
 Akinori MUSHA <knu@FreeBSD.org>
 Alexander Botero-Lowry <alexbl@FreeBSD.org>
@@ -22,14 +23,18 @@
 Dineel D Sule <dsule@codeaurora.org>
 Erich Ocean <erich.ocean@me.com>
 Fedor Indutny <fedor@indutny.com>
+Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
 Jan de Mooij <jandemooij@gmail.com>
 Jay Freeman <saurik@saurik.com>
 Joel Stanley <joel.stan@gmail.com>
 John Jozwiak <jjozwiak@codeaurora.org>
+Jonathan Liu <net147@gmail.com>
 Kun Zhang <zhangk@codeaurora.org>
 Martyn Capewell <martyn.capewell@arm.com>
+Mathias Bynens <mathias@qiwi.be>
 Matt Hanselman <mjhanselman@gmail.com>
 Maxim Mossienko <maxim.mossienko@gmail.com>
+Michael Lutz <michi@icosahedron.de>
 Michael Smith <mike@w3.org>
 Mike Gilbert <floppymaster@gmail.com>
 Paolo Giarrusso <p.giarrusso@gmail.com>
@@ -42,6 +47,8 @@
 Ryan Dahl <coldredlemur@gmail.com>
 Sanjoy Das <sanjoy@playingwithpointers.com>
 Subrato K De <subratokde@codeaurora.org>
+Tobias Burnus <burnus@net-b.de>
 Vlad Burlik <vladbph@gmail.com>
 Yuqiang Xian <yuqiang.xian@intel.com>
 Zaheer Ahmad <zahmad@codeaurora.org>
+Zhongping Wang <kewpie.w.zp@gmail.com>
diff --git a/Android.libv8.mk b/Android.libv8.mk
index 7395b26..e7c4135 100644
--- a/Android.libv8.mk
+++ b/Android.libv8.mk
@@ -26,6 +26,10 @@
   src/platform-linux.cc \
   src/platform-posix.cc
 
+ifeq ($(TARGET_ARCH),x86)
+V8_LOCAL_SRC_FILES += src/atomicops_internals_x86_gcc.cc
+endif
+
 LOCAL_SRC_FILES := $(V8_LOCAL_SRC_FILES)
 
 LOCAL_JS_LIBRARY_FILES := $(addprefix $(LOCAL_PATH)/, $(V8_LOCAL_JS_LIBRARY_FILES))
diff --git a/Android.mksnapshot.mk b/Android.mksnapshot.mk
index d52d2af..a94c02b 100644
--- a/Android.mksnapshot.mk
+++ b/Android.mksnapshot.mk
@@ -20,6 +20,10 @@
   src/arm/simulator-arm.cc \
   src/snapshot-empty.cc
 
+ifeq ($(HOST_ARCH),x86)
+V8_LOCAL_SRC_FILES += src/atomicops_internals_x86_gcc.cc
+endif
+
 ifeq ($(HOST_OS),linux)
 V8_LOCAL_SRC_FILES += \
   src/platform-linux.cc \
diff --git a/Android.v8common.mk b/Android.v8common.mk
index 20a796a..71ffe57 100644
--- a/Android.v8common.mk
+++ b/Android.v8common.mk
@@ -22,6 +22,7 @@
 	src/counters.cc \
 	src/cpu-profiler.cc \
 	src/data-flow.cc \
+	src/date.cc \
 	src/dateparser.cc \
 	src/debug.cc \
 	src/debug-agent.cc \
@@ -42,12 +43,13 @@
 	src/func-name-inferrer.cc \
 	src/global-handles.cc \
 	src/handles.cc \
-	src/hashmap.cc \
 	src/heap.cc \
 	src/heap-profiler.cc \
 	src/hydrogen.cc \
 	src/hydrogen-instructions.cc \
 	src/ic.cc \
+	src/incremental-marking.cc \
+	src/interface.cc \
 	src/interpreter-irregexp.cc \
 	src/isolate.cc \
 	src/jsregexp.cc \
@@ -60,6 +62,7 @@
 	src/messages.cc \
 	src/objects.cc \
 	src/objects-visiting.cc \
+	src/once.cc \
 	src/parser.cc \
 	src/preparse-data.cc \
 	src/preparser.cc \
@@ -79,6 +82,7 @@
 	src/serialize.cc \
 	src/snapshot-common.cc \
 	src/spaces.cc \
+	src/store-buffer.cc \
 	src/string-search.cc \
 	src/string-stream.cc \
 	src/strtod.cc \
@@ -120,7 +124,6 @@
 
 ifeq ($(TARGET_ARCH),x86)
   V8_LOCAL_SRC_FILES += \
-		src/atomicops_internals_x86_gcc.cc \
 		src/ia32/assembler-ia32.cc \
 		src/ia32/builtins-ia32.cc \
 		src/ia32/codegen-ia32.cc \
@@ -175,6 +178,6 @@
 	src/macros.py
 
 V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES := \
-	src/proxy.js \
-	src/weakmap.js
+	src/collection.js \
+	src/proxy.js
 
diff --git a/ChangeLog b/ChangeLog
index 99495dd..2240ec0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,647 @@
+2012-03-23: Version 3.9.24
+
+        Activated count-based profiler for ARM.
+
+        Fixed use of proxies as f.prototype properties. (issue 2021)
+
+        Enabled snapshots on MIPS.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-03-21: Version 3.9.23
+
+        Use correct arguments adaptation environment when inlining function
+        containing arguments. (Issue 2014)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-03-20: Version 3.9.22
+
+        Enabled count-based profiler by default.
+
+        Implemented a hash based look-up to speed up address checks
+        in large object space (issue 853).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-03-19: Version 3.9.21
+
+        Fixed push-to-trunk script (and re-push).
+
+        Added API call that identifies strings that are guaranteed only to
+        contain ASCII characters.
+
+
+2012-03-19: Version 3.9.20
+
+        Fixed declarations escaping global strict eval. (Issue 1624)
+
+        Fixed wrapping of receiver for non-strict callbacks. (Issue 1973)
+
+        Fixed function declarations overwriting read-only global properties.
+        (Chromium issue 115452)
+
+        Fixed --use-strict flag in combination with --harmony[-scoping].
+
+        Debugger: naive implementation of "step into Function.prototype.bind".
+
+        Debugger: added ability to set script source from within OnBeforeCompile.
+
+        Added flag to always call DebugBreak on abort.
+
+        Re-enabled constructor inlining and inline === comparison with boolean
+        constants. (Issue 2009)
+
+        Don't use an explicit s0 in ClampDoubleToUint8. (Issue 2004)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-03-14: Version 3.9.19
+
+        Ensure there is a smi check of the receiver for global load and call
+        ICs (Chromium issue 117794).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-03-13: Version 3.9.18
+
+        Ensure consistency of Math.sqrt on Intel platforms.
+
+        Remove static initializers in v8. (issue 1859)
+
+        Add explicit dependency on v8_base in the GYP-based build.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-03-12: Version 3.9.17
+
+        Fixed VFP detection through compiler defines. (issue 1996)
+
+        Add Code-related fields to postmortem metadata.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-03-09: Version 3.9.16
+
+        Added basic interface inference for modules (behind the --harmony flag).
+
+        Added Object.is, Number.isFinite, Number.isNaN.
+
+        Updated the Unicode tables to Unicode version 6.1.0.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-03-06: Version 3.9.15
+
+        Fix the heap profiler crash caused by memory layout changes between
+        passes.
+
+        Fix Error.prototype.toString to throw TypeError. (issue 1980)
+
+        Fix double-rounding in strtod for MinGW. (issue 1062)
+
+        Fix corrupted snapshot serialization on ia32. (Chromium issue v8/1985)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-03-01: Version 3.9.14
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-29: Version 3.9.13
+
+        Added code kind check before preparing for OSR. (issue 1900, 115073)
+
+        Fixed issue 1802: Pass zone explicitly to zone-allocation on x64 and
+        ARM.
+
+        Ported string construct stub to x64. (issue 849)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-28: Version 3.9.12
+
+        Fixed the negative lookup stub to handle deleted entries in a
+        dictionary. (issue 1964)
+
+        Added a new API where the host can supply a callback function. The
+        callback function can resolve the location of a return address on stack
+        to the location where a return-address rewriting profiler stashed the
+        original return address.
+
+        Fixed Chromium issue http://crbug.com/115646: When compiling for-in,
+        pass correct context value to the increment instruction.
+
+        Fixed issue 1853: Update breakpoints set with partial file name after
+        compile.
+
+
+2012-02-27: Version 3.9.11
+
+        Made 'module' a context-sensitive keyword (V8 issue 1957).
+
+
+2012-02-24: Version 3.9.10
+
+        Fixed V8 issues 1322, 1772 and 1969.
+
+        Conformance improvements.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-23: Version 3.9.9
+
+        Supported fast case for-in in Crankshaft.
+
+        Sped up heap snapshot serialization and dominators construction.
+
+        Randomized allocation addresses on Windows. (Chromium issue 115151)
+
+        Fixed compilation with MinGW-w64. (issue 1943)
+
+        Fixed incorrect value of assignments to non-extensible properties.
+
+        Fixed a crash bug in generated code on ia32.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-21: Version 3.9.8
+
+        Fixed memory leak and missing #include in StartupDataDecompressor
+        (issue 1960).
+
+        Renamed static methods to avoid shadowing virtual methods and fix Clang
+        C++11 compile error.
+
+        Fixed sequence of element access in array builtins (issue 1790).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-16: Version 3.9.7
+
+        Fixed V8 issues 1322, 1878, 1942, 1945 and Chromium issue 113924.
+
+        Fixed GCC-4.7 warnings.
+
+        Added Navier-Stokes benchmark.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-14: Version 3.9.6
+
+        Fixed template-related linker error. (issue 1936)
+
+        Allowed inlining of functions containing object literals.  (issue 1322)
+
+        Added --call-graph-size option to tickprocessor.  (issue 1937)
+
+        Heap Snapshot maximum size limit is too low for really big apps. At the
+        moment the limit is 256MB.  (Chromium issue 113015)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-09: Version 3.9.5
+
+        Removed unused command line flags.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-08: Version 3.9.4
+
+        Properly initialize element-transitioning array literals on ARM.
+        (issue 1930)
+
+        Bug fixes on all platforms.
+
+
+2012-02-07: Version 3.9.3
+
+        When rethrowing an exception, print the stack trace of its original
+        site instead of rethrow site (Chromium issue 60240).
+
+        Increased size of small stacks from 32k to 64k to avoid hitting limits
+        in Chromium (Chromium issue 112843).
+
+
+2012-02-06: Version 3.9.2
+
+        Added timestamp to --trace-gc output. (issue 1932)
+
+        Heap profiler reports implicit references.
+
+        Optionally export metadata with libv8 to enable debuggers to inspect V8
+        state.
+
+
+2012-02-02: Version 3.9.1
+
+        Fixed memory leak in NativeObjectsExplorer::FindOrAddGroupInfo
+        (Chromium issue 112315).
+
+        Fixed a crash in dev tools (Chromium issue 107996).
+
+        Added 'dependencies_traverse': 1 to v8 GYP target.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-01: Version 3.9.0
+
+        Reduced memory use immediately after starting V8.
+
+        Stability fixes and performance improvements on all platforms.
+
+
+2012-01-26: Version 3.8.9
+
+        Flush number string cache on GC (issue 1605).
+
+        Provide access to function inferred name with
+        v8::Function::GetInferredName in V8 public API.
+
+        Fix building with Clang (issue 1912).
+
+        Reduce the space used by the stack for the profiling thread.
+
+        Fix misleading documentation of v8::Locker (issue 542).
+
+        Introduce readbinary function in d8 to read binary files.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-01-23: Version 3.8.8
+
+        Limited number of loop iterations in Heap::ReserveSpace
+        (Chromium issue 99027).
+
+        Fixed solaris build (VirtualMemory) (issue 1761).
+
+        Fixed strict vs. non-strict handling of function proxies in
+        higher-order array and string methods.
+
+        Enabled asynchronous remote debugging with d8 (issue 1691).
+
+        Stability and performance improvements on all platforms.
+
+
+2012-01-19: Version 3.8.7
+
+        Ensure that LRandom restores rsi after call to the C function on x64.
+        (Chromium issue http://crbug.com/110509)
+
+        Fixed include issues on *bsd when building with scons.
+        (issue 1897)
+
+        Provide a switch to specify -fno-strict-aliasing
+        (issue 1887)
+
+        Move WIN32 define from standalone.gypi to common.gypi
+        (issue 1760)
+
+        Fix corner-case in heap size estimation.
+        (issue 1893)
+
+        Fix and enable NEW_NON_STRICT_FAST ArgumentsAccess stub on x64.
+        (issue 1903)
+
+        Performance improvements and bug fixes.
+
+
+2012-01-16: Version 3.8.6
+
+        Add primitive WebGL array support to d8.
+
+        Improve heap size estimation (issue 1893).
+
+        Hash collision DOS workaround extended from string keys
+        to numeric keys.
+
+        Provide an API for iterating through all external strings referenced
+        from the JS heap.
+
+        Adjust position recorded for call expressions. http://crbug.com/109195
+
+        Fix GC crash related to instanceof. http://crbug.com/109448
+
+        Performance improvements and bug fixes.
+
+
+2012-01-05: Version 3.8.5
+
+        Fix broken test that assumes that no GC can clear the regexp cache (GC
+        can happen at any time due to Crankshaft).
+
+        Fix handling of bogus receivers for Harmony collections. (issue 1884)
+
+        Add netbsd support to gyp build.
+
+        Determine page size at runtime on posix platforms.
+
+        Ensure that store buffer filtering hash sets are cleared after
+        StoreBuffer::Filter.
+
+        Randomize the seed used for string hashing.  This helps guard against
+        CPU-eating DOS attacks against node.js servers.  Based on code from
+        Bert Belder.  This version only solves the issue for those that compile
+        V8 themselves or those that do not use snapshots.  A snapshot-based
+        precompiled V8 will still have predictable string hash codes.
+
+        Implement callback when script finishes running in V8 API.
+
+        Improve performance of Math.min and Math.max for the case of two
+        arguments. (issue 1325)
+
+
+2012-01-02: Version 3.8.4
+
+        Performance improvements for large Smi-only arrays.
+
+        Fixed InternalArrays construction. (issue 1878)
+
+
+2011-12-27: Version 3.8.3
+
+        Avoid embedding new space objects into code objects in the lithium gap
+        resolver. (chromium:108296)
+
+        Bug fixes and performance optimizations on all platforms.
+
+
+2011-12-21: Version 3.8.2
+
+        Add max optimization flag to v8 gyp build to ensure V8 is always built 
+        fully optimized in Chrome.
+
+        MIPS: Bring MIPS to parity with other platforms.
+
+        Optimizations and stability improvements on all platforms.
+
+
+2011-12-19: Version 3.8.1
+
+        Fixed GCC 4.7 warnings. Patch from Tobias Burnus.
+
+        Stability improvements on all platforms.
+
+
+2011-12-13: Version 3.8.0
+
+        Fixed handling of arrays in DefineOwnProperty. (issue 1756)
+
+        Sync parser and preparser on do-while and return statements. 
+        (issue 1856)
+
+        Fixed another corner case for DefineOwnProperty on arrays (issue 1756).
+
+        Stability and performance improvements on all platforms.
+
+
+2011-12-01: Version 3.7.12
+
+        Increase tick interval for the Android platform.
+
+        Fix a bug in the register allocator. (chromium:105112)
+
+        Fix handling of recompiling code. (chromium:105375, v8:1782)
+
+        Start incremental marking on idle notification. (v8:1458)
+
+        Build fixes for various platforms.
+
+        Various performance improvements.
+
+
+2011-11-29: Version 3.7.11
+
+        Fixed bug when generating padding to ensure space for lazy
+        deoptimization.
+        (issue 1846)
+
+        Further reduced pause times due to GC.
+
+        Stability and performance improvements on all platforms.
+
+
+2011-11-23: Version 3.7.10
+
+        Set maximum length of FixedArray in terms of elements instead of
+        an absolute number of bytes.
+        (Chromium issue 103103)
+
+        Stability and performance improvements on all platforms.
+
+
+2011-11-21: Version 3.7.9
+
+        Removed exit-time destructors.
+
+        Stability and performance improvements on all platforms.
+
+
+2011-11-17: Version 3.7.8
+
+        Removed hidden prototype from builtins, i.e., deleting an overridden
+        function on builtins will not make the original function reappear.
+
+        Added NetBSD support for scons build.
+
+        Performance improvements on all platforms.
+
+
+2011-11-14: Version 3.7.7
+
+        Fix missing fast property accessors in heap snapshots.
+        (issue 1818)
+
+
+2011-11-11: Version 3.7.6
+
+        Fixed filtering of store buffer for large object pages.
+        (issue 1817)
+
+        Fixed generated hash function on all platforms.
+        (issue 1808)
+
+        Fixed Heap::Shrink to ensure that it does not free pages that are
+        still in use.
+        (Chromium issue 100414)
+
+        Stability and performance improvements on all platforms.
+
+
+2011-11-10: Version 3.7.5
+
+        Added initial gyp infrastructure for MIPS.
+
+        Implemented performance improvements to the incremental garbage
+        collector.
+
+        Added optimizations and stability improvements on all platforms.
+
+
+2011-11-07: Version 3.7.4
+
+        Proper "libv8.so.3.7.4" SONAME for Linux shared library (issue 1786).
+
+        Fix Harmony sets and maps to allow null and undefined as keys
+        (still hidden behind --harmony flag) (issue 1622).
+
+        Implement VirtualMemory on FreeBSD to fix build (issue 1807).
+
+        Enable VFP instructions for Android.
+
+        Fix error handling in Date.prototype.toISOString (issue 1792).
+
+        Bug fixes and performance improvements for all platforms.
+
+        Not officially supported but noteworthy: Crankshaft for MIPS :-)
+
+
+2011-10-28: Version 3.7.3
+
+        Slight deoptimization as a workaround for an issue with jslint:
+        Issue 1789.
+
+
+2011-10-27: Version 3.7.2
+
+        Fix bug in deoptimization.  Known issue with jslint: Issue 1789.
+
+
+2011-10-26: Version 3.7.1
+
+        Achieved 33% speedup in debug-mode tests.
+
+        Removed special casing of calls to RegExp test and exec methods with no
+        argument.  Now matches new JSC behaviour.  crbug.com/75740.
+
+        Return the empty string on cyclic references in toString (ES5
+        conformance).
+
+        Fixed bug triggered by JSBeautifier.  crbug.com/100409.
+
+        Made Math.random state per-context instead of per-process (issue 864).
+
+        Fixed stack traces to skip native functions.
+
+        Make snapshots (new contexts) smaller and faster.
+
+        Fixed handling of Function.apply for non-array arguments.
+
+        Fixed evaluation order in defineProperties to match Firefox.
+
+        Fixed handling of non-object receivers for array builtins,
+        crbug.com/100702.
+
+        Multiple fixes to improve compliance with test262.
+
+        Fixed compatibility with older Android releases.
+
+        Fixed compilation with gcc-4.5.3.
+
+        Improved performance of WriteUtf8, issue 1665.
+
+        Made native syntax an early error in the preparser.
+
+        Fixed issues 793 and 893 relating to Function.prototype.bind.
+
+        Improved let, const, Set and Map support and other Harmony features
+        (behind the --harmony flag).
+
+        Changed evaluation order for > and <= to match ES5 instead of ES3.
+
+        Bug fixes and performance improvements on all platforms.
+
+
+2011-10-13: Version 3.7.0
+
+        Fixed array handling for Object.defineOwnProperty (ES5 conformance).
+
+        Fixed issue 1757 (string slices of external strings).
+
+        Fixed issue 1759 (ARM).
+
+        Added flag --noclever-optimizations to disable some things that
+        caused trouble in the past.
+
+        Added flag --stress-compaction for testing.
+
+        Added flag --harmony to activate all experimental Harmony features.
+
+
+2011-10-10: Version 3.6.6
+
+        Added a GC pause visualization tool.
+
+        Added presubmit=no and werror=no flags to Makefile.
+
+        ES5/Test262 conformance improvements.
+
+        Fixed compilation issues with GCC 4.5.x (issue 1743).
+
+        Bug fixes and performance improvements on all platforms.
+
+
+2011-10-05: Version 3.6.5
+
+        New incremental garbage collector.
+
+        Removed the hard heap size limit (soft heap size limit is still
+        700/1400Mbytes by default).
+
+        Implemented ES5 generic Array.prototype.toString (Issue 1361).
+
+        V8 now allows surrogate pair codes in decodeURIComponent (Issue 1415).
+
+        Fixed x64 RegExp start-of-string bug (Issues 1746, 1748).
+
+        Fixed propertyIsEnumerable for numeric properties (Issue 1692).
+
+        Fixed the MinGW and Windows 2000 builds.
+
+        Fixed "Prototype chain is not searched if named property handler does
+        not set a property" (Issue 1636).
+
+        Made the RegExp.prototype object be a RegExp object (Issue 1217).
+
+        Disallowed future reserved words as labels in strict mode.
+
+        Fixed string split to correctly coerce the separator to a string
+        (Issue 1711).
+
+        API: Added an optional source length field to the Extension
+        constructor.
+
+        API: Added Debug::DisableAgent to match existing Debug::EnableAgent
+        (Issue 1573).
+
+        Added "native" target to Makefile for the benefit of Linux distros.
+
+        Fixed: debugger stops stepping outside evaluate (Issue 1639).
+
+        More work on ES-Harmony proxies.  Still hidden behind a flag.
+
+        Bug fixes and performance improvements on all platforms.
+
+
 2011-09-15: Version 3.6.4
 
         Fixed d8's broken readline history.
@@ -194,7 +838,7 @@
 
         Fix the debugger for strict-mode functions. (Chromium issue 89236)
 
-        Add GetPropertyAttribute method for Object in the API. (Patch by 
+        Add GetPropertyAttribute method for Object in the API. (Patch by
         Peter Varga)
 
         Fix -Wunused-but-set-variable for gcc-4.6 on x64. (Issue 1291)
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 3448dfb..1c13995 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -72,6 +72,10 @@
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libv8_intermediates)
 $(call add-clean-step, rm -rf $(OUT_DIR)/host/$(HOST_OS)-$(HOST_ARCH)/bin/mksnapshot)
 $(call add-clean-step, rm -rf $(OUT_DIR)/host/$(HOST_OS)-$(HOST_ARCH)/obj/EXECUTABLES/mksnapshot_intermediates)
+
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libv8_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/host/$(HOST_OS)-$(HOST_ARCH)/bin/mksnapshot)
+$(call add-clean-step, rm -rf $(OUT_DIR)/host/$(HOST_OS)-$(HOST_ARCH)/obj/EXECUTABLES/mksnapshot_intermediates)
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
diff --git a/LICENSE b/LICENSE
index e435050..2e516ba 100644
--- a/LICENSE
+++ b/LICENSE
@@ -14,7 +14,9 @@
 
   - Strongtalk assembler, the basis of the files assembler-arm-inl.h,
     assembler-arm.cc, assembler-arm.h, assembler-ia32-inl.h,
-    assembler-ia32.cc, assembler-ia32.h, assembler.cc and assembler.h.
+    assembler-ia32.cc, assembler-ia32.h, assembler-x64-inl.h,
+    assembler-x64.cc, assembler-x64.h, assembler-mips-inl.h,
+    assembler-mips.cc, assembler-mips.h, assembler.cc and assembler.h.
     This code is copyrighted by Sun Microsystems Inc. and released
     under a 3-clause BSD license.
 
@@ -24,7 +26,7 @@
 These libraries have their own licenses; we recommend you read them,
 as their terms may differ from the terms below.
 
-Copyright 2006-2011, the V8 project authors. All rights reserved.
+Copyright 2006-2012, the V8 project authors. All rights reserved.
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
 met:
diff --git a/Makefile b/Makefile
index a7b2731..5dc6ca5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -27,11 +27,14 @@
 
 
 # Variable default definitions. Override them by exporting them in your shell.
-CXX ?= "g++"  # For distcc: export CXX="distcc g++"
-LINK ?= "g++"
+CXX ?= g++
+LINK ?= g++
 OUTDIR ?= out
 TESTJOBS ?= -j16
 GYPFLAGS ?=
+TESTFLAGS ?=
+ANDROID_NDK_ROOT ?=
+ANDROID_TOOL_PREFIX = $(ANDROID_NDK_ROOT)/toolchain/bin/arm-linux-androideabi
 
 # Special build flags. Use them like this: "make library=shared"
 
@@ -50,6 +53,10 @@
 ifeq ($(disassembler), on)
   GYPFLAGS += -Dv8_enable_disassembler=1
 endif
+# objectprint=on
+ifeq ($(objectprint), on)
+  GYPFLAGS += -Dv8_object_print=1
+endif
 # snapshot=off
 ifeq ($(snapshot), off)
   GYPFLAGS += -Dv8_use_snapshot='false'
@@ -68,18 +75,36 @@
 else
   GYPFLAGS += -Dv8_can_use_vfp_instructions=true
 endif
+# debuggersupport=off
+ifeq ($(debuggersupport), off)
+  GYPFLAGS += -Dv8_enable_debugger_support=0
+endif
 # soname_version=1.2.3
 ifdef soname_version
   GYPFLAGS += -Dsoname_version=$(soname_version)
 endif
+# werror=no
+ifeq ($(werror), no)
+  GYPFLAGS += -Dwerror=''
+endif
+# presubmit=no
+ifeq ($(presubmit), no)
+  TESTFLAGS += --no-presubmit
+endif
+# strictaliasing=off (workaround for GCC-4.5)
+ifeq ($(strictaliasing), off)
+  GYPFLAGS += -Dv8_no_strict_aliasing=1
+endif
 
 # ----------------- available targets: --------------------
 # - "dependencies": pulls in external dependencies (currently: GYP)
 # - any arch listed in ARCHES (see below)
 # - any mode listed in MODES
 # - every combination <arch>.<mode>, e.g. "ia32.release"
+# - "native": current host's architecture, release mode
 # - any of the above with .check appended, e.g. "ia32.release.check"
-# - default (no target specified): build all ARCHES and MODES
+# - "android": cross-compile for Android/ARM (release mode)
+# - default (no target specified): build all DEFAULT_ARCHES and MODES
 # - "check": build all targets and run all tests
 # - "<arch>.clean" for any <arch> in ARCHES
 # - "clean": clean all ARCHES
@@ -88,7 +113,8 @@
 
 # Architectures and modes to be compiled. Consider these to be internal
 # variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm
+ARCHES = ia32 x64 arm mips
+DEFAULT_ARCHES = ia32 x64 arm
 MODES = release debug
 
 # List of files that trigger Makefile regeneration:
@@ -103,16 +129,17 @@
 # File where previously used GYPFLAGS are stored.
 ENVFILE = $(OUTDIR)/environment
 
-.PHONY: all check clean dependencies $(ENVFILE).new \
+.PHONY: all check clean dependencies $(ENVFILE).new native \
         $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
-        $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES))
+        $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
+        must-set-ANDROID_NDK_ROOT
 
 # Target definitions. "all" is the default.
 all: $(MODES)
 
 # Compile targets. MODES and ARCHES are convenience targets.
 .SECONDEXPANSION:
-$(MODES): $(addsuffix .$$@,$(ARCHES))
+$(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
 
 $(ARCHES): $(addprefix $$@.,$(MODES))
 
@@ -124,21 +151,44 @@
 	                     python -c "print raw_input().capitalize()") \
 	         builddir="$(shell pwd)/$(OUTDIR)/$@"
 
+native: $(OUTDIR)/Makefile-native
+	@$(MAKE) -C "$(OUTDIR)" -f Makefile-native \
+	         CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
+	         builddir="$(shell pwd)/$(OUTDIR)/$@"
+
+# TODO(jkummerow): add "android.debug" when we need it.
+android android.release: $(OUTDIR)/Makefile-android
+	@$(MAKE) -C "$(OUTDIR)" -f Makefile-android \
+	        CXX="$(ANDROID_TOOL_PREFIX)-g++" \
+	        AR="$(ANDROID_TOOL_PREFIX)-ar" \
+	        RANLIB="$(ANDROID_TOOL_PREFIX)-ranlib" \
+	        CC="$(ANDROID_TOOL_PREFIX)-gcc" \
+	        LD="$(ANDROID_TOOL_PREFIX)-ld" \
+	        LINK="$(ANDROID_TOOL_PREFIX)-g++" \
+	        BUILDTYPE=Release \
+	        builddir="$(shell pwd)/$(OUTDIR)/android.release"
+
 # Test targets.
 check: all
-	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)
+	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	    --arch=$(shell echo $(DEFAULT_ARCHES) | sed -e 's/ /,/g') \
+	    $(TESTFLAGS)
 
 $(addsuffix .check,$(MODES)): $$(basename $$@)
 	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
-	    --mode=$(basename $@)
+	    --mode=$(basename $@) $(TESTFLAGS)
 
 $(addsuffix .check,$(ARCHES)): $$(basename $$@)
 	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
-	    --arch=$(basename $@)
+	    --arch=$(basename $@) $(TESTFLAGS)
 
 $(CHECKS): $$(basename $$@)
 	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
-	    --arch-and-mode=$(basename $@)
+	    --arch-and-mode=$(basename $@) $(TESTFLAGS)
+
+native.check: native
+	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
+	    --arch-and-mode=. $(TESTFLAGS)
 
 # Clean targets. You can clean each architecture individually, or everything.
 $(addsuffix .clean,$(ARCHES)):
@@ -147,7 +197,17 @@
 	rm -rf $(OUTDIR)/$(basename $@).debug
 	find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
 
-clean: $(addsuffix .clean,$(ARCHES))
+native.clean:
+	rm -f $(OUTDIR)/Makefile-native
+	rm -rf $(OUTDIR)/native
+	find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete
+
+android.clean:
+	rm -f $(OUTDIR)/Makefile-android
+	rm -rf $(OUTDIR)/android.release
+	find $(OUTDIR) -regex '.*\(host\|target\)-android\.mk' -delete
+
+clean: $(addsuffix .clean,$(ARCHES)) native.clean
 
 # GYP file generation targets.
 $(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
@@ -160,11 +220,32 @@
 	              -Ibuild/standalone.gypi --depth=. -Dtarget_arch=x64 \
 	              -S-x64 $(GYPFLAGS)
 
-$(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE)
+$(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE) build/armu.gypi
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
 	              -S-arm $(GYPFLAGS)
 
+$(OUTDIR)/Makefile-mips: $(GYPFILES) $(ENVFILE) build/mipsu.gypi
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	              -Ibuild/standalone.gypi --depth=. -Ibuild/mipsu.gypi \
+	              -S-mips $(GYPFLAGS)
+
+$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE)
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	              -Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)
+
+$(OUTDIR)/Makefile-android: $(GYPFILES) $(ENVFILE) build/android.gypi \
+                            must-set-ANDROID_NDK_ROOT
+	CC="${ANDROID_TOOL_PREFIX}-gcc" \
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	              -Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
+	              -S-android $(GYPFLAGS)
+
+must-set-ANDROID_NDK_ROOT:
+ifndef ANDROID_NDK_ROOT
+	  $(error ANDROID_NDK_ROOT is not set)
+endif
+
 # Replaces the old with the new environment file if they're different, which
 # will trigger GYP to regenerate Makefiles.
 $(ENVFILE): $(ENVFILE).new
diff --git a/SConstruct b/SConstruct
index f9c33ca..34d0efc 100644
--- a/SConstruct
+++ b/SConstruct
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -33,6 +33,7 @@
 from os.path import join, dirname, abspath
 from types import DictType, StringTypes
 root_dir = dirname(File('SConstruct').rfile().abspath)
+src_dir = join(root_dir, 'src')
 sys.path.insert(0, join(root_dir, 'tools'))
 import js2c, utils
 
@@ -53,7 +54,7 @@
 
 LIBRARY_FLAGS = {
   'all': {
-    'CPPPATH': [join(root_dir, 'src')],
+    'CPPPATH': [src_dir],
     'regexp:interpreted': {
       'CPPDEFINES': ['V8_INTERPRETED_REGEXP']
     },
@@ -111,13 +112,13 @@
       }
     },
     'os:freebsd': {
-      'CPPPATH' : ['/usr/local/include'],
+      'CPPPATH' : [src_dir, '/usr/local/include'],
       'LIBPATH' : ['/usr/local/lib'],
       'CCFLAGS':      ['-ansi'],
       'LIBS': ['execinfo']
     },
     'os:openbsd': {
-      'CPPPATH' : ['/usr/local/include'],
+      'CPPPATH' : [src_dir, '/usr/local/include'],
       'LIBPATH' : ['/usr/local/lib'],
       'CCFLAGS':      ['-ansi'],
     },
@@ -125,10 +126,14 @@
       # On Solaris, to get isinf, INFINITY, fpclassify and other macros one
       # needs to define __C99FEATURES__.
       'CPPDEFINES': ['__C99FEATURES__'],
-      'CPPPATH' : ['/usr/local/include'],
+      'CPPPATH' : [src_dir, '/usr/local/include'],
       'LIBPATH' : ['/usr/local/lib'],
       'CCFLAGS':      ['-ansi'],
     },
+    'os:netbsd': {
+      'CPPPATH' : [src_dir, '/usr/pkg/include'],
+      'LIBPATH' : ['/usr/pkg/lib'],
+    },
     'os:win32': {
       'CCFLAGS':      ['-DWIN32'],
       'CXXFLAGS':     ['-DWIN32'],
@@ -180,6 +185,9 @@
       'mips_arch_variant:mips32r2': {
         'CPPDEFINES':    ['_MIPS_ARCH_MIPS32R2']
       },
+      'mips_arch_variant:loongson': {
+        'CPPDEFINES':    ['_MIPS_ARCH_LOONGSON']
+      },
       'simulator:none': {
         'CCFLAGS':      ['-EL'],
         'LINKFLAGS':    ['-EL'],
@@ -189,6 +197,9 @@
         'mips_arch_variant:mips32r1': {
           'CCFLAGS':      ['-mips32', '-Wa,-mips32']
         },
+        'mips_arch_variant:loongson': {
+          'CCFLAGS':      ['-march=mips3', '-Wa,-march=mips3']
+        },
         'library:static': {
           'LINKFLAGS':    ['-static', '-static-libgcc']
         },
@@ -207,9 +218,12 @@
       'LINKFLAGS':    ['-m32'],
       'mipsabi:softfloat': {
         'CPPDEFINES':    ['__mips_soft_float=1'],
+        'fpu:on': {
+          'CPPDEFINES' : ['CAN_USE_FPU_INSTRUCTIONS']
+        }
       },
       'mipsabi:hardfloat': {
-        'CPPDEFINES':    ['__mips_hard_float=1'],
+        'CPPDEFINES':    ['__mips_hard_float=1', 'CAN_USE_FPU_INSTRUCTIONS'],
       }
     },
     'arch:x64': {
@@ -291,10 +305,16 @@
                        '-Werror',
                        '-W',
                        '-Wno-unused-parameter',
+                       '-Woverloaded-virtual',
                        '-Wnon-virtual-dtor']
     },
     'os:win32': {
-      'WARNINGFLAGS': ['-pedantic', '-Wno-long-long']
+      'WARNINGFLAGS': ['-pedantic',
+                       '-Wno-long-long',
+                       '-Wno-pedantic-ms-format'],
+      'library:shared': {
+        'LIBS': ['winmm', 'ws2_32']
+      }
     },
     'os:linux': {
       'WARNINGFLAGS': ['-pedantic'],
@@ -364,6 +384,9 @@
     'os:win32': {
       'LIBS': ['winmm', 'ws2_32'],
     },
+    'os:netbsd': {
+      'LIBS': ['execinfo', 'pthread']
+    },
     'compress_startup_data:bz2': {
       'os:linux': {
         'LIBS': ['bz2']
@@ -396,7 +419,7 @@
 
 CCTEST_EXTRA_FLAGS = {
   'all': {
-    'CPPPATH': [join(root_dir, 'src')],
+    'CPPPATH': [src_dir],
     'library:shared': {
       'CPPDEFINES': ['USING_V8_SHARED']
     },
@@ -428,6 +451,9 @@
     'os:win32': {
       'LIBS': ['winmm', 'ws2_32']
     },
+    'os:netbsd': {
+      'LIBS':         ['execinfo', 'pthread']
+    },
     'arch:arm': {
       'LINKFLAGS':   ARM_LINK_FLAGS
     },
@@ -450,7 +476,7 @@
 
 SAMPLE_FLAGS = {
   'all': {
-    'CPPPATH': [join(abspath('.'), 'include')],
+    'CPPPATH': [join(root_dir, 'include')],
     'library:shared': {
       'CPPDEFINES': ['USING_V8_SHARED']
     },
@@ -487,6 +513,10 @@
     'os:win32': {
       'LIBS':         ['winmm', 'ws2_32']
     },
+    'os:netbsd': {
+      'LIBPATH' :     ['/usr/pkg/lib'],
+      'LIBS':         ['execinfo', 'pthread']
+    },
     'arch:arm': {
       'LINKFLAGS':   ARM_LINK_FLAGS,
       'armeabi:soft' : {
@@ -524,6 +554,9 @@
       'mips_arch_variant:mips32r2': {
         'CPPDEFINES':    ['_MIPS_ARCH_MIPS32R2']
       },
+      'mips_arch_variant:loongson': {
+        'CPPDEFINES':    ['_MIPS_ARCH_LOONGSON']
+      },
       'simulator:none': {
         'CCFLAGS':      ['-EL'],
         'LINKFLAGS':    ['-EL'],
@@ -533,6 +566,9 @@
         'mips_arch_variant:mips32r1': {
           'CCFLAGS':      ['-mips32', '-Wa,-mips32']
         },
+        'mips_arch_variant:loongson': {
+          'CCFLAGS':      ['-march=mips3', '-Wa,-march=mips3']
+        },
         'library:static': {
           'LINKFLAGS':    ['-static', '-static-libgcc']
         },
@@ -542,7 +578,10 @@
         },
         'mipsabi:hardfloat': {
           'CCFLAGS':      ['-mhard-float'],
-          'LINKFLAGS':    ['-mhard-float']
+          'LINKFLAGS':    ['-mhard-float'],
+          'fpu:on': {
+            'CPPDEFINES' : ['CAN_USE_FPU_INSTRUCTIONS']
+          }
         }
       }
     },
@@ -629,7 +668,7 @@
 
 PREPARSER_FLAGS = {
   'all': {
-    'CPPPATH': [join(abspath('.'), 'include'), join(abspath('.'), 'src')],
+    'CPPPATH': [join(root_dir, 'include'), src_dir],
     'library:shared': {
       'CPPDEFINES': ['USING_V8_SHARED']
     },
@@ -676,6 +715,9 @@
       'mips_arch_variant:mips32r2': {
         'CPPDEFINES':    ['_MIPS_ARCH_MIPS32R2']
       },
+      'mips_arch_variant:loongson': {
+        'CPPDEFINES':    ['_MIPS_ARCH_LOONGSON']
+      },
       'simulator:none': {
         'CCFLAGS':      ['-EL'],
         'LINKFLAGS':    ['-EL'],
@@ -685,6 +727,9 @@
         'mips_arch_variant:mips32r1': {
           'CCFLAGS':      ['-mips32', '-Wa,-mips32']
         },
+        'mips_arch_variant:loongson': {
+          'CCFLAGS':      ['-march=mips3', '-Wa,-march=mips3']
+        },
         'library:static': {
           'LINKFLAGS':    ['-static', '-static-libgcc']
         },
@@ -818,6 +863,9 @@
     'os:win32': {
       'LIBS': ['winmm', 'ws2_32'],
     },
+    'os:netbsd': {
+      'LIBS': ['pthread'],
+    },
     'arch:arm': {
       'LINKFLAGS':   ARM_LINK_FLAGS
     },
@@ -951,7 +999,7 @@
     'help': 'the architecture to build for'
   },
   'os': {
-    'values': ['freebsd', 'linux', 'macos', 'win32', 'openbsd', 'solaris', 'cygwin'],
+    'values': ['freebsd', 'linux', 'macos', 'win32', 'openbsd', 'solaris', 'cygwin', 'netbsd'],
     'guess': GuessOS,
     'help': 'the os to build for'
   },
@@ -1090,7 +1138,7 @@
  'help': 'generate calling convention according to selected mips ABI'
   },
   'mips_arch_variant': {
-    'values': ['mips32r2', 'mips32r1'],
+    'values': ['mips32r2', 'mips32r1', 'loongson'],
     'default': 'mips32r2',
     'help': 'mips variant'
   },
@@ -1104,6 +1152,11 @@
     'default': 'on',
     'help': 'use vfp3 instructions when building the snapshot [Arm only]'
   },
+  'fpu': {
+    'values': ['on', 'off'],
+    'default': 'on',
+    'help': 'use fpu instructions when building the snapshot [MIPS only]'
+  },
 
 }
 
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index bdd9c2b..3c9aae8 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,2 +1,2 @@
-V8 3.6.6.19
-http://v8.googlecode.com/svn/branches/3.6@10446
+V8 3.9.24.13
+http://v8.googlecode.com/svn/branches/3.9@11337
diff --git a/benchmarks/README.txt b/benchmarks/README.txt
index 6676f37..59f76ff 100644
--- a/benchmarks/README.txt
+++ b/benchmarks/README.txt
@@ -77,3 +77,10 @@
 Furthermore, the benchmark runner was changed to run the benchmarks
 for at least a few times to stabilize the reported numbers on slower
 machines.
+
+
+Changes from Version 6 to Version 7
+===================================
+
+Added the Navier-Stokes benchmark, a 2D differential equation solver
+that stresses arithmetic computations on double arrays.
diff --git a/benchmarks/base.js b/benchmarks/base.js
index ffabf24..62c37e1 100644
--- a/benchmarks/base.js
+++ b/benchmarks/base.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -78,7 +78,7 @@
 // Scores are not comparable across versions. Bump the version if
 // you're making changes that will affect that scores, e.g. if you add
 // a new benchmark or change an existing one.
-BenchmarkSuite.version = '6';
+BenchmarkSuite.version = '7';
 
 
 // To make the benchmark results predictable, we replace Math.random
diff --git a/benchmarks/navier-stokes.js b/benchmarks/navier-stokes.js
new file mode 100644
index 0000000..b0dc3c8
--- /dev/null
+++ b/benchmarks/navier-stokes.js
@@ -0,0 +1,387 @@
+/**
+ * Copyright 2012 the V8 project authors. All rights reserved.
+ * Copyright 2009 Oliver Hunt <http://nerget.com>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+var NavierStokes = new BenchmarkSuite('NavierStokes', 1484000,
+                                      [new Benchmark('NavierStokes',
+                                                     runNavierStokes,
+                                                     setupNavierStokes,
+                                                     tearDownNavierStokes)]);
+
+var solver = null;
+
+function runNavierStokes()
+{
+    solver.update();
+}
+
+function setupNavierStokes()
+{
+    solver = new FluidField(null);
+    solver.setResolution(128, 128);
+    solver.setIterations(20);
+    solver.setDisplayFunction(function(){});
+    solver.setUICallback(prepareFrame);
+    solver.reset();
+}
+
+function tearDownNavierStokes()
+{
+    solver = null;
+}
+
+function addPoints(field) {
+    var n = 64;
+    for (var i = 1; i <= n; i++) {
+        field.setVelocity(i, i, n, n);
+        field.setDensity(i, i, 5);
+        field.setVelocity(i, n - i, -n, -n);
+        field.setDensity(i, n - i, 20);
+        field.setVelocity(128 - i, n + i, -n, -n);
+        field.setDensity(128 - i, n + i, 30);
+    }
+}
+
+var framesTillAddingPoints = 0;
+var framesBetweenAddingPoints = 5;
+
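+// prepareFrame seeds the field on a stretching schedule: after each
+// injection the gap to the next one grows by a frame, so points are
+// added on frames 0, 6, 13, 21, and so on.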
+function prepareFrame(field)
+{
+    if (framesTillAddingPoints == 0) {
+        addPoints(field);
+        framesTillAddingPoints = framesBetweenAddingPoints;
+        framesBetweenAddingPoints++;
+    } else {
+        framesTillAddingPoints--;
+    }
+}
+
+// Code from Oliver Hunt (http://nerget.com/fluidSim/pressure.js) starts here.
+function FluidField(canvas) {
+    function addFields(x, s, dt)
+    {
+        for (var i=0; i<size ; i++ ) x[i] += dt*s[i];
+    }
+
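+    // Boundary conditions: b==1 treats x as the horizontal velocity,
+    // copying it along the top/bottom walls and flipping its sign along
+    // the left/right walls; b==2 treats x as the vertical velocity with
+    // the roles reversed; any other b (density, pressure) simply copies
+    // the nearest interior cell. The four corners are averaged from
+    // their two edge neighbours.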
+    function set_bnd(b, x)
+    {
+        if (b===1) {
+            for (var i = 1; i <= width; i++) {
+                x[i] =  x[i + rowSize];
+                x[i + (height+1) *rowSize] = x[i + height * rowSize];
+            }
+
+            for (var j = 1; j <= height; j++) {
+                x[j * rowSize] = -x[1 + j * rowSize];
+                x[(width + 1) + j * rowSize] = -x[width + j * rowSize];
+            }
+        } else if (b === 2) {
+            for (var i = 1; i <= width; i++) {
+                x[i] = -x[i + rowSize];
+                x[i + (height + 1) * rowSize] = -x[i + height * rowSize];
+            }
+
+            for (var j = 1; j <= height; j++) {
+                x[j * rowSize] =  x[1 + j * rowSize];
+                x[(width + 1) + j * rowSize] =  x[width + j * rowSize];
+            }
+        } else {
+            for (var i = 1; i <= width; i++) {
+                x[i] =  x[i + rowSize];
+                x[i + (height + 1) * rowSize] = x[i + height * rowSize];
+            }
+
+            for (var j = 1; j <= height; j++) {
+                x[j * rowSize] =  x[1 + j * rowSize];
+                x[(width + 1) + j * rowSize] =  x[width + j * rowSize];
+            }
+        }
+        var maxEdge = (height + 1) * rowSize;
+        x[0]                 = 0.5 * (x[1] + x[rowSize]);
+        x[maxEdge]           = 0.5 * (x[1 + maxEdge] + x[height * rowSize]);
+        x[(width+1)]         = 0.5 * (x[width] + x[(width + 1) + rowSize]);
+        x[(width+1)+maxEdge] = 0.5 * (x[width + maxEdge] + x[(width + 1) + height * rowSize]);
+    }
+
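+    // lin_solve runs `iterations` Gauss-Seidel sweeps of the relaxation
+    // x = (x0 + a * (left + right + up + down)) / c, reusing the freshly
+    // updated left neighbour (lastX) within each row. The a == 0, c == 1
+    // case degenerates to a plain copy and is special-cased.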
+    function lin_solve(b, x, x0, a, c)
+    {
+        if (a === 0 && c === 1) {
+            for (var j=1 ; j<=height; j++) {
+                var currentRow = j * rowSize;
+                ++currentRow;
+                for (var i = 0; i < width; i++) {
+                    x[currentRow] = x0[currentRow];
+                    ++currentRow;
+                }
+            }
+            set_bnd(b, x);
+        } else {
+            var invC = 1 / c;
+            for (var k=0 ; k<iterations; k++) {
+                for (var j=1 ; j<=height; j++) {
+                    var lastRow = (j - 1) * rowSize;
+                    var currentRow = j * rowSize;
+                    var nextRow = (j + 1) * rowSize;
+                    var lastX = x[currentRow];
+                    ++currentRow;
+                    for (var i=1; i<=width; i++)
+                        lastX = x[currentRow] = (x0[currentRow] + a*(lastX+x[++currentRow]+x[++lastRow]+x[++nextRow])) * invC;
+                }
+                set_bnd(b, x);
+            }
+        }
+    }
+
+    function diffuse(b, x, x0, dt)
+    {
+        var a = 0;
+        lin_solve(b, x, x0, a, 1 + 4*a);
+    }
+
+    function lin_solve2(x, x0, y, y0, a, c)
+    {
+        if (a === 0 && c === 1) {
+            for (var j=1 ; j <= height; j++) {
+                var currentRow = j * rowSize;
+                ++currentRow;
+                for (var i = 0; i < width; i++) {
+                    x[currentRow] = x0[currentRow];
+                    y[currentRow] = y0[currentRow];
+                    ++currentRow;
+                }
+            }
+            set_bnd(1, x);
+            set_bnd(2, y);
+        } else {
+            var invC = 1/c;
+            for (var k=0 ; k<iterations; k++) {
+                for (var j=1 ; j <= height; j++) {
+                    var lastRow = (j - 1) * rowSize;
+                    var currentRow = j * rowSize;
+                    var nextRow = (j + 1) * rowSize;
+                    var lastX = x[currentRow];
+                    var lastY = y[currentRow];
+                    ++currentRow;
+                    for (var i = 1; i <= width; i++) {
+                        lastX = x[currentRow] = (x0[currentRow] + a * (lastX + x[currentRow] + x[lastRow] + x[nextRow])) * invC;
+                        lastY = y[currentRow] = (y0[currentRow] + a * (lastY + y[++currentRow] + y[++lastRow] + y[++nextRow])) * invC;
+                    }
+                }
+                set_bnd(1, x);
+                set_bnd(2, y);
+            }
+        }
+    }
+
+    function diffuse2(x, x0, y, y0, dt)
+    {
+        var a = 0;
+        lin_solve2(x, x0, y, y0, a, 1 + 4 * a);
+    }
+
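+    // Semi-Lagrangian advection: trace each cell centre backwards
+    // through the velocity field for one time step, clamp the departure
+    // point to the grid, and bilinearly interpolate d0 there (s0/s1 and
+    // t0/t1 are the interpolation weights along x and y).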
+    function advect(b, d, d0, u, v, dt)
+    {
+        var Wdt0 = dt * width;
+        var Hdt0 = dt * height;
+        var Wp5 = width + 0.5;
+        var Hp5 = height + 0.5;
+        for (var j = 1; j<= height; j++) {
+            var pos = j * rowSize;
+            for (var i = 1; i <= width; i++) {
+                var x = i - Wdt0 * u[++pos];
+                var y = j - Hdt0 * v[pos];
+                if (x < 0.5)
+                    x = 0.5;
+                else if (x > Wp5)
+                    x = Wp5;
+                var i0 = x | 0;
+                var i1 = i0 + 1;
+                if (y < 0.5)
+                    y = 0.5;
+                else if (y > Hp5)
+                    y = Hp5;
+                var j0 = y | 0;
+                var j1 = j0 + 1;
+                var s1 = x - i0;
+                var s0 = 1 - s1;
+                var t1 = y - j0;
+                var t0 = 1 - t1;
+                var row1 = j0 * rowSize;
+                var row2 = j1 * rowSize;
+                d[pos] = s0 * (t0 * d0[i0 + row1] + t1 * d0[i0 + row2]) + s1 * (t0 * d0[i1 + row1] + t1 * d0[i1 + row2]);
+            }
+        }
+        set_bnd(b, d);
+    }
+
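+    // Pressure projection: compute the divergence of (u, v) into div,
+    // solve the Poisson equation for p with lin_solve, then subtract
+    // the pressure gradient so the velocity field becomes
+    // (approximately) divergence-free.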
+    function project(u, v, p, div)
+    {
+        var h = -0.5 / Math.sqrt(width * height);
+        for (var j = 1 ; j <= height; j++ ) {
+            var row = j * rowSize;
+            var previousRow = (j - 1) * rowSize;
+            var prevValue = row - 1;
+            var currentRow = row;
+            var nextValue = row + 1;
+            var nextRow = (j + 1) * rowSize;
+            for (var i = 1; i <= width; i++ ) {
+                div[++currentRow] = h * (u[++nextValue] - u[++prevValue] + v[++nextRow] - v[++previousRow]);
+                p[currentRow] = 0;
+            }
+        }
+        set_bnd(0, div);
+        set_bnd(0, p);
+
+        lin_solve(0, p, div, 1, 4 );
+        var wScale = 0.5 * width;
+        var hScale = 0.5 * height;
+        for (var j = 1; j<= height; j++ ) {
+            var prevPos = j * rowSize - 1;
+            var currentPos = j * rowSize;
+            var nextPos = j * rowSize + 1;
+            var prevRow = (j - 1) * rowSize;
+            var currentRow = j * rowSize;
+            var nextRow = (j + 1) * rowSize;
+
+            for (var i = 1; i<= width; i++) {
+                u[++currentPos] -= wScale * (p[++nextPos] - p[++prevPos]);
+                v[currentPos]   -= hScale * (p[++nextRow] - p[++prevRow]);
+            }
+        }
+        set_bnd(1, u);
+        set_bnd(2, v);
+    }
+
+    function dens_step(x, x0, u, v, dt)
+    {
+        addFields(x, x0, dt);
+        diffuse(0, x0, x, dt );
+        advect(0, x, x0, u, v, dt );
+    }
+
+    function vel_step(u, v, u0, v0, dt)
+    {
+        addFields(u, u0, dt );
+        addFields(v, v0, dt );
+        var temp = u0; u0 = u; u = temp;
+        var temp = v0; v0 = v; v = temp;
+        diffuse2(u,u0,v,v0, dt);
+        project(u, v, u0, v0);
+        var temp = u0; u0 = u; u = temp;
+        var temp = v0; v0 = v; v = temp;
+        advect(1, u, u0, u0, v0, dt);
+        advect(2, v, v0, u0, v0, dt);
+        project(u, v, u0, v0 );
+    }
+    var uiCallback = function(d,u,v) {};
+
+    function Field(dens, u, v) {
+        // Just exposing the fields here rather than using accessors is a measurable win during display (maybe 5%)
+        // but makes the code ugly.
+        this.setDensity = function(x, y, d) {
+             dens[(x + 1) + (y + 1) * rowSize] = d;
+        }
+        this.getDensity = function(x, y) {
+             return dens[(x + 1) + (y + 1) * rowSize];
+        }
+        this.setVelocity = function(x, y, xv, yv) {
+             u[(x + 1) + (y + 1) * rowSize] = xv;
+             v[(x + 1) + (y + 1) * rowSize] = yv;
+        }
+        this.getXVelocity = function(x, y) {
+             return u[(x + 1) + (y + 1) * rowSize];
+        }
+        this.getYVelocity = function(x, y) {
+             return v[(x + 1) + (y + 1) * rowSize];
+        }
+        this.width = function() { return width; }
+        this.height = function() { return height; }
+    }
+    function queryUI(d, u, v)
+    {
+        for (var i = 0; i < size; i++)
+            u[i] = v[i] = d[i] = 0.0;
+        uiCallback(new Field(d, u, v));
+    }
+
+    this.update = function () {
+        queryUI(dens_prev, u_prev, v_prev);
+        vel_step(u, v, u_prev, v_prev, dt);
+        dens_step(dens, dens_prev, u, v, dt);
+        displayFunc(new Field(dens, u, v));
+    }
+    this.setDisplayFunction = function(func) {
+        displayFunc = func;
+    }
+
+    this.iterations = function() { return iterations; }
+    this.setIterations = function(iters) {
+        if (iters > 0 && iters <= 100)
+           iterations = iters;
+    }
+    this.setUICallback = function(callback) {
+        uiCallback = callback;
+    }
+    var iterations = 10;
+    var visc = 0.5;
+    var dt = 0.1;
+    var dens;
+    var dens_prev;
+    var u;
+    var u_prev;
+    var v;
+    var v_prev;
+    var width;
+    var height;
+    var rowSize;
+    var size;
+    var displayFunc;
+    function reset()
+    {
+        rowSize = width + 2;
+        size = (width+2)*(height+2);
+        dens = new Array(size);
+        dens_prev = new Array(size);
+        u = new Array(size);
+        u_prev = new Array(size);
+        v = new Array(size);
+        v_prev = new Array(size);
+        for (var i = 0; i < size; i++)
+            dens_prev[i] = u_prev[i] = v_prev[i] = dens[i] = u[i] = v[i] = 0;
+    }
+    this.reset = reset;
+    this.setResolution = function (hRes, wRes)
+    {
+        var res = wRes * hRes;
+        if (res > 0 && res < 1000000 && (wRes != width || hRes != height)) {
+            width = wRes;
+            height = hRes;
+            reset();
+            return true;
+        }
+        return false;
+    }
+    this.setResolution(64, 64);
+}
diff --git a/benchmarks/revisions.html b/benchmarks/revisions.html
index 6ff75be..3ce9889 100644
--- a/benchmarks/revisions.html
+++ b/benchmarks/revisions.html
@@ -19,6 +19,10 @@
 the benchmark suite.
 
 </p>
+<div class="subtitle"><h3>Version 7 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v7/run.html">link</a>)</h3></div>
+
+<p>This version includes the new Navier-Stokes benchmark, a 2D differential
+ equation solver that stresses arithmetic computations on double arrays.</p>
 
 <div class="subtitle"><h3>Version 6 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v6/run.html">link</a>)</h3></div>
 
diff --git a/benchmarks/run.html b/benchmarks/run.html
index 36d2ad5..f1d14c1 100644
--- a/benchmarks/run.html
+++ b/benchmarks/run.html
@@ -14,6 +14,7 @@
 <script type="text/javascript" src="earley-boyer.js"></script>
 <script type="text/javascript" src="regexp.js"></script>
 <script type="text/javascript" src="splay.js"></script>
+<script type="text/javascript" src="navier-stokes.js"></script>
 <link type="text/css" rel="stylesheet" href="style.css" />
 <script type="text/javascript">
 var completed = 0;
@@ -52,16 +53,16 @@
   BenchmarkSuite.RunSuites({ NotifyStep: ShowProgress,
                              NotifyError: AddError,
                              NotifyResult: AddResult,
-                             NotifyScore: AddScore }); 
+                             NotifyScore: AddScore });
 }
 
 function ShowWarningIfObsolete() {
-  // If anything goes wrong we will just catch the exception and no 
+  // If anything goes wrong we will just catch the exception and no
   // warning is shown, i.e., no harm is done.
   try {
     var xmlhttp;
-    var next_version = parseInt(BenchmarkSuite.version) + 1; 
-    var next_version_url = "../v" + next_version + "/run.html";  
+    var next_version = parseInt(BenchmarkSuite.version) + 1;
+    var next_version_url = "../v" + next_version + "/run.html";
     if (window.XMLHttpRequest) {
       xmlhttp = new window.XMLHttpRequest();
     } else if (window.ActiveXObject) {
@@ -75,7 +76,7 @@
     };
     xmlhttp.send(null);
   } catch(e) {
-    // Ignore exception if check for next version fails. 
+    // Ignore exception if check for next version fails.
     // Hence no warning is displayed.
   }
 }
@@ -83,7 +84,7 @@
 function Load() {
   var version = BenchmarkSuite.version;
   document.getElementById("version").innerHTML = version;
-  ShowWarningIfObsolete();  
+  ShowWarningIfObsolete();
   setTimeout(Run, 200);
 }
 </script>
@@ -91,11 +92,11 @@
 <body onload="Load()">
 <div>
   <div class="title"><h1>V8 Benchmark Suite - version <span id="version">?</span></h1></div>
-  <div class="warning" id="obsolete"> 
+  <div class="warning" id="obsolete">
 Warning! This is not the latest version of the V8 benchmark
-suite. Consider running the   
+suite. Consider running the
 <a href="http://v8.googlecode.com/svn/data/benchmarks/current/run.html">
-latest version</a>.  
+latest version</a>.
   </div>
   <table>
     <tr>
@@ -117,6 +118,7 @@
 (<i>1761 lines</i>).
 </li>
 <li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>394 lines</i>).</li>
+<li><b>NavierStokes</b><br>Solves Navier-Stokes equations in 2D, heavily manipulating double-precision arrays. Based on Oliver Hunt's code (<i>387 lines</i>).</li>
 </ul>
 
 <p>
diff --git a/benchmarks/run.js b/benchmarks/run.js
index da95fb4..58f6265 100644
--- a/benchmarks/run.js
+++ b/benchmarks/run.js
@@ -34,6 +34,7 @@
 load('earley-boyer.js');
 load('regexp.js');
 load('splay.js');
+load('navier-stokes.js');
 
 var success = true;
 
diff --git a/benchmarks/spinning-balls/index.html b/benchmarks/spinning-balls/index.html
new file mode 100644
index 0000000..d01f31f
--- /dev/null
+++ b/benchmarks/spinning-balls/index.html
@@ -0,0 +1,11 @@
+<html>
+<head>
+  <style>
+    body { text-align: center; }
+  </style>
+</head>
+<body>
+  <script type="text/javascript" src="splay-tree.js"></script>
+  <script type="text/javascript" src="v.js"></script>
+</body>
+</html>
diff --git a/benchmarks/spinning-balls/splay-tree.js b/benchmarks/spinning-balls/splay-tree.js
new file mode 100644
index 0000000..a88e4cb
--- /dev/null
+++ b/benchmarks/spinning-balls/splay-tree.js
@@ -0,0 +1,326 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/**
+ * Constructs a Splay tree.  A splay tree is a self-balancing binary
+ * search tree with the additional property that recently accessed
+ * elements are quick to access again. It performs basic operations
+ * such as insertion, look-up and removal in O(log(n)) amortized time.
+ *
+ * @constructor
+ */
+function SplayTree() {
+}
+
+
+/**
+ * Pointer to the root node of the tree.
+ *
+ * @type {SplayTree.Node}
+ * @private
+ */
+SplayTree.prototype.root_ = null;
+
+
+/**
+ * @return {boolean} Whether the tree is empty.
+ */
+SplayTree.prototype.isEmpty = function() {
+  return !this.root_;
+};
+
+
+/**
+ * Inserts a node into the tree with the specified key and value if
+ * the tree does not already contain a node with the specified key. If
+ * the value is inserted, it becomes the root of the tree.
+ *
+ * @param {number} key Key to insert into the tree.
+ * @param {*} value Value to insert into the tree.
+ */
+SplayTree.prototype.insert = function(key, value) {
+  if (this.isEmpty()) {
+    this.root_ = new SplayTree.Node(key, value);
+    return;
+  }
+  // Splay on the key to move the last node on the search path for
+  // the key to the root of the tree.
+  this.splay_(key);
+  if (this.root_.key == key) {
+    return;
+  }
+  var node = new SplayTree.Node(key, value);
+  if (key > this.root_.key) {
+    node.left = this.root_;
+    node.right = this.root_.right;
+    this.root_.right = null;
+  } else {
+    node.right = this.root_;
+    node.left = this.root_.left;
+    this.root_.left = null;
+  }
+  this.root_ = node;
+};
+
+
+/**
+ * Removes a node with the specified key from the tree if the tree
+ * contains a node with this key. The removed node is returned. If the
+ * key is not found, an exception is thrown.
+ *
+ * @param {number} key Key to find and remove from the tree.
+ * @return {SplayTree.Node} The removed node.
+ */
+SplayTree.prototype.remove = function(key) {
+  if (this.isEmpty()) {
+    throw Error('Key not found: ' + key);
+  }
+  this.splay_(key);
+  if (this.root_.key != key) {
+    throw Error('Key not found: ' + key);
+  }
+  var removed = this.root_;
+  if (!this.root_.left) {
+    this.root_ = this.root_.right;
+  } else {
+    var right = this.root_.right;
+    this.root_ = this.root_.left;
+    // Splay to make sure that the new root has an empty right child.
+    this.splay_(key);
+    // Insert the original right child as the right child of the new
+    // root.
+    this.root_.right = right;
+  }
+  return removed;
+};
+
+
+/**
+ * Returns the node having the specified key or null if the tree doesn't contain
+ * a node with the specified key.
+ *
+ * @param {number} key Key to find in the tree.
+ * @return {SplayTree.Node} Node having the specified key.
+ */
+SplayTree.prototype.find = function(key) {
+  if (this.isEmpty()) {
+    return null;
+  }
+  this.splay_(key);
+  return this.root_.key == key ? this.root_ : null;
+};
+
+
+/**
+ * @return {SplayTree.Node} Node having the maximum key value.
+ */
+SplayTree.prototype.findMax = function(opt_startNode) {
+  if (this.isEmpty()) {
+    return null;
+  }
+  var current = opt_startNode || this.root_;
+  while (current.right) {
+    current = current.right;
+  }
+  return current;
+};
+
+
+/**
+ * @return {SplayTree.Node} Node having the maximum key value that
+ *     is less than the specified key value.
+ */
+SplayTree.prototype.findGreatestLessThan = function(key) {
+  if (this.isEmpty()) {
+    return null;
+  }
+  // Splay on the key to move the node with the given key or the last
+  // node on the search path to the top of the tree.
+  this.splay_(key);
+  // Now the result is either the root node or the greatest node in
+  // the left subtree.
+  if (this.root_.key < key) {
+    return this.root_;
+  } else if (this.root_.left) {
+    return this.findMax(this.root_.left);
+  } else {
+    return null;
+  }
+};
+
+
+/**
+ * @return {Array<*>} An array containing all the keys of tree's nodes.
+ */
+SplayTree.prototype.exportKeys = function() {
+  var result = [];
+  if (!this.isEmpty()) {
+    this.root_.traverse_(function(node) { result.push(node.key); });
+  }
+  return result;
+};
+
+
+/**
+ * Perform the splay operation for the given key. Moves the node with
+ * the given key to the top of the tree.  If no node has the given
+ * key, the last node on the search path is moved to the top of the
+ * tree. This is the simplified top-down splaying algorithm from:
+ * "Self-adjusting Binary Search Trees" by Sleator and Tarjan
+ *
+ * @param {number} key Key to splay the tree on.
+ * @private
+ */
+SplayTree.prototype.splay_ = function(key) {
+  if (this.isEmpty()) {
+    return;
+  }
+  // Create a dummy node.  The use of the dummy node is a bit
+  // counter-intuitive: The right child of the dummy node will hold
+  // the L tree of the algorithm.  The left child of the dummy node
+  // will hold the R tree of the algorithm.  Using a dummy node, left
+  // and right will always be nodes and we avoid special cases.
+  var dummy, left, right;
+  dummy = left = right = new SplayTree.Node(null, null);
+  var current = this.root_;
+  while (true) {
+    if (key < current.key) {
+      if (!current.left) {
+        break;
+      }
+      if (key < current.left.key) {
+        // Rotate right.
+        var tmp = current.left;
+        current.left = tmp.right;
+        tmp.right = current;
+        current = tmp;
+        if (!current.left) {
+          break;
+        }
+      }
+      // Link right.
+      right.left = current;
+      right = current;
+      current = current.left;
+    } else if (key > current.key) {
+      if (!current.right) {
+        break;
+      }
+      if (key > current.right.key) {
+        // Rotate left.
+        var tmp = current.right;
+        current.right = tmp.left;
+        tmp.left = current;
+        current = tmp;
+        if (!current.right) {
+          break;
+        }
+      }
+      // Link left.
+      left.right = current;
+      left = current;
+      current = current.right;
+    } else {
+      break;
+    }
+  }
+  // Assemble.
+  left.right = current.left;
+  right.left = current.right;
+  current.left = dummy.right;
+  current.right = dummy.left;
+  this.root_ = current;
+};
+
+
+/**
+ * Constructs a Splay tree node.
+ *
+ * @param {number} key Key.
+ * @param {*} value Value.
+ */
+SplayTree.Node = function(key, value) {
+  this.key = key;
+  this.value = value;
+};
+
+
+/**
+ * @type {SplayTree.Node}
+ */
+SplayTree.Node.prototype.left = null;
+
+
+/**
+ * @type {SplayTree.Node}
+ */
+SplayTree.Node.prototype.right = null;
+
+
+/**
+ * Performs an ordered traversal of the subtree starting at
+ * this SplayTree.Node.
+ *
+ * @param {function(SplayTree.Node)} f Visitor function.
+ * @private
+ */
+SplayTree.Node.prototype.traverse_ = function(f) {
+  var current = this;
+  while (current) {
+    var left = current.left;
+    if (left) left.traverse_(f);
+    f(current);
+    current = current.right;
+  }
+};
+
+SplayTree.prototype.traverseBreadthFirst = function (f) {
+  if (this.isEmpty() || f(this.root_.value)) return;
+
+  var stack = [this.root_];
+  var length = 1;
+
+  while (length > 0) {
+    var new_stack = new Array(stack.length * 2);
+    var new_length = 0;
+    for (var i = 0; i < length; i++) {
+      var n = stack[i];
+      var l = n.left;
+      var r = n.right;
+      if (l) {
+        if (f(l.value)) return;
+        new_stack[new_length++] = l;
+      }
+      if (r) {
+        if (f(r.value)) return;
+        new_stack[new_length++] = r;
+      }
+    }
+    stack = new_stack;
+    length = new_length;
+  }
+};
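
A short usage sketch of the tree defined above; every call shown is part of
the API in this file:

    var tree = new SplayTree();
    tree.insert(3, "three");
    tree.insert(5, "five");
    tree.find(5).value;                  // "five"; find() splays 5 to the root
    tree.findGreatestLessThan(5).value;  // "three"
    tree.exportKeys();                   // [3, 5] (in-order traversal)
    tree.remove(3);                      // returns the removed node
    // tree.remove(42);                  // would throw Error('Key not found: 42')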
diff --git a/benchmarks/spinning-balls/v.js b/benchmarks/spinning-balls/v.js
new file mode 100644
index 0000000..5ae1194
--- /dev/null
+++ b/benchmarks/spinning-balls/v.js
@@ -0,0 +1,498 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+/**
+ * This function provides requestAnimationFrame in a cross browser way.
+ * http://paulirish.com/2011/requestanimationframe-for-smart-animating/
+ */
+if ( !window.requestAnimationFrame ) {
+  window.requestAnimationFrame = ( function() {
+    return window.webkitRequestAnimationFrame ||
+        window.mozRequestAnimationFrame ||
+        window.oRequestAnimationFrame ||
+        window.msRequestAnimationFrame ||
+        function(callback, element) {
+          window.setTimeout( callback, 1000 / 60 );
+        };
+  } )();
+}
+
+var kNPoints = 8000;
+var kNModifications = 20;
+var kNVisiblePoints = 200;
+var kDecaySpeed = 20;
+
+var kPointRadius = 4;
+var kInitialLifeForce = 100;
+
+var livePoints = void 0;
+var dyingPoints = void 0;
+var scene = void 0;
+var renderingStartTime = void 0;
+var div = void 0;
+var pausePlot = void 0;
+var splayTree = void 0;
+var numberOfFrames = 0;
+var sumOfSquaredPauses = 0;
+var benchmarkStartTime = void 0;
+var benchmarkTimeLimit = void 0;
+var autoScale = void 0;
+var pauseDistribution = [];
+
+
+function Point(x, y, z, payload) {
+  this.x = x;
+  this.y = y;
+  this.z = z;
+
+  this.next = null;
+  this.prev = null;
+  this.payload = payload;
+  this.lifeForce = kInitialLifeForce;
+}
+
+
+Point.prototype.color = function () {
+  return "rgba(0, 0, 0, " + (this.lifeForce / kInitialLifeForce) + ")";
+};
+
+
+Point.prototype.decay = function () {
+  this.lifeForce -= kDecaySpeed;
+  return this.lifeForce <= 0;
+};
+
+
+function PointsList() {
+  this.head = null;
+  this.count = 0;
+}
+
+
+PointsList.prototype.add = function (point) {
+  if (this.head !== null) this.head.prev = point;
+  point.next = this.head;
+  this.head = point;
+  this.count++;
+};
+
+
+PointsList.prototype.remove = function (point) {
+  if (point.next !== null) {
+    point.next.prev = point.prev;
+  }
+  if (point.prev !== null) {
+    point.prev.next = point.next;
+  } else {
+    this.head = point.next;
+  }
+  this.count--;
+};
+
+
+function GeneratePayloadTree(depth, tag) {
+  if (depth == 0) {
+    return {
+      array  : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ],
+      string : 'String for key ' + tag + ' in leaf node'
+    };
+  } else {
+    return {
+      left:  GeneratePayloadTree(depth - 1, tag),
+      right: GeneratePayloadTree(depth - 1, tag)
+    };
+  }
+}
+
+
+// To make the benchmark results predictable, we replace Math.random
+// with a 100% deterministic alternative.
+Math.random = (function() {
+  var seed = 49734321;
+  return function() {
+    // Robert Jenkins' 32 bit integer hash function.
+    seed = ((seed + 0x7ed55d16) + (seed << 12))  & 0xffffffff;
+    seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
+    seed = ((seed + 0x165667b1) + (seed << 5))   & 0xffffffff;
+    seed = ((seed + 0xd3a2646c) ^ (seed << 9))   & 0xffffffff;
+    seed = ((seed + 0xfd7046c5) + (seed << 3))   & 0xffffffff;
+    seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff;
+    return (seed & 0xfffffff) / 0x10000000;
+  };
+})();
+
+
+function GenerateKey() {
+  // Math.random is replaced above with a 100% deterministic
+  // alternative, so generated keys are reproducible across runs.
+  return Math.random();
+}
+
+function CreateNewPoint() {
+  // Insert new node with a unique key.
+  var key;
+  do { key = GenerateKey(); } while (splayTree.find(key) != null);
+
+  var point = new Point(Math.random() * 40 - 20,
+                        Math.random() * 40 - 20,
+                        Math.random() * 40 - 20,
+                        GeneratePayloadTree(5, "" + key));
+
+  livePoints.add(point);
+
+  splayTree.insert(key, point);
+  return key;
+}
+
+function ModifyPointsSet() {
+  if (livePoints.count < kNPoints) {
+    for (var i = 0; i < kNModifications; i++) {
+      CreateNewPoint();
+    }
+  } else if (kNModifications === 20) {
+    kNModifications = 80;
+    kDecaySpeed = 30;
+  }
+
+  for (var i = 0; i < kNModifications; i++) {
+    var key = CreateNewPoint();
+    var greatest = splayTree.findGreatestLessThan(key);
+    if (greatest == null) {
+      var point = splayTree.remove(key).value;
+    } else {
+      var point = splayTree.remove(greatest.key).value;
+    }
+    livePoints.remove(point);
+    point.payload = null;
+    dyingPoints.add(point);
+  }
+}
+
+
+function PausePlot(width, height, size, scale) {
+  var canvas = document.createElement("canvas");
+  canvas.width = this.width = width;
+  canvas.height = this.height = height;
+  document.body.appendChild(canvas);
+
+  this.ctx = canvas.getContext('2d');
+
+  if (typeof scale !== "number") {
+    this.autoScale = true;
+    this.maxPause = 0;
+  } else {
+    this.autoScale = false;
+    this.maxPause = scale;
+  }
+
+  this.size = size;
+
+  // Initialize cyclic buffer for pauses.
+  this.pauses = new Array(this.size);
+  this.start = this.size;
+  this.idx = 0;
+}
+
+
+PausePlot.prototype.addPause = function (p) {
+  if (this.idx === this.size) {
+    this.idx = 0;
+  }
+
+  if (this.idx === this.start) {
+    this.start++;
+  }
+
+  if (this.start === this.size) {
+    this.start = 0;
+  }
+
+  this.pauses[this.idx++] = p;
+};
+
+
+PausePlot.prototype.iteratePauses = function (f) {
+  if (this.start < this.idx) {
+    for (var i = this.start; i < this.idx; i++) {
+      f.call(this, i - this.start, this.pauses[i]);
+    }
+  } else {
+    for (var i = this.start; i < this.size; i++) {
+      f.call(this, i - this.start, this.pauses[i]);
+    }
+
+    var offs = this.size - this.start;
+    for (var i = 0; i < this.idx; i++) {
+      f.call(this, i + offs, this.pauses[i]);
+    }
+  }
+};
+
+
+PausePlot.prototype.draw = function () {
+  var first = null;
+
+  if (this.autoScale) {
+    this.iteratePauses(function (i, v) {
+      if (first === null) {
+        first = v;
+      }
+      this.maxPause = Math.max(v, this.maxPause);
+    });
+  }
+
+  var dx = this.width / this.size;
+  var dy = this.height / this.maxPause;
+
+  this.ctx.save();
+  this.ctx.clearRect(0, 0, this.width, this.height);
+  this.ctx.beginPath();
+  this.ctx.moveTo(1, dy * this.pauses[this.start]);
+  var p = first;
+  this.iteratePauses(function (i, v) {
+    var delta = v - p;
+    var x = 1 + dx * i;
+    var y = dy * v;
+    this.ctx.lineTo(x, y);
+    if (delta > 2 * (p / 3)) {
+      this.ctx.font = "bold 12px sans-serif";
+      this.ctx.textBaseline = "bottom";
+      this.ctx.fillText(v + "ms", x + 2, y);
+    }
+    p = v;
+  });
+  this.ctx.strokeStyle = "black";
+  this.ctx.stroke();
+  this.ctx.restore();
+};
+
+
+function Scene(width, height) {
+  var canvas = document.createElement("canvas");
+  canvas.width = width;
+  canvas.height = height;
+  document.body.appendChild(canvas);
+
+  this.ctx = canvas.getContext('2d');
+  this.width = canvas.width;
+  this.height = canvas.height;
+
+  // Projection configuration.
+  this.x0 = canvas.width / 2;
+  this.y0 = canvas.height / 2;
+  this.z0 = 100;
+  this.f  = 1000;  // Focal length.
+
+  // Camera is rotating around y-axis.
+  this.angle = 0;
+}
+
+
+Scene.prototype.drawPoint = function (x, y, z, color) {
+  // Rotate the camera around y-axis.
+  var rx = x * Math.cos(this.angle) - z * Math.sin(this.angle);
+  var ry = y;
+  var rz = x * Math.sin(this.angle) + z * Math.cos(this.angle);
+
+  // Perform perspective projection.
+  var px = (this.f * rx) / (rz - this.z0) + this.x0;
+  var py = (this.f * ry) / (rz - this.z0) + this.y0;
+
+  this.ctx.save();
+  this.ctx.fillStyle = color;
+  this.ctx.beginPath();
+  this.ctx.arc(px, py, kPointRadius, 0, 2 * Math.PI, true);
+  this.ctx.fill();
+  this.ctx.restore();
+};
+
+
+Scene.prototype.drawDyingPoints = function () {
+  var point_next = null;
+  for (var point = dyingPoints.head; point !== null; point = point_next) {
+    // Rotate the scene around y-axis.
+    scene.drawPoint(point.x, point.y, point.z, point.color());
+
+    point_next = point.next;
+
+    // Decay the current point and remove it from the list
+    // if its life force ran out.
+    if (point.decay()) {
+      dyingPoints.remove(point);
+    }
+  }
+};
+
+
+Scene.prototype.draw = function () {
+  this.ctx.save();
+  this.ctx.clearRect(0, 0, this.width, this.height);
+  this.drawDyingPoints();
+  this.ctx.restore();
+
+  this.angle += Math.PI / 90.0;
+};
+
+
+function updateStats(pause) {
+  numberOfFrames++;
+  if (pause > 20) {
+    sumOfSquaredPauses += (pause - 20) * (pause - 20);
+  }
+  pauseDistribution[Math.floor(pause / 10)] |= 0;  // default missing bucket to 0
+  pauseDistribution[Math.floor(pause / 10)]++;
+}
+
+
+function renderStats() {
+  var msg = document.createElement("p");
+  msg.innerHTML = "Score " +
+    Math.round(numberOfFrames * 1000 / sumOfSquaredPauses);
+  var table = document.createElement("table");
+  table.align = "center";
+  for (var i = 0; i < pauseDistribution.length; i++) {
+    if (pauseDistribution[i] > 0) {
+      var row = document.createElement("tr");
+      var time = document.createElement("td");
+      var count = document.createElement("td");
+      time.innerHTML = i*10 + "-" + (i+1)*10 + "ms";
+      count.innerHTML = " => " + pauseDistribution[i];
+      row.appendChild(time);
+      row.appendChild(count);
+      table.appendChild(row);
+    }
+  }
+  div.appendChild(msg);
+  div.appendChild(table);
+}
+
+
+function render() {
+  if (typeof renderingStartTime === 'undefined') {
+    renderingStartTime = Date.now();
+    benchmarkStartTime = renderingStartTime;
+  }
+
+  ModifyPointsSet();
+
+  scene.draw();
+
+  var renderingEndTime = Date.now();
+  var pause = renderingEndTime - renderingStartTime;
+  pausePlot.addPause(pause);
+  renderingStartTime = renderingEndTime;
+
+  pausePlot.draw();
+
+  updateStats(pause);
+
+  div.innerHTML =
+      livePoints.count + "/" + dyingPoints.count + " " +
+      pause + "(max = " + pausePlot.maxPause + ") ms " +
+      numberOfFrames + " frames";
+
+  if (renderingEndTime < benchmarkStartTime + benchmarkTimeLimit) {
+    // Schedule next frame.
+    requestAnimationFrame(render);
+  } else {
+    renderStats();
+  }
+}
+
+
+function Form() {
+  function create(tag) { return document.createElement(tag); }
+  function text(value) { return document.createTextNode(value); }
+
+  this.form = create("form");
+  this.form.setAttribute("action", "javascript:start()");
+
+  var table = create("table");
+  table.setAttribute("style", "margin-left: auto; margin-right: auto;");
+
+  function col(a) {
+    var td = create("td");
+    td.appendChild(a);
+    return td;
+  }
+
+  function row(a, b) {
+    var tr = create("tr");
+    tr.appendChild(col(a));
+    tr.appendChild(col(b));
+    return tr;
+  }
+
+  this.timelimit = create("input");
+  this.timelimit.setAttribute("value", "60");
+
+  table.appendChild(row(text("Time limit in seconds"), this.timelimit));
+
+  this.autoscale = create("input");
+  this.autoscale.setAttribute("type", "checkbox");
+  this.autoscale.setAttribute("checked", "true");
+  table.appendChild(row(text("Autoscale pauses plot"), this.autoscale));
+
+  var button = create("input");
+  button.setAttribute("type", "submit");
+  button.setAttribute("value", "Start");
+  this.form.appendChild(table);
+  this.form.appendChild(button);
+
+  document.body.appendChild(this.form);
+}
+
+
+Form.prototype.remove = function () {
+  document.body.removeChild(this.form);
+};
+
+
+function init() {
+  livePoints = new PointsList;
+  dyingPoints = new PointsList;
+
+  splayTree = new SplayTree();
+
+  scene = new Scene(640, 480);
+
+  div = document.createElement("div");
+  document.body.appendChild(div);
+
+  pausePlot = new PausePlot(480, autoScale ? 240 : 500, 160, autoScale ? void 0 : 500);
+}
+
+function start() {
+  benchmarkTimeLimit = form.timelimit.value * 1000;
+  autoScale = form.autoscale.checked;
+  form.remove();
+  init();
+  render();
+}
+
+var form = new Form();
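
The score printed by renderStats() counts only the part of each frame pause
beyond 20 ms, squared, so rare long stalls (e.g. GC pauses) dominate. A
worked sketch; scoreOf is a hypothetical helper that mirrors the logic of
updateStats() and renderStats():

    function scoreOf(pauses) {
      var sumOfSquared = 0;
      for (var i = 0; i < pauses.length; i++) {
        var over = pauses[i] - 20;         // only time beyond 20 ms counts
        if (over > 0) sumOfSquared += over * over;
      }
      return Math.round(pauses.length * 1000 / sumOfSquared);
    }
    scoreOf([10, 25, 50]);  // 5*5 + 30*30 = 925 -> round(3000 / 925) = 3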
diff --git a/build/android.gypi b/build/android.gypi
new file mode 100644
index 0000000..ffd0648
--- /dev/null
+++ b/build/android.gypi
@@ -0,0 +1,225 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Definitions for building standalone V8 binaries to run on Android.
+# This is mostly excerpted from:
+# http://src.chromium.org/viewvc/chrome/trunk/src/build/common.gypi
+
+{
+  'variables': {
+    # Location of Android NDK.
+    'variables': {
+      'variables': {
+        'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)',
+        'android_target_arch%': 'arm',  # target_arch in android terms.
+
+        # Switch between different build types, currently only '0' is
+        # supported.
+        'android_build_type%': 0,
+      },
+      'android_ndk_root%': '<(android_ndk_root)',
+      'android_ndk_sysroot': '<(android_ndk_root)/platforms/android-9/arch-<(android_target_arch)',
+      'android_build_type%': '<(android_build_type)',
+    },
+    'android_ndk_root%': '<(android_ndk_root)',
+    'android_ndk_sysroot': '<(android_ndk_sysroot)',
+    'android_ndk_include': '<(android_ndk_sysroot)/usr/include',
+    'android_ndk_lib': '<(android_ndk_sysroot)/usr/lib',
+    # Set to use the system stlport; otherwise the NDK copy is
+    # statically linked.
+    'use_system_stlport%': '<(android_build_type)',
+    'android_stlport_library': 'stlport_static',
+    # Copy it out one scope.
+    'android_build_type%': '<(android_build_type)',
+
+    'OS': 'android',
+    'target_arch': 'arm',
+    'v8_target_arch': 'arm',
+    'armv7': 1,
+    'arm_neon': 0,
+    'arm_fpu': 'vfpv3',
+  },  # variables
+  'target_defaults': {
+    'defines': [
+      'ANDROID',
+      'V8_ANDROID_LOG_STDOUT',
+    ],
+    'configurations': {
+      'Release': {
+        'cflags!': [
+          '-O2',
+          '-Os',
+        ],
+        'cflags': [
+          '-fdata-sections',
+          '-ffunction-sections',
+          '-fomit-frame-pointer',
+          '-O3',
+        ],
+      },  # Release
+    },  # configurations
+    'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter',
+                '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions', ],
+    'target_conditions': [
+      ['_toolset=="target"', {
+        'cflags!': [
+          '-pthread',  # Not supported by Android toolchain.
+        ],
+        'cflags': [
+          '-U__linux__',  # Don't allow toolchain to claim -D__linux__
+          '-ffunction-sections',
+          '-funwind-tables',
+          '-fstack-protector',
+          '-fno-short-enums',
+          '-finline-limit=64',
+          '-Wa,--noexecstack',
+          '-Wno-error=non-virtual-dtor',  # TODO(michaelbai): Fix warnings.
+          # Note: This include is in cflags to ensure that it comes after
+          # all of the includes.
+          '-I<(android_ndk_include)',
+          '-march=armv7-a',
+          '-mtune=cortex-a8',
+          '-mfpu=vfp3',
+        ],
+        'defines': [
+          'ANDROID',
+          #'__GNU_SOURCE=1',  # Necessary for clone()
+          'USE_STLPORT=1',
+          '_STLP_USE_PTR_SPECIALIZATIONS=1',
+          'HAVE_OFF64_T',
+          'HAVE_SYS_UIO_H',
+          'ANDROID_BINSIZE_HACK', # Enable temporary hacks to reduce binsize.
+        ],
+        'ldflags!': [
+          '-pthread',  # Not supported by Android toolchain.
+        ],
+        'ldflags': [
+          '-nostdlib',
+          '-Wl,--no-undefined',
+          '-Wl,--icf=safe',  # Enable identical code folding to reduce size
+          # Don't export symbols from statically linked libraries.
+          '-Wl,--exclude-libs=ALL',
+        ],
+        'libraries!': [
+            '-lrt',  # librt is built into Bionic.
+            # Not supported by Android toolchain.
+            # Where do these come from?  Can't find references in
+            # any Chromium gyp or gypi file.  Maybe they come from
+            # gyp itself?
+            '-lpthread', '-lnss3', '-lnssutil3', '-lsmime3', '-lplds4', '-lplc4', '-lnspr4',
+          ],
+          'libraries': [
+            '-l<(android_stlport_library)',
+            # Manually link the libgcc.a that the cross compiler uses.
+            '<!($CC -print-libgcc-file-name)',
+            '-lc',
+            '-ldl',
+            '-lstdc++',
+            '-lm',
+        ],
+        'conditions': [
+          ['android_build_type==0', {
+            'ldflags': [
+              '-Wl,-rpath-link=<(android_ndk_lib)',
+              '-L<(android_ndk_lib)',
+            ],
+          }],
+          # NOTE: The stlport header include paths below are specified in
+          # cflags rather than include_dirs because they need to come
+          # after include_dirs. Think of them like system headers, but
+          # don't use '-isystem' because the arm-linux-androideabi-4.4.3
+          # toolchain (circa Gingerbread) will exhibit strange errors.
+          # The include ordering here is important; change with caution.
+          ['use_system_stlport==0', {
+            'cflags': [
+              '-I<(android_ndk_root)/sources/cxx-stl/stlport/stlport',
+            ],
+            'conditions': [
+              ['target_arch=="arm" and armv7==1', {
+                'ldflags': [
+                  '-L<(android_ndk_root)/sources/cxx-stl/stlport/libs/armeabi-v7a',
+                ],
+              }],
+              ['target_arch=="arm" and armv7==0', {
+                'ldflags': [
+                  '-L<(android_ndk_root)/sources/cxx-stl/stlport/libs/armeabi',
+                ],
+              }],
+              ['target_arch=="ia32"', {
+                'ldflags': [
+                  '-L<(android_ndk_root)/sources/cxx-stl/stlport/libs/x86',
+                ],
+              }],
+            ],
+          }],
+          ['target_arch=="ia32"', {
+            # The x86 toolchain currently has problems with stack-protector.
+            'cflags!': [
+              '-fstack-protector',
+            ],
+            'cflags': [
+              '-fno-stack-protector',
+            ],
+          }],
+        ],
+        'target_conditions': [
+          ['_type=="executable"', {
+            'ldflags': [
+              '-Bdynamic',
+              '-Wl,-dynamic-linker,/system/bin/linker',
+              '-Wl,--gc-sections',
+              '-Wl,-z,nocopyreloc',
+              # crtbegin_dynamic.o should be the last item in ldflags.
+              '<(android_ndk_lib)/crtbegin_dynamic.o',
+            ],
+            'libraries': [
+              # crtend_android.o needs to be the last item in libraries.
+              # Do not add any libraries after this!
+              '<(android_ndk_lib)/crtend_android.o',
+            ],
+          }],
+          ['_type=="shared_library"', {
+            'ldflags': [
+              '-Wl,-shared,-Bsymbolic',
+            ],
+          }],
+        ],
+      }],  # _toolset=="target"
+      # Settings for building host targets using the system toolchain.
+      ['_toolset=="host"', {
+        'cflags': [ '-m32', '-pthread' ],
+        'ldflags': [ '-m32', '-pthread' ],
+        'ldflags!': [
+          '-Wl,-z,noexecstack',
+          '-Wl,--gc-sections',
+          '-Wl,-O1',
+          '-Wl,--as-needed',
+        ],
+      }],
+    ],  # target_conditions
+  },  # target_defaults
+}
\ No newline at end of file
diff --git a/build/common.gypi b/build/common.gypi
index 4e896e0..5c0c323 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -50,21 +50,35 @@
     # probing when running on the target.
     'v8_can_use_vfp_instructions%': 'false',
 
+    # Similar to vfp but on MIPS.
+    'v8_can_use_fpu_instructions%': 'true',
+
     # Setting v8_use_arm_eabi_hardfloat to true will turn on V8 support for ARM
     # EABI calling convention where double arguments are passed in VFP
     # registers. Note that the GCC flag '-mfloat-abi=hard' should be used as
     # well when compiling for the ARM target.
     'v8_use_arm_eabi_hardfloat%': 'false',
 
+    # Similar to the ARM hard float ABI but on MIPS.
+    'v8_use_mips_abi_hardfloat%': 'true',
+
+    # Default arch variant for MIPS.
+    'mips_arch_variant%': 'mips32r2',
+
     'v8_enable_debugger_support%': 1,
 
     'v8_enable_disassembler%': 0,
 
+    'v8_object_print%': 0,
+
     'v8_enable_gdbjit%': 0,
 
     # Enable profiling support. Only required on Windows.
     'v8_enable_prof%': 0,
 
+    # Some versions of GCC 4.5 seem to need -fno-strict-aliasing.
+    'v8_no_strict_aliasing%': 0,
+
     # Chrome needs this definition unconditionally. For standalone V8 builds,
     # it's handled in build/standalone.gypi.
     'want_separate_host_toolset%': 1,
@@ -72,6 +86,12 @@
     'v8_use_snapshot%': 'true',
     'host_os%': '<(OS)',
     'v8_use_liveobjectlist%': 'false',
+    'werror%': '-Werror',
+
+    # With post mortem support enabled, metadata is embedded into libv8 that
+    # describes various parameters of the VM for use by debuggers. See
+    # tools/gen-postmortem-metadata.py for details.
+    'v8_postmortem_support%': 'false',
 
     # For a shared library build, results in "libv8-<(soname_version).so".
     'soname_version%': '',
@@ -84,6 +104,9 @@
       ['v8_enable_disassembler==1', {
         'defines': ['ENABLE_DISASSEMBLER',],
       }],
+      ['v8_object_print==1', {
+        'defines': ['OBJECT_PRINT',],
+      }],
       ['v8_enable_gdbjit==1', {
         'defines': ['ENABLE_GDB_JIT_INTERFACE',],
       }],
@@ -129,7 +152,7 @@
               }],
               # The ARM assembler assumes the host is 32 bits,
               # so force building 32-bit host tools.
-              ['host_arch=="x64"', {
+              ['host_arch=="x64" or OS=="android"', {
                 'target_conditions': [
                   ['_toolset=="host"', {
                     'cflags': ['-m32'],
@@ -148,6 +171,64 @@
             'defines': [
               'V8_TARGET_ARCH_MIPS',
             ],
+            'conditions': [
+              [ 'target_arch=="mips"', {
+                'target_conditions': [
+                  ['_toolset=="target"', {
+                    'cflags': ['-EL'],
+                    'ldflags': ['-EL'],
+                    'conditions': [
+                      [ 'v8_use_mips_abi_hardfloat=="true"', {
+                        'cflags': ['-mhard-float'],
+                        'ldflags': ['-mhard-float'],
+                      }, {
+                        'cflags': ['-msoft-float'],
+                        'ldflags': ['-msoft-float'],
+                      }],
+                      ['mips_arch_variant=="mips32r2"', {
+                        'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+                      }],
+                      ['mips_arch_variant=="loongson"', {
+                        'cflags': ['-mips3', '-Wa,-mips3'],
+                      }, {
+                        'cflags': ['-mips32', '-Wa,-mips32'],
+                      }],
+                    ],
+                  }],
+                ],
+              }],
+              [ 'v8_can_use_fpu_instructions=="true"', {
+                'defines': [
+                  'CAN_USE_FPU_INSTRUCTIONS',
+                ],
+              }],
+              [ 'v8_use_mips_abi_hardfloat=="true"', {
+                'defines': [
+                  '__mips_hard_float=1',
+                  'CAN_USE_FPU_INSTRUCTIONS',
+                ],
+              }, {
+                'defines': [
+                  '__mips_soft_float=1'
+                ],
+              }],
+              ['mips_arch_variant=="mips32r2"', {
+                'defines': ['_MIPS_ARCH_MIPS32R2',],
+              }],
+              ['mips_arch_variant=="loongson"', {
+                'defines': ['_MIPS_ARCH_LOONGSON',],
+              }],
+              # The MIPS assembler assumes the host is 32 bits,
+              # so force building 32-bit host tools.
+              ['host_arch=="x64"', {
+                'target_conditions': [
+                  ['_toolset=="host"', {
+                    'cflags': ['-m32'],
+                    'ldflags': ['-m32'],
+                  }],
+                ],
+              }],
+            ],
           }],
           ['v8_target_arch=="x64"', {
             'defines': [
@@ -169,6 +250,11 @@
           'COMPRESS_STARTUP_DATA_BZ2',
         ],
       }],
+      ['OS=="win"', {
+        'defines': [
+          'WIN32',
+        ],
+      }],
       ['OS=="win" and v8_enable_prof==1', {
         'msvs_settings': {
           'VCLinkerTool': {
@@ -176,15 +262,22 @@
           },
         },
       }],
-      ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
+      ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
+         or OS=="netbsd"', {
         'conditions': [
           [ 'target_arch=="ia32"', {
             'cflags': [ '-m32' ],
             'ldflags': [ '-m32' ],
           }],
-        ],
+          [ 'v8_no_strict_aliasing==1', {
+            'cflags': [ '-fno-strict-aliasing' ],
+          }],
+        ],  # conditions
       }],
-    ],
+      ['OS=="solaris"', {
+        'defines': [ '__C99FEATURES__=1' ],  # isinf() etc.
+      }],
+    ],  # conditions
     'configurations': {
       'Debug': {
         'defines': [
@@ -217,15 +310,19 @@
           ['OS=="freebsd" or OS=="openbsd"', {
             'cflags': [ '-I/usr/local/include' ],
           }],
-          ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
-            'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
-                        '-Wnon-virtual-dtor' ],
+          ['OS=="netbsd"', {
+            'cflags': [ '-I/usr/pkg/include' ],
+          }],
+          ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
+            'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
+                        '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
           }],
         ],
-      },
+      },  # Debug
       'Release': {
         'conditions': [
-          ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
+          ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
+            or OS=="android"', {
             'cflags!': [
               '-O2',
               '-Os',
@@ -237,7 +334,7 @@
               '-O3',
             ],
             'conditions': [
-              [ 'gcc_version==44', {
+              [ 'gcc_version==44 and clang==0', {
                 'cflags': [
                   # Avoid crashes with gcc 4.4 in the v8 test suite.
                   '-fno-tree-vrp',
@@ -248,6 +345,9 @@
           ['OS=="freebsd" or OS=="openbsd"', {
             'cflags': [ '-I/usr/local/include' ],
           }],
+          ['OS=="netbsd"', {
+            'cflags': [ '-I/usr/pkg/include' ],
+          }],
           ['OS=="mac"', {
             'xcode_settings': {
               'GCC_OPTIMIZATION_LEVEL': '3',  # -O3
@@ -258,10 +358,10 @@
               # is specified explicitly.
               'GCC_STRICT_ALIASING': 'YES',
             },
-          }],
+          }],  # OS=="mac"
           ['OS=="win"', {
             'msvs_configuration_attributes': {
-              'OutputDirectory': '$(SolutionDir)$(ConfigurationName)',
+              'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
               'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
               'CharacterSet': '1',
             },
@@ -293,9 +393,9 @@
                 # 'StackReserveSize': '297152',
               },
             },
-          }],
-        ],
-      },
-    },
-  },
+          }],  # OS=="win"
+        ],  # conditions
+      },  # Release
+    },  # configurations
+  },  # target_defaults
 }
diff --git a/build/gyp_v8 b/build/gyp_v8
index dfdbe3f..4293e76 100755
--- a/build/gyp_v8
+++ b/build/gyp_v8
@@ -171,3 +171,8 @@
     gyp_args.append('-I' + v8_root + '/build/armu.gypi')
     gyp_args.append('-S-armu')
     run_gyp(gyp_args)
+
+    gyp_args = list(args)
+    gyp_args.append('-I' + v8_root + '/build/mipsu.gypi')
+    gyp_args.append('-S-mipsu')
+    run_gyp(gyp_args)
diff --git a/build/mipsu.gypi b/build/mipsu.gypi
new file mode 100644
index 0000000..637ff84
--- /dev/null
+++ b/build/mipsu.gypi
@@ -0,0 +1,33 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+  'variables': {
+    'target_arch': 'ia32',
+    'v8_target_arch': 'mips',
+  },
+}
diff --git a/build/standalone.gypi b/build/standalone.gypi
index cb5e133..e9b0565 100644
--- a/build/standalone.gypi
+++ b/build/standalone.gypi
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -35,27 +35,36 @@
     'msvs_multi_core_compile%': '1',
     'variables': {
       'variables': {
-        'conditions': [
-          [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
-            # This handles the Linux platforms we generally deal with. Anything
-            # else gets passed through, which probably won't work very well; such
-            # hosts should pass an explicit target_arch to gyp.
-            'host_arch%':
-              '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
-          }, {  # OS!="linux" and OS!="freebsd" and OS!="openbsd"
-            'host_arch%': 'ia32',
-          }],
-        ],
+        'variables': {
+          'conditions': [
+            ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
+              # This handles the Linux platforms we generally deal with.
+              # Anything else gets passed through, which probably won't work
+              # very well; such hosts should pass an explicit target_arch
+              # to gyp.
+              'host_arch%':
+                '<!(uname -m | sed -e "s/i.86/ia32/;\
+                  s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
+            }, {
+              # OS!="linux" and OS!="freebsd" and OS!="openbsd" and OS!="netbsd"
+              'host_arch%': 'ia32',
+            }],
+          ],
+        },
+        'host_arch%': '<(host_arch)',
+        'target_arch%': '<(host_arch)',
       },
       'host_arch%': '<(host_arch)',
-      'target_arch%': '<(host_arch)',
+      'target_arch%': '<(target_arch)',
       'v8_target_arch%': '<(target_arch)',
     },
     'host_arch%': '<(host_arch)',
     'target_arch%': '<(target_arch)',
     'v8_target_arch%': '<(v8_target_arch)',
+    'werror%': '-Werror',
     'conditions': [
       ['(v8_target_arch=="arm" and host_arch!="arm") or \
+        (v8_target_arch=="mips" and host_arch!="mips") or \
         (v8_target_arch=="x64" and host_arch!="x64")', {
         'want_separate_host_toolset': 1,
       }, {
@@ -72,9 +81,10 @@
     },
   },
   'conditions': [
-    [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
+    ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
+       or OS=="netbsd"', {
       'target_defaults': {
-        'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
+        'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                     '-Wnon-virtual-dtor', '-pthread', '-fno-rtti',
                     '-fno-exceptions', '-pedantic' ],
         'ldflags': [ '-pthread', ],
@@ -90,11 +100,12 @@
           }],
         ],
       },
-    }],  # 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"'
+    }],
+    # 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"
+    #  or OS=="netbsd"'
     ['OS=="win"', {
       'target_defaults': {
         'defines': [
-          'WIN32',
           '_CRT_SECURE_NO_DEPRECATE',
           '_CRT_NONSTDC_NO_DEPRECATE',
         ],
diff --git a/include/v8-debug.h b/include/v8-debug.h
old mode 100644
new mode 100755
index 504cbfe..9e85dc4
--- a/include/v8-debug.h
+++ b/include/v8-debug.h
@@ -340,6 +340,11 @@
                           bool wait_for_connection = false);
 
   /**
+   * Disables the V8 built-in debug agent; the TCP/IP connection is closed.
+   */
+  static void DisableAgent();
+
+  /**
    * Makes V8 process all pending debug messages.
    *
    * From V8 point of view all debug messages come asynchronously (e.g. from
diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index f67646f..2499bbf 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -219,8 +219,9 @@
                            // (e.g. parts of a ConsString).
     kHidden = 4,           // A link that is needed for proper sizes
                            // calculation, but may be hidden from user.
-    kShortcut = 5          // A link that must not be followed during
+    kShortcut = 5,         // A link that must not be followed during
                            // sizes calculation.
+    kWeak = 6              // A weak reference (ignored by the GC).
   };
 
   /** Returns edge type (see HeapGraphEdge::Type). */
@@ -254,7 +255,9 @@
     kClosure = 5,     // Function closure.
     kRegExp = 6,      // RegExp.
     kHeapNumber = 7,  // Number stored in the heap.
-    kNative = 8       // Native object (not from V8 heap).
+    kNative = 8,      // Native object (not from V8 heap).
+    kSynthetic = 9    // Synthetic object, usually used for grouping
+                      // snapshot items together.
   };
 
   /** Returns node type (see HeapGraphNode::Type). */
@@ -281,14 +284,8 @@
    * the objects that are reachable only from this object. In other
    * words, the size of memory that will be reclaimed having this node
    * collected.
-   *
-   * Exact retained size calculation has O(N) (number of nodes)
-   * computational complexity, while approximate has O(1). It is
-   * assumed that initially heap profiling tools provide approximate
-   * sizes for all nodes, and then exact sizes are calculated for the
-   * most 'interesting' nodes.
    */
-  int GetRetainedSize(bool exact) const;
+  int GetRetainedSize() const;
 
   /** Returns child nodes count of the node. */
   int GetChildrenCount() const;
@@ -433,6 +430,9 @@
    * handle.
    */
   static const uint16_t kPersistentHandleNoClassId = 0;
+
+  /** Returns the number of currently existing persistent handles. */
+  static int GetPersistentHandleCount();
 };
 
 
@@ -475,12 +475,23 @@
   virtual intptr_t GetHash() = 0;
 
   /**
-   * Returns human-readable label. It must be a NUL-terminated UTF-8
+   * Returns human-readable label. It must be a null-terminated UTF-8
    * encoded string. V8 copies its contents during a call to GetLabel.
    */
   virtual const char* GetLabel() = 0;
 
   /**
+   * Returns human-readable group label. It must be a null-terminated UTF-8
+   * encoded string. V8 copies its contents during a call to GetGroupLabel.
+   * Heap snapshot generator will collect all the group names, create
+   * top level entries with these names and attach the objects to the
+   * corresponding top level group objects. A default implementation
+   * is provided because existing embedders do not override this
+   * method yet.
+   */
+  virtual const char* GetGroupLabel() { return GetLabel(); }
+
+  /**
    * Returns element count in case if a global handle retains
    * a subgraph by holding one of its nodes.
    */
diff --git a/include/v8.h b/include/v8.h
index 4b7f6e7..33179f5 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -1021,6 +1021,14 @@
   V8EXPORT int Utf8Length() const;
 
   /**
+   * A fast conservative check for non-ASCII characters.  May
+   * return true even for ASCII strings, but if it returns
+   * false you can be sure that all characters are in the range
+   * 0-127.
+   */
+  V8EXPORT bool MayContainNonAscii() const;
+
+  /**
    * Write the contents of the string to an external buffer.
    * If no arguments are given, expects the buffer to be large
    * enough to hold the entire string and NULL terminator. Copies
@@ -1171,7 +1179,8 @@
    * Get the ExternalAsciiStringResource for an external ASCII string.
    * Returns NULL if IsExternalAscii() doesn't return true.
    */
-  V8EXPORT ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
+  V8EXPORT const ExternalAsciiStringResource* GetExternalAsciiStringResource()
+      const;
 
   static inline String* Cast(v8::Value* obj);
 
@@ -1197,7 +1206,7 @@
    * passed in as parameters.
    */
   V8EXPORT static Local<String> Concat(Handle<String> left,
-                                       Handle<String>right);
+                                       Handle<String> right);
 
   /**
    * Creates a new external string using the data defined in the given
@@ -1731,13 +1740,28 @@
   V8EXPORT Handle<Value> GetName() const;
 
   /**
+   * Name inferred from variable or property assignment of this function.
+   * Used to facilitate debugging and profiling of JavaScript code written
+   * in an OO style, where many functions are anonymous but are assigned
+   * to object properties.
+   */
+  V8EXPORT Handle<Value> GetInferredName() const;
+
+  /**
    * Returns zero based line number of function body and
    * kLineOffsetNotFound if no information available.
    */
   V8EXPORT int GetScriptLineNumber() const;
+  /**
+   * Returns zero based column number of function body and
+   * kLineOffsetNotFound if no information available.
+   */
+  V8EXPORT int GetScriptColumnNumber() const;
+  V8EXPORT Handle<Value> GetScriptId() const;
   V8EXPORT ScriptOrigin GetScriptOrigin() const;
   static inline Function* Cast(Value* obj);
   V8EXPORT static const int kLineOffsetNotFound;
+
  private:
   V8EXPORT Function();
   V8EXPORT static void CheckCast(Value* obj);
@@ -2451,24 +2475,42 @@
 
 // --- Extensions ---
 
+class V8EXPORT ExternalAsciiStringResourceImpl
+    : public String::ExternalAsciiStringResource {
+ public:
+  ExternalAsciiStringResourceImpl() : data_(0), length_(0) {}
+  ExternalAsciiStringResourceImpl(const char* data, size_t length)
+      : data_(data), length_(length) {}
+  const char* data() const { return data_; }
+  size_t length() const { return length_; }
+
+ private:
+  const char* data_;
+  size_t length_;
+};
 
 /**
  * Ignore
  */
 class V8EXPORT Extension {  // NOLINT
  public:
+  // Note that the strings passed into this constructor must live as long
+  // as the Extension itself.
   Extension(const char* name,
             const char* source = 0,
             int dep_count = 0,
-            const char** deps = 0);
+            const char** deps = 0,
+            int source_length = -1);
   virtual ~Extension() { }
   virtual v8::Handle<v8::FunctionTemplate>
       GetNativeFunction(v8::Handle<v8::String> name) {
     return v8::Handle<v8::FunctionTemplate>();
   }
 
-  const char* name() { return name_; }
-  const char* source() { return source_; }
+  const char* name() const { return name_; }
+  size_t source_length() const { return source_length_; }
+  const String::ExternalAsciiStringResource* source() const {
+    return &source_; }
   int dependency_count() { return dep_count_; }
   const char** dependencies() { return deps_; }
   void set_auto_enable(bool value) { auto_enable_ = value; }
@@ -2476,7 +2518,8 @@
 
  private:
   const char* name_;
-  const char* source_;
+  size_t source_length_;  // must be initialized before source_
+  ExternalAsciiStringResourceImpl source_;
   int dep_count_;
   const char** deps_;
   bool auto_enable_;
@@ -2608,6 +2651,9 @@
                                          AllocationAction action,
                                          int size);
 
+// --- Leave Script Callback ---
+typedef void (*CallCompletedCallback)();
+
 // --- Failed Access Check Callback ---
 typedef void (*FailedAccessCheckCallback)(Local<Object> target,
                                           AccessType type,
@@ -2687,7 +2733,7 @@
  * default isolate is implicitly created and entered.  The embedder
  * can create additional isolates and use them in parallel in multiple
  * threads.  An isolate can be entered by at most one thread at any
- * given time.  The Locker/Unlocker API can be used to synchronize.
+ * given time.  The Locker/Unlocker API must be used to synchronize.
  */
 class V8EXPORT Isolate {
  public:
@@ -2818,6 +2864,31 @@
  */
 typedef bool (*EntropySource)(unsigned char* buffer, size_t length);
 
+
+/**
+ * ReturnAddressLocationResolver is used as a callback function when v8 is
+ * resolving the location of a return address on the stack. Profilers that
+ * change the return address on the stack can use this to resolve the stack
+ * location to wherever the profiler stashed the original return address.
+ * When invoked, return_addr_location will point to a location on the stack
+ * where a machine return address resides; this function should return
+ * either the same pointer, or a pointer to the profiler's copy of the
+ * original return address.
+ */
+typedef uintptr_t (*ReturnAddressLocationResolver)(
+    uintptr_t return_addr_location);
+
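// Illustrative sketch, not part of this patch: the identity resolver below
// is the no-profiler case; a real profiler would map return_addr_location
// back to wherever it stashed the original return address.
static uintptr_t IdentityResolver(uintptr_t return_addr_location) {
  return return_addr_location;
}
// Installed through V8::SetReturnAddressLocationResolver, declared below:
//   v8::V8::SetReturnAddressLocationResolver(IdentityResolver);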
+
+/**
+ * Interface for iterating through all external resources in the heap.
+ */
+class V8EXPORT ExternalResourceVisitor {  // NOLINT
+ public:
+  virtual ~ExternalResourceVisitor() {}
+  virtual void VisitExternalString(Handle<String> string) {}
+};
+
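// Illustrative sketch, not part of this patch: a visitor that counts the
// external strings reported by V8::VisitExternalResources (declared below).
class ExternalStringCounter : public v8::ExternalResourceVisitor {
 public:
  ExternalStringCounter() : count_(0) {}
  virtual void VisitExternalString(v8::Handle<v8::String> string) { ++count_; }
  int count() const { return count_; }
 private:
  int count_;
};
//   ExternalStringCounter counter;
//   v8::V8::VisitExternalResources(&counter);  // debugging aid; may be slow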
+
 /**
  * Container class for static utility functions.
  */
@@ -3006,12 +3077,25 @@
                                           AllocationAction action);
 
   /**
-   * This function removes callback which was installed by
-   * AddMemoryAllocationCallback function.
+   * Removes a callback that was installed by AddMemoryAllocationCallback.
    */
   static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
 
   /**
+   * Adds a callback to notify the host application when a script has
+   * finished running.  If a script re-enters the runtime during execution,
+   * the CallCompletedCallback is only invoked when the outermost script
+   * execution ends.  Scripts executed inside the callback do not trigger
+   * further callbacks.
+   */
+  static void AddCallCompletedCallback(CallCompletedCallback callback);
+
+  /**
+   * Removes a callback that was installed by AddCallCompletedCallback.
+   */
+  static void RemoveCallCompletedCallback(CallCompletedCallback callback);
+
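// Illustrative sketch, not part of this patch: a hook that fires once per
// outermost script invocation, per the semantics documented above.
static void OnScriptCallCompleted() {
  // A typical embedder drains work queued during script execution here.
}
//   v8::V8::AddCallCompletedCallback(OnScriptCallCompleted);
//   ... run scripts; the hook fires as each outermost call returns ...
//   v8::V8::RemoveCallCompletedCallback(OnScriptCallCompleted);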
+  /**
    * Allows the host application to group objects together. If one
    * object in the group is alive, all objects in the group are alive.
    * After each garbage collection, object groups are removed. It is
@@ -3049,6 +3133,13 @@
   static void SetEntropySource(EntropySource source);
 
   /**
+   * Allows the host application to provide a callback that lets V8
+   * cooperate with a profiler that rewrites return addresses on the stack.
+   */
+  static void SetReturnAddressLocationResolver(
+      ReturnAddressLocationResolver return_address_resolver);
+
+  /**
    * Adjusts the amount of registered external memory.  Used to give
    * V8 an indication of the amount of externally allocated memory
    * that is kept alive by JavaScript objects.  V8 uses this to decide
@@ -3161,14 +3252,25 @@
   static void GetHeapStatistics(HeapStatistics* heap_statistics);
 
   /**
+   * Iterates through all external resources referenced from the current
+   * isolate's heap.  This method is intended for debugging purposes only
+   * and may be quite slow.
+   */
+  static void VisitExternalResources(ExternalResourceVisitor* visitor);
+
+  /**
    * Optional notification that the embedder is idle.
    * V8 uses the notification to reduce memory footprint.
    * This call can be used repeatedly if the embedder remains idle.
    * Returns true if the embedder should stop calling IdleNotification
    * until real work has been done.  This indicates that V8 has done
    * as much cleanup as it will be able to do.
+   *
+   * The hint argument specifies the amount of work to be done by the call,
+   * on a scale from 1 to 1000.  There is no guarantee that the actual work
+   * will match the hint.
    */
-  static bool IdleNotification();
+  static bool IdleNotification(int hint = 1000);
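// Illustrative sketch, not part of this patch: an embedder idle loop that
// requests modest slices of cleanup until V8 reports it is done.
static void RunIdleWork() {
  const int kIdleHint = 100;  // on the 1..1000 scale described above
  while (!v8::V8::IdleNotification(kIdleHint)) {
    // Keep notifying; a true return means further calls are pointless
    // until real work has been done.
  }
}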
 
   /**
    * Optional notification that the system is running low on memory.
@@ -3466,6 +3568,12 @@
   void AllowCodeGenerationFromStrings(bool allow);
 
   /**
+   * Returns true if code generation from strings is allowed for the context.
+   * For more details see AllowCodeGenerationFromStrings(bool) documentation.
+   */
+  bool IsCodeGenerationFromStringsAllowed();
+
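// Illustrative sketch, not part of this patch: pairing the new query with
// the existing setter.
static bool DisableEvalIn(v8::Handle<v8::Context> context) {
  context->AllowCodeGenerationFromStrings(false);
  // eval() and new Function() now throw inside this context.
  return context->IsCodeGenerationFromStringsAllowed();  // false
}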
+  /**
    * Stack-allocated class which sets the execution context for all
    * operations executed within a local scope.
    */
@@ -3494,13 +3602,15 @@
  * accessing handles or holding onto object pointers obtained
  * from V8 handles while in the particular V8 isolate.  It is up
  * to the user of V8 to ensure (perhaps with locking) that this
- * constraint is not violated.
+ * constraint is not violated.  In addition to any other synchronization
+ * mechanism that may be used, the v8::Locker and v8::Unlocker classes
+ * must be used to signal thread switches to V8.
  *
  * v8::Locker is a scoped lock object. While it's
  * active (i.e. between its construction and destruction) the current thread is
- * allowed to use the locked isolate. V8 guarantees that an isolate can be locked
- * by at most one thread at any time. In other words, the scope of a v8::Locker is
- * a critical section.
+ * allowed to use the locked isolate. V8 guarantees that an isolate can be
+ * locked by at most one thread at any time. In other words, the scope of a
+ * v8::Locker is a critical section.
  *
  * Sample usage:
 * \code
@@ -3602,8 +3712,8 @@
   static void StopPreemption();
 
   /**
-   * Returns whether or not the locker for a given isolate, or default isolate if NULL is given,
-   * is locked by the current thread.
+   * Returns whether or not the locker for a given isolate, or default isolate
+   * if NULL is given, is locked by the current thread.
    */
   static bool IsLocked(Isolate* isolate = NULL);
 
@@ -3677,8 +3787,8 @@
 
 namespace internal {
 
-static const int kApiPointerSize = sizeof(void*);  // NOLINT
-static const int kApiIntSize = sizeof(int);  // NOLINT
+const int kApiPointerSize = sizeof(void*);  // NOLINT
+const int kApiIntSize = sizeof(int);  // NOLINT
 
 // Tag information for HeapObject.
 const int kHeapObjectTag = 1;
@@ -3769,7 +3879,7 @@
   static const int kFullStringRepresentationMask = 0x07;
   static const int kExternalTwoByteRepresentationTag = 0x02;
 
-  static const int kJSObjectType = 0xa3;
+  static const int kJSObjectType = 0xaa;
   static const int kFirstNonstringType = 0x80;
   static const int kForeignType = 0x85;
 
diff --git a/include/v8stdint.h b/include/v8stdint.h
index 50b4f29..7c12e1f 100644
--- a/include/v8stdint.h
+++ b/include/v8stdint.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 #ifndef V8STDINT_H_
 #define V8STDINT_H_
 
+#include <stddef.h>
 #include <stdio.h>
 
 #if defined(_WIN32) && !defined(__MINGW32__)
diff --git a/preparser/preparser-process.cc b/preparser/preparser-process.cc
index e67851c..368f63f 100644
--- a/preparser/preparser-process.cc
+++ b/preparser/preparser-process.cc
@@ -200,12 +200,14 @@
   vfprintf(stderr, message, args);
   va_end(args);
   fflush(stderr);
-  // Print preparser data to stdout.
-  uint32_t size = data->size();
-  fprintf(stderr, "LOG: data size: %u\n", size);
-  if (!WriteBuffer(stdout, data->data(), size)) {
-    perror("ERROR: Writing data");
-    fflush(stderr);
+  if (data != NULL) {
+    // Print preparser data to stdout.
+    uint32_t size = data->size();
+    fprintf(stderr, "LOG: data size: %u\n", size);
+    if (!WriteBuffer(stdout, data->data(), size)) {
+      perror("ERROR: Writing data");
+      fflush(stderr);
+    }
   }
   exit(EXIT_FAILURE);
 }
@@ -267,34 +269,22 @@
 
 
 ExceptionExpectation ParseExpectation(int argc, const char* argv[]) {
+  // Parse ["throws" [<exn-type> [<start> [<end>]]]].
   ExceptionExpectation expects;
-
-  // Parse exception expectations from (the remainder of) the command line.
   int arg_index = 0;
-  // Skip any flags.
-  while (argc > arg_index && IsFlag(argv[arg_index])) arg_index++;
+  while (argc > arg_index && strncmp("throws", argv[arg_index], 7)) {
+    arg_index++;
+  }
   if (argc > arg_index) {
-    if (strncmp("throws", argv[arg_index], 7)) {
-      // First argument after filename, if present, must be the verbatim
-      // "throws", marking that the preparsing should fail with an exception.
-      fail(NULL, "ERROR: Extra arguments not prefixed by \"throws\".\n");
-    }
     expects.throws = true;
-    do {
-      arg_index++;
-    } while (argc > arg_index && IsFlag(argv[arg_index]));
-    if (argc > arg_index) {
-      // Next argument is the exception type identifier.
+    arg_index++;
+    if (argc > arg_index && !IsFlag(argv[arg_index])) {
       expects.type = argv[arg_index];
-      do {
-        arg_index++;
-      } while (argc > arg_index && IsFlag(argv[arg_index]));
-      if (argc > arg_index) {
+      arg_index++;
+      if (argc > arg_index && !IsFlag(argv[arg_index])) {
         expects.beg_pos = atoi(argv[arg_index]);  // NOLINT
-        do {
-          arg_index++;
-        } while (argc > arg_index && IsFlag(argv[arg_index]));
-        if (argc > arg_index) {
+        arg_index++;
+        if (argc > arg_index && !IsFlag(argv[arg_index])) {
           expects.end_pos = atoi(argv[arg_index]);  // NOLINT
         }
       }
@@ -308,7 +298,8 @@
   // Parse command line.
   // Format:  preparser (<scriptfile> | -e "<source>")
   //                    ["throws" [<exn-type> [<start> [<end>]]]]
-  // Any flags (except an initial -s) are ignored.
+  // Any flags (except an initial -e) are ignored.
+  // Flags must not separate "throws" and its arguments.
 
   // Check for mandatory filename argument.
   int arg_index = 1;
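// Illustrative sketch, not part of this patch: invocations matching the
// format documented above (file and source text are hypothetical).
//   preparser script.js
//   preparser script.js throws SyntaxError 10 20
//   preparser -e "var x = ;" throws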
diff --git a/src/SConscript b/src/SConscript
old mode 100644
new mode 100755
index 52607f1..0d0b535
--- a/src/SConscript
+++ b/src/SConscript
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -59,6 +59,7 @@
     counters.cc
     cpu-profiler.cc
     data-flow.cc
+    date.cc
     dateparser.cc
     debug-agent.cc
     debug.cc
@@ -78,12 +79,13 @@
     fast-dtoa.cc
     fixed-dtoa.cc
     handles.cc
-    hashmap.cc
     heap-profiler.cc
     heap.cc
     hydrogen.cc
     hydrogen-instructions.cc
     ic.cc
+    incremental-marking.cc
+    interface.cc
     inspector.cc
     interpreter-irregexp.cc
     isolate.cc
@@ -99,6 +101,7 @@
     objects.cc
     objects-printer.cc
     objects-visiting.cc
+    once.cc
     parser.cc
     preparser.cc
     preparse-data.cc
@@ -133,6 +136,7 @@
     v8utils.cc
     variables.cc
     version.cc
+    store-buffer.cc
     zone.cc
     extensions/gc-extension.cc
     extensions/externalize-string-extension.cc
@@ -170,6 +174,9 @@
     mips/frames-mips.cc
     mips/full-codegen-mips.cc
     mips/ic-mips.cc
+    mips/lithium-codegen-mips.cc
+    mips/lithium-gap-resolver-mips.cc
+    mips/lithium-mips.cc
     mips/macro-assembler-mips.cc
     mips/regexp-macro-assembler-mips.cc
     mips/stub-cache-mips.cc
@@ -241,7 +248,6 @@
     dtoa.cc
     fast-dtoa.cc
     fixed-dtoa.cc
-    hashmap.cc
     preparse-data.cc
     preparser.cc
     preparser-api.cc
@@ -319,7 +325,7 @@
 
 EXPERIMENTAL_LIBRARY_FILES = '''
 proxy.js
-weakmap.js
+collection.js
 '''.split()
 
 
diff --git a/src/accessors.cc b/src/accessors.cc
index 951209d..8048738 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -26,15 +26,16 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "v8.h"
-
 #include "accessors.h"
-#include "ast.h"
+
+#include "contexts.h"
 #include "deoptimizer.h"
 #include "execution.h"
 #include "factory.h"
+#include "frames-inl.h"
+#include "isolate.h"
 #include "list-inl.h"
-#include "safepoint-table.h"
-#include "scopeinfo.h"
+#include "property-details.h"
 
 namespace v8 {
 namespace internal {
@@ -486,16 +487,6 @@
                                                     NONE);
   }
 
-  if (function->has_initial_map()) {
-    // If the function has allocated the initial map
-    // replace it with a copy containing the new prototype.
-    Object* new_map;
-    { MaybeObject* maybe_new_map =
-          function->initial_map()->CopyDropTransitions();
-      if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
-    }
-    function->set_initial_map(Map::cast(new_map));
-  }
   Object* prototype;
   { MaybeObject* maybe_prototype = function->SetPrototype(value);
     if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
@@ -527,7 +518,9 @@
     // correctly yet. Compile it now and return the right length.
     HandleScope scope;
     Handle<JSFunction> handle(function);
-    if (!CompileLazy(handle, KEEP_EXCEPTION)) return Failure::Exception();
+    if (!JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
+      return Failure::Exception();
+    }
     return Smi::FromInt(handle->shared()->length());
   } else {
     return Smi::FromInt(function->shared()->length());
@@ -572,11 +565,12 @@
     Handle<JSFunction> inlined_function,
     int inlined_frame_index) {
   Factory* factory = Isolate::Current()->factory();
-  int args_count = inlined_function->shared()->formal_parameter_count();
-  ScopedVector<SlotRef> args_slots(args_count);
-  SlotRef::ComputeSlotMappingForArguments(frame,
-                                          inlined_frame_index,
-                                          &args_slots);
+  Vector<SlotRef> args_slots =
+      SlotRef::ComputeSlotMappingForArguments(
+          frame,
+          inlined_frame_index,
+          inlined_function->shared()->formal_parameter_count());
+  int args_count = args_slots.length();
   Handle<JSObject> arguments =
       factory->NewArgumentsObject(inlined_function, args_count);
   Handle<FixedArray> array = factory->NewFixedArray(args_count);
@@ -585,6 +579,7 @@
     array->set(i, *value);
   }
   arguments->set_elements(*array);
+  args_slots.Dispose();
 
   // Return the freshly allocated arguments object.
   return *arguments;
@@ -619,8 +614,9 @@
 
       if (!frame->is_optimized()) {
         // If there is an arguments variable in the stack, we return that.
-        Handle<SerializedScopeInfo> info(function->shared()->scope_info());
-        int index = info->StackSlotIndex(isolate->heap()->arguments_symbol());
+        Handle<ScopeInfo> scope_info(function->shared()->scope_info());
+        int index = scope_info->StackSlotIndex(
+            isolate->heap()->arguments_symbol());
         if (index >= 0) {
           Handle<Object> arguments(frame->GetExpression(index), isolate);
           if (!arguments->IsArgumentsMarker()) return *arguments;
@@ -672,7 +668,7 @@
     Isolate* isolate,
     JSFunction* caller) {
   DisableAssertNoAllocation enable_allocation;
-  if (caller->shared()->strict_mode()) {
+  if (!caller->shared()->is_classic_mode()) {
     return isolate->Throw(
         *isolate->factory()->NewTypeError("strict_caller",
                                           HandleVector<Object>(NULL, 0)));
@@ -759,7 +755,12 @@
     caller = potential_caller;
     potential_caller = it.next();
   }
-
+  // If caller is bound, return null. This is compatible with JSC, and
+  // allows us to make bound functions use the strict function map
+  // and its associated throwing caller and arguments.
+  if (caller->shared()->bound()) {
+    return isolate->heap()->null_value();
+  }
   return CheckNonStrictCallerOrThrow(isolate, caller);
 }
 
diff --git a/src/accessors.h b/src/accessors.h
index 385536d..36b9a99 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 #define V8_ACCESSORS_H_
 
 #include "allocation.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/allocation.cc b/src/allocation.cc
index 119b087..6c7a08c 100644
--- a/src/allocation.cc
+++ b/src/allocation.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
 #include "allocation.h"
+
+#include <stdlib.h>  // For free, malloc.
+#include <string.h>  // For memcpy.
+#include "checks.h"
 #include "utils.h"
 
 namespace v8 {
diff --git a/src/allocation.h b/src/allocation.h
index 75aba35..31067dd 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,7 +28,6 @@
 #ifndef V8_ALLOCATION_H_
 #define V8_ALLOCATION_H_
 
-#include "checks.h"
 #include "globals.h"
 
 namespace v8 {
@@ -81,7 +80,7 @@
 
 
 template <typename T>
-static T* NewArray(int size) {
+T* NewArray(size_t size) {
   T* result = new T[size];
   if (result == NULL) Malloced::FatalProcessOutOfMemory();
   return result;
@@ -89,7 +88,7 @@
 
 
 template <typename T>
-static void DeleteArray(T* array) {
+void DeleteArray(T* array) {
   delete[] array;
 }
 
diff --git a/src/api.cc b/src/api.cc
index 39c0d02..4e731fb 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,34 +25,39 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
-
 #include "api.h"
 
-#include "arguments.h"
+#include <math.h>  // For isnan.
+#include <string.h>  // For memcpy, strlen.
+#include "../include/v8-debug.h"
+#include "../include/v8-profiler.h"
+#include "../include/v8-testing.h"
 #include "bootstrapper.h"
 #include "compiler.h"
+#include "conversions-inl.h"
+#include "counters.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "execution.h"
-#include "flags.h"
 #include "global-handles.h"
 #include "heap-profiler.h"
 #include "messages.h"
+#ifdef COMPRESS_STARTUP_DATA_BZ2
 #include "natives.h"
+#endif
 #include "parser.h"
 #include "platform.h"
 #include "profile-generator-inl.h"
+#include "property-details.h"
+#include "property.h"
 #include "runtime-profiler.h"
 #include "scanner-character-streams.h"
-#include "serialize.h"
 #include "snapshot.h"
+#include "unicode-inl.h"
 #include "v8threads.h"
 #include "version.h"
 #include "vm-state-inl.h"
 
-#include "../include/v8-profiler.h"
-#include "../include/v8-testing.h"
 
 #define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
 
@@ -78,7 +83,7 @@
   bool has_pending_exception = false
 
 
-#define EXCEPTION_BAILOUT_CHECK(isolate, value)                                \
+#define EXCEPTION_BAILOUT_CHECK_GENERIC(isolate, value, do_callback)           \
   do {                                                                         \
     i::HandleScopeImplementer* handle_scope_implementer =                      \
         (isolate)->handle_scope_implementer();                                 \
@@ -91,11 +96,22 @@
       }                                                                        \
       bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero();   \
       (isolate)->OptionalRescheduleException(call_depth_is_zero);              \
+      do_callback                                                              \
       return value;                                                            \
     }                                                                          \
+    do_callback                                                                \
   } while (false)
 
 
+#define EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, value)                    \
+  EXCEPTION_BAILOUT_CHECK_GENERIC(                                             \
+      isolate, value, i::V8::FireCallCompletedCallback(isolate);)
+
+
+#define EXCEPTION_BAILOUT_CHECK(isolate, value)                                \
+  EXCEPTION_BAILOUT_CHECK_GENERIC(isolate, value, ;)
+
+
 #define API_ENTRY_CHECK(isolate, msg)                                          \
   do {                                                                         \
     if (v8::Locker::IsActive()) {                                              \
@@ -185,7 +201,10 @@
   int end_marker;
   heap_stats.end_marker = &end_marker;
   i::Isolate* isolate = i::Isolate::Current();
-  isolate->heap()->RecordStats(&heap_stats, take_snapshot);
+  // BUG(1718):
+  // Don't use take_snapshot since we don't support HeapIterator here
+  // without doing a special GC.
+  isolate->heap()->RecordStats(&heap_stats, false);
   i::V8::SetFatalError();
   FatalErrorCallback callback = GetFatalErrorHandler();
   {
@@ -341,6 +360,7 @@
     compressed_data[i].data = decompressed;
   }
   V8::SetDecompressedStartupData(compressed_data);
+  i::DeleteArray(compressed_data);
   return 0;
 }
 
@@ -483,7 +503,7 @@
 
 
 RegisteredExtension::RegisteredExtension(Extension* extension)
-    : extension_(extension), state_(UNVISITED) { }
+    : extension_(extension) { }
 
 
 void RegisteredExtension::Register(RegisteredExtension* that) {
@@ -501,9 +521,13 @@
 Extension::Extension(const char* name,
                      const char* source,
                      int dep_count,
-                     const char** deps)
+                     const char** deps,
+                     int source_length)
     : name_(name),
-      source_(source),
+      source_length_(source_length >= 0 ?
+                     source_length :
+                     (source ? static_cast<int>(strlen(source)) : 0)),
+      source_(source, source_length_),
       dep_count_(dep_count),
       deps_(deps),
       auto_enable_(false) { }
@@ -724,6 +748,7 @@
   i::Context* last_context =
       isolate->handle_scope_implementer()->RestoreContext();
   isolate->set_context(last_context);
+  isolate->set_context_exit_happened(true);
 }
 
 
@@ -1405,21 +1430,21 @@
 
 
 ScriptData* ScriptData::PreCompile(const char* input, int length) {
-  i::Utf8ToUC16CharacterStream stream(
+  i::Utf8ToUtf16CharacterStream stream(
       reinterpret_cast<const unsigned char*>(input), length);
-  return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
+  return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
 }
 
 
 ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
   i::Handle<i::String> str = Utils::OpenHandle(*source);
   if (str->IsExternalTwoByteString()) {
-    i::ExternalTwoByteStringUC16CharacterStream stream(
+    i::ExternalTwoByteStringUtf16CharacterStream stream(
       i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
-    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
+    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
   } else {
-    i::GenericStringUC16CharacterStream stream(str, 0, str->length());
-    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
+    i::GenericStringUtf16CharacterStream stream(str, 0, str->length());
+    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
   }
 }
 
@@ -1562,7 +1587,7 @@
         isolate->context()->global_proxy(), isolate);
     i::Handle<i::Object> result =
         i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
-    EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+    EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
     raw_result = *result;
   }
   i::Handle<i::Object> result(raw_result, isolate);
@@ -1788,7 +1813,7 @@
 static i::Handle<i::Object> CallV8HeapFunction(const char* name,
                                                i::Handle<i::Object> recv,
                                                int argc,
-                                               i::Object** argv[],
+                                               i::Handle<i::Object> argv[],
                                                bool* has_pending_exception) {
   i::Isolate* isolate = i::Isolate::Current();
   i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
@@ -1805,10 +1830,10 @@
 static i::Handle<i::Object> CallV8HeapFunction(const char* name,
                                                i::Handle<i::Object> data,
                                                bool* has_pending_exception) {
-  i::Object** argv[1] = { data.location() };
+  i::Handle<i::Object> argv[] = { data };
   return CallV8HeapFunction(name,
                             i::Isolate::Current()->js_builtins_object(),
-                            1,
+                            ARRAY_SIZE(argv),
                             argv,
                             has_pending_exception);
 }
@@ -2148,6 +2173,11 @@
   if (obj->IsSmi()) return true;
   if (obj->IsNumber()) {
     double value = obj->Number();
+    static const i::DoubleRepresentation minus_zero(-0.0);
+    i::DoubleRepresentation rep(value);
+    if (rep.bits == minus_zero.bits) {
+      return false;
+    }
     return i::FastI2D(i::FastD2I(value)) == value;
   }
   return false;
@@ -2160,6 +2190,11 @@
   if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
   if (obj->IsNumber()) {
     double value = obj->Number();
+    static const i::DoubleRepresentation minus_zero(-0.0);
+    i::DoubleRepresentation rep(value);
+    if (rep.bits == minus_zero.bits) {
+      return false;
+    }
     return i::FastUI2D(i::FastD2UI(value)) == value;
   }
   return false;
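// Illustrative sketch, not part of this patch: with the minus-zero guard
// above, a JavaScript -0 still reports as a number but no longer
// classifies as an integral value.
//   v8::Handle<v8::Value> v = /* result of evaluating "-0" */;
//   v->IsNumber();  // true
//   v->IsInt32();   // false: -0.0 has no int32 representation
//   v->IsUint32();  // false, for the same reason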
@@ -2628,10 +2663,11 @@
   if (obj->IsJSObject() && other->IsJSObject()) {
     return *obj == *other;
   }
-  i::Object** args[1] = { other.location() };
+  i::Handle<i::Object> args[] = { other };
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result =
-      CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
+      CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args,
+                         &has_pending_exception);
   EXCEPTION_BAILOUT_CHECK(isolate, false);
   return *result == i::Smi::FromInt(i::EQUAL);
 }
@@ -2721,10 +2757,11 @@
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
   EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> obj = i::SetElement(
+  i::Handle<i::Object> obj = i::JSObject::SetElement(
       self,
       index,
       value_obj,
+      NONE,
       i::kNonStrictMode);
   has_pending_exception = obj.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, false);
@@ -2794,7 +2831,7 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> result = i::GetElement(self, index);
+  i::Handle<i::Object> result = i::Object::GetElement(self, index);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
   return Utils::ToLocal(result);
@@ -2827,7 +2864,7 @@
              return Local<v8::Value>());
   ENTER_V8(isolate);
   i::Handle<i::Object> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> result = i::GetPrototype(self);
+  i::Handle<i::Object> result(self->GetPrototype());
   return Utils::ToLocal(result);
 }
 
@@ -2874,8 +2911,10 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  bool threw = false;
   i::Handle<i::FixedArray> value =
-      i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS);
+      i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS, &threw);
+  if (threw) return Local<v8::Array>();
   // Because we use caching to speed up enumeration it is important
   // to never change the result of the basic enumeration function so
   // we clone the result.
@@ -2893,8 +2932,10 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  bool threw = false;
   i::Handle<i::FixedArray> value =
-      i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY);
+      i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY, &threw);
+  if (threw) return Local<v8::Array>();
   // Because we use caching to speed up enumeration it is important
   // to never change the result of the basic enumeration function so
   // we clone the result.
@@ -2977,7 +3018,7 @@
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  return i::DeleteProperty(self, key_obj)->IsTrue();
+  return i::JSObject::DeleteProperty(self, key_obj)->IsTrue();
 }
 
 
@@ -2998,7 +3039,7 @@
   ENTER_V8(isolate);
   HandleScope scope;
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  return i::DeleteElement(self, index)->IsTrue();
+  return i::JSObject::DeleteElement(self, index)->IsTrue();
 }
 
 
@@ -3023,8 +3064,11 @@
   i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
                                                      getter, setter, data,
                                                      settings, attributes);
+  bool fast = Utils::OpenHandle(this)->HasFastProperties();
   i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
-  return !result.is_null() && !result->IsUndefined();
+  if (result.is_null() || result->IsUndefined()) return false;
+  if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(this), 0);
+  return true;
 }
 
 
@@ -3093,7 +3137,10 @@
   // If the property being looked up is a callback, it can throw
   // an exception.
   EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> result = i::GetProperty(receiver, name, lookup);
+  PropertyAttributes ignored;
+  i::Handle<i::Object> result =
+      i::Object::GetProperty(receiver, receiver, lookup, name,
+                             &ignored);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
 
@@ -3110,7 +3157,7 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  i::LookupResult lookup;
+  i::LookupResult lookup(isolate);
   self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
   return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
 }
@@ -3123,7 +3170,7 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  i::LookupResult lookup;
+  i::LookupResult lookup(isolate);
   self_obj->LookupRealNamedProperty(*key_obj, &lookup);
   return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
 }
@@ -3200,7 +3247,7 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  return i::GetIdentityHash(self);
+  return i::JSObject::GetIdentityHash(self);
 }
 
 
@@ -3211,21 +3258,11 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
-      self,
-      i::JSObject::ALLOW_CREATION));
-  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
-  EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> obj = i::SetProperty(
-      hidden_props,
-      key_obj,
-      value_obj,
-      static_cast<PropertyAttributes>(None),
-      i::kNonStrictMode);
-  has_pending_exception = obj.is_null();
-  EXCEPTION_BAILOUT_CHECK(isolate, false);
-  return true;
+  i::Handle<i::Object> result =
+      i::JSObject::SetHiddenProperty(self, key_obj, value_obj);
+  return *result == *self;
 }
 
 
@@ -3235,20 +3272,9 @@
              return Local<v8::Value>());
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
-      self,
-      i::JSObject::OMIT_CREATION));
-  if (hidden_props->IsUndefined()) {
-    return v8::Local<v8::Value>();
-  }
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj);
-  has_pending_exception = result.is_null();
-  EXCEPTION_BAILOUT_CHECK(isolate, v8::Local<v8::Value>());
-  if (result->IsUndefined()) {
-    return v8::Local<v8::Value>();
-  }
+  i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj));
+  if (result->IsUndefined()) return v8::Local<v8::Value>();
   return Utils::ToLocal(result);
 }
 
@@ -3259,15 +3285,9 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
-      self,
-      i::JSObject::OMIT_CREATION));
-  if (hidden_props->IsUndefined()) {
-    return true;
-  }
-  i::Handle<i::JSObject> js_obj(i::JSObject::cast(*hidden_props));
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  return i::DeleteProperty(js_obj, key_obj)->IsTrue();
+  self->DeleteHiddenProperty(*key_obj);
+  return true;
 }
 
 
@@ -3317,22 +3337,12 @@
   i::Handle<i::ExternalArray> array =
       isolate->factory()->NewExternalArray(length, array_type, data);
 
-  // If the object already has external elements, create a new, unique
-  // map if the element type is now changing, because assumptions about
-  // generated code based on the receiver's map will be invalid.
-  i::Handle<i::HeapObject> elements(object->elements());
-  bool cant_reuse_map =
-      elements->map()->IsUndefined() ||
-      !elements->map()->has_external_array_elements() ||
-      elements->map() != isolate->heap()->MapForExternalArrayType(array_type);
-  if (cant_reuse_map) {
-    i::Handle<i::Map> external_array_map =
-        isolate->factory()->GetElementsTransitionMap(
-            i::Handle<i::Map>(object->map()),
-            GetElementsKindFromExternalArrayType(array_type),
-            object->HasFastProperties());
-    object->set_map(*external_array_map);
-  }
+  i::Handle<i::Map> external_array_map =
+      isolate->factory()->GetElementsTransitionMap(
+          object,
+          GetElementsKindFromExternalArrayType(array_type));
+
+  object->set_map(*external_array_map);
   object->set_elements(*array);
 }
 
@@ -3491,7 +3501,8 @@
 }
 
 
-Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
+                                        int argc,
                                         v8::Handle<v8::Value> argv[]) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
@@ -3502,7 +3513,7 @@
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
-  i::Object*** args = reinterpret_cast<i::Object***>(argv);
+  i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
   i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
   if (obj->IsJSFunction()) {
     fun = i::Handle<i::JSFunction>::cast(obj);
@@ -3517,7 +3528,7 @@
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> returned =
       i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
-  EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+  EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
   return Utils::ToLocal(scope.CloseAndEscape(returned));
 }
 
@@ -3532,13 +3543,13 @@
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
-  i::Object*** args = reinterpret_cast<i::Object***>(argv);
+  i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
   if (obj->IsJSFunction()) {
     i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
     EXCEPTION_PREAMBLE(isolate);
     i::Handle<i::Object> returned =
         i::Execution::New(fun, argc, args, &has_pending_exception);
-    EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+    EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
     return Utils::ToLocal(scope.CloseAndEscape(
         i::Handle<i::JSObject>::cast(returned)));
   }
@@ -3551,7 +3562,7 @@
     EXCEPTION_PREAMBLE(isolate);
     i::Handle<i::Object> returned =
         i::Execution::Call(fun, obj, argc, args, &has_pending_exception);
-    EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+    EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
     ASSERT(!delegate->IsUndefined());
     return Utils::ToLocal(scope.CloseAndEscape(returned));
   }
@@ -3574,11 +3585,11 @@
   HandleScope scope;
   i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
-  i::Object*** args = reinterpret_cast<i::Object***>(argv);
+  i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> returned =
       i::Execution::New(function, argc, args, &has_pending_exception);
-  EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+  EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
   return scope.Close(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
 }
 
@@ -3595,11 +3606,11 @@
     i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
     i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
     STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
-    i::Object*** args = reinterpret_cast<i::Object***>(argv);
+    i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
     EXCEPTION_PREAMBLE(isolate);
     i::Handle<i::Object> returned =
         i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
-    EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
+    EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
     raw_result = *returned;
   }
   i::Handle<i::Object> result(raw_result);
@@ -3622,6 +3633,12 @@
 }
 
 
+Handle<Value> Function::GetInferredName() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name()));
+}
+
+
 ScriptOrigin Function::GetScriptOrigin() const {
   i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
   if (func->shared()->script()->IsScript()) {
@@ -3649,6 +3666,23 @@
 }
 
 
+int Function::GetScriptColumnNumber() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  if (func->shared()->script()->IsScript()) {
+    i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+    return i::GetScriptColumnNumber(script, func->shared()->start_position());
+  }
+  return kLineOffsetNotFound;
+}
+
+Handle<Value> Function::GetScriptId() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  if (!func->shared()->script()->IsScript())
+    return v8::Undefined();
+  i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+  return Utils::ToLocal(i::Handle<i::Object>(script->id()));
+}
+
 int String::Length() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
@@ -3659,7 +3693,104 @@
 int String::Utf8Length() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   if (IsDeadCheck(str->GetIsolate(), "v8::String::Utf8Length()")) return 0;
-  return str->Utf8Length();
+  return i::Utf8Length(str);
+}
+
+
+// Will fail with a negative answer if the recursion depth is too high.
+static int RecursivelySerializeToUtf8(i::String* string,
+                                      char* buffer,
+                                      int start,
+                                      int end,
+                                      int recursion_budget,
+                                      int32_t previous_character,
+                                      int32_t* last_character) {
+  int utf8_bytes = 0;
+  while (true) {
+    if (string->IsAsciiRepresentation()) {
+      i::String::WriteToFlat(string, buffer, start, end);
+      *last_character = unibrow::Utf16::kNoPreviousCharacter;
+      return utf8_bytes + end - start;
+    }
+    switch (i::StringShape(string).representation_tag()) {
+      case i::kExternalStringTag: {
+        const uint16_t* data = i::ExternalTwoByteString::cast(string)->
+          ExternalTwoByteStringGetData(0);
+        char* current = buffer;
+        for (int i = start; i < end; i++) {
+          uint16_t character = data[i];
+          current +=
+              unibrow::Utf8::Encode(current, character, previous_character);
+          previous_character = character;
+        }
+        *last_character = previous_character;
+        return static_cast<int>(utf8_bytes + current - buffer);
+      }
+      case i::kSeqStringTag: {
+        const uint16_t* data =
+            i::SeqTwoByteString::cast(string)->SeqTwoByteStringGetData(0);
+        char* current = buffer;
+        for (int i = start; i < end; i++) {
+          uint16_t character = data[i];
+          current +=
+              unibrow::Utf8::Encode(current, character, previous_character);
+          previous_character = character;
+        }
+        *last_character = previous_character;
+        return static_cast<int>(utf8_bytes + current - buffer);
+      }
+      case i::kSlicedStringTag: {
+        i::SlicedString* slice = i::SlicedString::cast(string);
+        unsigned offset = slice->offset();
+        string = slice->parent();
+        start += offset;
+        end += offset;
+        continue;
+      }
+      case i::kConsStringTag: {
+        i::ConsString* cons_string = i::ConsString::cast(string);
+        i::String* first = cons_string->first();
+        int boundary = first->length();
+        if (start >= boundary) {
+          // Only need RHS.
+          string = cons_string->second();
+          start -= boundary;
+          end -= boundary;
+          continue;
+        } else if (end <= boundary) {
+          // Only need LHS.
+          string = first;
+        } else {
+          if (recursion_budget == 0) return -1;
+          int extra_utf8_bytes =
+              RecursivelySerializeToUtf8(first,
+                                         buffer,
+                                         start,
+                                         boundary,
+                                         recursion_budget - 1,
+                                         previous_character,
+                                         &previous_character);
+          if (extra_utf8_bytes < 0) return extra_utf8_bytes;
+          buffer += extra_utf8_bytes;
+          utf8_bytes += extra_utf8_bytes;
+          string = cons_string->second();
+          start = 0;
+          end -= boundary;
+        }
+      }
+    }
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
+bool String::MayContainNonAscii() const {
+  i::Handle<i::String> str = Utils::OpenHandle(this);
+  if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) {
+    return false;
+  }
+  return !str->HasOnlyAsciiChars();
 }
 
 
@@ -3671,13 +3802,67 @@
   if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
   LOG_API(isolate, "String::WriteUtf8");
   ENTER_V8(isolate);
-  i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
   i::Handle<i::String> str = Utils::OpenHandle(this);
+  int string_length = str->length();
+  if (str->IsAsciiRepresentation()) {
+    int len;
+    if (capacity == -1) {
+      capacity = str->length() + 1;
+      len = string_length;
+    } else {
+      len = i::Min(capacity, str->length());
+    }
+    i::String::WriteToFlat(*str, buffer, 0, len);
+    if (nchars_ref != NULL) *nchars_ref = len;
+    if (!(options & NO_NULL_TERMINATION) && capacity > len) {
+      buffer[len] = '\0';
+      return len + 1;
+    }
+    return len;
+  }
+
+  if (capacity == -1 || capacity / 3 >= string_length) {
+    int32_t previous = unibrow::Utf16::kNoPreviousCharacter;
+    const int kMaxRecursion = 100;
+    int utf8_bytes =
+        RecursivelySerializeToUtf8(*str,
+                                   buffer,
+                                   0,
+                                   string_length,
+                                   kMaxRecursion,
+                                   previous,
+                                   &previous);
+    if (utf8_bytes >= 0) {
+      // Success serializing with recursion.
+      if ((options & NO_NULL_TERMINATION) == 0 &&
+          (capacity > utf8_bytes || capacity == -1)) {
+        buffer[utf8_bytes++] = '\0';
+      }
+      if (nchars_ref != NULL) *nchars_ref = string_length;
+      return utf8_bytes;
+    }
+    FlattenString(str);
+    // Recurse once.  This time around the string is flat and the serializing
+    // with recursion will certainly succeed.
+    return WriteUtf8(buffer, capacity, nchars_ref, options);
+  } else if (capacity >= string_length) {
+    // First check that the buffer is large enough.  If it is, then recurse
+    // once without a capacity limit, which will get into the other branch of
+    // this 'if'.
+    int utf8_bytes = i::Utf8Length(str);
+    if ((options & NO_NULL_TERMINATION) == 0) utf8_bytes++;
+    if (utf8_bytes <= capacity) {
+      return WriteUtf8(buffer, -1, nchars_ref, options);
+    }
+  }
+
+  // Slow case.
+  i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
   isolate->string_tracker()->RecordWrite(str);
   if (options & HINT_MANY_WRITES_EXPECTED) {
     // Flatten the string for efficiency.  This applies whether we are
     // using StringInputBuffer or Get(i) to access the characters.
-    str->TryFlatten();
+    FlattenString(str);
   }
   write_input_buffer.Reset(0, *str);
   int len = str->length();
@@ -3688,11 +3873,13 @@
   int i;
   int pos = 0;
   int nchars = 0;
+  int previous = unibrow::Utf16::kNoPreviousCharacter;
   for (i = 0; i < len && (capacity == -1 || pos < fast_end); i++) {
     i::uc32 c = write_input_buffer.GetNext();
-    int written = unibrow::Utf8::Encode(buffer + pos, c);
+    int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
     pos += written;
     nchars++;
+    previous = c;
   }
   if (i < len) {
     // For the last characters we need to check the length for each one
@@ -3701,16 +3888,33 @@
     char intermediate[unibrow::Utf8::kMaxEncodedSize];
     for (; i < len && pos < capacity; i++) {
       i::uc32 c = write_input_buffer.GetNext();
-      int written = unibrow::Utf8::Encode(intermediate, c);
-      if (pos + written <= capacity) {
-        for (int j = 0; j < written; j++)
-          buffer[pos + j] = intermediate[j];
+      if (unibrow::Utf16::IsTrailSurrogate(c) &&
+          unibrow::Utf16::IsLeadSurrogate(previous)) {
+        // We can't use the intermediate buffer here because the encoding
+        // of surrogate pairs is done under the assumption that you can step
+        // back and fix the UTF8 stream.  Luckily we only need space for one
+        // more byte, so there is always space.
+        ASSERT(pos < capacity);
+        int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
+        ASSERT(written == 1);
         pos += written;
         nchars++;
       } else {
-        // We've reached the end of the buffer
-        break;
+        int written =
+            unibrow::Utf8::Encode(intermediate,
+                                  c,
+                                  unibrow::Utf16::kNoPreviousCharacter);
+        if (pos + written <= capacity) {
+          for (int j = 0; j < written; j++)
+            buffer[pos + j] = intermediate[j];
+          pos += written;
+          nchars++;
+        } else {
+          // We've reached the end of the buffer
+          break;
+        }
       }
+      previous = c;
     }
   }
   if (nchars_ref != NULL) *nchars_ref = nchars;
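// Illustrative sketch, not part of this patch: sizing the buffer from
// Utf8Length() keeps WriteUtf8 on the fast paths above and out of the
// slow character-by-character loop. The caller frees with delete[].
static char* ToUtf8Copy(v8::Handle<v8::String> str) {
  int utf8_bytes = str->Utf8Length();   // exact byte count, sans terminator
  char* out = new char[utf8_bytes + 1];
  str->WriteUtf8(out, utf8_bytes + 1);  // NUL-terminates, capacity permitting
  return out;
}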
@@ -3806,10 +4010,11 @@
 void v8::String::VerifyExternalStringResource(
     v8::String::ExternalStringResource* value) const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  v8::String::ExternalStringResource* expected;
+  const v8::String::ExternalStringResource* expected;
   if (i::StringShape(*str).IsExternalTwoByte()) {
-    void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
-    expected = reinterpret_cast<ExternalStringResource*>(resource);
+    const void* resource =
+        i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+    expected = reinterpret_cast<const ExternalStringResource*>(resource);
   } else {
     expected = NULL;
   }
@@ -3817,7 +4022,7 @@
 }
 
 
-v8::String::ExternalAsciiStringResource*
+const v8::String::ExternalAsciiStringResource*
       v8::String::GetExternalAsciiStringResource() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   if (IsDeadCheck(str->GetIsolate(),
@@ -3825,8 +4030,9 @@
     return NULL;
   }
   if (i::StringShape(*str).IsExternalAscii()) {
-    void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
-    return reinterpret_cast<ExternalAsciiStringResource*>(resource);
+    const void* resource =
+        i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+    return reinterpret_cast<const ExternalAsciiStringResource*>(resource);
   } else {
     return NULL;
   }
@@ -3977,6 +4183,12 @@
 }
 
 
+void v8::V8::SetReturnAddressLocationResolver(
+      ReturnAddressLocationResolver return_address_resolver) {
+  i::V8::SetReturnAddressLocationResolver(return_address_resolver);
+}
+
+
 bool v8::V8::Dispose() {
   i::Isolate* isolate = i::Isolate::Current();
   if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
@@ -3996,6 +4208,15 @@
 
 
 void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
+  if (!i::Isolate::Current()->IsInitialized()) {
+    // Isolate is unitialized thus heap is not configured yet.
+    heap_statistics->set_total_heap_size(0);
+    heap_statistics->set_total_heap_size_executable(0);
+    heap_statistics->set_used_heap_size(0);
+    heap_statistics->set_heap_size_limit(0);
+    return;
+  }
+
   i::Heap* heap = i::Isolate::Current()->heap();
   heap_statistics->set_total_heap_size(heap->CommittedMemory());
   heap_statistics->set_total_heap_size_executable(
@@ -4005,18 +4226,26 @@
 }
 
 
-bool v8::V8::IdleNotification() {
+void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::V8::VisitExternalResources");
+  isolate->heap()->VisitExternalResources(visitor);
+}
+
+
+bool v8::V8::IdleNotification(int hint) {
   // Returning true tells the caller that it need not
   // continue to call IdleNotification.
-  if (!i::Isolate::Current()->IsInitialized()) return true;
-  return i::V8::IdleNotification();
+  i::Isolate* isolate = i::Isolate::Current();
+  if (isolate == NULL || !isolate->IsInitialized()) return true;
+  return i::V8::IdleNotification(hint);
 }
 
 
 void v8::V8::LowMemoryNotification() {
   i::Isolate* isolate = i::Isolate::Current();
-  if (!isolate->IsInitialized()) return;
-  isolate->heap()->CollectAllGarbage(true);
+  if (isolate == NULL || !isolate->IsInitialized()) return;
+  isolate->heap()->CollectAllAvailableGarbage("low memory notification");
 }
 
 
@@ -4048,6 +4277,7 @@
     v8::ExtensionConfiguration* extensions,
     v8::Handle<ObjectTemplate> global_template,
     v8::Handle<Value> global_object) {
+  i::Isolate::EnsureDefaultIsolate();
   i::Isolate* isolate = i::Isolate::Current();
   EnsureInitializedForIsolate(isolate, "v8::Context::New()");
   LOG_API(isolate, "Context::New");
@@ -4110,8 +4340,9 @@
   }
   // Leave V8.
 
-  if (env.is_null())
+  if (env.is_null()) {
     return Persistent<Context>();
+  }
   return Persistent<Context>(Utils::ToLocal(env));
 }
 
@@ -4251,6 +4482,20 @@
 }
 
 
+bool Context::IsCodeGenerationFromStringsAllowed() {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate,
+                  "v8::Context::IsCodeGenerationFromStringsAllowed()")) {
+    return false;
+  }
+  ENTER_V8(isolate);
+  i::Object** ctx = reinterpret_cast<i::Object**>(this);
+  i::Handle<i::Context> context =
+      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+  return !context->allow_code_gen_from_strings()->IsFalse();
+}
+
+
 void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) {
   i::GlobalHandles::SetWrapperClassId(global_handle, class_id);
 }
@@ -4299,7 +4544,7 @@
 }
 
 static void* ExternalValueImpl(i::Handle<i::Object> obj) {
-  return reinterpret_cast<void*>(i::Foreign::cast(*obj)->address());
+  return reinterpret_cast<void*>(i::Foreign::cast(*obj)->foreign_address());
 }
 
 
@@ -4325,7 +4570,7 @@
   if (value->IsSmi()) {
     return i::Internals::GetExternalPointerFromSmi(value);
   } else if (value->IsForeign()) {
-    return reinterpret_cast<void*>(i::Foreign::cast(value)->address());
+    return reinterpret_cast<void*>(i::Foreign::cast(value)->foreign_address());
   } else {
     return NULL;
   }
@@ -4535,15 +4780,13 @@
 
 
 bool v8::String::CanMakeExternal() {
+  if (!internal::FLAG_clever_optimizations) return false;
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = obj->GetIsolate();
   if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
-  if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
-    return false;
-  }
+  if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false;
   int size = obj->Size();  // Byte size of the original string.
-  if (size < i::ExternalString::kSize)
-    return false;
+  if (size < i::ExternalString::kShortSize) return false;
   i::StringShape shape(*obj);
   return !shape.IsExternal();
 }
@@ -4649,8 +4892,8 @@
   if (IsDeadCheck(isolate, "v8::Date::NumberValue()")) return 0;
   LOG_API(isolate, "Date::NumberValue");
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
-  i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
-  return jsvalue->value()->Number();
+  i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
+  return jsdate->value()->Number();
 }
 
 
@@ -4661,8 +4904,10 @@
   LOG_API(isolate, "Date::DateTimeConfigurationChangeNotification");
   ENTER_V8(isolate);
 
+  isolate->date_cache()->ResetDateCache();
+
   i::HandleScope scope(isolate);
-  // Get the function ResetDateCache (defined in date-delay.js).
+  // Get the function ResetDateCache (defined in date.js).
   i::Handle<i::String> func_name_str =
       isolate->factory()->LookupAsciiSymbol("ResetDateCache");
   i::MaybeObject* result =
@@ -4877,7 +5122,7 @@
 
     NeanderObject listener(i::JSObject::cast(listeners.get(i)));
     i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
-    if (callback_obj->address() == FUNCTION_ADDR(that)) {
+    if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
       listeners.set(i, isolate->heap()->undefined_value());
     }
   }
@@ -5023,6 +5268,23 @@
 }
 
 
+void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
+  if (callback == NULL) return;
+  i::Isolate::EnsureDefaultIsolate();
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::V8::AddLeaveScriptCallback()")) return;
+  i::V8::AddCallCompletedCallback(callback);
+}
+
+
+void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
+  i::Isolate::EnsureDefaultIsolate();
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::V8::RemoveLeaveScriptCallback()")) return;
+  i::V8::RemoveCallCompletedCallback(callback);
+}
+
+
 void V8::PauseProfiler() {
   i::Isolate* isolate = i::Isolate::Current();
   isolate->logger()->PauseProfiler();
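
AddCallCompletedCallback()/RemoveCallCompletedCallback() are a new public hook that fires each time control returns from the outermost V8 call. A minimal sketch, assuming the CallCompletedCallback typedef takes no arguments in this API revision:

    static void OnCallCompleted() {
      // A typical embedder drains a deferred-work queue here.
    }

    v8::V8::AddCallCompletedCallback(OnCallCompleted);
    // ... execute scripts ...
    v8::V8::RemoveCallCompletedCallback(OnCallCompleted);
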
@@ -5137,7 +5399,8 @@
   TryCatch try_catch;
   Handle<String> str = obj->ToString();
   if (str.IsEmpty()) return;
-  length_ = str->Utf8Length();
+  i::Handle<i::String> i_str = Utils::OpenHandle(*str);
+  length_ = i::Utf8Length(i_str);
   str_ = i::NewArray<char>(length_ + 1);
   str->WriteUtf8(str_);
 }
@@ -5487,8 +5750,14 @@
                                                        wait_for_connection);
 }
 
+
+void Debug::DisableAgent() {
+  return i::Isolate::Current()->debugger()->StopAgent();
+}
+
+
 void Debug::ProcessDebugMessages() {
-  i::Execution::ProcessDebugMesssages(true);
+  i::Execution::ProcessDebugMessages(true);
 }
 
 Local<Context> Debug::GetDebugContext() {
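
Debug::DisableAgent() is the new counterpart to Debug::EnableAgent(), stopping the debugger agent thread. Hedged sketch; the application name and port are illustrative:

    v8::Debug::EnableAgent("my-app", 5858, false /* wait_for_connection */);
    // ... remote debugging session ...
    v8::Debug::DisableAgent();
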
@@ -5767,10 +6036,10 @@
 }
 
 
-int HeapGraphNode::GetRetainedSize(bool exact) const {
+int HeapGraphNode::GetRetainedSize() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
-  return ToInternal(this)->RetainedSize(exact);
+  return ToInternal(this)->retained_size();
 }
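
HeapGraphNode::GetRetainedSize() drops its bool parameter because retained sizes are now precomputed for every snapshot node instead of being calculated on demand; callers simply delete the argument:

    // Before: int size = node->GetRetainedSize(true);
    int size = node->GetRetainedSize();
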
 
 
@@ -5872,7 +6141,7 @@
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
   return reinterpret_cast<const HeapGraphNode*>(
-      ToInternal(this)->GetEntryById(id));
+      ToInternal(this)->GetEntryById(static_cast<i::SnapshotObjectId>(id)));
 }
 
 
@@ -5965,6 +6234,11 @@
 }
 
 
+int HeapProfiler::GetPersistentHandleCount() {
+  i::Isolate* isolate = i::Isolate::Current();
+  return isolate->global_handles()->NumberOfGlobalHandles();
+}
+
 
 v8::Testing::StressType internal::Testing::stress_type_ =
     v8::Testing::kStressTypeOpt;
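
GetPersistentHandleCount() exposes the number of live global handles, which is useful for leak tracking in embedders. A hedged usage sketch, assuming the v8-profiler.h declaration is static like the other HeapProfiler entry points:

    int persistent_handles = v8::HeapProfiler::GetPersistentHandleCount();
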
@@ -5993,9 +6267,7 @@
 
 void Testing::PrepareStressRun(int run) {
   static const char* kLazyOptimizations =
-      "--prepare-always-opt --nolimit-inlining "
-      "--noalways-opt --noopt-eagerly";
-  static const char* kEagerOptimizations = "--opt-eagerly";
+      "--prepare-always-opt --nolimit-inlining --noalways-opt";
   static const char* kForcedOptimizations = "--always-opt";
 
   // If deoptimization is stressed, turn on frequent deoptimization. If no value
@@ -6012,15 +6284,12 @@
   if (run == GetStressRuns() - 1) {
     SetFlagsFromString(kForcedOptimizations);
   } else {
-    SetFlagsFromString(kEagerOptimizations);
     SetFlagsFromString(kLazyOptimizations);
   }
 #else
   if (run == GetStressRuns() - 1) {
     SetFlagsFromString(kForcedOptimizations);
-  } else if (run == GetStressRuns() - 2) {
-    SetFlagsFromString(kEagerOptimizations);
-  } else {
+  } else if (run != GetStressRuns() - 2) {
     SetFlagsFromString(kLazyOptimizations);
   }
 #endif
diff --git a/src/api.h b/src/api.h
index 07723cb..89cf0c8 100644
--- a/src/api.h
+++ b/src/api.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,10 +28,14 @@
 #ifndef V8_API_H_
 #define V8_API_H_
 
-#include "apiutils.h"
-#include "factory.h"
+#include "v8.h"
 
 #include "../include/v8-testing.h"
+#include "apiutils.h"
+#include "contexts.h"
+#include "factory.h"
+#include "isolate.h"
+#include "list-inl.h"
 
 namespace v8 {
 
@@ -112,15 +116,16 @@
 }
 
 
-template <typename T> static inline T ToCData(v8::internal::Object* obj) {
+template <typename T> inline T ToCData(v8::internal::Object* obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
   return reinterpret_cast<T>(
-      reinterpret_cast<intptr_t>(v8::internal::Foreign::cast(obj)->address()));
+      reinterpret_cast<intptr_t>(
+          v8::internal::Foreign::cast(obj)->foreign_address()));
 }
 
 
 template <typename T>
-static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
+inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
   return FACTORY->NewForeign(
       reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
@@ -136,10 +141,6 @@
 };
 
 
-enum ExtensionTraversalState {
-  UNVISITED, VISITED, INSTALLED
-};
-
 
 class RegisteredExtension {
  public:
@@ -148,14 +149,11 @@
   Extension* extension() { return extension_; }
   RegisteredExtension* next() { return next_; }
   RegisteredExtension* next_auto() { return next_auto_; }
-  ExtensionTraversalState state() { return state_; }
-  void set_state(ExtensionTraversalState value) { state_ = value; }
   static RegisteredExtension* first_extension() { return first_extension_; }
  private:
   Extension* extension_;
   RegisteredExtension* next_;
   RegisteredExtension* next_auto_;
-  ExtensionTraversalState state_;
   static RegisteredExtension* first_extension_;
 };
 
@@ -242,7 +240,7 @@
 
 
 template <class T>
-static inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
+inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
   return reinterpret_cast<T*>(obj.location());
 }
 
@@ -483,7 +481,7 @@
 };
 
 
-static const int kHandleBlockSize = v8::internal::KB - 2;  // fit in one page
+const int kHandleBlockSize = v8::internal::KB - 2;  // fit in one page
 
 
 void HandleScopeImplementer::SaveContext(Context* context) {
diff --git a/src/apinatives.js b/src/apinatives.js
index e94da9f..79b41dd 100644
--- a/src/apinatives.js
+++ b/src/apinatives.js
@@ -37,8 +37,8 @@
 }
 
 
-const kApiFunctionCache = {};
-const functionCache = kApiFunctionCache;
+var kApiFunctionCache = {};
+var functionCache = kApiFunctionCache;
 
 
 function Instantiate(data, name) {
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 5ad7b5a..d5db686 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -38,6 +38,7 @@
 #define V8_ARM_ASSEMBLER_ARM_INL_H_
 
 #include "arm/assembler-arm.h"
+
 #include "cpu.h"
 #include "debug.h"
 
@@ -71,19 +72,26 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+                              || rmode_ == EMBEDDED_OBJECT
+                              || rmode_ == EXTERNAL_REFERENCE);
   return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
 }
 
 
 int RelocInfo::target_address_size() {
-  return Assembler::kExternalTargetSize;
+  return kPointerSize;
 }
 
 
-void RelocInfo::set_target_address(Address target) {
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
+  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -105,9 +113,15 @@
 }
 
 
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+  if (mode == UPDATE_WRITE_BARRIER &&
+      host() != NULL &&
+      target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
 }
 
 
@@ -134,10 +148,17 @@
 }
 
 
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+                                WriteBarrierMode mode) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
+  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+    // TODO(1550): We pass NULL as the slot because a cell can never be on
+    // an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), NULL, cell);
+  }
 }
 
 
@@ -154,6 +175,11 @@
   ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
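
The hunks above all follow the write-barrier discipline incremental marking requires: after a pointer embedded in a code object is rewritten, the store must be reported so the marker re-visits the target. A hedged sketch of the common shape (illustrative helper, not a V8 function):

    void SetEmbeddedPointer(Code* host, Object** slot, Object* value) {
      *slot = value;  // raw store first
      if (host != NULL && value->IsHeapObject()) {
        // Record the new edge host -> value for the incremental marker.
        host->GetHeap()->incremental_marking()->RecordWrite(
            host, slot, HeapObject::cast(value));
      }
    }
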
 
 
@@ -202,13 +228,13 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitPointer(target_object_address());
+    visitor->VisitEmbeddedPointer(this);
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    visitor->VisitExternalReference(target_reference_address());
+    visitor->VisitExternalReference(this);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
   } else if (((RelocInfo::IsJSReturn(mode) &&
@@ -228,13 +254,13 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitEmbeddedPointer(heap, this);
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    StaticVisitor::VisitExternalReference(target_reference_address());
+    StaticVisitor::VisitExternalReference(this);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
              ((RelocInfo::IsJSReturn(mode) &&
@@ -338,8 +364,14 @@
 }
 
 
-void Assembler::set_target_at(Address constant_pool_entry,
-                              Address target) {
+void Assembler::deserialization_set_special_target_at(
+    Address constant_pool_entry, Address target) {
+  Memory::Address_at(constant_pool_entry) = target;
+}
+
+
+void Assembler::set_external_target_at(Address constant_pool_entry,
+                                       Address target) {
   Memory::Address_at(constant_pool_entry) = target;
 }
 
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 0ec3692..ec28da4 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -66,11 +66,13 @@
 
 #ifdef __arm__
   // If the compiler is allowed to use VFP then we can use VFP too in our code
-  // generation even when generating snapshots.  This won't work for cross
-  // compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
-#if defined(__VFP_FP__) && !defined(__SOFTFP__)
+  // generation even when generating snapshots. ARMv7 and hardware floating
+  // point support implies VFPv3, see ARM DDI 0406B, page A1-6.
+#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
+    && !defined(__SOFTFP__)
   answer |= 1u << VFP3 | 1u << ARMv7;
-#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
+#endif  // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
+        // && !defined(__SOFTFP__)
 #endif  // def __arm__
 
   return answer;
@@ -78,7 +80,9 @@
 
 
 void CpuFeatures::Probe() {
-  ASSERT(!initialized_);
+  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
+                                CpuFeaturesImpliedByCompiler());
+  ASSERT(supported_ == 0 || supported_ == standard_features);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -86,8 +90,7 @@
   // Get the features implied by the OS and the compiler settings. This is the
   // minimal set of features which is also allowed for generated code in the
   // snapshot.
-  supported_ |= OS::CpuFeaturesImpliedByPlatform();
-  supported_ |= CpuFeaturesImpliedByCompiler();
+  supported_ |= standard_features;
 
   if (Serializer::enabled()) {
     // No probing for features if we might serialize (generate snapshot).
@@ -136,7 +139,6 @@
 }
 
 
-
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   // Patch the code at the current address with the supplied instructions.
   Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -235,25 +237,27 @@
 
 // add(sp, sp, 4) instruction (aka Pop())
 const Instr kPopInstruction =
-    al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+    al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
+        kRegister_sp_Code * B12;
 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
 // register r is not encoded.
 const Instr kPushRegPattern =
-    al | B26 | 4 | NegPreIndex | sp.code() * B16;
+    al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
 // register r is not encoded.
 const Instr kPopRegPattern =
-    al | B26 | L | 4 | PostIndex | sp.code() * B16;
+    al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
 // mov lr, pc
-const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
+const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
 // ldr rd, [pc, #offset]
 const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
+const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16;
 // blxcc rm
 const Instr kBlxRegMask =
     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
 const Instr kBlxRegPattern =
     B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
+const Instr kBlxIp = al | kBlxRegPattern | ip.code();
 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
 const Instr kMovMvnPattern = 0xd * B21;
 const Instr kMovMvnFlip = B22;
@@ -270,13 +274,13 @@
 
 // A mask for the Rd register for push, pop, ldr, str instructions.
 const Instr kLdrRegFpOffsetPattern =
-    al | B26 | L | Offset | fp.code() * B16;
+    al | B26 | L | Offset | kRegister_fp_Code * B16;
 const Instr kStrRegFpOffsetPattern =
-    al | B26 | Offset | fp.code() * B16;
+    al | B26 | Offset | kRegister_fp_Code * B16;
 const Instr kLdrRegFpNegOffsetPattern =
-    al | B26 | L | NegOffset | fp.code() * B16;
+    al | B26 | L | NegOffset | kRegister_fp_Code * B16;
 const Instr kStrRegFpNegOffsetPattern =
-    al | B26 | NegOffset | fp.code() * B16;
+    al | B26 | NegOffset | kRegister_fp_Code * B16;
 const Instr kLdrStrInstrTypeMask = 0xffff0000;
 const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
 const Instr kLdrStrOffsetMask = 0x00000fff;
@@ -316,7 +320,7 @@
     own_buffer_ = false;
   }
 
-  // Setup buffer pointers.
+  // Set up buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -348,7 +352,7 @@
   CheckConstPool(true, false);
   ASSERT(num_pending_reloc_info_ == 0);
 
-  // Setup code descriptor.
+  // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -2445,7 +2449,7 @@
   }
   CHECK_GT(desc.buffer_size, 0);  // no overflow
 
-  // Setup new buffer.
+  // Set up new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
 
   desc.instr_size = pc_offset();
@@ -2505,7 +2509,8 @@
 
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
+  // We do not try to reuse pool constants.
+  RelocInfo rinfo(pc_, rmode, data, NULL);
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
     // Adjust code for new modes.
     ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2537,7 +2542,7 @@
     }
     ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
     if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
       ClearRecordedAstId();
       reloc_info_writer.Write(&reloc_info_with_ast_id);
     } else {
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index eeadaca..e2d5f59 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -124,24 +124,47 @@
   int code_;
 };
 
-const Register no_reg = { -1 };
+// These constants are used in several locations, including static initializers
+const int kRegister_no_reg_Code = -1;
+const int kRegister_r0_Code = 0;
+const int kRegister_r1_Code = 1;
+const int kRegister_r2_Code = 2;
+const int kRegister_r3_Code = 3;
+const int kRegister_r4_Code = 4;
+const int kRegister_r5_Code = 5;
+const int kRegister_r6_Code = 6;
+const int kRegister_r7_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_fp_Code = 11;
+const int kRegister_ip_Code = 12;
+const int kRegister_sp_Code = 13;
+const int kRegister_lr_Code = 14;
+const int kRegister_pc_Code = 15;
 
-const Register r0  = {  0 };
-const Register r1  = {  1 };
-const Register r2  = {  2 };
-const Register r3  = {  3 };
-const Register r4  = {  4 };
-const Register r5  = {  5 };
-const Register r6  = {  6 };
-const Register r7  = {  7 };
-const Register r8  = {  8 };  // Used as context register.
-const Register r9  = {  9 };  // Used as lithium codegen scratch register.
-const Register r10 = { 10 };  // Used as roots register.
-const Register fp  = { 11 };
-const Register ip  = { 12 };
-const Register sp  = { 13 };
-const Register lr  = { 14 };
-const Register pc  = { 15 };
+const Register no_reg = { kRegister_no_reg_Code };
+
+const Register r0  = { kRegister_r0_Code };
+const Register r1  = { kRegister_r1_Code };
+const Register r2  = { kRegister_r2_Code };
+const Register r3  = { kRegister_r3_Code };
+const Register r4  = { kRegister_r4_Code };
+const Register r5  = { kRegister_r5_Code };
+const Register r6  = { kRegister_r6_Code };
+const Register r7  = { kRegister_r7_Code };
+// Used as context register.
+const Register r8  = { kRegister_r8_Code };
+// Used as lithium codegen scratch register.
+const Register r9  = { kRegister_r9_Code };
+// Used as roots register.
+const Register r10 = { kRegister_r10_Code };
+const Register fp  = { kRegister_fp_Code };
+const Register ip  = { kRegister_ip_Code };
+const Register sp  = { kRegister_sp_Code };
+const Register lr  = { kRegister_lr_Code };
+const Register pc  = { kRegister_pc_Code };
+
 
 // Single word VFP register.
 struct SwVfpRegister {
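
The kRegister_*_Code ints exist so that instruction patterns such as kPopInstruction in assembler-arm.cc can be integral constant expressions: sp.code() is a member-function call on a const struct, which forces dynamic initialization with cross-TU ordering hazards. Reduced illustration; the types are local to this example:

    struct Reg { int code_; int code() const { return code_; } };
    const Reg sp = { 13 };
    const int kA = sp.code() * 16;   // dynamic init, order-dependent
    const int kSpCode = 13;
    const int kB = kSpCode * 16;     // true compile-time constant
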
@@ -300,11 +323,13 @@
 const DwVfpRegister d14 = { 14 };
 const DwVfpRegister d15 = { 15 };
 
-// Aliases for double registers.
-const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
-const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
-const DwVfpRegister kDoubleRegZero = d14;
-const DwVfpRegister kScratchDoubleReg = d15;
+// Aliases for double registers.  Defined using #define instead of
+// "static const DwVfpRegister&" because Clang complains otherwise when a
+// compilation unit that includes this header doesn't use the variables.
+#define kFirstCalleeSavedDoubleReg d8
+#define kLastCalleeSavedDoubleReg d15
+#define kDoubleRegZero d14
+#define kScratchDoubleReg d15
 
 
 // Coprocessor register
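
The #define aliases replace header-scope const references purely to placate Clang: a `static const DwVfpRegister&` at namespace scope is a definition in every translation unit that includes the header, and Clang warns when a TU never uses it; a macro expands only where referenced. Reduced illustration:

    static const DwVfpRegister& kAlias = d15;  // may warn as unused per TU
    #define kScratchDoubleReg d15              // no definition until expanded
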
@@ -579,6 +604,7 @@
 extern const Instr kLdrPCPattern;
 extern const Instr kBlxRegMask;
 extern const Instr kBlxRegPattern;
+extern const Instr kBlxIp;
 
 extern const Instr kMovMvnMask;
 extern const Instr kMovMvnPattern;
@@ -660,20 +686,18 @@
 
   // This sets the branch destination (which is in the constant pool on ARM).
   // This is for calls and branches within generated code.
-  inline static void set_target_at(Address constant_pool_entry, Address target);
+  inline static void deserialization_set_special_target_at(
+      Address constant_pool_entry, Address target);
 
   // This sets the branch destination (which is in the constant pool on ARM).
   // This is for calls and branches to runtime code.
   inline static void set_external_target_at(Address constant_pool_entry,
-                                            Address target) {
-    set_target_at(constant_pool_entry, target);
-  }
+                                            Address target);
 
   // Here we are patching the address in the constant pool, not the actual call
   // instruction.  The address in the constant pool is the same size as a
   // pointer.
-  static const int kCallTargetSize = kPointerSize;
-  static const int kExternalTargetSize = kPointerSize;
+  static const int kSpecialTargetSize = kPointerSize;
 
   // Size of an instruction.
   static const int kInstrSize = sizeof(Instr);
@@ -1207,6 +1231,10 @@
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
   // Read/patch instructions
+  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
   static void instr_at_put(byte* pc, Instr instr) {
     *reinterpret_cast<Instr*>(pc) = instr;
@@ -1261,12 +1289,6 @@
 
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
 
-  // Read/patch instructions
-  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
-  void instr_at_put(int pos, Instr instr) {
-    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
-  }
-
   // Decode branch instruction at pos and return branch target pos
   int target_at(int pos);
 
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index ae8cb56..c99e778 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -72,6 +72,22 @@
 }
 
 
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+                                              Register result) {
+  // Load the global context.
+
+  __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ ldr(result,
+         FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+  // Load the InternalArray function from the global context.
+  __ ldr(result,
+         MemOperand(result,
+                    Context::SlotOffset(
+                        Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+}
+
+
 // Load the built-in Array function from the current context.
 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
   // Load the global context.
@@ -86,12 +102,6 @@
 }
 
 
-// This constant has the same value as JSArray::kPreallocatedArrayElements and
-// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
-// below should be reconsidered.
-static const int kLoopUnfoldLimit = 4;
-
-
 // Allocate an empty JSArray. The allocated array is put into the result
 // register. An elements backing store is allocated with size initial_capacity
 // and filled with the hole values.
@@ -101,16 +111,17 @@
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
-                                 int initial_capacity,
                                  Label* gc_required) {
-  ASSERT(initial_capacity > 0);
-  // Load the initial map from the array function.
-  __ ldr(scratch1, FieldMemOperand(array_function,
-                                   JSFunction::kPrototypeOrInitialMapOffset));
+  const int initial_capacity = JSArray::kPreallocatedArrayElements;
+  STATIC_ASSERT(initial_capacity >= 0);
+  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
-  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+  int size = JSArray::kSize;
+  if (initial_capacity > 0) {
+    size += FixedArray::SizeFor(initial_capacity);
+  }
   __ AllocateInNewSpace(size,
                         result,
                         scratch2,
@@ -130,6 +141,11 @@
   __ mov(scratch3,  Operand(0, RelocInfo::NONE));
   __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
 
+  if (initial_capacity == 0) {
+    __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+    return;
+  }
+
   // Calculate the location of the elements array and set elements array member
   // of the JSArray.
   // result: JSObject
@@ -138,7 +154,6 @@
   __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
 
   // Clear the heap tag on the elements array.
-  STATIC_ASSERT(kSmiTag == 0);
   __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
 
   // Initialize the FixedArray and fill it with holes. FixedArray length is
@@ -147,18 +162,29 @@
   // scratch1: elements array (untagged)
   // scratch2: start of next object
   __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
-  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
   __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
   __ mov(scratch3,  Operand(Smi::FromInt(initial_capacity)));
-  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
   __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
 
-  // Fill the FixedArray with the hole value.
-  ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-  ASSERT(initial_capacity <= kLoopUnfoldLimit);
+  // Fill the FixedArray with the hole value. Inline the code if short.
+  STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
   __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
-  for (int i = 0; i < initial_capacity; i++) {
+  static const int kLoopUnfoldLimit = 4;
+  if (initial_capacity <= kLoopUnfoldLimit) {
+    for (int i = 0; i < initial_capacity; i++) {
+      __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+    }
+  } else {
+    Label loop, entry;
+    __ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
+    __ b(&entry);
+    __ bind(&loop);
     __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+    __ bind(&entry);
+    __ cmp(scratch1, scratch2);
+    __ b(lt, &loop);
   }
 }
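
AllocateEmptyJSArray now chooses between a fully unrolled fill and a loop based on the preallocated capacity. A hedged C++ analogue of the strategy the generated code implements (illustrative, not V8 code):

    void FillWithHole(Object** from, int count, Object* hole) {
      static const int kLoopUnfoldLimit = 4;
      if (count <= kLoopUnfoldLimit) {
        for (int i = 0; i < count; i++) from[i] = hole;  // emitted unrolled
      } else {
        for (Object** end = from + count; from < end; from++) *from = hole;
      }
    }
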
 
@@ -173,7 +199,7 @@
 // register elements_array_storage is scratched.
 static void AllocateJSArray(MacroAssembler* masm,
                             Register array_function,  // Array function.
-                            Register array_size,  // As a smi.
+                            Register array_size,  // As a smi, cannot be 0.
                             Register result,
                             Register elements_array_storage,
                             Register elements_array_end,
@@ -181,32 +207,16 @@
                             Register scratch2,
                             bool fill_with_hole,
                             Label* gc_required) {
-  Label not_empty, allocated;
-
   // Load the initial map from the array function.
-  __ ldr(elements_array_storage,
-         FieldMemOperand(array_function,
-                         JSFunction::kPrototypeOrInitialMapOffset));
+  __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
 
-  // Check whether an empty sized array is requested.
-  __ tst(array_size, array_size);
-  __ b(ne, &not_empty);
-
-  // If an empty array is requested allocate a small elements array anyway. This
-  // keeps the code below free of special casing for the empty array.
-  int size = JSArray::kSize +
-             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
-  __ AllocateInNewSpace(size,
-                        result,
-                        elements_array_end,
-                        scratch1,
-                        gc_required,
-                        TAG_OBJECT);
-  __ jmp(&allocated);
+  if (FLAG_debug_code) {  // Assert that array size is not zero.
+    __ tst(array_size, array_size);
+    __ Assert(ne, "array size is unexpectedly 0");
+  }
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested number of elements.
-  __ bind(&not_empty);
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ mov(elements_array_end,
          Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
@@ -226,7 +236,6 @@
   // result: JSObject
   // elements_array_storage: initial map
   // array_size: size of array (smi)
-  __ bind(&allocated);
   __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
   __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
   __ str(elements_array_storage,
@@ -256,14 +265,6 @@
   ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
   __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
   STATIC_ASSERT(kSmiTag == 0);
-  __ tst(array_size, array_size);
-  // Length of the FixedArray is the number of pre-allocated elements if
-  // the actual JSArray has length 0 and the size of the JSArray for non-empty
-  // JSArrays. The length of a FixedArray is stored as a smi.
-  __ mov(array_size,
-         Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
-         LeaveCC,
-         eq);
   ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
   __ str(array_size,
          MemOperand(elements_array_storage, kPointerSize, PostIndex));
@@ -311,23 +312,24 @@
 static void ArrayNativeCode(MacroAssembler* masm,
                             Label* call_generic_code) {
   Counters* counters = masm->isolate()->counters();
-  Label argc_one_or_more, argc_two_or_more;
+  Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
+      has_non_smi_element, finish, cant_transition_map, not_double;
 
   // Check for array construction with zero arguments or one.
   __ cmp(r0, Operand(0, RelocInfo::NONE));
   __ b(ne, &argc_one_or_more);
 
   // Handle construction of an empty array.
+  __ bind(&empty_array);
   AllocateEmptyJSArray(masm,
                        r1,
                        r2,
                        r3,
                        r4,
                        r5,
-                       JSArray::kPreallocatedArrayElements,
                        call_generic_code);
   __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
-  // Setup return value, remove receiver from stack and return.
+  // Set up return value, remove receiver from stack and return.
   __ mov(r0, r2);
   __ add(sp, sp, Operand(kPointerSize));
   __ Jump(lr);
@@ -339,6 +341,13 @@
   __ b(ne, &argc_two_or_more);
   STATIC_ASSERT(kSmiTag == 0);
   __ ldr(r2, MemOperand(sp));  // Get the argument from the stack.
+  __ tst(r2, r2);
+  __ b(ne, &not_empty_array);
+  __ Drop(1);  // Adjust stack.
+  __ mov(r0, Operand(0));  // Treat this as a call with argc of zero.
+  __ b(&empty_array);
+
+  __ bind(&not_empty_array);
   __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
   __ b(ne, call_generic_code);
 
@@ -363,7 +372,7 @@
                   true,
                   call_generic_code);
   __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
-  // Setup return value, remove receiver and argument from stack and return.
+  // Set up return value, remove receiver and argument from stack and return.
   __ mov(r0, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
   __ Jump(lr);
@@ -398,14 +407,21 @@
   // r5: elements_array_end (untagged)
   // sp[0]: last argument
   Label loop, entry;
+  __ mov(r7, sp);
   __ jmp(&entry);
   __ bind(&loop);
-  __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
+  __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
+  if (FLAG_smi_only_arrays) {
+    __ JumpIfNotSmi(r2, &has_non_smi_element);
+  }
   __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
   __ bind(&entry);
   __ cmp(r4, r5);
   __ b(lt, &loop);
 
+  __ bind(&finish);
+  __ mov(sp, r7);
+
   // Remove caller arguments and receiver from the stack, set up return value and
   // return.
   // r0: argc
@@ -414,6 +430,75 @@
   __ add(sp, sp, Operand(kPointerSize));
   __ mov(r0, r3);
   __ Jump(lr);
+
+  __ bind(&has_non_smi_element);
+  // Double values are handled by the runtime.
+  __ CheckMap(
+      r2, r9, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
+  __ bind(&cant_transition_map);
+  __ UndoAllocationInNewSpace(r3, r4);
+  __ b(call_generic_code);
+
+  __ bind(&not_double);
+  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+  // r3: JSArray
+  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         r2,
+                                         r9,
+                                         &cant_transition_map);
+  __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ RecordWriteField(r3,
+                      HeapObject::kMapOffset,
+                      r2,
+                      r9,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  Label loop2;
+  __ sub(r7, r7, Operand(kPointerSize));
+  __ bind(&loop2);
+  __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
+  __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
+  __ cmp(r4, r5);
+  __ b(lt, &loop2);
+  __ b(&finish);
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the InternalArray function.
+  GenerateLoadInternalArrayFunction(masm, r1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ tst(r2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for InternalArray function");
+    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+    __ Assert(eq, "Unexpected initial map for InternalArray function");
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  ArrayNativeCode(masm, &generic_array_code);
+
+  // Jump to the generic array code if the specialized code cannot handle the
+  // construction.
+  __ bind(&generic_array_code);
+
+  Handle<Code> array_code =
+      masm->isolate()->builtins()->InternalArrayCodeGeneric();
+  __ Jump(array_code, RelocInfo::CODE_TARGET);
 }
 
 
@@ -582,10 +667,11 @@
   __ bind(&convert_argument);
   __ push(function);  // Preserve the function.
   __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
-  __ EnterInternalFrame();
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(r0);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  }
   __ pop(function);
   __ mov(argument, r0);
   __ b(&argument_is_string);
@@ -601,15 +687,18 @@
   // create a string wrapper.
   __ bind(&gc_required);
   __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
-  __ EnterInternalFrame();
-  __ push(argument);
-  __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(argument);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
   __ Ret();
 }
 
 
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool count_constructions) {
   // ----------- S t a t e -------------
   //  -- r0     : number of arguments
   //  -- r1     : constructor function
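
The two string-constructor hunks above swap the manual EnterInternalFrame()/LeaveInternalFrame() pairs for FrameScope, an RAII helper whose constructor emits the frame prologue and whose destructor emits the epilogue, so no code path can leave a frame standing. The idiom:

    {
      FrameScope scope(masm, StackFrame::INTERNAL);  // emits frame entry
      // ... emit code that must run inside a frame ...
    }  // destructor emits frame exit
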
@@ -617,354 +706,324 @@
   //  -- sp[...]: constructor arguments
   // -----------------------------------
 
-  Label non_function_call;
-  // Check that the function is not a smi.
-  __ JumpIfSmi(r1, &non_function_call);
-  // Check that the function is a JSFunction.
-  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &non_function_call);
-
-  // Jump to the function-specific construct stub.
-  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
-  __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  // r0: number of arguments
-  // r1: called object
-  __ bind(&non_function_call);
-  // Set expected number of arguments to zero (not changing r0).
-  __ mov(r2, Operand(0, RelocInfo::NONE));
-  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-  __ SetCallKind(r5, CALL_AS_METHOD);
-  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool count_constructions) {
   // Should never count constructions for api objects.
   ASSERT(!is_api_function || !count_constructions);
 
   Isolate* isolate = masm->isolate();
 
   // Enter a construct frame.
-  __ EnterConstructFrame();
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-  // Preserve the two incoming parameters on the stack.
-  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-  __ push(r0);  // Smi-tagged arguments count.
-  __ push(r1);  // Constructor function.
+    // Preserve the two incoming parameters on the stack.
+    __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+    __ push(r0);  // Smi-tagged arguments count.
+    __ push(r1);  // Constructor function.
 
-  // Try to allocate the object without transitioning into C code. If any of the
-  // preconditions is not met, the code bails out to the runtime call.
-  Label rt_call, allocated;
-  if (FLAG_inline_new) {
-    Label undo_allocation;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    Label rt_call, allocated;
+    if (FLAG_inline_new) {
+      Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(isolate);
-    __ mov(r2, Operand(debug_step_in_fp));
-    __ ldr(r2, MemOperand(r2));
-    __ tst(r2, r2);
-    __ b(ne, &rt_call);
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(isolate);
+      __ mov(r2, Operand(debug_step_in_fp));
+      __ ldr(r2, MemOperand(r2));
+      __ tst(r2, r2);
+      __ b(ne, &rt_call);
 #endif
 
-    // Load the initial map and verify that it is in fact a map.
-    // r1: constructor function
-    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ JumpIfSmi(r2, &rt_call);
-    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
-    __ b(ne, &rt_call);
+      // Load the initial map and verify that it is in fact a map.
+      // r1: constructor function
+      __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+      __ JumpIfSmi(r2, &rt_call);
+      __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+      __ b(ne, &rt_call);
 
-    // Check that the constructor is not constructing a JSFunction (see comments
-    // in Runtime_NewObject in runtime.cc). In which case the initial map's
-    // instance type would be JS_FUNCTION_TYPE.
-    // r1: constructor function
-    // r2: initial map
-    __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
-    __ b(eq, &rt_call);
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc). In which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // r1: constructor function
+      // r2: initial map
+      __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
+      __ b(eq, &rt_call);
 
-    if (count_constructions) {
-      Label allocate;
-      // Decrease generous allocation count.
-      __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-      MemOperand constructor_count =
-          FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
-      __ ldrb(r4, constructor_count);
-      __ sub(r4, r4, Operand(1), SetCC);
-      __ strb(r4, constructor_count);
-      __ b(ne, &allocate);
-
-      __ Push(r1, r2);
-
-      __ push(r1);  // constructor
-      // The call will replace the stub, so the countdown is only done once.
-      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-      __ pop(r2);
-      __ pop(r1);
-
-      __ bind(&allocate);
-    }
-
-    // Now allocate the JSObject on the heap.
-    // r1: constructor function
-    // r2: initial map
-    __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-    __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
-
-    // Allocated the JSObject, now initialize the fields. Map is set to initial
-    // map and properties and elements are set to empty fixed array.
-    // r1: constructor function
-    // r2: initial map
-    // r3: object size
-    // r4: JSObject (not tagged)
-    __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
-    __ mov(r5, r4);
-    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-    __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
-    ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
-    __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-    ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-    __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
-    // Fill all the in-object properties with the appropriate filler.
-    // r1: constructor function
-    // r2: initial map
-    // r3: object size (in words)
-    // r4: JSObject (not tagged)
-    // r5: First in-object property of JSObject (not tagged)
-    __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
-    ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-    { Label loop, entry;
       if (count_constructions) {
+        Label allocate;
+        // Decrease generous allocation count.
+        __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+        MemOperand constructor_count =
+            FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
+        __ ldrb(r4, constructor_count);
+        __ sub(r4, r4, Operand(1), SetCC);
+        __ strb(r4, constructor_count);
+        __ b(ne, &allocate);
+
+        __ Push(r1, r2);
+
+        __ push(r1);  // constructor
+        // The call will replace the stub, so the countdown is only done once.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ pop(r2);
+        __ pop(r1);
+
+        __ bind(&allocate);
+      }
+
+      // Now allocate the JSObject on the heap.
+      // r1: constructor function
+      // r2: initial map
+      __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+      __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+
+      // Allocated the JSObject, now initialize the fields. Map is set to
+      // initial map and properties and elements are set to empty fixed array.
+      // r1: constructor function
+      // r2: initial map
+      // r3: object size
+      // r4: JSObject (not tagged)
+      __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+      __ mov(r5, r4);
+      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+      ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+      __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+      ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+      __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+
+      // Fill all the in-object properties with the appropriate filler.
+      // r1: constructor function
+      // r2: initial map
+      // r3: object size (in words)
+      // r4: JSObject (not tagged)
+      // r5: First in-object property of JSObject (not tagged)
+      __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
+      ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+      __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+      if (count_constructions) {
+        __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+        __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+                kBitsPerByte);
+        __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
+        // r0: offset of first field after pre-allocated fields
+        if (FLAG_debug_code) {
+          __ cmp(r0, r6);
+          __ Assert(le, "Unexpected number of pre-allocated property fields.");
+        }
+        __ InitializeFieldsWithFiller(r5, r0, r7);
         // To allow for truncation.
         __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
-      } else {
-        __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
       }
-      __ b(&entry);
-      __ bind(&loop);
-      __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
-      __ bind(&entry);
-      __ cmp(r5, r6);
-      __ b(lt, &loop);
+      __ InitializeFieldsWithFiller(r5, r6, r7);
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      __ add(r4, r4, Operand(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed. Continue with
+      // allocated object if not; fall through to runtime call if it is.
+      // r1: constructor function
+      // r4: JSObject
+      // r5: start of next object (not tagged)
+      __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
+      // The field instance sizes contains both pre-allocated property fields
+      // and in-object properties.
+      __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+      __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+              kBitsPerByte);
+      __ add(r3, r3, Operand(r6));
+      __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
+              kBitsPerByte);
+      __ sub(r3, r3, Operand(r6), SetCC);
+
+      // Done if no extra properties are to be allocated.
+      __ b(eq, &allocated);
+      __ Assert(pl, "Property allocation count failed.");
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // r1: constructor
+      // r3: number of elements in properties array
+      // r4: JSObject
+      // r5: start of next object
+      __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
+      __ AllocateInNewSpace(
+          r0,
+          r5,
+          r6,
+          r2,
+          &undo_allocation,
+          static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+      // Initialize the FixedArray.
+      // r1: constructor
+      // r3: number of elements in properties array
+      // r4: JSObject
+      // r5: FixedArray (not tagged)
+      __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
+      __ mov(r2, r5);
+      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
+      ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+      __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+      __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
+
+      // Initialize the fields to undefined.
+      // r1: constructor function
+      // r2: First element of FixedArray (not tagged)
+      // r3: number of elements in properties array
+      // r4: JSObject
+      // r5: FixedArray (not tagged)
+      __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
+      ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+      { Label loop, entry;
+        if (count_constructions) {
+          __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+        } else if (FLAG_debug_code) {
+          __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+          __ cmp(r7, r8);
+          __ Assert(eq, "Undefined value not loaded.");
+        }
+        __ b(&entry);
+        __ bind(&loop);
+        __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+        __ bind(&entry);
+        __ cmp(r2, r6);
+        __ b(lt, &loop);
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject
+      // r1: constructor function
+      // r4: JSObject
+      // r5: FixedArray (not tagged)
+      __ add(r5, r5, Operand(kHeapObjectTag));  // Add the heap tag.
+      __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+
+      // Continue with JSObject being successfully allocated
+      // r1: constructor function
+      // r4: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated objects unused properties.
+      // r4: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(r4, r5);
     }
 
-    // Add the object tag to make the JSObject real, so that we can continue and
-    // jump into the continuation code at any time from now on. Any failures
-    // need to undo the allocation, so that the heap is in a consistent state
-    // and verifiable.
-    __ add(r4, r4, Operand(kHeapObjectTag));
-
-    // Check if a non-empty properties array is needed. Continue with allocated
-    // object if not fall through to runtime call if it is.
+    // Allocate the new receiver object using the runtime call.
     // r1: constructor function
+    __ bind(&rt_call);
+    __ push(r1);  // argument for Runtime_NewObject
+    __ CallRuntime(Runtime::kNewObject, 1);
+    __ mov(r4, r0);
+
+    // Receiver for constructor call allocated.
     // r4: JSObject
-    // r5: start of next object (not tagged)
-    __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
-    // The field instance sizes contains both pre-allocated property fields and
-    // in-object properties.
-    __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
-    __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
-    __ add(r3, r3, Operand(r6));
-    __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
-    __ sub(r3, r3, Operand(r6), SetCC);
+    __ bind(&allocated);
+    __ push(r4);
+    __ push(r4);
 
-    // Done if no extra properties are to be allocated.
-    __ b(eq, &allocated);
-    __ Assert(pl, "Property allocation count failed.");
+    // Reload the number of arguments and the constructor from the stack.
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: constructor function
+    // sp[3]: number of arguments (smi-tagged)
+    __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+    __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
 
-    // Scale the number of elements by pointer size and add the header for
-    // FixedArrays to the start of the next object calculation from above.
-    // r1: constructor
-    // r3: number of elements in properties array
-    // r4: JSObject
-    // r5: start of next object
-    __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
-    __ AllocateInNewSpace(
-        r0,
-        r5,
-        r6,
-        r2,
-        &undo_allocation,
-        static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+    // Set up pointer to last argument.
+    __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
 
-    // Initialize the FixedArray.
-    // r1: constructor
-    // r3: number of elements in properties array
-    // r4: JSObject
-    // r5: FixedArray (not tagged)
-    __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
-    __ mov(r2, r5);
-    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-    __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
-    ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-    __ mov(r0, Operand(r3, LSL, kSmiTagSize));
-    __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
+    // Set up number of arguments for function call below
+    __ mov(r0, Operand(r3, LSR, kSmiTagSize));
 
-    // Initialize the fields to undefined.
+    // Copy arguments and receiver to the expression stack.
+    // r0: number of arguments
     // r1: constructor function
-    // r2: First element of FixedArray (not tagged)
-    // r3: number of elements in properties array
-    // r4: JSObject
-    // r5: FixedArray (not tagged)
-    __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
-    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-    { Label loop, entry;
-      if (count_constructions) {
-        __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-      } else if (FLAG_debug_code) {
-        __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
-        __ cmp(r7, r8);
-        __ Assert(eq, "Undefined value not loaded.");
-      }
-      __ b(&entry);
-      __ bind(&loop);
-      __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
-      __ bind(&entry);
-      __ cmp(r2, r6);
-      __ b(lt, &loop);
+    // r2: address of last argument (caller sp)
+    // r3: number of arguments (smi-tagged)
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: constructor function
+    // sp[3]: number of arguments (smi-tagged)
+    Label loop, entry;
+    __ b(&entry);
+    __ bind(&loop);
+    __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
+    __ push(ip);
+    __ bind(&entry);
+    __ sub(r3, r3, Operand(2), SetCC);
+    __ b(ge, &loop);
+
+    // Call the function.
+    // r0: number of arguments
+    // r1: constructor function
+    if (is_api_function) {
+      __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      ParameterCount expected(0);
+      __ InvokeCode(code, expected, expected,
+                    RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+    } else {
+      ParameterCount actual(r0);
+      __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
     }
 
-    // Store the initialized FixedArray into the properties field of
-    // the JSObject
-    // r1: constructor function
-    // r4: JSObject
-    // r5: FixedArray (not tagged)
-    __ add(r5, r5, Operand(kHeapObjectTag));  // Add the heap tag.
-    __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+    // Store offset of return address for deoptimizer.
+    if (!is_api_function && !count_constructions) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
 
-    // Continue with JSObject being successfully allocated
-    // r1: constructor function
-    // r4: JSObject
-    __ jmp(&allocated);
+    // Restore context from the frame.
+    // r0: result
+    // sp[0]: receiver
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 
-    // Undo the setting of the new top so that the heap is verifiable. For
-    // example, the map's unused properties potentially do not match the
-    // allocated objects unused properties.
-    // r4: JSObject (previous new top)
-    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(r4, r5);
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    // r0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ JumpIfSmi(r0, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+    __ b(ge, &exit);
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ ldr(r0, MemOperand(sp));
+
+    // Remove receiver from the stack, remove caller arguments, and
+    // return.
+    __ bind(&exit);
+    // r0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+
+    // Leave construct frame.
   }
 
-  // Allocate the new receiver object using the runtime call.
-  // r1: constructor function
-  __ bind(&rt_call);
-  __ push(r1);  // argument for Runtime_NewObject
-  __ CallRuntime(Runtime::kNewObject, 1);
-  __ mov(r4, r0);
-
-  // Receiver for constructor call allocated.
-  // r4: JSObject
-  __ bind(&allocated);
-  __ push(r4);
-
-  // Push the function and the allocated receiver from the stack.
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ ldr(r1, MemOperand(sp, kPointerSize));
-  __ push(r1);  // Constructor function.
-  __ push(r4);  // Receiver.
-
-  // Reload the number of arguments from the stack.
-  // r1: constructor function
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
-
-  // Setup pointer to last argument.
-  __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // Setup number of arguments for function call below
-  __ mov(r0, Operand(r3, LSR, kSmiTagSize));
-
-  // Copy arguments and receiver to the expression stack.
-  // r0: number of arguments
-  // r2: address of last argument (caller sp)
-  // r1: constructor function
-  // r3: number of arguments (smi-tagged)
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  Label loop, entry;
-  __ b(&entry);
-  __ bind(&loop);
-  __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
-  __ push(ip);
-  __ bind(&entry);
-  __ sub(r3, r3, Operand(2), SetCC);
-  __ b(ge, &loop);
-
-  // Call the function.
-  // r0: number of arguments
-  // r1: constructor function
-  if (is_api_function) {
-    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-    Handle<Code> code =
-        masm->isolate()->builtins()->HandleApiCallConstruct();
-    ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected,
-                  RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
-  } else {
-    ParameterCount actual(r0);
-    __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Pop the function from the stack.
-  // sp[0]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  __ pop();
-
-  // Restore context from the frame.
-  // r0: result
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  // If the result is an object (in the ECMA sense), we should get rid
-  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-  // on page 74.
-  Label use_receiver, exit;
-
-  // If the result is a smi, it is *not* an object in the ECMA sense.
-  // r0: result
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ JumpIfSmi(r0, &use_receiver);
-
-  // If the type of the result (stored in its map) is less than
-  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
-  __ b(ge, &exit);
-
-  // Throw away the result of the constructor invocation and use the
-  // on-stack receiver as the result.
-  __ bind(&use_receiver);
-  __ ldr(r0, MemOperand(sp));
-
-  // Remove receiver from the stack, remove caller arguments, and
-  // return.
-  __ bind(&exit);
-  // r0: result
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-  __ LeaveConstructFrame();
   __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
   __ add(sp, sp, Operand(kPointerSize));
   __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
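// --- Sketch (not part of the patch): the smi-driven copy loop above in
// C++ terms. r3 holds the argument count as a smi (argc << kSmiTagSize),
// so shifting it left by (kPointerSizeLog2 - 1) yields argc * kPointerSize
// directly, assuming 32-bit pointers (kPointerSizeLog2 == 2) and
// kSmiTagSize == 1. Subtracting 2 is subtracting Smi::FromInt(1).
#include <cstdint>

void CopyArgumentsSketch(intptr_t* caller_sp, intptr_t argc_smi,
                         intptr_t** expr_sp) {
  // Loop visits indices argc-1 .. 0, mirroring "sub r3, r3, #2; bge".
  for (intptr_t smi = argc_smi - 2; smi >= 0; smi -= 2) {
    intptr_t slot = *reinterpret_cast<intptr_t*>(
        reinterpret_cast<char*>(caller_sp) + (smi << 1));
    *--(*expr_sp) = slot;  // push, highest-indexed argument first
  }
}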
@@ -997,60 +1056,62 @@
   // r4: argv
   // r5-r7, cp may be clobbered
 
-  // Clear the context before we push it when entering the JS frame.
+  // Clear the context before we push it when entering the internal frame.
   __ mov(cp, Operand(0, RelocInfo::NONE));
 
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Set up the context from the function argument.
-  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+    // Set up the context from the function argument.
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
-  __ InitializeRootRegister();
+    __ InitializeRootRegister();
 
-  // Push the function and the receiver onto the stack.
-  __ push(r1);
-  __ push(r2);
+    // Push the function and the receiver onto the stack.
+    __ push(r1);
+    __ push(r2);
 
-  // Copy arguments to the stack in a loop.
-  // r1: function
-  // r3: argc
-  // r4: argv, i.e. points to first arg
-  Label loop, entry;
-  __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
-  // r2 points past last arg.
-  __ b(&entry);
-  __ bind(&loop);
-  __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex));  // read next parameter
-  __ ldr(r0, MemOperand(r0));  // dereference handle
-  __ push(r0);  // push parameter
-  __ bind(&entry);
-  __ cmp(r4, r2);
-  __ b(ne, &loop);
+    // Copy arguments to the stack in a loop.
+    // r1: function
+    // r3: argc
+    // r4: argv, i.e. points to first arg
+    Label loop, entry;
+    __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
+    // r2 points past last arg.
+    __ b(&entry);
+    __ bind(&loop);
+    __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex));  // read next parameter
+    __ ldr(r0, MemOperand(r0));  // dereference handle
+    __ push(r0);  // push parameter
+    __ bind(&entry);
+    __ cmp(r4, r2);
+    __ b(ne, &loop);
 
-  // Initialize all JavaScript callee-saved registers, since they will be seen
-  // by the garbage collector as part of handlers.
-  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
-  __ mov(r5, Operand(r4));
-  __ mov(r6, Operand(r4));
-  __ mov(r7, Operand(r4));
-  if (kR9Available == 1) {
-    __ mov(r9, Operand(r4));
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+    __ mov(r5, Operand(r4));
+    __ mov(r6, Operand(r4));
+    __ mov(r7, Operand(r4));
+    if (kR9Available == 1) {
+      __ mov(r9, Operand(r4));
+    }
+
+    // Invoke the code and pass argc as r0.
+    __ mov(r0, Operand(r3));
+    if (is_construct) {
+      CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+      __ CallStub(&stub);
+    } else {
+      ParameterCount actual(r0);
+      __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+    // Exit the JS frame and remove the parameters (except function), and
+    // return.
+    // Respect ABI stack constraint.
   }
-
-  // Invoke the code and pass argc as r0.
-  __ mov(r0, Operand(r3));
-  if (is_construct) {
-    __ Call(masm->isolate()->builtins()->JSConstructCall());
-  } else {
-    ParameterCount actual(r0);
-    __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Exit the JS frame and remove the parameters (except function), and return.
-  // Respect ABI stack constraint.
-  __ LeaveInternalFrame();
   __ Jump(lr);
 
   // r0: result
@@ -1069,26 +1130,27 @@
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Preserve the function.
-  __ push(r1);
-  // Push call kind information.
-  __ push(r5);
+    // Preserve the function.
+    __ push(r1);
+    // Push call kind information.
+    __ push(r5);
 
-  // Push the function on the stack as the argument to the runtime function.
-  __ push(r1);
-  __ CallRuntime(Runtime::kLazyCompile, 1);
-  // Calculate the entry point.
-  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Push the function on the stack as the argument to the runtime function.
+    __ push(r1);
+    __ CallRuntime(Runtime::kLazyCompile, 1);
+    // Calculate the entry point.
+    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-  // Restore call kind information.
-  __ pop(r5);
-  // Restore saved function.
-  __ pop(r1);
+    // Restore call kind information.
+    __ pop(r5);
+    // Restore saved function.
+    __ pop(r1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ Jump(r2);
@@ -1097,26 +1159,27 @@
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Preserve the function.
-  __ push(r1);
-  // Push call kind information.
-  __ push(r5);
+    // Preserve the function.
+    __ push(r1);
+    // Push call kind information.
+    __ push(r5);
 
-  // Push the function on the stack as the argument to the runtime function.
-  __ push(r1);
-  __ CallRuntime(Runtime::kLazyRecompile, 1);
-  // Calculate the entry point.
-  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Push the function on the stack as the argument to the runtime function.
+    __ push(r1);
+    __ CallRuntime(Runtime::kLazyRecompile, 1);
+    // Calculate the entry point.
+    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-  // Restore call kind information.
-  __ pop(r5);
-  // Restore saved function.
-  __ pop(r1);
+    // Restore call kind information.
+    __ pop(r5);
+    // Restore saved function.
+    __ pop(r1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ Jump(r2);
@@ -1125,12 +1188,13 @@
 
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
-  __ EnterInternalFrame();
-  // Pass the function and deoptimization type to the runtime system.
-  __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
-  __ push(r0);
-  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(r0);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  }
 
   // Get the full codegen state from the stack and untag it -> r6.
   __ ldr(r6, MemOperand(sp, 0 * kPointerSize));
@@ -1170,9 +1234,10 @@
   // the registers without worrying about which of them contain
   // pointers. This seems a bit fragile.
   __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
-  __ EnterInternalFrame();
-  __ CallRuntime(Runtime::kNotifyOSR, 0);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyOSR, 0);
+  }
   __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
   __ Ret();
 }
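// --- Sketch (not part of the patch): the RAII shape of FrameScope, which
// this patch substitutes for the EnterInternalFrame()/LeaveInternalFrame()
// pairs throughout these builtins. Teardown is emitted exactly when the
// C++ block closes, so the two sides can no longer drift apart.
// Illustrative only: the real class also tracks MANUAL frames and
// assembler state; EnterFrame/LeaveFrame are existing MacroAssembler
// helpers.
class FrameScopeSketch {
 public:
  FrameScopeSketch(MacroAssembler* masm, StackFrame::Type type)
      : masm_(masm), type_(type) {
    masm_->EnterFrame(type_);  // emitted once, at scope entry
  }
  ~FrameScopeSketch() {
    masm_->LeaveFrame(type_);  // emitted once, at scope exit
  }
 private:
  MacroAssembler* masm_;
  StackFrame::Type type_;
};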
@@ -1188,10 +1253,11 @@
   // Lookup the function in the JavaScript frame and push it as an
   // argument to the on-stack replacement function.
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ EnterInternalFrame();
-  __ push(r0);
-  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(r0);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
@@ -1216,7 +1282,7 @@
   // 1. Make sure we have at least one argument.
   // r0: actual number of arguments
   { Label done;
-    __ tst(r0, Operand(r0));
+    __ cmp(r0, Operand(0));
     __ b(ne, &done);
     __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
     __ push(r2);
@@ -1273,17 +1339,23 @@
     __ b(ge, &shift_arguments);
 
     __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Smi-tagged.
-    __ push(r0);
 
-    __ push(r2);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(r2, r0);
+    {
+      // Enter an internal frame in order to preserve argument count.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Smi-tagged.
+      __ push(r0);
 
-    __ pop(r0);
-    __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-    __ LeaveInternalFrame();
+      __ push(r2);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ mov(r2, r0);
+
+      __ pop(r0);
+      __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+
+      // Exit the internal frame.
+    }
+
     // Restore the function to r1, and the flag to r4.
     __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
     __ mov(r4, Operand(0, RelocInfo::NONE));
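// --- Sketch (not part of the patch): why the TO_OBJECT call above
// brackets r0 with a tag/untag pair. Anything pushed inside an internal
// frame is scanned by the GC, so a raw argument count must be disguised
// as a smi before it is spilled (assuming kSmiTagSize == 1, kSmiTag == 0):
#include <cstdint>
inline intptr_t ToSmiSketch(intptr_t value) { return value << 1; }  // LSL
inline intptr_t FromSmiSketch(intptr_t smi) { return smi >> 1; }    // ASR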
@@ -1403,156 +1475,157 @@
   const int kRecvOffset     =  3 * kPointerSize;
   const int kFunctionOffset =  4 * kPointerSize;
 
-  __ EnterInternalFrame();
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
 
-  __ ldr(r0, MemOperand(fp, kFunctionOffset));  // get the function
-  __ push(r0);
-  __ ldr(r0, MemOperand(fp, kArgsOffset));  // get the args array
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+    __ ldr(r0, MemOperand(fp, kFunctionOffset));  // get the function
+    __ push(r0);
+    __ ldr(r0, MemOperand(fp, kArgsOffset));  // get the args array
+    __ push(r0);
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
-  // Make r2 the space we have left. The stack might already be overflowed
-  // here which will cause r2 to become negative.
-  __ sub(r2, sp, r2);
-  // Check if the arguments will overflow the stack.
-  __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ b(gt, &okay);  // Signed comparison.
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+    // Make r2 the space we have left. The stack might already be overflowed
+    // here which will cause r2 to become negative.
+    __ sub(r2, sp, r2);
+    // Check if the arguments will overflow the stack.
+    __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ b(gt, &okay);  // Signed comparison.
 
-  // Out of stack space.
-  __ ldr(r1, MemOperand(fp, kFunctionOffset));
-  __ push(r1);
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  // End of stack check.
+    // Out of stack space.
+    __ ldr(r1, MemOperand(fp, kFunctionOffset));
+    __ push(r1);
+    __ push(r0);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    // End of stack check.
 
-  // Push current limit and index.
-  __ bind(&okay);
-  __ push(r0);  // limit
-  __ mov(r1, Operand(0, RelocInfo::NONE));  // initial index
-  __ push(r1);
+    // Push current limit and index.
+    __ bind(&okay);
+    __ push(r0);  // limit
+    __ mov(r1, Operand(0, RelocInfo::NONE));  // initial index
+    __ push(r1);
 
-  // Get the receiver.
-  __ ldr(r0, MemOperand(fp, kRecvOffset));
+    // Get the receiver.
+    __ ldr(r0, MemOperand(fp, kRecvOffset));
 
-  // Check that the function is a JS function (otherwise it must be a proxy).
-  Label push_receiver;
-  __ ldr(r1, MemOperand(fp, kFunctionOffset));
-  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &push_receiver);
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ ldr(r1, MemOperand(fp, kFunctionOffset));
+    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+    __ b(ne, &push_receiver);
 
-  // Change context eagerly to get the right global object if necessary.
-  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-  // Load the shared function info while the function is still in r1.
-  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    // Change context eagerly to get the right global object if necessary.
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+    // Load the shared function info while the function is still in r1.
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
 
-  // Compute the receiver.
-  // Do not transform the receiver for strict mode functions.
-  Label call_to_object, use_global_receiver;
-  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
-  __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                           kSmiTagSize)));
-  __ b(ne, &push_receiver);
+    // Compute the receiver.
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_receiver;
+    __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+    __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                             kSmiTagSize)));
+    __ b(ne, &push_receiver);
 
-  // Do not transform the receiver for strict mode functions.
-  __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-  __ b(ne, &push_receiver);
+    // Do not transform the receiver for strict mode functions.
+    __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ b(ne, &push_receiver);
 
-  // Compute the receiver in non-strict mode.
-  __ JumpIfSmi(r0, &call_to_object);
-  __ LoadRoot(r1, Heap::kNullValueRootIndex);
-  __ cmp(r0, r1);
-  __ b(eq, &use_global_receiver);
-  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
-  __ cmp(r0, r1);
-  __ b(eq, &use_global_receiver);
+    // Compute the receiver in non-strict mode.
+    __ JumpIfSmi(r0, &call_to_object);
+    __ LoadRoot(r1, Heap::kNullValueRootIndex);
+    __ cmp(r0, r1);
+    __ b(eq, &use_global_receiver);
+    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+    __ cmp(r0, r1);
+    __ b(eq, &use_global_receiver);
 
-  // Check if the receiver is already a JavaScript object.
-  // r0: receiver
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
-  __ b(ge, &push_receiver);
+    // Check if the receiver is already a JavaScript object.
+    // r0: receiver
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+    __ b(ge, &push_receiver);
 
-  // Convert the receiver to a regular object.
-  // r0: receiver
-  __ bind(&call_to_object);
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ b(&push_receiver);
+    // Convert the receiver to a regular object.
+    // r0: receiver
+    __ bind(&call_to_object);
+    __ push(r0);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ b(&push_receiver);
 
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
-  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
-  __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
-  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+    // Use the current global receiver object as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalOffset =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+    __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
+    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
 
-  // Push the receiver.
-  // r0: receiver
-  __ bind(&push_receiver);
-  __ push(r0);
+    // Push the receiver.
+    // r0: receiver
+    __ bind(&push_receiver);
+    __ push(r0);
 
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ ldr(r0, MemOperand(fp, kIndexOffset));
-  __ b(&entry);
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ ldr(r0, MemOperand(fp, kIndexOffset));
+    __ b(&entry);
 
-  // Load the current argument from the arguments array and push it to the
-  // stack.
-  // r0: current argument index
-  __ bind(&loop);
-  __ ldr(r1, MemOperand(fp, kArgsOffset));
-  __ push(r1);
-  __ push(r0);
+    // Load the current argument from the arguments array and push it to the
+    // stack.
+    // r0: current argument index
+    __ bind(&loop);
+    __ ldr(r1, MemOperand(fp, kArgsOffset));
+    __ push(r1);
+    __ push(r0);
 
-  // Call the runtime to access the property in the arguments array.
-  __ CallRuntime(Runtime::kGetProperty, 2);
-  __ push(r0);
+    // Call the runtime to access the property in the arguments array.
+    __ CallRuntime(Runtime::kGetProperty, 2);
+    __ push(r0);
 
-  // Use inline caching to access the arguments.
-  __ ldr(r0, MemOperand(fp, kIndexOffset));
-  __ add(r0, r0, Operand(1 << kSmiTagSize));
-  __ str(r0, MemOperand(fp, kIndexOffset));
+    // Use inline caching to access the arguments.
+    __ ldr(r0, MemOperand(fp, kIndexOffset));
+    __ add(r0, r0, Operand(1 << kSmiTagSize));
+    __ str(r0, MemOperand(fp, kIndexOffset));
 
-  // Test if the copy loop has finished copying all the elements from the
-  // arguments object.
-  __ bind(&entry);
-  __ ldr(r1, MemOperand(fp, kLimitOffset));
-  __ cmp(r0, r1);
-  __ b(ne, &loop);
+    // Test if the copy loop has finished copying all the elements from the
+    // arguments object.
+    __ bind(&entry);
+    __ ldr(r1, MemOperand(fp, kLimitOffset));
+    __ cmp(r0, r1);
+    __ b(ne, &loop);
 
-  // Invoke the function.
-  Label call_proxy;
-  ParameterCount actual(r0);
-  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-  __ ldr(r1, MemOperand(fp, kFunctionOffset));
-  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &call_proxy);
-  __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the function.
+    Label call_proxy;
+    ParameterCount actual(r0);
+    __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+    __ ldr(r1, MemOperand(fp, kFunctionOffset));
+    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+    __ b(ne, &call_proxy);
+    __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
 
-  // Tear down the internal frame and remove function, receiver and args.
-  __ LeaveInternalFrame();
-  __ add(sp, sp, Operand(3 * kPointerSize));
-  __ Jump(lr);
+    frame_scope.GenerateLeaveFrame();
+    __ add(sp, sp, Operand(3 * kPointerSize));
+    __ Jump(lr);
 
-  // Invoke the function proxy.
-  __ bind(&call_proxy);
-  __ push(r1);  // add function proxy as last argument
-  __ add(r0, r0, Operand(1));
-  __ mov(r2, Operand(0, RelocInfo::NONE));
-  __ SetCallKind(r5, CALL_AS_METHOD);
-  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
-  __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
+    // Invoke the function proxy.
+    __ bind(&call_proxy);
+    __ push(r1);  // add function proxy as last argument
+    __ add(r0, r0, Operand(1));
+    __ mov(r2, Operand(0, RelocInfo::NONE));
+    __ SetCallKind(r5, CALL_AS_METHOD);
+    __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
 
-  __ LeaveInternalFrame();
+    // Tear down the internal frame and remove function, receiver and args.
+  }
   __ add(sp, sp, Operand(3 * kPointerSize));
   __ Jump(lr);
 }
@@ -1672,6 +1745,9 @@
   __ bind(&invoke);
   __ Call(r3);
 
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
   // Exit frame and return.
   LeaveArgumentsAdaptorFrame(masm);
   __ Jump(lr);
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 36450c9..f772db9 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -98,9 +98,9 @@
                         &gc,
                         TAG_OBJECT);
 
-  int map_index = strict_mode_ == kStrictMode
-      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
-      : Context::FUNCTION_MAP_INDEX;
+  int map_index = (language_mode_ == CLASSIC_MODE)
+      ? Context::FUNCTION_MAP_INDEX
+      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
@@ -122,7 +122,6 @@
   __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
   __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
 
-
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
   __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
@@ -156,21 +155,19 @@
   // Load the function from the stack.
   __ ldr(r3, MemOperand(sp, 0));
 
-  // Setup the object header.
-  __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
-  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+  // Set up the object header.
+  __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
   __ mov(r2, Operand(Smi::FromInt(length)));
   __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+  __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
 
-  // Setup the fixed slots.
+  // Set up the fixed slots, copy the global object from the previous context.
+  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(r1, Operand(Smi::FromInt(0)));
   __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
   __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-
-  // Copy the global object from the previous context.
-  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
 
   // Initialize the rest of the slots to undefined.
   __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
@@ -189,6 +186,119 @@
 }
 
 
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [sp]: function.
+  // [sp + kPointerSize]: serialized scope info
+
+  // Try to allocate the context in new space.
+  Label gc;
+  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+  __ AllocateInNewSpace(FixedArray::SizeFor(length),
+                        r0, r1, r2, &gc, TAG_OBJECT);
+
+  // Load the function from the stack.
+  __ ldr(r3, MemOperand(sp, 0));
+
+  // Load the serialized scope info from the stack.
+  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+
+  // Set up the object header.
+  __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
+  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ mov(r2, Operand(Smi::FromInt(length)));
+  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+
+  // If this block context is nested in the global context we get a smi
+  // sentinel instead of a function. The block context should get the
+  // canonical empty function of the global context as its closure which
+  // we still have to look up.
+  Label after_sentinel;
+  __ JumpIfNotSmi(r3, &after_sentinel);
+  if (FLAG_debug_code) {
+    const char* message = "Expected 0 as a Smi sentinel";
+    __ cmp(r3, Operand::Zero());
+    __ Assert(eq, message);
+  }
+  __ ldr(r3, GlobalObjectOperand());
+  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+  __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
+  __ bind(&after_sentinel);
+
+  // Set up the fixed slots, copy the global object from the previous context.
+  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
+  __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
+  __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
+  __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
+
+  // Initialize the rest of the slots to the hole value.
+  __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
+  for (int i = 0; i < slots_; i++) {
+    __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
+  }
+
+  // Remove the on-stack argument and return.
+  __ mov(cp, r0);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  // Need to collect. Call into runtime system.
+  __ bind(&gc);
+  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
+
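// --- Sketch (not part of the patch): the slot arithmetic behind the
// ContextOperand()/Context::SlotOffset() stores above. A context is laid
// out like a FixedArray (map + length header, asserted elsewhere in this
// patch to be 2 * kPointerSize), so slot i sits a fixed header away from
// the tagged object pointer. Constants below assume a 32-bit ARM build.
inline int SlotOffsetSketch(int index) {
  const int kPointerSize = 4;
  const int kHeapObjectTag = 1;
  const int kContextHeaderSize = 2 * kPointerSize;  // map + length
  return kContextHeaderSize + index * kPointerSize - kHeapObjectTag;
}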
+
+static void GenerateFastCloneShallowArrayCommon(
+    MacroAssembler* masm,
+    int length,
+    FastCloneShallowArrayStub::Mode mode,
+    Label* fail) {
+  // Registers on entry:
+  //
+  // r3: boilerplate literal array.
+  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = 0;
+  if (length > 0) {
+    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        ? FixedDoubleArray::SizeFor(length)
+        : FixedArray::SizeFor(length);
+  }
+  int size = JSArray::kSize + elements_size;
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size,
+                        r0,
+                        r1,
+                        r2,
+                        fail,
+                        TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
+      __ ldr(r1, FieldMemOperand(r3, i));
+      __ str(r1, FieldMemOperand(r0, i));
+    }
+  }
+
+  if (length > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+    __ add(r2, r0, Operand(JSArray::kSize));
+    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+
+    // Copy the elements array.
+    ASSERT((elements_size % kPointerSize) == 0);
+    __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
+  }
+}
+
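// --- Note (not part of the patch): the combined allocation above yields
//   r0 -> [ JSArray header (JSArray::kSize bytes) ][ elements backing store ]
// so the clone's elements pointer is simply r0 + JSArray::kSize, which is
// what gets written back into the kElementsOffset slot. One allocation and
// one new-space limit check instead of two.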
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   // Stack layout on entry:
   //
@@ -196,10 +306,6 @@
   // [sp + kPointerSize]: literal index.
   // [sp + (2 * kPointerSize)]: literals array.
 
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
-  int size = JSArray::kSize + elements_size;
-
   // Load boilerplate object into r3 and check if we need to create a
   // boilerplate.
   Label slow_case;
@@ -207,57 +313,59 @@
   __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
   __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r3, ip);
+  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
   __ b(eq, &slow_case);
 
+  FastCloneShallowArrayStub::Mode mode = mode_;
+  if (mode == CLONE_ANY_ELEMENTS) {
+    Label double_elements, check_fast_elements;
+    __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
+    __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
+    __ b(ne, &check_fast_elements);
+    GenerateFastCloneShallowArrayCommon(masm, 0,
+                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+    // Return and remove the on-stack parameters.
+    __ add(sp, sp, Operand(3 * kPointerSize));
+    __ Ret();
+
+    __ bind(&check_fast_elements);
+    __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
+    __ b(ne, &double_elements);
+    GenerateFastCloneShallowArrayCommon(masm, length_,
+                                        CLONE_ELEMENTS, &slow_case);
+    // Return and remove the on-stack parameters.
+    __ add(sp, sp, Operand(3 * kPointerSize));
+    __ Ret();
+
+    __ bind(&double_elements);
+    mode = CLONE_DOUBLE_ELEMENTS;
+    // Fall through to generate the code to handle double elements.
+  }
+
   if (FLAG_debug_code) {
     const char* message;
     Heap::RootListIndex expected_map_index;
-    if (mode_ == CLONE_ELEMENTS) {
+    if (mode == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map_index = Heap::kFixedArrayMapRootIndex;
+    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+      message = "Expected (writable) fixed double array";
+      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
     } else {
-      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
     }
     __ push(r3);
     __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
     __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
-    __ LoadRoot(ip, expected_map_index);
-    __ cmp(r3, ip);
+    __ CompareRoot(r3, expected_map_index);
     __ Assert(eq, message);
     __ pop(r3);
   }
 
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size,
-                        r0,
-                        r1,
-                        r2,
-                        &slow_case,
-                        TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
-      __ ldr(r1, FieldMemOperand(r3, i));
-      __ str(r1, FieldMemOperand(r0, i));
-    }
-  }
-
-  if (length_ > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
-    __ add(r2, r0, Operand(JSArray::kSize));
-    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
-    // Copy the elements array.
-    __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
-  }
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
 
   // Return and remove the on-stack parameters.
   __ add(sp, sp, Operand(3 * kPointerSize));
@@ -268,6 +376,49 @@
 }
 
 
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [sp]: object literal flags.
+  // [sp + kPointerSize]: constant properties.
+  // [sp + (2 * kPointerSize)]: literal index.
+  // [sp + (3 * kPointerSize)]: literals array.
+
+  // Load boilerplate object into r3 and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
+  __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
+  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+  __ b(eq, &slow_case);
+
+  // Check that the boilerplate contains only fast properties and we can
+  // statically determine the instance size.
+  int size = JSObject::kHeaderSize + length_ * kPointerSize;
+  __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
+  __ cmp(r0, Operand(size >> kPointerSizeLog2));
+  __ b(ne, &slow_case);
+
+  // Allocate the JS object and copy header together with all in-object
+  // properties from the boilerplate.
+  __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
+  for (int i = 0; i < size; i += kPointerSize) {
+    __ ldr(r1, FieldMemOperand(r3, i));
+    __ str(r1, FieldMemOperand(r0, i));
+  }
+
+  // Return and remove the on-stack parameters.
+  __ add(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
+
+  __ bind(&slow_case);
+  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+}
+
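// --- Sketch (not part of the patch): the instance-size guard above in
// plain C++. The map stores the instance size in words, so the boilerplate
// can be cloned with a fixed word-copy loop only when its size matches the
// statically expected header plus in-object properties exactly. Constants
// assume the 32-bit layout at this revision (12-byte JSObject header:
// map, properties, elements).
inline bool CanCloneShallowSketch(int instance_size_in_words,
                                  int in_object_properties) {
  const int kPointerSizeLog2 = 2;
  const int kHeaderSize = 3 * 4;
  int expected_bytes =
      kHeaderSize + (in_object_properties << kPointerSizeLog2);
  return instance_size_in_words == (expected_bytes >> kPointerSizeLog2);
}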
+
 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
 // registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
 // 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
@@ -329,7 +480,7 @@
   __ b(gt, &not_special);
 
   // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
-  static const uint32_t exponent_word_for_1 =
+  const uint32_t exponent_word_for_1 =
       HeapNumber::kExponentBias << HeapNumber::kExponentShift;
   __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
   // 1, 0 and -1 all have 0 for the second word.
@@ -432,7 +583,9 @@
 
   Label is_smi, done;
 
-  __ JumpIfSmi(object, &is_smi);
+  // Smi-check
+  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
+  // Heap number check
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
 
   // Handle loading a double from a heap number.
@@ -454,7 +607,6 @@
   if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // Convert smi to double using VFP instructions.
-    __ SmiUntag(scratch1, object);
     __ vmov(dst.high(), scratch1);
     __ vcvt_f64_s32(dst, dst.high());
     if (destination == kCoreRegisters) {
@@ -489,11 +641,10 @@
                            Heap::kHeapNumberMapRootIndex,
                            "HeapNumberMap register clobbered.");
   }
-  Label is_smi;
   Label done;
   Label not_in_int32_range;
 
-  __ JumpIfSmi(object, &is_smi);
+  __ UntagAndJumpIfSmi(dst, object, &done);
   __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
   __ cmp(scratch1, heap_number_map);
   __ b(ne, not_number);
@@ -513,10 +664,6 @@
                                  scratch1,
                                  scratch2,
                                  scratch3);
-  __ jmp(&done);
-
-  __ bind(&is_smi);
-  __ SmiUntag(dst, object);
   __ bind(&done);
 }
 
@@ -559,7 +706,7 @@
     // Get the absolute value of the object (as an unsigned integer).
     __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
 
-    // Get mantisssa[51:20].
+    // Get mantissa[51:20].
 
     // Get the position of the first set bit.
     __ CountLeadingZeros(dst1, int_scratch, scratch2);
@@ -689,10 +836,7 @@
 
   Label done;
 
-  // Untag the object into the destination register.
-  __ SmiUntag(dst, object);
-  // Just return if the object is a smi.
-  __ JumpIfSmi(object, &done);
+  __ UntagAndJumpIfSmi(dst, object, &done);
 
   if (FLAG_debug_code) {
     __ AbortIfNotRootValue(heap_number_map,
@@ -793,7 +937,7 @@
   // non zero bits left. So we need the (30 - exponent) last bits of the
   // 31 higher bits of the mantissa to be null.
   // Because bits [21:0] are null, we can check instead that the
-  // (32 - exponent) last bits of the 32 higher bits of the mantisssa are null.
+  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
 
   // Get the 32 higher bits of the mantissa in dst.
   __ Ubfx(dst,
@@ -838,9 +982,11 @@
     __ vmov(d0, r0, r1);
     __ vmov(d1, r2, r3);
   }
-  // Call C routine that may not cause GC or other trouble.
-  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
-                   0, 2);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm);
+    __ CallCFunction(
+        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+  }
   // Store answer in the overwritable heap number. Double returned in
   // registers r0 and r1 or in d0.
   if (masm->use_eabi_hardfloat()) {
@@ -857,6 +1003,29 @@
 }
 
 
+bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+  // These variants are compiled ahead of time.  See next method.
+  if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
+    return true;
+  }
+  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
+    return true;
+  }
+  // Other register combinations are generated as and when they are needed,
+  // so it is unsafe to call them from stubs (we can't generate a stub while
+  // we are generating a stub).
+  return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+  WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
+  WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
+  stub1.GetCode()->set_is_pregenerated(true);
+  stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
@@ -1197,6 +1366,8 @@
       __ vmov(d0, r0, r1);
       __ vmov(d1, r2, r3);
     }
+
+    AllowExternalCallThatCantCauseGC scope(masm);
     __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
                      0, 2);
     __ pop(pc);  // Return.
@@ -1214,7 +1385,7 @@
     // If either operand is a JS object or an oddball value, then they are
     // not equal since their pointers are different.
     // There is no test for undetectability in strict equality.
-    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
     Label first_non_object;
     // Get the type of the first operand into r2 and compare it with
     // FIRST_SPEC_OBJECT_TYPE.
@@ -1606,6 +1777,8 @@
 // The stub expects its argument in the tos_ register and returns its result in
 // it, too: zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // This stub uses VFP3 instructions.
   CpuFeatures::Scope scope(VFP3);
 
@@ -1713,6 +1886,41 @@
 }
 
 
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ stm(db_w, sp, kCallerSaved | lr.bit());
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(VFP3);
+    __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      __ vstr(reg, MemOperand(sp, i * kDoubleSize));
+    }
+  }
+  const int argument_count = 1;
+  const int fp_argument_count = 0;
+  const Register scratch = r1;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+  __ mov(r0, Operand(ExternalReference::isolate_address()));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(VFP3);
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      __ vldr(reg, MemOperand(sp, i * kDoubleSize));
+    }
+    __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+  }
+  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
+}
+
+
 void UnaryOpStub::PrintName(StringStream* stream) {
   const char* op_name = Token::Name(op_);
   const char* overwrite_name = NULL;  // Make g++ happy.
@@ -1866,12 +2074,13 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(r0);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(r1, Operand(r0));
-    __ pop(r0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(r1, Operand(r0));
+      __ pop(r0);
+    }
 
     __ bind(&heapnumber_allocated);
     __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
@@ -1912,13 +2121,14 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(r0);  // Push the heap number, not the untagged int32.
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(r2, r0);  // Move the new heap number into r2.
-    // Get the heap number into r0, now that the new heap number is in r2.
-    __ pop(r0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);  // Push the heap number, not the untagged int32.
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(r2, r0);  // Move the new heap number into r2.
+      // Get the heap number into r0, now that the new heap number is in r2.
+      __ pop(r0);
+    }
 
     // Convert the heap number in r0 to an untagged integer in r1.
     // This can't go slow-case because it's the same number we already
@@ -2028,6 +2238,10 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
+  // Explicitly allow generation of nested stubs. It is safe here because
+  // generation code does not use any raw pointers.
+  AllowStubCallsScope allow_stub_calls(masm, true);
+
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -2110,7 +2324,7 @@
       __ cmp(ip, Operand(scratch2));
       __ b(ne, &not_smi_result);
       // Go slow on zero result to handle -0.
-      __ tst(scratch1, Operand(scratch1));
+      __ cmp(scratch1, Operand(0));
       __ mov(right, Operand(scratch1), LeaveCC, ne);
       __ Ret(ne);
       // We need -0 if we were multiplying a negative number with 0 to get 0.
@@ -3082,10 +3296,12 @@
     // Check if cache matches: Double value is stored in uint32_t[2] array.
     __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
     __ cmp(r2, r4);
-    __ b(ne, &calculate);
-    __ cmp(r3, r5);
+    __ cmp(r3, r5, eq);
     __ b(ne, &calculate);
     // Cache hit. Load result, cleanup and return.
+    Counters* counters = masm->isolate()->counters();
+    __ IncrementCounter(
+        counters->transcendental_cache_hit(), 1, scratch0, scratch1);
     if (tagged) {
       // Pop input value from stack and load result into r0.
       __ pop();
@@ -3098,6 +3314,9 @@
   }  // if (CpuFeatures::IsSupported(VFP3))
 
   __ bind(&calculate);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(
+      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
   if (tagged) {
     __ bind(&invalid_cache);
     ExternalReference runtime_function =
@@ -3133,10 +3352,11 @@
     __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
     __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-    __ EnterInternalFrame();
-    __ push(r0);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
     __ Ret();
 
@@ -3149,14 +3369,15 @@
 
     // We return the value in d2 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Allocate an aligned object larger than a HeapNumber.
-    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
-    __ mov(scratch0, Operand(4 * kPointerSize));
-    __ push(scratch0);
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+      // Allocate an aligned object larger than a HeapNumber.
+      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+      __ mov(scratch0, Operand(4 * kPointerSize));
+      __ push(scratch0);
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 }
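// --- Note (not part of the patch): the cache-hit check above now relies
// on ARM conditional execution: "cmp(r3, r5, eq)" only executes when the
// first compare set "eq", otherwise the "ne" flags flow straight into the
// single branch. Logically it is just:
inline bool CacheHitSketch(unsigned lo, unsigned hi,
                           unsigned cached_lo, unsigned cached_hi) {
  return lo == cached_lo && hi == cached_hi;  // one branch, not two
}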
@@ -3173,6 +3394,7 @@
   } else {
     __ vmov(r0, r1, d2);
   }
+  AllowExternalCallThatCantCauseGC scope(masm);
   switch (type_) {
     case TranscendentalCache::SIN:
       __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@@ -3182,6 +3404,10 @@
       __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
           0, 1);
       break;
+    case TranscendentalCache::TAN:
+      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
+          0, 1);
+      break;
     case TranscendentalCache::LOG:
       __ CallCFunction(ExternalReference::math_log_double_function(isolate),
           0, 1);
@@ -3199,6 +3425,7 @@
     // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -3212,105 +3439,207 @@
 }
 
 
+void InterruptStub::Generate(MacroAssembler* masm) {
+  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
 void MathPowStub::Generate(MacroAssembler* masm) {
-  Label call_runtime;
+  CpuFeatures::Scope vfp3_scope(VFP3);
+  const Register base = r1;
+  const Register exponent = r2;
+  const Register heapnumbermap = r5;
+  const Register heapnumber = r0;
+  const DoubleRegister double_base = d1;
+  const DoubleRegister double_exponent = d2;
+  const DoubleRegister double_result = d3;
+  const DoubleRegister double_scratch = d0;
+  const SwVfpRegister single_scratch = s0;
+  const Register scratch = r9;
+  const Register scratch2 = r7;
 
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
-
-    Label base_not_smi;
-    Label exponent_not_smi;
-    Label convert_exponent;
-
-    const Register base = r0;
-    const Register exponent = r1;
-    const Register heapnumbermap = r5;
-    const Register heapnumber = r6;
-    const DoubleRegister double_base = d0;
-    const DoubleRegister double_exponent = d1;
-    const DoubleRegister double_result = d2;
-    const SwVfpRegister single_scratch = s0;
-    const Register scratch = r9;
-    const Register scratch2 = r7;
-
-    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+  Label call_runtime, done, int_exponent;
+  if (exponent_type_ == ON_STACK) {
+    Label base_is_smi, unpack_exponent;
+    // The exponent and base are supplied as arguments on the stack.
+    // This can only happen if the stub is called from non-optimized code.
+    // Load input parameters from stack to double registers.
     __ ldr(base, MemOperand(sp, 1 * kPointerSize));
     __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
 
-    // Convert base to double value and store it in d0.
-    __ JumpIfNotSmi(base, &base_not_smi);
-    // Base is a Smi. Untag and convert it.
-    __ SmiUntag(base);
-    __ vmov(single_scratch, base);
-    __ vcvt_f64_s32(double_base, single_scratch);
-    __ b(&convert_exponent);
+    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
 
-    __ bind(&base_not_smi);
+    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
     __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
     __ cmp(scratch, heapnumbermap);
     __ b(ne, &call_runtime);
-    // Base is a heapnumber. Load it into double register.
+
     __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+    __ jmp(&unpack_exponent);
 
-    __ bind(&convert_exponent);
-    __ JumpIfNotSmi(exponent, &exponent_not_smi);
-    __ SmiUntag(exponent);
+    __ bind(&base_is_smi);
+    __ vmov(single_scratch, scratch);
+    __ vcvt_f64_s32(double_base, single_scratch);
+    __ bind(&unpack_exponent);
 
-    // The base is in a double register and the exponent is
-    // an untagged smi. Allocate a heap number and call a
-    // C function for integer exponents. The register containing
-    // the heap number is callee-saved.
-    __ AllocateHeapNumber(heapnumber,
-                          scratch,
-                          scratch2,
-                          heapnumbermap,
-                          &call_runtime);
-    __ push(lr);
-    __ PrepareCallCFunction(1, 1, scratch);
-    __ SetCallCDoubleArguments(double_base, exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_int_function(masm->isolate()),
-        1, 1);
-    __ pop(lr);
-    __ GetCFunctionDoubleResult(double_result);
-    __ vstr(double_result,
-            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
-    __ mov(r0, heapnumber);
-    __ Ret(2 * kPointerSize);
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
 
-    __ bind(&exponent_not_smi);
     __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
     __ cmp(scratch, heapnumbermap);
     __ b(ne, &call_runtime);
-    // Exponent is a heapnumber. Load it into double register.
     __ vldr(double_exponent,
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
+  } else if (exponent_type_ == TAGGED) {
+    // Base is already in double_base.
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
 
-    // The base and the exponent are in double registers.
-    // Allocate a heap number and call a C function for
-    // double exponents. The register containing
-    // the heap number is callee-saved.
-    __ AllocateHeapNumber(heapnumber,
-                          scratch,
-                          scratch2,
-                          heapnumbermap,
-                          &call_runtime);
-    __ push(lr);
-    __ PrepareCallCFunction(0, 2, scratch);
-    __ SetCallCDoubleArguments(double_base, double_exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(masm->isolate()),
-        0, 2);
-    __ pop(lr);
-    __ GetCFunctionDoubleResult(double_result);
-    __ vstr(double_result,
-            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
-    __ mov(r0, heapnumber);
-    __ Ret(2 * kPointerSize);
+    __ vldr(double_exponent,
+            FieldMemOperand(exponent, HeapNumber::kValueOffset));
   }
 
-  __ bind(&call_runtime);
-  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+  if (exponent_type_ != INTEGER) {
+    Label int_exponent_convert;
+    // Detect integer exponents stored as double.
+    __ vcvt_u32_f64(single_scratch, double_exponent);
+    // We do not check for NaN or Infinity here because comparing numbers on
+    // ARM correctly distinguishes NaNs.  We end up calling the built-in.
+    __ vcvt_f64_u32(double_scratch, single_scratch);
+    __ VFPCompareAndSetFlags(double_scratch, double_exponent);
+    __ b(eq, &int_exponent_convert);
+
+    if (exponent_type_ == ON_STACK) {
+      // Detect square root case.  Crankshaft detects constant +/-0.5 at
+      // compile time and uses DoMathPowHalf instead.  We then skip this check
+      // for non-constant cases of +/-0.5 as these hardly occur.
+      Label not_plus_half;
+
+      // Test for 0.5.
+      __ vmov(double_scratch, 0.5);
+      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
+      __ b(ne, &not_plus_half);
+
+      // Calculate the square root of the base.  Check for the special case
+      // of Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+      __ vmov(double_scratch, -V8_INFINITY);
+      __ VFPCompareAndSetFlags(double_base, double_scratch);
+      __ vneg(double_result, double_scratch, eq);
+      __ b(eq, &done);
+
+      // Add +0 to convert -0 to +0.
+      __ vadd(double_scratch, double_base, kDoubleRegZero);
+      __ vsqrt(double_result, double_scratch);
+      __ jmp(&done);
+
+      __ bind(&not_plus_half);
+      __ vmov(double_scratch, -0.5);
+      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
+      __ b(ne, &call_runtime);
+
+      // Calculate the square root of the base.  Check for the special case
+      // of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+      __ vmov(double_scratch, -V8_INFINITY);
+      __ VFPCompareAndSetFlags(double_base, double_scratch);
+      __ vmov(double_result, kDoubleRegZero, eq);
+      __ b(eq, &done);
+
+      // Add +0 to convert -0 to +0.
+      __ vadd(double_scratch, double_base, kDoubleRegZero);
+      __ vmov(double_result, 1);
+      __ vsqrt(double_scratch, double_scratch);
+      __ vdiv(double_result, double_result, double_scratch);
+      __ jmp(&done);
+    }
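+    // JS-level results the fast paths above must preserve (a sketch, per
+    // ES5 15.8.2.13):
+    //   Math.pow(-Infinity, 0.5)   ==> +Infinity  (unlike sqrt(-Infinity))
+    //   Math.pow(-Infinity, -0.5)  ==> +0
+    //   Math.pow(-0, 0.5)          ==> +0  (adding +0 first maps -0 to +0)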
+
+    __ push(lr);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ SetCallCDoubleArguments(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0, 2);
+    }
+    __ pop(lr);
+    __ GetCFunctionDoubleResult(double_result);
+    __ jmp(&done);
+
+    __ bind(&int_exponent_convert);
+    __ vcvt_u32_f64(single_scratch, double_exponent);
+    __ vmov(scratch, single_scratch);
+  }
+
+  // Calculate power with integer exponent.
+  __ bind(&int_exponent);
+
+  // Get two copies of exponent in the registers scratch and exponent.
+  if (exponent_type_ == INTEGER) {
+    __ mov(scratch, exponent);
+  } else {
+    // The exponent has already been stored into scratch as an untagged
+    // integer.
+    __ mov(exponent, scratch);
+  }
+  __ vmov(double_scratch, double_base);  // Back up base.
+  __ vmov(double_result, 1.0);
+
+  // Get absolute value of exponent.
+  __ cmp(scratch, Operand(0));
+  __ mov(scratch2, Operand(0), LeaveCC, mi);
+  __ sub(scratch, scratch2, scratch, LeaveCC, mi);
+
+  Label while_true;
+  __ bind(&while_true);
+  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
+  __ vmul(double_result, double_result, double_scratch, cs);
+  __ vmul(double_scratch, double_scratch, double_scratch, ne);
+  __ b(ne, &while_true);
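+  // The loop above is binary exponentiation (square-and-multiply); as a C
+  // sketch, with abs_exp the absolute value of the exponent:
+  //   double result = 1.0, b = base;
+  //   for (uint32_t e = abs_exp; e != 0; e >>= 1) {
+  //     if (e & 1) result *= b;  // carry-set path: vmul(result, result, b)
+  //     b *= b;                  // taken while bits remain: vmul(b, b, b)
+  //   }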
+
+  __ cmp(exponent, Operand(0));
+  __ b(ge, &done);
+  __ vmov(double_scratch, 1.0);
+  __ vdiv(double_result, double_scratch, double_result);
+  // Test whether result is zero.  Bail out to check for subnormal result.
+  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
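+  // Example: base 2, exponent -1074.  The loop computes 2^1074, which
+  // overflows to +Infinity, so the division yields +0, whereas the correct
+  // answer, 2^-1074, is the smallest subnormal double; hence the bailout.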
+  __ VFPCompareAndSetFlags(double_result, 0.0);
+  __ b(ne, &done);
+  // double_exponent may not contain the exponent value if the input was a
+  // smi.  We set it with the exponent value before bailing out.
+  __ vmov(single_scratch, exponent);
+  __ vcvt_f64_s32(double_exponent, single_scratch);
+
+  // Returning or bailing out.
+  Counters* counters = masm->isolate()->counters();
+  if (exponent_type_ == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+    // The stub is called from non-optimized code, which expects the result
+    // as a heap number in r0.
+    __ bind(&done);
+    __ AllocateHeapNumber(
+        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
+    __ vstr(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    ASSERT(heapnumber.is(r0));
+    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+    __ Ret(2);
+  } else {
+    __ push(lr);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ SetCallCDoubleArguments(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0, 2);
+    }
+    __ pop(lr);
+    __ GetCFunctionDoubleResult(double_result);
+
+    __ bind(&done);
+    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+    __ Ret();
+  }
 }
 
 
@@ -3319,14 +3648,34 @@
 }
 
 
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  __ Throw(r0);
+bool CEntryStub::IsPregenerated() {
+  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+          result_size_ == 1;
 }
 
 
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
-                                          UncatchableExceptionType type) {
-  __ ThrowUncatchable(type, r0);
+void CodeStub::GenerateStubsAheadOfTime() {
+  CEntryStub::GenerateAheadOfTime();
+  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  Handle<Code> code = save_doubles.GetCode();
+  code->set_is_pregenerated(true);
+  StoreBufferOverflowStub stub(kSaveFPRegs);
+  stub.GetCode()->set_is_pregenerated(true);
+  code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+  CEntryStub stub(1, kDontSaveFPRegs);
+  Handle<Code> code = stub.GetCode();
+  code->set_is_pregenerated(true);
 }
 
 
@@ -3430,8 +3779,7 @@
   __ b(eq, throw_out_of_memory_exception);
 
   // Retrieve the pending exception and clear the variable.
-  __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
-  __ ldr(r3, MemOperand(ip));
+  __ mov(r3, Operand(isolate->factory()->the_hole_value()));
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ ldr(r0, MemOperand(ip));
@@ -3469,9 +3817,10 @@
   __ sub(r6, r6, Operand(kPointerSize));
 
   // Enter the exit frame that transitions from JavaScript to C++.
+  FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(save_doubles_);
 
-  // Setup argc and the builtin function in callee-saved registers.
+  // Set up argc and the builtin function in callee-saved registers.
   __ mov(r4, Operand(r0));
   __ mov(r5, Operand(r1));
 
@@ -3510,13 +3859,27 @@
                true);
 
   __ bind(&throw_out_of_memory_exception);
-  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+  // Set external caught exception to false.
+  Isolate* isolate = masm->isolate();
+  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+                                    isolate);
+  __ mov(r0, Operand(false, RelocInfo::NONE));
+  __ mov(r2, Operand(external_caught));
+  __ str(r0, MemOperand(r2));
+
+  // Set pending exception and r0 to out of memory exception.
+  Failure* out_of_memory = Failure::OutOfMemoryException();
+  __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                       isolate)));
+  __ str(r0, MemOperand(r2));
+  // Fall through to the next label.
 
   __ bind(&throw_termination_exception);
-  GenerateThrowUncatchable(masm, TERMINATION);
+  __ ThrowUncatchable(r0);
 
   __ bind(&throw_normal_exception);
-  GenerateThrowTOS(masm);
+  __ Throw(r0);
 }
 
 
@@ -3527,7 +3890,7 @@
   // r3: argc
   // [sp+0]: argv
 
-  Label invoke, exit;
+  Label invoke, handler_entry, exit;
 
   // Called from C, so do not pop argc and args on exit (preserve sp)
   // No need to save register-passed args
@@ -3548,7 +3911,7 @@
   // r2: receiver
   // r3: argc
 
-  // Setup argv in r4.
+  // Set up argv in r4.
   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
   if (CpuFeatures::IsSupported(VFP3)) {
     offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
@@ -3571,7 +3934,7 @@
   __ ldr(r5, MemOperand(r5));
   __ Push(r8, r7, r6, r5);
 
-  // Setup frame pointer for the frame to be pushed.
+  // Set up frame pointer for the frame to be pushed.
   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
 
   // If this is the outermost JS call, set js_entry_sp value.
@@ -3590,31 +3953,33 @@
   __ bind(&cont);
   __ push(ip);
 
-  // Call a faked try-block that does the invoke.
-  __ bl(&invoke);
-
-  // Caught exception: Store result (exception) in the pending
-  // exception field in the JSEnv and return a failure sentinel.
-  // Coming in here the fp will be invalid because the PushTryHandler below
-  // sets it to 0 to signal the existence of the JSEntry frame.
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ jmp(&invoke);
+  __ bind(&handler_entry);
+  handler_offset_ = handler_entry.pos();
+  // Caught exception: Store result (exception) in the pending exception
+  // field in the JSEnv and return a failure sentinel.  Coming in here the
+  // fp will be invalid because the PushTryHandler below sets it to 0 to
+  // signal the existence of the JSEntry frame.
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ str(r0, MemOperand(ip));
   __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
   __ b(&exit);
 
-  // Invoke: Link this frame into the handler chain.
+  // Invoke: Link this frame into the handler chain.  There's only one
+  // handler block in this code object, so its index is 0.
   __ bind(&invoke);
   // Must preserve r0-r4, r5-r7 are available.
-  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
   // If an exception not caught by another handler occurs, this handler
   // returns control to the code after the bl(&invoke) above, which
   // restores all kCalleeSaved registers (including cp and fp) to their
   // saved values before returning a failure to C.
 
   // Clear any pending exceptions.
-  __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
-  __ ldr(r5, MemOperand(ip));
+  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ str(r5, MemOperand(ip));
@@ -3708,7 +4073,7 @@
   const Register inline_site = r9;
   const Register scratch = r2;
 
-  const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
+  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
 
   Label slow, loop, is_instance, is_not_instance, not_js_object;
 
@@ -3725,11 +4090,9 @@
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     Label miss;
-    __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
-    __ cmp(function, ip);
+    __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ b(ne, &miss);
-    __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
-    __ cmp(map, ip);
+    __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
     __ b(ne, &miss);
     __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
     __ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -3738,7 +4101,7 @@
   }
 
   // Get the prototype of the function.
-  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(prototype, &slow);
@@ -3759,7 +4122,8 @@
     __ sub(inline_site, lr, scratch);
     // Get the map location in scratch and patch it.
     __ GetRelocatedValueLocation(inline_site, scratch);
-    __ str(map, MemOperand(scratch));
+    __ ldr(scratch, MemOperand(scratch));
+    __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
   }
 
   // Register mapping: r3 is object map and r4 is function prototype.
@@ -3851,10 +4215,11 @@
     }
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
-    __ EnterInternalFrame();
-    __ Push(r0, r1);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r0, r1);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
     __ cmp(r0, Operand::Zero());
     __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
     __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@@ -3872,7 +4237,7 @@
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The displacement is the offset of the last parameter (if any)
   // relative to the frame pointer.
-  static const int kDisplacement =
+  const int kDisplacement =
       StandardFrameConstants::kCallerSPOffset - kPointerSize;
 
   // Check that the key is a smi.
@@ -4027,7 +4392,7 @@
     __ str(r3, FieldMemOperand(r0, i));
   }
 
-  // Setup the callee in-object property.
+  // Set up the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
   const int kCalleeOffset = JSObject::kHeaderSize +
@@ -4040,7 +4405,7 @@
       Heap::kArgumentsLengthIndex * kPointerSize;
   __ str(r2, FieldMemOperand(r0, kLengthOffset));
 
-  // Setup the elements pointer in the allocated arguments object.
+  // Set up the elements pointer in the allocated arguments object.
   // If we allocated a parameter map, r4 will point there, otherwise
   // it will point to the backing store.
   __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
@@ -4135,7 +4500,7 @@
   __ Ret();
 
   // Do the runtime call to allocate the arguments object.
-  // r2 = argument count (taggged)
+  // r2 = argument count (tagged)
   __ bind(&runtime);
   __ str(r2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
   __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -4208,7 +4573,7 @@
   // Get the parameters pointer from the stack.
   __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
 
-  // Setup the elements pointer in the allocated arguments object and
+  // Set up the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
   __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
   __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
@@ -4220,7 +4585,7 @@
 
   // Copy the fixed array slots.
   Label loop;
-  // Setup r4 to point to the first array slot.
+  // Set up r4 to point to the first array slot.
   __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ bind(&loop);
   // Pre-decrement r2 with kPointerSize on each iteration.
@@ -4250,10 +4615,6 @@
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
-  if (!FLAG_regexp_entry_native) {
-    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-    return;
-  }
 
   // Stack frame on entry.
   //  sp[0]: last_match_info (expected JSArray)
@@ -4261,10 +4622,10 @@
   //  sp[8]: subject string
   //  sp[12]: JSRegExp object
 
-  static const int kLastMatchInfoOffset = 0 * kPointerSize;
-  static const int kPreviousIndexOffset = 1 * kPointerSize;
-  static const int kSubjectOffset = 2 * kPointerSize;
-  static const int kJSRegExpOffset = 3 * kPointerSize;
+  const int kLastMatchInfoOffset = 0 * kPointerSize;
+  const int kPreviousIndexOffset = 1 * kPointerSize;
+  const int kSubjectOffset = 2 * kPointerSize;
+  const int kJSRegExpOffset = 3 * kPointerSize;
 
   Label runtime, invoke_regexp;
 
@@ -4285,7 +4646,7 @@
       ExternalReference::address_of_regexp_stack_memory_size(isolate);
   __ mov(r0, Operand(address_of_regexp_stack_memory_size));
   __ ldr(r0, MemOperand(r0, 0));
-  __ tst(r0, Operand(r0));
+  __ cmp(r0, Operand(0));
   __ b(eq, &runtime);
 
   // Check that the first argument is a JSRegExp object.
@@ -4356,8 +4717,7 @@
   __ ldr(last_match_info_elements,
          FieldMemOperand(r0, JSArray::kElementsOffset));
   __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(r0, ip);
+  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
   __ b(ne, &runtime);
   // Check that the last match info has space for the capture registers and the
   // additional information.
@@ -4375,25 +4735,39 @@
   Label seq_string;
   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
-  // First check for flat string.
-  __ and_(r1, r0, Operand(kIsNotStringMask | kStringRepresentationMask), SetCC);
+  // First check for flat string.  None of the following string type tests will
+  // succeed if subject is not a string or a short external string.
+  __ and_(r1,
+          r0,
+          Operand(kIsNotStringMask |
+                  kStringRepresentationMask |
+                  kShortExternalStringMask),
+          SetCC);
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ b(eq, &seq_string);
 
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
+  // r1: whether subject is a string and, if yes, its string representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string its offset has to be taken into account.
-  Label cons_string, check_encoding;
+  Label cons_string, external_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmp(r1, Operand(kExternalStringTag));
   __ b(lt, &cons_string);
-  __ b(eq, &runtime);
+  __ b(eq, &external_string);
+
+  // Catch non-string subject or short external string.
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+  __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
+  __ b(ne, &runtime);
 
   // String is sliced.
   __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
@@ -4404,8 +4778,7 @@
   // String is a cons string, check whether it is flat.
   __ bind(&cons_string);
   __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
-  __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
-  __ cmp(r0, r1);
+  __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
   __ b(ne, &runtime);
   __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
   // Is first part of cons or parent of slice a flat string?
@@ -4414,7 +4787,8 @@
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(r0, Operand(kStringRepresentationMask));
-  __ b(ne, &runtime);
+  __ b(ne, &external_string);
+
   __ bind(&seq_string);
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
@@ -4450,8 +4824,8 @@
   __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
 
   // Isolates: note we add an additional parameter here (isolate pointer).
-  static const int kRegExpExecuteArguments = 8;
-  static const int kParameterRegisters = 4;
+  const int kRegExpExecuteArguments = 8;
+  const int kParameterRegisters = 4;
   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
 
   // Stack pointer now points to cell where return address is to be written.
@@ -4480,8 +4854,7 @@
 
   // For arguments 4 and 3 get string length, calculate start of string data and
   // calculate the shift of the index (0 for ASCII and 1 for two byte).
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   __ eor(r3, r3, Operand(1));
   // Load the length from the original subject string from the previous stack
   // frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4532,8 +4905,7 @@
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
-  __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
-  __ ldr(r1, MemOperand(r1, 0));
+  __ mov(r1, Operand(isolate->factory()->the_hole_value()));
   __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ ldr(r0, MemOperand(r2, 0));
@@ -4548,10 +4920,10 @@
   Label termination_exception;
   __ b(eq, &termination_exception);
 
-  __ Throw(r0);  // Expects thrown value in r0.
+  __ Throw(r0);
 
   __ bind(&termination_exception);
-  __ ThrowUncatchable(TERMINATION, r0);  // Expects thrown value in r0.
+  __ ThrowUncatchable(r0);
 
   __ bind(&failure);
   // For failure and exception return null.
@@ -4575,16 +4947,25 @@
   __ str(r2, FieldMemOperand(last_match_info_elements,
                              RegExpImpl::kLastCaptureCountOffset));
   // Store last subject and last input.
-  __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastSubjectOffset));
-  __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
+  __ mov(r2, subject);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastSubjectOffset,
+                      r2,
+                      r7,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs);
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastInputOffset));
-  __ mov(r3, last_match_info_elements);
-  __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastInputOffset,
+                      subject,
+                      r7,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -4615,6 +4996,26 @@
   __ add(sp, sp, Operand(4 * kPointerSize));
   __ Ret();
 
+  // External string.  Short external strings have already been ruled out.
+  // r0: scratch
+  __ bind(&external_string);
+  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ tst(r0, Operand(kIsIndirectStringMask));
+    __ Assert(eq, "external string expected, but not found");
+  }
+  __ ldr(subject,
+         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ sub(subject,
+         subject,
+         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ jmp(&seq_string);
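+  // After this adjustment, subject + SeqString::kHeaderSize - kHeapObjectTag
+  // points at the external resource's character data, exactly as it would
+  // for a sequential string, so the code at &seq_string needs no special
+  // casing.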
+
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -4670,11 +5071,11 @@
 
   // Set input, index and length fields from arguments.
   __ ldr(r1, MemOperand(sp, kPointerSize * 0));
+  __ ldr(r2, MemOperand(sp, kPointerSize * 1));
+  __ ldr(r6, MemOperand(sp, kPointerSize * 2));
   __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
-  __ ldr(r1, MemOperand(sp, kPointerSize * 1));
-  __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
-  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
-  __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
+  __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
+  __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
 
   // Fill out the elements FixedArray.
   // r0: JSArray, tagged.
@@ -4696,9 +5097,9 @@
   // r3: Start of elements in FixedArray.
   // r5: Number of elements to fill.
   Label loop;
-  __ tst(r5, Operand(r5));
+  __ cmp(r5, Operand(0));
   __ bind(&loop);
-  __ b(le, &done);  // Jump if r1 is negative or zero.
+  __ b(le, &done);  // Jump if r5 is negative or zero.
   __ sub(r5, r5, Operand(1), SetCC);
   __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
   __ jmp(&loop);
@@ -4712,7 +5113,48 @@
 }
 
 
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+  // Cache the called function in a global property cell.  Cache states
+  // are uninitialized, monomorphic (indicated by a JSFunction), and
+  // megamorphic.
+  // r1 : the function to call
+  // r2 : cache cell for call target
+  Label done;
+
+  ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+            masm->isolate()->heap()->undefined_value());
+  ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
+            masm->isolate()->heap()->the_hole_value());
+
+  // Load the cache state into r3.
+  __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+
+  // A monomorphic cache hit or an already megamorphic state: invoke the
+  // function without changing the state.
+  __ cmp(r3, r1);
+  __ b(eq, &done);
+  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+  __ b(eq, &done);
+
+  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+  // megamorphic.
+  __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+  // MegamorphicSentinel is an immortal immovable object (undefined) so no
+  // write-barrier is needed.
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
+  __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
+
+  // An uninitialized cache is patched with the function.
+  __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
+  // No need for a write barrier here - cells are rescanned.
+
+  __ bind(&done);
+}
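+
+// A sketch of the cache-cell state machine implemented above (the sentinels
+// are the_hole for UNINITIALIZED and undefined for MEGAMORPHIC):
+//   the_hole  --first call, callee fn-->  fn         (monomorphic)
+//   fn        --call with same fn----->   fn         (unchanged)
+//   fn        --call with other fn'--->   undefined  (megamorphic, terminal)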
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
+  // r1 : the function to call
+  // r2 : cache cell for call target
   Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
@@ -4727,16 +5169,12 @@
     __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
     __ b(ne, &call);
     // Patch the receiver on the stack with the global receiver object.
-    __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
-    __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+    __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+    __ str(r2, MemOperand(sp, argc_ * kPointerSize));
     __ bind(&call);
   }
 
-  // Get the function to call from the stack.
-  // function, receiver [, arguments]
-  __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
   // Check that the function is really a JavaScript function.
   // r1: pushed function (to be verified)
   __ JumpIfSmi(r1, &non_function);
@@ -4774,7 +5212,7 @@
   __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
   __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
-  __ SetCallKind(r5, CALL_AS_FUNCTION);
+  __ SetCallKind(r5, CALL_AS_METHOD);
   {
     Handle<Code> adaptor =
       masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
@@ -4785,7 +5223,7 @@
   // of the original receiver from the call site).
   __ bind(&non_function);
   __ str(r1, MemOperand(sp, argc_ * kPointerSize));
-  __ mov(r0, Operand(argc_));  // Setup the number of arguments.
+  __ mov(r0, Operand(argc_));  // Set up the number of arguments.
   __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
   __ SetCallKind(r5, CALL_AS_METHOD);
@@ -4794,6 +5232,48 @@
 }
 
 
+void CallConstructStub::Generate(MacroAssembler* masm) {
+  // r0 : number of arguments
+  // r1 : the function to call
+  // r2 : cache cell for call target
+  Label slow, non_function_call;
+
+  // Check that the function is not a smi.
+  __ JumpIfSmi(r1, &non_function_call);
+  // Check that the function is a JSFunction.
+  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+  __ b(ne, &slow);
+
+  if (RecordCallTarget()) {
+    GenerateRecordCallTarget(masm);
+  }
+
+  // Jump to the function-specific construct stub.
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
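+  // Writing to pc performs the jump: r2 now holds the construct stub's Code
+  // object, and adding (Code::kHeaderSize - kHeapObjectTag) skips the tagged
+  // header so execution lands on the stub's first instruction.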
+  __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // r0: number of arguments
+  // r1: called object
+  // r3: object type
+  Label do_call;
+  __ bind(&slow);
+  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ b(ne, &non_function_call);
+  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
+  __ bind(&non_function_call);
+  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
+  // Set expected number of arguments to zero (not changing r0).
+  __ mov(r2, Operand(0, RelocInfo::NONE));
+  __ SetCallKind(r5, CALL_AS_METHOD);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
+
+
 // Unfortunately you have to run without snapshots to see most of these
 // names in the profile since most compare stubs end up in the snapshot.
 void CompareStub::PrintName(StringStream* stream) {
@@ -4855,100 +5335,41 @@
 
   // If the index is non-smi trigger the non-smi case.
   __ JumpIfNotSmi(index_, &index_not_smi_);
-
-  // Put smi-tagged index into scratch register.
-  __ mov(scratch_, index_);
   __ bind(&got_smi_index_);
 
   // Check for index out of range.
   __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
-  __ cmp(ip, Operand(scratch_));
+  __ cmp(ip, Operand(index_));
   __ b(ls, index_out_of_range_);
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(result_, Operand(kStringRepresentationMask));
-  __ b(eq, &flat_string);
+  __ mov(index_, Operand(index_, ASR, kSmiTagSize));
 
-  // Handle non-flat strings.
-  __ and_(result_, result_, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ cmp(result_, Operand(kExternalStringTag));
-  __ b(gt, &sliced_string);
-  __ b(eq, &call_runtime_);
+  StringCharLoadGenerator::Generate(masm,
+                                    object_,
+                                    index_,
+                                    result_,
+                                    &call_runtime_);
 
-  // ConsString.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  Label assure_seq_string;
-  __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
-  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
-  __ cmp(result_, Operand(ip));
-  __ b(ne, &call_runtime_);
-  // Get the first of the two strings and load its instance type.
-  __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
-  __ jmp(&assure_seq_string);
-
-  // SlicedString, unpack and add offset.
-  __ bind(&sliced_string);
-  __ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
-  __ add(scratch_, scratch_, result_);
-  __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
-
-  // Assure that we are dealing with a sequential string. Go to runtime if not.
-  __ bind(&assure_seq_string);
-  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
-  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
-  // Check that parent is not an external string. Go to runtime otherwise.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(result_, Operand(kStringRepresentationMask));
-  __ b(ne, &call_runtime_);
-
-  // Check for 1-byte or 2-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ tst(result_, Operand(kStringEncodingMask));
-  __ b(ne, &ascii_string);
-
-  // 2-byte string.
-  // Load the 2-byte character code into the result register. We can
-  // add without shifting since the smi tag size is the log2 of the
-  // number of bytes in a two-byte character.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
-  __ add(scratch_, object_, Operand(scratch_));
-  __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
-  __ jmp(&got_char_code);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
-  __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
-
-  __ bind(&got_char_code);
   __ mov(result_, Operand(result_, LSL, kSmiTagSize));
   __ bind(&exit_);
 }
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   // Index is not a smi.
   __ bind(&index_not_smi_);
   // If index is a heap number, try converting it to an integer.
   __ CheckMap(index_,
-              scratch_,
+              result_,
               Heap::kHeapNumberMapRootIndex,
               index_not_number_,
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
-  __ Push(object_, index_);
+  __ push(object_);
   __ push(index_);  // Consumed by runtime conversion function.
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4959,15 +5380,14 @@
   }
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
-  __ Move(scratch_, r0);
-  __ pop(index_);
+  __ Move(index_, r0);
   __ pop(object_);
   // Reload the instance type.
   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
-  __ JumpIfNotSmi(scratch_, index_out_of_range_);
+  __ JumpIfNotSmi(index_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ jmp(&got_smi_index_);
 
@@ -4976,6 +5396,7 @@
   // is too complex (e.g., when the string needs to be flattened).
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
+  __ mov(index_, Operand(index_, LSL, kSmiTagSize));
   __ Push(object_, index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
   __ Move(result_, r0);
@@ -5004,15 +5425,15 @@
   STATIC_ASSERT(kSmiTag == 0);
   __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(result_, Operand(ip));
+  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
   __ b(eq, &slow_case_);
   __ bind(&exit_);
 }
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -5037,7 +5458,8 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
@@ -5292,7 +5714,7 @@
   // scratch: -
 
   // Perform a number of probes in the symbol table.
-  static const int kProbes = 4;
+  const int kProbes = 4;
   Label found_in_symbol_table;
   Label next_probe[kProbes];
   Register candidate = scratch5;  // Scratch register contains candidate.
@@ -5321,11 +5743,11 @@
 
     __ cmp(undefined, candidate);
     __ b(eq, not_found);
-    // Must be null (deleted entry).
+    // Must be the hole (deleted entry).
     if (FLAG_debug_code) {
-      __ LoadRoot(ip, Heap::kNullValueRootIndex);
+      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
       __ cmp(ip, candidate);
-      __ Assert(eq, "oddball in symbol table is not undefined or null");
+      __ Assert(eq, "oddball in symbol table is not undefined or the hole");
     }
     __ jmp(&next_probe[i]);
 
@@ -5417,41 +5839,28 @@
   //  0 <= from <= to <= string.length.
   // If any of these assumptions fail, we call the runtime system.
 
-  static const int kToOffset = 0 * kPointerSize;
-  static const int kFromOffset = 1 * kPointerSize;
-  static const int kStringOffset = 2 * kPointerSize;
+  const int kToOffset = 0 * kPointerSize;
+  const int kFromOffset = 1 * kPointerSize;
+  const int kStringOffset = 2 * kPointerSize;
 
-  // Check bounds and smi-ness.
-  Register to = r6;
-  Register from = r7;
-
-  __ Ldrd(to, from, MemOperand(sp, kToOffset));
+  __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
   STATIC_ASSERT(kFromOffset == kToOffset + 4);
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
 
   // I.e., arithmetic shift right by one un-smi-tags.
-  __ mov(r2, Operand(to, ASR, 1), SetCC);
-  __ mov(r3, Operand(from, ASR, 1), SetCC, cc);
+  __ mov(r2, Operand(r2, ASR, 1), SetCC);
+  __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
   // If either to or from had the smi tag bit set, then carry is set now.
   __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
-  __ b(mi, &runtime);  // From is negative.
-
-  // Both to and from are smis.
-  __ sub(r2, r2, Operand(r3), SetCC);
+  // We want to bail out to the runtime here if "from" is negative.  In that
+  // case the next instruction is not executed and we fall through to bailing
+  // out to the runtime.  pl is the opposite of mi.
+  // Both r2 and r3 are untagged integers.
+  __ sub(r2, r2, Operand(r3), SetCC, pl);
   __ b(mi, &runtime);  // Fail if from > to.
-  // Special handling of sub-strings of length 1 and 2. One character strings
-  // are handled in the runtime system (looked up in the single character
-  // cache). Two character strings are looked for in the symbol cache in
-  // generated code.
-  __ cmp(r2, Operand(2));
-  __ b(lt, &runtime);
 
-  // r2: result string length
-  // r3: from index (untagged smi)
-  // r6 (a.k.a. to): to (smi)
-  // r7 (a.k.a. from): from offset (smi)
-  // Make sure first argument is a sequential (or flat) string.
+  // Make sure first argument is a string.
   __ ldr(r0, MemOperand(sp, kStringOffset));
   STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfSmi(r0, &runtime);
@@ -5466,67 +5875,15 @@
   __ cmp(r2, Operand(r4, ASR, 1));
   __ b(eq, &return_r0);
 
-  Label create_slice;
-  if (FLAG_string_slices) {
-    __ cmp(r2, Operand(SlicedString::kMinLength));
-    __ b(ge, &create_slice);
-  }
-
-  // r0: original string
-  // r1: instance type
-  // r2: result string length
-  // r3: from index (untagged smi)
-  // r6 (a.k.a. to): to (smi)
-  // r7 (a.k.a. from): from offset (smi)
-  Label seq_string;
-  __ and_(r4, r1, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  STATIC_ASSERT(kConsStringTag < kSlicedStringTag);
-  __ cmp(r4, Operand(kConsStringTag));
-  __ b(gt, &runtime);  // Slices and external strings go to runtime.
-  __ b(lt, &seq_string);  // Sequential strings are handled directly.
-
-  // Cons string. Try to recurse (once) on the first substring.
-  // (This adds a little more generality than necessary to handle flattened
-  // cons strings, but not much).
-  __ ldr(r0, FieldMemOperand(r0, ConsString::kFirstOffset));
-  __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
-  __ tst(r1, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ b(ne, &runtime);  // Cons, slices and external strings go to runtime.
-
-  // Definitly a sequential string.
-  __ bind(&seq_string);
-
-  // r0: original string
-  // r1: instance type
-  // r2: result string length
-  // r3: from index (untagged smi)
-  // r6 (a.k.a. to): to (smi)
-  // r7 (a.k.a. from): from offset (smi)
-  __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
-  __ cmp(r4, Operand(to));
-  __ b(lt, &runtime);  // Fail if to > length.
-  to = no_reg;
-
-  // r0: original string or left hand side of the original cons string.
-  // r1: instance type
-  // r2: result string length
-  // r3: from index (untagged smi)
-  // r7 (a.k.a. from): from offset (smi)
-  // Check for flat ASCII string.
-  Label non_ascii_flat;
-  __ tst(r1, Operand(kStringEncodingMask));
-  STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ b(eq, &non_ascii_flat);
-
   Label result_longer_than_two;
+  // Check for the special case of a two-character ASCII string, in which
+  // case we do a lookup in the symbol table first.
   __ cmp(r2, Operand(2));
   __ b(gt, &result_longer_than_two);
+  __ b(lt, &runtime);
 
-  // Sub string of length 2 requested.
+  __ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime);
+
   // Get the two characters forming the sub string.
   __ add(r0, r0, Operand(r3));
   __ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
@@ -5536,7 +5893,6 @@
   Label make_two_character_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
       masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
-  Counters* counters = masm->isolate()->counters();
   __ jmp(&return_r0);
 
   // r2: result string length.
@@ -5547,18 +5903,114 @@
   __ jmp(&return_r0);
 
   __ bind(&result_longer_than_two);
+  // Deal with different string types: update the index if necessary
+  // and put the underlying string into r5.
+  // r0: original string
+  // r1: instance type
+  // r2: length
+  // r3: from index (untagged)
+  Label underlying_unpacked, sliced_string, seq_or_external_string;
+  // If the string is not indirect, it can only be sequential or external.
+  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+  STATIC_ASSERT(kIsIndirectStringMask != 0);
+  __ tst(r1, Operand(kIsIndirectStringMask));
+  __ b(eq, &seq_or_external_string);
 
-  // Locate 'from' character of string.
-  __ add(r5, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ add(r5, r5, Operand(from, ASR, 1));
+  __ tst(r1, Operand(kSlicedNotConsMask));
+  __ b(ne, &sliced_string);
+  // Cons string.  Check whether it is flat, then fetch first part.
+  __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
+  __ CompareRoot(r5, Heap::kEmptyStringRootIndex);
+  __ b(ne, &runtime);
+  __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
+  // Update instance type.
+  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
+  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+  __ jmp(&underlying_unpacked);
 
-  // Allocate the result.
-  __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
+  __ bind(&sliced_string);
+  // Sliced string.  Fetch parent and correct start index by offset.
+  __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+  __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
+  __ add(r3, r3, Operand(r4, ASR, 1));  // Add offset to index.
+  // Update instance type.
+  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
+  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+  __ jmp(&underlying_unpacked);
 
-  // r0: result string
-  // r2: result string length
-  // r5: first character of substring to copy
-  // r7 (a.k.a. from): from offset (smi)
+  __ bind(&seq_or_external_string);
+  // Sequential or external string.  Just move string to the expected register.
+  __ mov(r5, r0);
+
+  __ bind(&underlying_unpacked);
+
+  if (FLAG_string_slices) {
+    Label copy_routine;
+    // r5: underlying subject string
+    // r1: instance type of underlying subject string
+    // r2: length
+    // r3: adjusted start index (untagged)
+    __ cmp(r2, Operand(SlicedString::kMinLength));
+    // Short slice.  Copy instead of slicing.
+    __ b(lt, &copy_routine);
+    // Allocate new sliced string.  At this point we do not reload the instance
+    // type including the string encoding because we simply rely on the info
+    // provided by the original string.  It does not matter if the original
+    // string's encoding is wrong because we always have to recheck the
+    // encoding of the newly created string's parent anyway due to
+    // externalized strings.
+    Label two_byte_slice, set_slice_header;
+    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+    __ tst(r1, Operand(kStringEncodingMask));
+    __ b(eq, &two_byte_slice);
+    __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
+    __ jmp(&set_slice_header);
+    __ bind(&two_byte_slice);
+    __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
+    __ bind(&set_slice_header);
+    __ mov(r3, Operand(r3, LSL, 1));
+    __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+    __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
+    __ jmp(&return_r0);
+
+    __ bind(&copy_routine);
+  }
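+  // A sliced string created above shares its parent's characters rather
+  // than copying them; conceptually (a sketch, not the actual object
+  // layout):
+  //   struct SlicedString { String* parent; int offset; /* + length */ };
+  // Slicing only pays off for long enough results, hence the
+  // SlicedString::kMinLength cutoff above.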
+
+  // r5: underlying subject string
+  // r1: instance type of underlying subject string
+  // r2: length
+  // r3: adjusted start index (untagged)
+  Label two_byte_sequential, sequential_string, allocate_result;
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ tst(r1, Operand(kExternalStringTag));
+  __ b(eq, &sequential_string);
+
+  // Handle external string.
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ tst(r1, Operand(kShortExternalStringTag));
+  __ b(ne, &runtime);
+  __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
+  // r5 already points to the first character of underlying string.
+  __ jmp(&allocate_result);
+
+  __ bind(&sequential_string);
+  // Locate first character of underlying subject string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+  __ bind(&allocate_result);
+  // r5 now points at the first character of the underlying string; check
+  // the encoding and allocate the result.
+  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+  __ tst(r1, Operand(kStringEncodingMask));
+  __ b(eq, &two_byte_sequential);
+
+  // Allocate and copy the resulting ASCII string.
+  __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
+
+  // Locate first character of substring to copy.
+  __ add(r5, r5, r3);
   // Locate first character of result.
   __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
 
@@ -5571,30 +6023,16 @@
                                            COPY_ASCII | DEST_ALWAYS_ALIGNED);
   __ jmp(&return_r0);
 
-  __ bind(&non_ascii_flat);
-  // r0: original string
-  // r2: result string length
-  // r7 (a.k.a. from): from offset (smi)
-  // Check for flat two byte string.
+  // Allocate and copy the resulting two-byte string.
+  __ bind(&two_byte_sequential);
+  __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
 
-  // Locate 'from' character of string.
-  __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // As "from" is a smi it is 2 times the value which matches the size of a two
-  // byte character.
+  // Locate first character of substring to copy.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ add(r5, r5, Operand(from));
-
-  // Allocate the result.
-  __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
-
-  // r0: result string
-  // r2: result string length
-  // r5: first character of substring to copy
+  __ add(r5, r5, Operand(r3, LSL, 1));
   // Locate first character of result.
   __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
 
-  from = no_reg;
-
   // r0: result string.
   // r1: first character of result.
   // r2: result length.
@@ -5602,72 +6040,9 @@
   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   StringHelper::GenerateCopyCharactersLong(
       masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
-  __ jmp(&return_r0);
-
-  if (FLAG_string_slices) {
-    __ bind(&create_slice);
-    // r0: original string
-    // r1: instance type
-    // r2: length
-    // r3: from index (untagged smi)
-    // r6 (a.k.a. to): to (smi)
-    // r7 (a.k.a. from): from offset (smi)
-    Label allocate_slice, sliced_string, seq_string;
-    STATIC_ASSERT(kSeqStringTag == 0);
-    __ tst(r1, Operand(kStringRepresentationMask));
-    __ b(eq, &seq_string);
-    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-    STATIC_ASSERT(kIsIndirectStringMask != 0);
-    __ tst(r1, Operand(kIsIndirectStringMask));
-    // External string.  Jump to runtime.
-    __ b(eq, &runtime);
-
-    __ tst(r1, Operand(kSlicedNotConsMask));
-    __ b(ne, &sliced_string);
-    // Cons string.  Check whether it is flat, then fetch first part.
-    __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
-    __ LoadRoot(r9, Heap::kEmptyStringRootIndex);
-    __ cmp(r5, r9);
-    __ b(ne, &runtime);
-    __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
-    __ jmp(&allocate_slice);
-
-    __ bind(&sliced_string);
-    // Sliced string.  Fetch parent and correct start index by offset.
-    __ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
-    __ add(r7, r7, r5);
-    __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
-    __ jmp(&allocate_slice);
-
-    __ bind(&seq_string);
-    // Sequential string.  Just move string to the right register.
-    __ mov(r5, r0);
-
-    __ bind(&allocate_slice);
-    // r1: instance type of original string
-    // r2: length
-    // r5: underlying subject string
-    // r7 (a.k.a. from): from offset (smi)
-    // Allocate new sliced string.  At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string.  It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyways due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ tst(r1, Operand(kStringEncodingMask));
-    __ b(eq, &two_byte_slice);
-    __ AllocateAsciiSlicedString(r0, r2, r3, r4, &runtime);
-    __ jmp(&set_slice_header);
-    __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(r0, r2, r3, r4, &runtime);
-    __ bind(&set_slice_header);
-    __ str(r7, FieldMemOperand(r0, SlicedString::kOffsetOffset));
-    __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
-  }
 
   __ bind(&return_r0);
+  Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
   __ add(sp, sp, Operand(3 * kPointerSize));
   __ Ret();
@@ -5700,7 +6075,7 @@
   Label compare_chars;
   __ bind(&check_zero_length);
   STATIC_ASSERT(kSmiTag == 0);
-  __ tst(length, Operand(length));
+  __ cmp(length, Operand(0));
   __ b(ne, &compare_chars);
   __ mov(r0, Operand(Smi::FromInt(EQUAL)));
   __ Ret();
@@ -5733,7 +6108,7 @@
   __ mov(scratch1, scratch2, LeaveCC, gt);
   Register min_length = scratch1;
   STATIC_ASSERT(kSmiTag == 0);
-  __ tst(min_length, Operand(min_length));
+  __ cmp(min_length, Operand(0));
   __ b(eq, &compare_lengths);
 
   // Compare loop.
@@ -5824,7 +6199,7 @@
 
 
 void StringAddStub::Generate(MacroAssembler* masm) {
-  Label string_add_runtime, call_builtin;
+  Label call_runtime, call_builtin;
   Builtins::JavaScript builtin_id = Builtins::ADD;
 
   Counters* counters = masm->isolate()->counters();
@@ -5839,7 +6214,7 @@
 
   // Make sure that both arguments are strings if not known in advance.
   if (flags_ == NO_STRING_ADD_FLAGS) {
-    __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
+    __ JumpIfEitherSmi(r0, r1, &call_runtime);
     // Load instance types.
     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -5849,7 +6224,7 @@
     // If either is not a string, go to runtime.
     __ tst(r4, Operand(kIsNotStringMask));
     __ tst(r5, Operand(kIsNotStringMask), eq);
-    __ b(ne, &string_add_runtime);
+    __ b(ne, &call_runtime);
   } else {
     // Here at least one of the arguments is definitely a string.
     // We convert the one that is not known to be a string.
@@ -5918,7 +6293,7 @@
     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   }
   __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
-                                                  &string_add_runtime);
+                                                  &call_runtime);
 
   // Get the two characters forming the sub string.
   __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
@@ -5940,7 +6315,7 @@
   // halfword store instruction (which assumes that processor is
   // in a little endian mode)
   __ mov(r6, Operand(2));
-  __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
+  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
   __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
   __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
@@ -5948,14 +6323,14 @@
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ cmp(r6, Operand(String::kMinNonFlatLength));
+  __ cmp(r6, Operand(ConsString::kMinLength));
   __ b(lt, &string_add_flat_result);
   // Handle exceptionally long strings in the runtime system.
   STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
   ASSERT(IsPowerOf2(String::kMaxLength + 1));
   // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
   __ cmp(r6, Operand(String::kMaxLength + 1));
-  __ b(hs, &string_add_runtime);
+  __ b(hs, &call_runtime);
 
   // If result is not supposed to be flat, allocate a cons string object.
   // If both strings are ASCII the result is an ASCII cons string.
@@ -5973,7 +6348,7 @@
 
   // Allocate an ASCII cons string.
   __ bind(&ascii_data);
-  __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
+  __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
   __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
@@ -5998,11 +6373,13 @@
   __ b(eq, &ascii_data);
 
   // Allocate a two byte cons string.
-  __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
+  __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
   __ jmp(&allocated);
 
-  // Handle creating a flat result. First check that both strings are
-  // sequential and that they have the same encoding.
+  // We cannot encounter sliced strings or cons strings here since:
+  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
+  // Handle creating a flat result from either external or sequential strings.
+  // Locate the first characters' locations.
   // r0: first string
   // r1: second string
   // r2: length of first string
@@ -6010,6 +6387,7 @@
   // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   // r6: sum of lengths.
+  Label first_prepared, second_prepared;
   __ bind(&string_add_flat_result);
   if (flags_ != NO_STRING_ADD_FLAGS) {
     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -6017,97 +6395,88 @@
     __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   }
-  // Check that both strings are sequential.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(r4, Operand(kStringRepresentationMask));
-  __ tst(r5, Operand(kStringRepresentationMask), eq);
-  __ b(ne, &string_add_runtime);
-  // Now check if both strings have the same encoding (ASCII/Two-byte).
-  // r0: first string.
-  // r1: second string.
-  // r2: length of first string.
-  // r3: length of second string.
-  // r6: sum of lengths..
-  Label non_ascii_string_add_flat_result;
-  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
+
+  // Check whether both strings have the same encoding.
   __ eor(r7, r4, Operand(r5));
   __ tst(r7, Operand(kStringEncodingMask));
-  __ b(ne, &string_add_runtime);
-  // And see if it's ASCII or two-byte.
-  __ tst(r4, Operand(kStringEncodingMask));
+  __ b(ne, &call_runtime);
+
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ tst(r4, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  __ add(r7,
+         r0,
+         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+         LeaveCC,
+         eq);
+  __ b(eq, &first_prepared);
+  // External string: rule out short external string and load string resource.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ tst(r4, Operand(kShortExternalStringMask));
+  __ b(ne, &call_runtime);
+  __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
+  __ bind(&first_prepared);
+
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ tst(r5, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  __ add(r1,
+         r1,
+         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+         LeaveCC,
+         eq);
+  __ b(eq, &second_prepared);
+  // External string: rule out short external string and load string resource.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ tst(r5, Operand(kShortExternalStringMask));
+  __ b(ne, &call_runtime);
+  __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
+  __ bind(&second_prepared);
+
+  Label non_ascii_string_add_flat_result;
+  // r7: first character of first string
+  // r1: first character of second string
+  // r2: length of first string.
+  // r3: length of second string.
+  // r6: sum of lengths.
+  // Both strings have the same encoding.
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ tst(r5, Operand(kStringEncodingMask));
   __ b(eq, &non_ascii_string_add_flat_result);
 
-  // Both strings are sequential ASCII strings. We also know that they are
-  // short (since the sum of the lengths is less than kMinNonFlatLength).
-  // r6: length of resulting flat string
-  __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
-  // Locate first character of result.
-  __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // Locate first character of first argument.
-  __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // r0: first character of first string.
-  // r1: second string.
+  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
+  __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // r0: result string.
+  // r7: first character of first string.
+  // r1: first character of second string.
   // r2: length of first string.
   // r3: length of second string.
   // r6: first character of result.
-  // r7: result string.
-  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
-
-  // Load second argument and locate first character.
-  __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // r1: first character of second string.
-  // r3: length of second string.
+  StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
   // r6: next character of result.
-  // r7: result string.
   StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
-  __ mov(r0, Operand(r7));
   __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
   __ Ret();
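// At the C level, the flat ASCII path above reduces to two byte copies into
// the freshly allocated sequential string (sketch; raw pointers stand in for
// the register state at this point, with room already allocated for
// first_len + second_len bytes).
#include <cstring>

void ConcatFlatAscii(char* result, const char* first, int first_len,
                     const char* second, int second_len) {
  std::memcpy(result, first, first_len);                // first copy loop
  std::memcpy(result + first_len, second, second_len);  // second copy loop
}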
 
   __ bind(&non_ascii_string_add_flat_result);
-  // Both strings are sequential two byte strings.
-  // r0: first string.
-  // r1: second string.
-  // r2: length of first string.
-  // r3: length of second string.
-  // r6: sum of length of strings.
-  __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
-  // r0: first string.
-  // r1: second string.
-  // r2: length of first string.
-  // r3: length of second string.
-  // r7: result string.
-
-  // Locate first character of result.
-  __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // Locate first character of first argument.
-  __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  // r0: first character of first string.
-  // r1: second string.
+  __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
+  __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // r0: result string.
+  // r7: first character of first string.
+  // r1: first character of second string.
   // r2: length of first string.
   // r3: length of second string.
   // r6: first character of result.
-  // r7: result string.
-  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
-
-  // Locate first character of second argument.
-  __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  // r1: first character of second string.
-  // r3: length of second string.
-  // r6: next character of result (after copy of first string).
-  // r7: result string.
+  StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
+  // r6: next character of result.
   StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
-
-  __ mov(r0, Operand(r7));
   __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
   __ Ret();
 
   // Just jump to runtime to add the two strings.
-  __ bind(&string_add_runtime);
+  __ bind(&call_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 
   if (call_builtin.is_linked()) {
@@ -6191,15 +6560,15 @@
   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
 
   Label generic_stub;
-  Label unordered;
+  Label unordered, maybe_undefined1, maybe_undefined2;
   Label miss;
   __ and_(r2, r1, Operand(r0));
   __ JumpIfSmi(r2, &generic_stub);
 
   __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
-  __ b(ne, &miss);
+  __ b(ne, &maybe_undefined1);
   __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
-  __ b(ne, &miss);
+  __ b(ne, &maybe_undefined2);
 
   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or VFP3 is unsupported.
@@ -6223,14 +6592,28 @@
     __ mov(r0, Operand(LESS), LeaveCC, lt);
     __ mov(r0, Operand(GREATER), LeaveCC, gt);
     __ Ret();
-
-    __ bind(&unordered);
   }
 
+  __ bind(&unordered);
   CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
   __ bind(&generic_stub);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
+  __ bind(&maybe_undefined1);
+  if (Token::IsOrderedRelationalCompareOp(op_)) {
+    __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+    __ b(ne, &miss);
+    __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
+    __ b(ne, &maybe_undefined2);
+    __ jmp(&unordered);
+  }
+
+  __ bind(&maybe_undefined2);
+  if (Token::IsOrderedRelationalCompareOp(op_)) {
+    __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
+    __ b(eq, &unordered);
+  }
+
   __ bind(&miss);
   GenerateMiss(masm);
 }
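// The maybe_undefined paths exist because ToNumber(undefined) is NaN, and
// every ordered comparison involving NaN must come out false, which is what
// the unordered path computes. A plain C++ analogue (illustration only):
#include <limits>

bool OrderedLess(double a, double b) { return a < b; }
// OrderedLess(std::numeric_limits<double>::quiet_NaN(), 1.0) == false,
// matching 'undefined < 1' in JavaScript.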
@@ -6278,6 +6661,8 @@
   ASSERT(state_ == CompareIC::STRINGS);
   Label miss;
 
+  bool equality = Token::IsEqualityOp(op_);
+
   // Registers containing left and right operands respectively.
   Register left = r1;
   Register right = r0;
@@ -6311,28 +6696,39 @@
 
   // Check that both strings are symbols. If they are, we're done
   // because we already know they are not identical.
-  ASSERT(GetCondition() == eq);
-  STATIC_ASSERT(kSymbolTag != 0);
-  __ and_(tmp3, tmp1, Operand(tmp2));
-  __ tst(tmp3, Operand(kIsSymbolMask));
-  // Make sure r0 is non-zero. At this point input operands are
-  // guaranteed to be non-zero.
-  ASSERT(right.is(r0));
-  __ Ret(ne);
+  if (equality) {
+    ASSERT(GetCondition() == eq);
+    STATIC_ASSERT(kSymbolTag != 0);
+    __ and_(tmp3, tmp1, Operand(tmp2));
+    __ tst(tmp3, Operand(kIsSymbolMask));
+    // Make sure r0 is non-zero. At this point input operands are
+    // guaranteed to be non-zero.
+    ASSERT(right.is(r0));
+    __ Ret(ne);
+  }
 
   // Check that both strings are sequential ASCII.
   Label runtime;
-  __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
-                                                  &runtime);
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
+      tmp1, tmp2, tmp3, tmp4, &runtime);
 
   // Compare flat ASCII strings. Returns when done.
-  StringCompareStub::GenerateFlatAsciiStringEquals(
-      masm, left, right, tmp1, tmp2, tmp3);
+  if (equality) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(
+        masm, left, right, tmp1, tmp2, tmp3);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(
+        masm, left, right, tmp1, tmp2, tmp3, tmp4);
+  }
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
   __ Push(left, right);
-  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+  if (equality) {
+    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+  } else {
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+  }
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -6359,25 +6755,47 @@
 }
 
 
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  __ Push(r1, r0);
-  __ push(lr);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+  Label miss;
+  __ and_(r2, r1, Operand(r0));
+  __ JumpIfSmi(r2, &miss);
+  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r2, Operand(known_map_));
+  __ b(ne, &miss);
+  __ cmp(r3, Operand(known_map_));
+  __ b(ne, &miss);
 
-  // Call the runtime system in a fresh internal frame.
-  ExternalReference miss =
-      ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-  __ EnterInternalFrame();
-  __ Push(r1, r0);
-  __ mov(ip, Operand(Smi::FromInt(op_)));
-  __ push(ip);
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
-  // Compute the entry point of the rewritten stub.
-  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  // Restore registers.
-  __ pop(lr);
-  __ pop(r0);
-  __ pop(r1);
+  __ sub(r0, r0, Operand(r1));
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+  {
+    // Call the runtime system in a fresh internal frame.
+    ExternalReference miss =
+        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r0);
+    __ push(lr);
+    __ Push(r1, r0);
+    __ mov(ip, Operand(Smi::FromInt(op_)));
+    __ push(ip);
+    __ CallExternalReference(miss, 3);
+    // Compute the entry point of the rewritten stub.
+    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Restore registers.
+    __ pop(lr);
+    __ pop(r0);
+    __ pop(r1);
+  }
+
   __ Jump(r2);
 }
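// The trampoline arithmetic above, spelled out (sketch; the runtime hands
// back the rewritten stub as a tagged Code object in r0, whose first
// instruction sits kHeaderSize bytes in, minus the heap-object tag):
#include <cstdint>

uintptr_t EntryPoint(uintptr_t tagged_code_object, int header_size,
                     int heap_object_tag) {
  return tagged_code_object + header_size - heap_object_tag;  // add r2, r0, ...
}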
 
@@ -6410,19 +6828,18 @@
 }
 
 
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register receiver,
-    Register properties,
-    String* name,
-    Register scratch0) {
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register receiver,
+                                                        Register properties,
+                                                        Handle<String> name,
+                                                        Register scratch0) {
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
   // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
+  // (their names are the hole value).
   for (int i = 0; i < kInlinedProbes; i++) {
     // scratch0 points to properties hash.
     // Compute the masked index: (hash + i + i * i) & mask.
@@ -6450,10 +6867,17 @@
     __ b(eq, done);
 
     if (i != kInlinedProbes - 1) {
+      // Load the hole ready for use below:
+      __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
+
       // Stop if found the property.
       __ cmp(entity_name, Operand(Handle<String>(name)));
       __ b(eq, miss);
 
+      Label the_hole;
+      __ cmp(entity_name, tmp);
+      __ b(eq, &the_hole);
+
       // Check if the entry name is not a symbol.
       __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
       __ ldrb(entity_name,
@@ -6461,6 +6885,8 @@
       __ tst(entity_name, Operand(kIsSymbolMask));
       __ b(eq, miss);
 
+      __ bind(&the_hole);
+
       // Restore the properties.
       __ ldr(properties,
              FieldMemOperand(receiver, JSObject::kPropertiesOffset));
@@ -6475,14 +6901,12 @@
   __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ mov(r1, Operand(Handle<String>(name)));
   StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
-  MaybeObject* result = masm->TryCallStub(&stub);
-  if (result->IsFailure()) return result;
-  __ tst(r0, Operand(r0));
+  __ CallStub(&stub);
+  __ cmp(r0, Operand(0));
   __ ldm(ia_w, sp, spill_mask);
 
   __ b(eq, done);
   __ b(ne, miss);
-  return result;
 }
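// The inlined probes above walk the standard quadratic sequence over a
// power-of-two table; a worked example (sketch):
#include <cstdint>

uint32_t Probe(uint32_t hash, uint32_t i, uint32_t mask) {
  return (hash + i + i * i) & mask;  // i = 0 .. kInlinedProbes - 1
}
// e.g. hash = 7, mask = 15 probes slots 7, 9, 13, 3 for i = 0, 1, 2, 3.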
 
 
@@ -6497,6 +6921,11 @@
                                                         Register name,
                                                         Register scratch1,
                                                         Register scratch2) {
+  ASSERT(!elements.is(scratch1));
+  ASSERT(!elements.is(scratch2));
+  ASSERT(!name.is(scratch1));
+  ASSERT(!name.is(scratch2));
+
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
 
@@ -6540,11 +6969,17 @@
       ~(scratch1.bit() | scratch2.bit());
 
   __ stm(db_w, sp, spill_mask);
-  __ Move(r0, elements);
-  __ Move(r1, name);
+  if (name.is(r0)) {
+    ASSERT(!elements.is(r1));
+    __ Move(r1, name);
+    __ Move(r0, elements);
+  } else {
+    __ Move(r0, elements);
+    __ Move(r1, name);
+  }
   StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
   __ CallStub(&stub);
-  __ tst(r0, Operand(r0));
+  __ cmp(r0, Operand(0));
   __ mov(scratch2, Operand(r2));
   __ ldm(ia_w, sp, spill_mask);
 
@@ -6554,6 +6989,8 @@
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Registers:
   //  result: StringDictionary to probe
   //  r1: key
@@ -6643,6 +7080,337 @@
 }
 
 
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
+static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // Used in RegExpExecStub.
+  { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
+  { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
+  // Used in CompileArrayPushCall.
+  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
+  // Also used in KeyedStoreIC::GenerateGeneric.
+  { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
+  // Used in CompileStoreGlobal.
+  { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
+  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
+  { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
+  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
+  { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
+  // KeyedStoreStubCompiler::GenerateStoreFastElement.
+  { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
+  { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // and ElementsTransitionGenerator::GenerateDoubleToObject
+  { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
+  { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateDoubleToObject
+  { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
+  { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
+  // StoreArrayLiteralElementStub::Generate
+  { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
+  // Null termination.
+  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET }
+};
+
+#undef REG
+
+bool RecordWriteStub::IsPregenerated() {
+  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+bool StoreBufferOverflowStub::IsPregenerated() {
+  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+// Takes the input in 3 registers: address_, value_ and object_.  A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed.  The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two instructions are generated with labels so as to get the
+  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
+  // forth between a compare instruction (a nop in this position) and the
+  // real branch when we start and stop incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ b(&skip_to_incremental_noncompacting);
+  __ b(&skip_to_incremental_compacting);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  }
+  __ Ret();
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+  PatchBranchIntoNop(masm, 0);
+  PatchBranchIntoNop(masm, Assembler::kInstrSize);
+}
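// How the two patchable slots encode the stub mode (cross-checked against
// GetMode in the header change below):
//   slot 0       slot 1       mode
//   tst (nop)    tst (nop)    STORE_BUFFER_ONLY
//   b            (any)        INCREMENTAL
//   tst (nop)    b            INCREMENTAL_COMPACTION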
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     ne,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  Register address =
+      r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+  ASSERT(!address.is(regs_.object()));
+  ASSERT(!address.is(r0));
+  __ Move(address, regs_.address());
+  __ Move(r0, regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ Move(r1, address);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ ldr(r1, MemOperand(address, 0));
+  }
+  __ mov(r2, Operand(ExternalReference::isolate_address()));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
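// The C entry points above receive three arguments in the standard AAPCS
// registers: r0 = object, r1 = slot address (compaction) or stored value
// (marking), r2 = isolate. Assumed shape, with a hypothetical name (the real
// declarations live behind the ExternalReference helpers):
extern "C" void RecordWriteHelper_Example(void* object, void* slot_or_value,
                                          void* isolate);  // hypothetical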
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_scratch;
+
+  // Let's look at the color of the object:  If it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     eq,
+                     &ensure_not_white);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     eq,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need extra registers for this, so we push the object and the address
+  // register temporarily.
+  __ Push(regs_.object(), regs_.address());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    regs_.address(),  // Scratch.
+                    &need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : element value to store
+  //  -- r1    : array literal
+  //  -- r2    : map of array literal
+  //  -- r3    : element index as smi
+  //  -- r4    : array literal index in function as smi
+  // -----------------------------------
+
+  Label element_done;
+  Label double_elements;
+  Label smi_element;
+  Label slow_elements;
+  Label fast_elements;
+
+  __ CheckFastElements(r2, r5, &double_elements);
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+  __ JumpIfSmi(r0, &smi_element);
+  __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
+
+  // Storing into the array literal requires an elements transition. Call
+  // into the runtime.
+  __ bind(&slow_elements);
+  __ Push(r1, r3, r0);
+  __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
+  __ Push(r5, r4);
+  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+  __ bind(&fast_elements);
+  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ str(r0, MemOperand(r6, 0));
+  // Update the write barrier for the array store.
+  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Ret();
+
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
+  __ bind(&smi_element);
+  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
+  __ Ret();
+
+  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+  __ bind(&double_elements);
+  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2,
+                                 &slow_elements);
+  __ Ret();
+}
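// The index arithmetic in the fast and smi paths above, as plain C++
// (sketch): a smi holds its integer shifted left by kSmiTagSize, so shifting
// the tagged index by kPointerSizeLog2 - kSmiTagSize yields a byte offset
// directly.
int ElementByteOffset(int smi_index, int pointer_size_log2, int smi_tag_size,
                      int header_size, int heap_object_tag) {
  int scaled = smi_index << (pointer_size_log2 - smi_tag_size);
  return scaled + header_size - heap_object_tag;
}
// On 32-bit ARM (kPointerSizeLog2 == 2, kSmiTagSize == 1), the smi 6
// (untagged value 3) lands 3 * 4 bytes past the header, as expected.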
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index cdea03e..38ed476 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -58,6 +58,25 @@
 };
 
 
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+      : save_doubles_(save_fp) { }
+
+  void Generate(MacroAssembler* masm);
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  SaveFPRegsMode save_doubles_;
+
+  Major MajorKey() { return StoreBufferOverflow; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
 class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
@@ -117,7 +136,7 @@
     return UnaryOpIC::ToState(operand_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_unary_op_type(operand_type_);
   }
 };
@@ -216,7 +235,7 @@
     return BinaryOpIC::ToState(operands_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_binary_op_type(operands_type_);
     code->set_binary_op_result_type(result_type_);
   }
@@ -387,6 +406,9 @@
         the_heap_number_(the_heap_number),
         scratch_(scratch) { }
 
+  bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+
  private:
   Register the_int_;
   Register the_heap_number_;
@@ -435,6 +457,218 @@
 };
 
 
+class RecordWriteStub: public CodeStub {
+ public:
+  RecordWriteStub(Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : object_(object),
+        value_(value),
+        address_(address),
+        remembered_set_action_(remembered_set_action),
+        save_fp_regs_mode_(fp_mode),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+  }
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+    masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
+    ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
+  }
+
+  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+    masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
+    ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
+  }
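// The bit twiddling above in C++ form (sketch, assuming the constants-arm.h
// convention that B27 == 1 << 27 and so on): clearing B27 and setting
// B24 | B20 turns the branch into a tst-immediate nop, and the reverse
// restores the branch.
#include <cstdint>

uint32_t BranchToNop(uint32_t instr) {
  return (instr & ~(1u << 27)) | (1u << 24) | (1u << 20);
}
uint32_t NopToBranch(uint32_t instr) {
  return (instr & ~((1u << 24) | (1u << 20))) | (1u << 27);
}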
+
+  static Mode GetMode(Code* stub) {
+    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+                                                   Assembler::kInstrSize);
+
+    if (Assembler::IsBranch(first_instruction)) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(Assembler::IsTstImmediate(first_instruction));
+
+    if (Assembler::IsBranch(second_instruction)) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(Assembler::IsTstImmediate(second_instruction));
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    MacroAssembler masm(NULL,
+                        stub->instruction_start(),
+                        stub->instruction_size());
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        PatchBranchIntoNop(&masm, 0);
+        PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, 0);
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
+  }
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers.  The input is
+  // two registers that must be preserved and one scratch register provided by
+  // the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      // We don't have to save scratch0_ because it was given to us as
+      // a scratch register.
+      masm->push(scratch1_);
+    }
+
+    void Restore(MacroAssembler* masm) {
+      masm->pop(scratch1_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.  The scratch registers
+    // will be restored by other means so we don't bother pushing them here.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(VFP3);
+        masm->sub(sp,
+                  sp,
+                  Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+        // Save all VFP registers except d0.
+        for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+          DwVfpRegister reg = DwVfpRegister::from_code(i);
+          masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
+        }
+      }
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(VFP3);
+        // Restore all VFP registers except d0.
+        for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+          DwVfpRegister reg = DwVfpRegister::from_code(i);
+          masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
+        }
+        masm->add(sp,
+                  sp,
+                  Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+      }
+      masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+
+    Register GetRegThatIsNotOneOf(Register r1,
+                                  Register r2,
+                                  Register r3) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+        Register candidate = Register::FromAllocationIndex(i);
+        if (candidate.is(r1)) continue;
+        if (candidate.is(r2)) continue;
+        if (candidate.is(r3)) continue;
+        return candidate;
+      }
+      UNREACHABLE();
+      return no_reg;
+    }
+    friend class RecordWriteStub;
+  };
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+        ValueBits::encode(value_.code()) |
+        AddressBits::encode(address_.code()) |
+        RememberedSetActionBits::encode(remembered_set_action_) |
+        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+  }
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  class ObjectBits: public BitField<int, 0, 4> {};
+  class ValueBits: public BitField<int, 4, 4> {};
+  class AddressBits: public BitField<int, 8, 4> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
+
+  Register object_;
+  Register value_;
+  Register address_;
+  RememberedSetAction remembered_set_action_;
+  SaveFPRegsMode save_fp_regs_mode_;
+  Label slow_;
+  RegisterAllocation regs_;
+};
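// A worked MinorKey packing for the bit fields above (sketch; this assumes
// EMIT_REMEMBERED_SET and kDontSaveFPRegs both encode as 0). With object r3,
// value r2 and address r4:
//   3 | (2 << 4) | (4 << 8) | (0 << 12) | (0 << 13) == 0x423
// i.e. each 4-bit register code slots into its BitField position.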
+
+
 // Enter C code from generated RegExp code in a way that allows
 // the C code to fix the return address in case of a GC.
 // Currently only needed on ARM.
@@ -622,14 +856,13 @@
 
   void Generate(MacroAssembler* masm);
 
-  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register receiver,
-      Register properties,
-      String* name,
-      Register scratch0);
+  static void GenerateNegativeLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register receiver,
+                                     Register properties,
+                                     Handle<String> name,
+                                     Register scratch0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -639,6 +872,8 @@
                                      Register r0,
                                      Register r1);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -651,7 +886,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
+  Major MajorKey() { return StringDictionaryLookup; }
 
   int MinorKey() {
     return LookupModeBits::encode(mode_);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index bf748a9..befd8f2 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,23 +30,409 @@
 #if defined(V8_TARGET_ARCH_ARM)
 
 #include "codegen.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
+
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
+  switch (type) {
+    case TranscendentalCache::SIN: return &sin;
+    case TranscendentalCache::COS: return &cos;
+    case TranscendentalCache::TAN: return &tan;
+    case TranscendentalCache::LOG: return &log;
+    default: UNIMPLEMENTED();
+  }
+  return NULL;
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+  return &sqrt;
+}
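// Usage sketch for the helpers above: UnaryMathFunction is a plain function
// pointer, so a caller can do, e.g.,
//   UnaryMathFunction f =
//       CreateTranscendentalFunction(TranscendentalCache::SIN);
//   double y = f(0.5);  // same as sin(0.5) on this port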
+
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterInternalFrame();
+  masm->EnterFrame(StackFrame::INTERNAL);
+  ASSERT(!masm->has_frame());
+  masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveInternalFrame();
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  ASSERT(masm->has_frame());
+  masm->set_has_frame(false);
 }
 
 
+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : target map, scratch for subsequent call
+  //  -- r4    : scratch (elements)
+  // -----------------------------------
+  // Set transitioned map.
+  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ RecordWriteField(r2,
+                      HeapObject::kMapOffset,
+                      r3,
+                      r9,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : target map, scratch for subsequent call
+  //  -- r4    : scratch (elements)
+  // -----------------------------------
+  Label loop, entry, convert_hole, gc_required, only_change_map, done;
+  bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+  __ b(eq, &only_change_map);
+
+  __ push(lr);
+  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
+  // r4: source FixedArray
+  // r5: number of elements (smi-tagged)
+
+  // Allocate new FixedDoubleArray.
+  __ mov(lr, Operand(FixedDoubleArray::kHeaderSize));
+  __ add(lr, lr, Operand(r5, LSL, 2));
+  __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+  // r6: destination FixedDoubleArray, not tagged as heap object
+  // Set destination FixedDoubleArray's length and map.
+  __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
+  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
+  // Update receiver's map.
+  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ RecordWriteField(r2,
+                      HeapObject::kMapOffset,
+                      r3,
+                      r9,
+                      kLRHasBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created FixedDoubleArray.
+  __ add(r3, r6, Operand(kHeapObjectTag));
+  __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ RecordWriteField(r2,
+                      JSObject::kElementsOffset,
+                      r3,
+                      r9,
+                      kLRHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Prepare for conversion loop.
+  __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
+  __ add(r6, r7, Operand(r5, LSL, 2));
+  __ mov(r4, Operand(kHoleNanLower32));
+  __ mov(r5, Operand(kHoleNanUpper32));
+  // r3: begin of source FixedArray element fields, not tagged
+  // r4: kHoleNanLower32
+  // r5: kHoleNanUpper32
+  // r6: end of destination FixedDoubleArray, not tagged
+  // r7: begin of FixedDoubleArray element fields, not tagged
+  if (!vfp3_supported) __ Push(r1, r0);
+
+  __ b(&entry);
+
+  __ bind(&only_change_map);
+  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ RecordWriteField(r2,
+                      HeapObject::kMapOffset,
+                      r3,
+                      r9,
+                      kLRHasBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ b(&done);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ pop(lr);
+  __ b(fail);
+
+  // Convert and copy elements.
+  __ bind(&loop);
+  __ ldr(r9, MemOperand(r3, 4, PostIndex));
+  // r9: current element
+  __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
+
+  // Normal smi, convert to double and store.
+  if (vfp3_supported) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(s0, r9);
+    __ vcvt_f64_s32(d0, s0);
+    __ vstr(d0, r7, 0);
+    __ add(r7, r7, Operand(8));
+  } else {
+    FloatingPointHelper::ConvertIntToDouble(masm,
+                                            r9,
+                                            FloatingPointHelper::kCoreRegisters,
+                                            d0,
+                                            r0,
+                                            r1,
+                                            lr,
+                                            s0);
+    __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
+  }
+  __ b(&entry);
+
+  // Hole found, store the-hole NaN.
+  __ bind(&convert_hole);
+  if (FLAG_debug_code) {
+    // Restore a "smi-untagged" heap object.
+    __ SmiTag(r9);
+    __ orr(r9, r9, Operand(1));
+    __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+    __ Assert(eq, "object found in smi-only array");
+  }
+  __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
+
+  __ bind(&entry);
+  __ cmp(r7, r6);
+  __ b(lt, &loop);
+
+  if (!vfp3_supported) __ Pop(r1, r0);
+  __ pop(lr);
+  __ bind(&done);
+}
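// A C-level sketch of the per-element conversion above (the hole NaN bit
// pattern is taken as an opaque parameter here rather than restated):
#include <cstdint>
#include <cstring>

void SmiToDoubleElement(int32_t tagged, bool is_hole, double* out,
                        uint64_t hole_nan_bits) {
  if (is_hole) {
    std::memcpy(out, &hole_nan_bits, sizeof(*out));  // Strd r4, r5 above
  } else {
    *out = static_cast<double>(tagged >> 1);  // untag smi, then vcvt_f64_s32
  }
}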
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : target map, scratch for subsequent call
+  //  -- r4    : scratch (elements)
+  // -----------------------------------
+  Label entry, loop, convert_hole, gc_required, only_change_map;
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+  __ b(eq, &only_change_map);
+
+  __ push(lr);
+  __ Push(r3, r2, r1, r0);
+  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
+  // r4: source FixedDoubleArray
+  // r5: number of elements (smi-tagged)
+
+  // Allocate new FixedArray.
+  __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
+  __ add(r0, r0, Operand(r5, LSL, 1));
+  __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+  // r6: destination FixedArray, not tagged as heap object
+  // Set destination FixedDoubleArray's length and map.
+  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
+  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
+
+  // Prepare for conversion loop.
+  __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+  __ add(r3, r6, Operand(FixedArray::kHeaderSize));
+  __ add(r6, r6, Operand(kHeapObjectTag));
+  __ add(r5, r3, Operand(r5, LSL, 1));
+  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
+  // Using offset addresses in r4 to take full advantage of post-indexing.
+  // r3: begin of destination FixedArray element fields, not tagged
+  // r4: begin of source FixedDoubleArray element fields, not tagged, +4
+  // r5: end of destination FixedArray, not tagged
+  // r6: destination FixedArray
+  // r7: the-hole pointer
+  // r9: heap number map
+  __ b(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ Pop(r3, r2, r1, r0);
+  __ pop(lr);
+  __ b(fail);
+
+  __ bind(&loop);
+  __ ldr(r1, MemOperand(r4, 8, PostIndex));
+  // r1: current element's upper 32 bits
+  // r4: address of next element's upper 32 bits
+  __ cmp(r1, Operand(kHoleNanUpper32));
+  __ b(eq, &convert_hole);
+
+  // Non-hole double, copy value into a heap number.
+  __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
+  // r2: new heap number
+  __ ldr(r0, MemOperand(r4, 12, NegOffset));
+  __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
+  __ mov(r0, r3);
+  __ str(r2, MemOperand(r3, 4, PostIndex));
+  __ RecordWrite(r6,
+                 r0,
+                 r2,
+                 kLRHasBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ b(&entry);
+
+  // Replace the-hole NaN with the-hole pointer.
+  __ bind(&convert_hole);
+  __ str(r7, MemOperand(r3, 4, PostIndex));
+
+  __ bind(&entry);
+  __ cmp(r3, r5);
+  __ b(lt, &loop);
+
+  __ Pop(r3, r2, r1, r0);
+  // Replace receiver's backing store with newly created and filled FixedArray.
+  __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ RecordWriteField(r2,
+                      JSObject::kElementsOffset,
+                      r6,
+                      r9,
+                      kLRHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(lr);
+
+  __ bind(&only_change_map);
+  // Update receiver's map.
+  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ RecordWriteField(r2,
+                      HeapObject::kMapOffset,
+                      r3,
+                      r9,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+                                       Register string,
+                                       Register index,
+                                       Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ tst(result, Operand(kIsIndirectStringMask));
+  __ b(eq, &check_sequential);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ tst(result, Operand(kSlicedNotConsMask));
+  __ b(eq, &cons_string);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ add(index, index, Operand(result, ASR, kSmiTagSize));
+  __ jmp(&indirect_string_loaded);
+
+  // Handle cons strings.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ CompareRoot(result, Heap::kEmptyStringRootIndex);
+  __ b(ne, call_runtime);
+  // Get the first of the two strings and load its instance type.
+  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label external_string, check_encoding;
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ tst(result, Operand(kStringRepresentationMask));
+  __ b(ne, &external_string);
+
+  // Prepare sequential strings.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ add(string,
+         string,
+         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ jmp(&check_encoding);
+
+  // Handle external strings.
+  __ bind(&external_string);
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ tst(result, Operand(kIsIndirectStringMask));
+    __ Assert(eq, "external string expected, but not found");
+  }
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ tst(result, Operand(kShortExternalStringMask));
+  __ b(ne, call_runtime);
+  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+  Label ascii, done;
+  __ bind(&check_encoding);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ tst(result, Operand(kStringEncodingMask));
+  __ b(ne, &ascii);
+  // Two-byte string.
+  __ ldrh(result, MemOperand(string, index, LSL, 1));
+  __ jmp(&done);
+  __ bind(&ascii);
+  // Ascii string.
+  __ ldrb(result, MemOperand(string, index));
+  __ bind(&done);
+}
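// The shape dispatch above as a recursive C++ sketch (the Str type below is a
// hypothetical stand-in for the real string hierarchy; the generated code
// handles a single level of indirection and bails to the runtime otherwise):
#include <cstdint>

struct Str {
  enum Kind { kSeq, kCons, kSliced, kExternal } kind;
  bool two_byte;
  const void* chars;   // sequential payload or external resource data
  const Str* first;    // cons: left child (right child must be empty)
  const Str* parent;   // slice: underlying string
  int offset;          // slice: start index into parent
};

uint16_t CharAt(const Str* s, int index) {
  if (s->kind == Str::kSliced) return CharAt(s->parent, index + s->offset);
  if (s->kind == Str::kCons) return CharAt(s->first, index);
  return s->two_byte ? static_cast<const uint16_t*>(s->chars)[index]
                     : static_cast<const uint8_t*>(s->chars)[index];
}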
+
+#undef __
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index d27982a..c340e6b 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -29,7 +29,6 @@
 #define V8_ARM_CODEGEN_ARM_H_
 
 #include "ast.h"
-#include "code-stubs-arm.h"
 #include "ic-inl.h"
 
 namespace v8 {
@@ -69,21 +68,26 @@
                               int pos,
                               bool right_here = false);
 
-  // Constants related to patching of inlined load/store.
-  static int GetInlinedKeyedLoadInstructionsAfterPatch() {
-    return FLAG_debug_code ? 32 : 13;
-  }
-  static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
-  static int GetInlinedNamedStoreInstructionsAfterPatch() {
-    ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
-    return Isolate::Current()->inlined_write_barrier_size() + 4;
-  }
-
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
 
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 823c6ff..e767001 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -87,22 +87,21 @@
 namespace internal {
 
 // Constant pool marker.
-static const int kConstantPoolMarkerMask = 0xffe00000;
-static const int kConstantPoolMarker = 0x0c000000;
-static const int kConstantPoolLengthMask = 0x001ffff;
+const int kConstantPoolMarkerMask = 0xffe00000;
+const int kConstantPoolMarker = 0x0c000000;
+const int kConstantPoolLengthMask = 0x001ffff;
 
 // Number of registers in normal ARM mode.
-static const int kNumRegisters = 16;
+const int kNumRegisters = 16;
 
 // VFP support.
-static const int kNumVFPSingleRegisters = 32;
-static const int kNumVFPDoubleRegisters = 16;
-static const int kNumVFPRegisters =
-    kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
+const int kNumVFPSingleRegisters = 32;
+const int kNumVFPDoubleRegisters = 16;
+const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
 
 // PC is register 15.
-static const int kPCRegister = 15;
-static const int kNoRegister = -1;
+const int kPCRegister = 15;
+const int kNoRegister = -1;
 
 // -----------------------------------------------------------------------------
 // Conditions.
@@ -371,9 +370,9 @@
   // stop
   kStopCode = 1 << 23
 };
-static const uint32_t kStopCodeMask = kStopCode - 1;
-static const uint32_t kMaxStopCode = kStopCode - 1;
-static const int32_t  kDefaultStopCode = -1;
+const uint32_t kStopCodeMask = kStopCode - 1;
+const uint32_t kMaxStopCode = kStopCode - 1;
+const int32_t  kDefaultStopCode = -1;
 
 
 // Type of VFP register. Determines register encoding.
@@ -391,17 +390,17 @@
 
 // This mask does not include the "inexact" or "input denormal" cumulative
 // exceptions flags, because we usually don't want to check for it.
-static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
-static const uint32_t kVFPOverflowExceptionBit = 1 << 2;
-static const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
-static const uint32_t kVFPInexactExceptionBit = 1 << 4;
-static const uint32_t kVFPFlushToZeroMask = 1 << 24;
+const uint32_t kVFPExceptionMask = 0xf;
+const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
+const uint32_t kVFPOverflowExceptionBit = 1 << 2;
+const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
+const uint32_t kVFPInexactExceptionBit = 1 << 4;
+const uint32_t kVFPFlushToZeroMask = 1 << 24;
 
-static const uint32_t kVFPNConditionFlagBit = 1 << 31;
-static const uint32_t kVFPZConditionFlagBit = 1 << 30;
-static const uint32_t kVFPCConditionFlagBit = 1 << 29;
-static const uint32_t kVFPVConditionFlagBit = 1 << 28;
+const uint32_t kVFPNConditionFlagBit = 1 << 31;
+const uint32_t kVFPZConditionFlagBit = 1 << 30;
+const uint32_t kVFPCConditionFlagBit = 1 << 29;
+const uint32_t kVFPVConditionFlagBit = 1 << 28;
 
 
 // VFP rounding modes. See ARM DDI 0406B Page A2-29.
@@ -418,7 +417,7 @@
   kRoundToZero = RZ
 };
 
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
+const uint32_t kVFPRoundingModeMask = 3 << 22;
 
 enum CheckForInexactConversion {
   kCheckForInexactConversion,
@@ -574,13 +573,13 @@
   // The naming of these accessors corresponds to figure A3-1.
   //
   // Two kind of accessors are declared:
-  // - <Name>Field() will return the raw field, ie the field's bits at their
+  // - <Name>Field() will return the raw field, i.e. the field's bits at their
   //   original place in the instruction encoding.
-  //   eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
-  //   ConditionField(instr) will return 0xC0000000.
+  //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+  //   0xC0810002 ConditionField(instr) will return 0xC0000000.
   // - <Name>Value() will return the field value, shifted back to bit 0.
-  //   eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
-  //   ConditionField(instr) will return 0xC.
+  //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+  //   0xC0810002 ConditionField(instr) will return 0xC.
 
 
   // Generally applicable fields
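
Aside (illustration, not part of the patch): the constant pool marker constants above describe a bit pattern that flags data pools embedded in the instruction stream. A minimal standalone sketch of testing a word against them, with the values copied from this file (widened to uint32_t for well-defined masking):

#include <cstdint>

const uint32_t kConstantPoolMarkerMask = 0xffe00000;
const uint32_t kConstantPoolMarker = 0x0c000000;
const uint32_t kConstantPoolLengthMask = 0x001ffff;

// Returns true if |instr| is a constant pool marker and stores the pool
// length (taken from the low bits) in |*length|.
bool IsConstantPoolMarker(uint32_t instr, int* length) {
  if ((instr & kConstantPoolMarkerMask) != kConstantPoolMarker) return false;
  *length = static_cast<int>(instr & kConstantPoolLengthMask);
  return true;
}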
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 51cfeb6..7b08ed8 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -41,7 +41,7 @@
 namespace v8 {
 namespace internal {
 
-void CPU::Setup() {
+void CPU::SetUp() {
   CpuFeatures::Probe();
 }
 
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 07a2272..96139a2 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -132,55 +132,57 @@
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                           RegList object_regs,
                                           RegList non_object_regs) {
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Store the registers containing live values on the expression stack to
-  // make sure that these are correctly updated during GC. Non object values
-  // are stored as a smi causing it to be untouched by GC.
-  ASSERT((object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((object_regs & non_object_regs) == 0);
-  if ((object_regs | non_object_regs) != 0) {
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((non_object_regs & (1 << r)) != 0) {
-        if (FLAG_debug_code) {
-          __ tst(reg, Operand(0xc0000000));
-          __ Assert(eq, "Unable to encode value as smi");
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as smis, causing them to be left untouched by the GC.
+    ASSERT((object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((object_regs & non_object_regs) == 0);
+    if ((object_regs | non_object_regs) != 0) {
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = { r };
+        if ((non_object_regs & (1 << r)) != 0) {
+          if (FLAG_debug_code) {
+            __ tst(reg, Operand(0xc0000000));
+            __ Assert(eq, "Unable to encode value as smi");
+          }
+          __ mov(reg, Operand(reg, LSL, kSmiTagSize));
         }
-        __ mov(reg, Operand(reg, LSL, kSmiTagSize));
       }
+      __ stm(db_w, sp, object_regs | non_object_regs);
     }
-    __ stm(db_w, sp, object_regs | non_object_regs);
-  }
 
 #ifdef DEBUG
-  __ RecordComment("// Calling from debug break to runtime - come in - over");
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ mov(r0, Operand(0, RelocInfo::NONE));  // no arguments
-  __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
+    __ mov(r0, Operand(0, RelocInfo::NONE));  // no arguments
+    __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
 
-  CEntryStub ceb(1);
-  __ CallStub(&ceb);
+    CEntryStub ceb(1);
+    __ CallStub(&ceb);
 
-  // Restore the register values from the expression stack.
-  if ((object_regs | non_object_regs) != 0) {
-    __ ldm(ia_w, sp, object_regs | non_object_regs);
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((non_object_regs & (1 << r)) != 0) {
-        __ mov(reg, Operand(reg, LSR, kSmiTagSize));
-      }
-      if (FLAG_debug_code &&
-          (((object_regs |non_object_regs) & (1 << r)) == 0)) {
-        __ mov(reg, Operand(kDebugZapValue));
+    // Restore the register values from the expression stack.
+    if ((object_regs | non_object_regs) != 0) {
+      __ ldm(ia_w, sp, object_regs | non_object_regs);
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = { r };
+        if ((non_object_regs & (1 << r)) != 0) {
+          __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+        }
+        if (FLAG_debug_code &&
+            (((object_regs |non_object_regs) & (1 << r)) == 0)) {
+          __ mov(reg, Operand(kDebugZapValue));
+        }
       }
     }
-  }
 
-  __ LeaveInternalFrame();
+    // Leave the internal frame.
+  }
 
   // Now that the break point has been handled, resume normal execution by
   // jumping to the target address intended by the caller and that was
@@ -249,14 +251,6 @@
 }
 
 
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
-  // Calling convention for construct call (from builtins-arm.cc)
-  //  -- r0     : number of arguments (not smi)
-  //  -- r1     : constructor function
-  Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
-}
-
-
 void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
   // In places other than IC call sites it is expected that r0 is TOS which
   // is an object - this is not generally the case so this should be used with
@@ -265,11 +259,43 @@
 }
 
 
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+  // Register state for CallFunctionStub (from code-stubs-arm.cc).
   // ----------- S t a t e -------------
-  //  No registers used on entry.
+  //  -- r1 : function
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, 0, 0);
+  Generate_DebugBreakCallHelper(masm, r1.bit(), 0);
+}
+
+
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+  // Register state for CallFunctionStub (from code-stubs-arm.cc).
+  // ----------- S t a t e -------------
+  //  -- r1 : function
+  //  -- r2 : cache cell for call target
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+  // Calling convention for CallConstructStub (from code-stubs-arm.cc)
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments (not smi)
+  //  -- r1     : constructor function
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
+}
+
+
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+  // Calling convention for CallConstructStub (from code-stubs-arm.cc)
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments (not smi)
+  //  -- r1     : constructor function
+  //  -- r2     : cache cell for call target
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
 }
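
Aside (illustration, not part of the patch): the rewritten helper above wraps the body in a block-scoped FrameScope, so leaving the internal frame is tied to the closing brace instead of an explicit LeaveInternalFrame call. The smi trick it uses for non-object registers is plain bit arithmetic; a standalone sketch, assuming ARM's kSmiTagSize == 1:

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;

// Encode an untagged value as a smi before spilling it, as the helper does
// for non-object registers so the GC leaves them alone. Only values with
// the top two bits clear are accepted; that is what the generated
// tst/Assert pair checks.
uint32_t EncodeAsSmi(uint32_t value) {
  assert((value & 0xc0000000) == 0 && "Unable to encode value as smi");
  return value << kSmiTagSize;  // mov reg, reg LSL #1
}

uint32_t DecodeFromSmi(uint32_t smi) {
  return smi >> kSmiTagSize;  // mov reg, reg LSR #1
}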
 
 
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index d4f251f..7b2a3c4 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -79,18 +79,24 @@
     ASSERT(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
     ASSERT(call_address + patch_size() <= code->instruction_end());
-
 #ifdef DEBUG
     prev_call_address = call_address;
 #endif
   }
 
+  Isolate* isolate = code->GetIsolate();
+
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
-  DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+  DeoptimizerData* data = isolate->deoptimizer_data();
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;
 
+  // We might be in the middle of incremental marking with compaction.
+  // Tell the collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
 
@@ -102,7 +108,12 @@
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+static const int32_t kBranchBeforeStackCheck = 0x2a000001;
+static const int32_t kBranchBeforeInterrupt =  0x5a000004;
+
+
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+                                        Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   const int kInstrSize = Assembler::kInstrSize;
@@ -111,10 +122,16 @@
   //  2a 00 00 01       bcs ok
   //  e5 9f c? ??       ldr ip, [pc, <stack guard address>]
   //  e1 2f ff 3c       blx ip
-  ASSERT(Memory::int32_at(pc_after - kInstrSize) ==
-      (al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code()));
+  ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
   ASSERT(Assembler::IsLdrPcImmediateOffset(
       Assembler::instr_at(pc_after - 2 * kInstrSize)));
+  if (FLAG_count_based_interrupts) {
+    ASSERT_EQ(kBranchBeforeInterrupt,
+              Memory::int32_at(pc_after - 3 * kInstrSize));
+  } else {
+    ASSERT_EQ(kBranchBeforeStackCheck,
+              Memory::int32_at(pc_after - 3 * kInstrSize));
+  }
 
   // We patch the code to the following form:
   //  e1 5d 00 0c       cmp sp, <limit>
@@ -137,20 +154,32 @@
          reinterpret_cast<uint32_t>(check_code->entry()));
   Memory::uint32_at(stack_check_address_pointer) =
       reinterpret_cast<uint32_t>(replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
 }
 
 
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+                                         Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
   const int kInstrSize = Assembler::kInstrSize;
-  ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c);
-  ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5);
-  ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f);
+  ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
+  ASSERT(Assembler::IsLdrPcImmediateOffset(
+      Assembler::instr_at(pc_after - 2 * kInstrSize)));
 
   // Replace NOP with conditional jump.
   CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
-  patcher.masm()->b(+4, cs);
+  if (FLAG_count_based_interrupts) {
+    patcher.masm()->b(+16, pl);
+    ASSERT_EQ(kBranchBeforeInterrupt,
+              Memory::int32_at(pc_after - 3 * kInstrSize));
+  } else {
+    patcher.masm()->b(+4, cs);
+    ASSERT_EQ(kBranchBeforeStackCheck,
+              Memory::int32_at(pc_after - 3 * kInstrSize));
+  }
 
   // Replace the stack check address in the constant pool
   // with the entry address of the replacement code.
@@ -161,6 +190,9 @@
          reinterpret_cast<uint32_t>(replacement_code->entry()));
   Memory::uint32_at(stack_check_address_pointer) =
       reinterpret_cast<uint32_t>(check_code->entry());
+
+  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 2 * kInstrSize, check_code);
 }
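
Aside (illustration, not part of the patch): kBranchBeforeStackCheck and kBranchBeforeInterrupt are the ARM encodings of the b(+4, cs) and b(+16, pl) instructions the patcher emits. A standalone check of that correspondence, assuming V8's convention that the offset handed to Assembler::b() already satisfies imm24 = offset >> 2 (condition codes per the ARM ARM, A8.3):

#include <cassert>
#include <cstdint>

const uint32_t kCondCS = 2;  // carry set (0b0010)
const uint32_t kCondPL = 5;  // plus      (0b0101)

// Encode "b<cond> <offset>": cond in bits 31-28, 0b1010 in bits 27-24,
// imm24 in the low 24 bits.
uint32_t EncodeBranch(uint32_t cond, uint32_t offset) {
  return (cond << 28) | 0x0a000000 | ((offset >> 2) & 0x00ffffff);
}

int main() {
  assert(EncodeBranch(kCondCS, 4) == 0x2a000001);   // kBranchBeforeStackCheck
  assert(EncodeBranch(kCondPL, 16) == 0x5a000004);  // kBranchBeforeInterrupt
  return 0;
}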
 
 
@@ -197,12 +229,13 @@
   ASSERT(Translation::BEGIN == opcode);
   USE(opcode);
   int count = iterator.Next();
+  iterator.Skip(1);  // Drop JS frame count.
   ASSERT(count == 1);
   USE(count);
 
   opcode = static_cast<Translation::Opcode>(iterator.Next());
   USE(opcode);
-  ASSERT(Translation::FRAME == opcode);
+  ASSERT(Translation::JS_FRAME == opcode);
   unsigned node_id = iterator.Next();
   USE(node_id);
   ASSERT(node_id == ast_id);
@@ -238,9 +271,7 @@
   output_ = new FrameDescription*[1];
   output_[0] = new(output_frame_size) FrameDescription(
       output_frame_size, function_);
-#ifdef DEBUG
-  output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif
+  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
 
   // Clear the incoming parameters in the optimized frame to avoid
   // confusing the garbage collector.
@@ -305,7 +336,7 @@
     output_[0] = input_;
     output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
   } else {
-    // Setup the frame pointer and the context pointer.
+    // Set up the frame pointer and the context pointer.
     output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
     output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
 
@@ -328,15 +359,220 @@
 }
 
 
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+                                                 int frame_index) {
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
+  }
+
+  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+  // An arguments adaptor frame cannot be topmost or bottommost.
+  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address of the frame is computed from the previous
+  // frame's top and this frame's size.
+  uint32_t top_address;
+  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = height;
+  unsigned output_offset = output_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+
+  // Read caller's PC from the previous frame.
+  output_offset -= kPointerSize;
+  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+  output_frame->SetFrameSlot(output_offset, callers_pc);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, callers_pc);
+  }
+
+  // Read caller's FP from the previous frame, and set this frame's FP.
+  output_offset -= kPointerSize;
+  intptr_t value = output_[frame_index - 1]->GetFp();
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  output_frame->SetFp(fp_value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // A marker value is used in place of the context.
+  output_offset -= kPointerSize;
+  intptr_t context = reinterpret_cast<intptr_t>(
+      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  output_frame->SetFrameSlot(output_offset, context);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
+           top_address + output_offset, output_offset, context);
+  }
+
+  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(function);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Number of incoming arguments.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+           top_address + output_offset, output_offset, value, height - 1);
+  }
+
+  ASSERT(0 == output_offset);
+
+  Builtins* builtins = isolate_->builtins();
+  Code* adaptor_trampoline =
+      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+  uint32_t pc = reinterpret_cast<uint32_t>(
+      adaptor_trampoline->instruction_start() +
+      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+  output_frame->SetPc(pc);
+}
+
+
+void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
+                                              int frame_index) {
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating construct stub => height=%d\n", height_in_bytes);
+  }
+
+  unsigned fixed_frame_size = 7 * kPointerSize;
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+  output_frame->SetFrameType(StackFrame::CONSTRUCT);
+
+  // A construct stub frame cannot be topmost or bottommost.
+  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address of the frame is computed from the previous
+  // frame's top and this frame's size.
+  uint32_t top_address;
+  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = height;
+  unsigned output_offset = output_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+
+  // Read caller's PC from the previous frame.
+  output_offset -= kPointerSize;
+  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+  output_frame->SetFrameSlot(output_offset, callers_pc);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, callers_pc);
+  }
+
+  // Read caller's FP from the previous frame, and set this frame's FP.
+  output_offset -= kPointerSize;
+  intptr_t value = output_[frame_index - 1]->GetFp();
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  output_frame->SetFp(fp_value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // The context can be gotten from the previous frame.
+  output_offset -= kPointerSize;
+  value = output_[frame_index - 1]->GetContext();
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // A marker value is used in place of the function.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Number of incoming arguments.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+           top_address + output_offset, output_offset, value, height - 1);
+  }
+
+  // Constructor function being invoked by the stub.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(function);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The newly allocated object was passed as receiver in the artificial
+  // constructor stub environment created by HEnvironment::CopyForInlining().
+  output_offset -= kPointerSize;
+  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  ASSERT(0 == output_offset);
+
+  Builtins* builtins = isolate_->builtins();
+  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
+  uint32_t pc = reinterpret_cast<uint32_t>(
+      construct_stub->instruction_start() +
+      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
+  output_frame->SetPc(pc);
+}
+
+
 // This code is very similar to ia32 code, but relies on register names (fp, sp)
 // and how the frame is laid out.
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
-                                 int frame_index) {
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+                                   int frame_index) {
   // Read the ast node id, function, and frame height for this output frame.
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(iterator->Next());
-  USE(opcode);
-  ASSERT(Translation::FRAME == opcode);
   int node_id = iterator->Next();
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
@@ -356,9 +592,7 @@
   // Allocate and store the output frame description.
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, function);
-#ifdef DEBUG
-  output_frame->SetKind(Code::FUNCTION);
-#endif
+  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
 
   bool is_bottommost = (0 == frame_index);
   bool is_topmost = (output_count_ - 1 == frame_index);
@@ -446,9 +680,8 @@
     value = reinterpret_cast<intptr_t>(function->context());
   }
   output_frame->SetFrameSlot(output_offset, value);
-  if (is_topmost) {
-    output_frame->SetRegister(cp.code(), value);
-  }
+  output_frame->SetContext(value);
+  if (is_topmost) output_frame->SetRegister(cp.code(), value);
   if (FLAG_trace_deopt) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
            top_address + output_offset, output_offset, value);
@@ -600,7 +833,10 @@
   __ mov(r5, Operand(ExternalReference::isolate_address()));
   __ str(r5, MemOperand(sp, 1 * kPointerSize));  // Isolate.
   // Call Deoptimizer::New().
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  }
 
   // Preserve "deoptimizer" object in register r0 and get the input
   // frame descriptor pointer to r1 (deoptimizer->input_);
@@ -654,8 +890,11 @@
   // r0: deoptimizer object; r1: scratch.
   __ PrepareCallCFunction(1, r1);
   // Call Deoptimizer::ComputeOutputFrames().
-  __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 1);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate), 1);
+  }
   __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).
 
   // Replace the current (input) frame with the output frames.
@@ -671,7 +910,6 @@
   __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
   __ bind(&inner_push_loop);
   __ sub(r3, r3, Operand(sizeof(uint32_t)));
-  // __ add(r6, r2, Operand(r3, LSL, 1));
   __ add(r6, r2, Operand(r3));
   __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
   __ push(r7);
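
Aside (illustration, not part of the patch): DoComputeArgumentsAdaptorFrame above lays down five fixed words (caller's pc, caller's fp, adaptor sentinel in the context slot, function, argc) plus one word per translated parameter, which is exactly ArgumentsAdaptorFrameConstants::kFrameSize from frames-arm.h. A worked size computation, assuming kPointerSize == 4:

#include <cstdio>

const unsigned kPointerSize = 4;  // ARM
// caller pc + caller fp + sentinel + function + argc:
const unsigned kAdaptorFixedFrameSize = 5 * kPointerSize;

int main() {
  unsigned height = 3;  // hypothetical: argument count incl. receiver
  unsigned output_frame_size = height * kPointerSize + kAdaptorFixedFrameSize;
  std::printf("adaptor frame, height=%u: %u bytes\n",
              height, output_frame_size);  // 3*4 + 20 = 32
  return 0;
}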
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 603b3cf..96a7d3c 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -473,7 +473,7 @@
       return 1;
     }
     case 'i': {  // 'i: immediate value from adjacent bits.
-      // Expects tokens in the form imm%02d@%02d, ie. imm05@07, imm10@16
+      // Expects tokens in the form imm%02d@%02d, i.e. imm05@07, imm10@16
       int width = (format[3] - '0') * 10 + (format[4] - '0');
       int lsb   = (format[6] - '0') * 10 + (format[7] - '0');
 
@@ -662,6 +662,15 @@
 }
 
 
+// The disassembler may end up decoding data inlined in the code. We do not want
+// it to crash if the data does not resemble any known instruction.
+#define VERIFY(condition) \
+if (!(condition)) {       \
+  Unknown(instr);         \
+  return;                 \
+}
+
+
 // For currently unimplemented decodings the disassembler calls Unknown(instr)
 // which will just print "unknown" for the instruction bits.
 void Decoder::Unknown(Instruction* instr) {
@@ -947,13 +956,13 @@
 void Decoder::DecodeType3(Instruction* instr) {
   switch (instr->PUField()) {
     case da_x: {
-      ASSERT(!instr->HasW());
+      VERIFY(!instr->HasW());
       Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
       break;
     }
     case ia_x: {
       if (instr->HasW()) {
-        ASSERT(instr->Bits(5, 4) == 0x1);
+        VERIFY(instr->Bits(5, 4) == 0x1);
         if (instr->Bit(22) == 0x1) {
           Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
         } else {
@@ -1074,8 +1083,8 @@
 // vmsr
 // Dd = vsqrt(Dm)
 void Decoder::DecodeTypeVFP(Instruction* instr) {
-  ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
-  ASSERT(instr->Bits(11, 9) == 0x5);
+  VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
+  VERIFY(instr->Bits(11, 9) == 0x5);
 
   if (instr->Bit(4) == 0) {
     if (instr->Opc1Value() == 0x7) {
@@ -1166,7 +1175,7 @@
 
 void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
     Instruction* instr) {
-  ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+  VERIFY((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
          (instr->VAValue() == 0x0));
 
   bool to_arm_register = (instr->VLValue() == 0x1);
@@ -1180,8 +1189,8 @@
 
 
 void Decoder::DecodeVCMP(Instruction* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
-  ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+  VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  VERIFY(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
          (instr->Opc3Value() & 0x1));
 
   // Comparison.
@@ -1203,8 +1212,8 @@
 
 
 void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
-  ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
+  VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  VERIFY((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
 
   bool double_to_single = (instr->SzValue() == 1);
 
@@ -1217,8 +1226,8 @@
 
 
 void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
-  ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+  VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  VERIFY(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
          (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
 
   bool to_integer = (instr->Bit(18) == 1);
@@ -1265,7 +1274,7 @@
 // Ddst = MEM(Rbase + 4*offset).
 // MEM(Rbase + 4*offset) = Dsrc.
 void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
-  ASSERT(instr->TypeValue() == 6);
+  VERIFY(instr->TypeValue() == 6);
 
   if (instr->CoprocessorValue() == 0xA) {
     switch (instr->OpcodeValue()) {
@@ -1347,6 +1356,7 @@
   }
 }
 
+#undef VERIFY
 
 bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
   int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
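
Aside (illustration, not part of the patch): the VERIFY macro above downgrades decoder assertions to a graceful bail-out, so disassembling data that happens to sit in the instruction stream prints "unknown" instead of aborting (paired with an #undef once decoding is done). A minimal standalone model of the control flow:

#include <cstdio>

struct Instruction { unsigned bits; };

void Unknown(Instruction* instr) {
  std::printf("unknown (0x%08x)\n", instr->bits);
}

// Same shape as the decoder's macro: on failure, print "unknown" and
// return from the enclosing decode function.
#define VERIFY(condition) \
  if (!(condition)) {     \
    Unknown(instr);       \
    return;               \
  }

void DecodeType6(Instruction* instr) {
  VERIFY(((instr->bits >> 25) & 0x7) == 6);  // hypothetical type check
  std::printf("coprocessor instruction\n");
}

#undef VERIFY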
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 26bbd82..a10acd0 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,22 +35,22 @@
 // The ARM ABI does not specify the usage of register r9, which may be reserved
 // as the static base or thread register on some platforms, in which case we
 // leave it alone. Adjust the value of kR9Available accordingly:
-static const int kR9Available = 1;  // 1 if available to us, 0 if reserved
+const int kR9Available = 1;  // 1 if available to us, 0 if reserved
 
 
 // Register list in load/store instructions
 // Note that the bit values must match those used in actual instruction encoding
-static const int kNumRegs = 16;
+const int kNumRegs = 16;
 
 
 // Caller-saved/arguments registers
-static const RegList kJSCallerSaved =
+const RegList kJSCallerSaved =
   1 << 0 |  // r0 a1
   1 << 1 |  // r1 a2
   1 << 2 |  // r2 a3
   1 << 3;   // r3 a4
 
-static const int kNumJSCallerSaved = 4;
+const int kNumJSCallerSaved = 4;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
@@ -60,7 +60,7 @@
 
 
 // Callee-saved registers preserved when switching from C to JavaScript
-static const RegList kCalleeSaved =
+const RegList kCalleeSaved =
   1 <<  4 |  //  r4 v1
   1 <<  5 |  //  r5 v2
   1 <<  6 |  //  r6 v3
@@ -70,36 +70,45 @@
   1 << 10 |  // r10 v7
   1 << 11;   // r11 v8 (fp in JavaScript code)
 
-static const int kNumCalleeSaved = 7 + kR9Available;
+// Registers saved when calling into C++ (only for C++ calls that can't
+// cause a GC). The call code will take care of lr, fp, etc.
+const RegList kCallerSaved =
+  1 <<  0 |  // r0
+  1 <<  1 |  // r1
+  1 <<  2 |  // r2
+  1 <<  3 |  // r3
+  1 <<  9;   // r9
+
+
+const int kNumCalleeSaved = 7 + kR9Available;
 
 // Double registers d8 to d15 are callee-saved.
-static const int kNumDoubleCalleeSaved = 8;
+const int kNumDoubleCalleeSaved = 8;
 
 
 // Number of registers for which space is reserved in safepoints. Must be a
 // multiple of 8.
 // TODO(regis): Only 8 registers may actually be sufficient. Revisit.
-static const int kNumSafepointRegisters = 16;
+const int kNumSafepointRegisters = 16;
 
 // Define the list of registers actually saved at safepoints.
 // Note that the number of saved registers may be smaller than the reserved
 // space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-static const int kNumSafepointSavedRegisters =
-    kNumJSCallerSaved + kNumCalleeSaved;
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
 
 // ----------------------------------------------------
 
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset    = 0 * kPointerSize;
-  static const int kStateOffset   = 1 * kPointerSize;
-  static const int kContextOffset = 2 * kPointerSize;
-  static const int kFPOffset      = 3 * kPointerSize;
-  static const int kPCOffset      = 4 * kPointerSize;
+  static const int kNextOffset     = 0 * kPointerSize;
+  static const int kCodeOffset     = 1 * kPointerSize;
+  static const int kStateOffset    = 2 * kPointerSize;
+  static const int kContextOffset  = 3 * kPointerSize;
+  static const int kFPOffset       = 4 * kPointerSize;
 
-  static const int kSize = kPCOffset + kPointerSize;
+  static const int kSize = kFPOffset + kPointerSize;
 };
 
 
@@ -127,6 +136,9 @@
 
 class StandardFrameConstants : public AllStatic {
  public:
+  // Fixed part of the frame consists of return address, caller fp,
+  // context and function.
+  static const int kFixedFrameSize    =  4 * kPointerSize;
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
@@ -152,6 +164,8 @@
 class ArgumentsAdaptorFrameConstants : public AllStatic {
  public:
   static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+  static const int kFrameSize =
+      StandardFrameConstants::kFixedFrameSize + kPointerSize;
 };
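
Aside (illustration, not part of the patch): the new kFixedFrameSize makes the frame arithmetic explicit. With kPointerSize == 4 on ARM, the fixed part (return address, caller fp, context, function) is 16 bytes, and the arguments adaptor frame adds one word for the argument count. A standalone restatement:

#include <cassert>

const int kPointerSize = 4;  // ARM

// return address + caller fp + context + function:
const int kFixedFrameSize = 4 * kPointerSize;
// Adaptor frames carry one extra slot, the number of arguments:
const int kAdaptorFrameSize = kFixedFrameSize + kPointerSize;

int main() {
  assert(kFixedFrameSize == 16);
  assert(kAdaptorFrameSize == 20);
  return 0;
}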
 
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 50ed8b1..0cbd46e 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,11 +34,13 @@
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
+#include "isolate-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "stub-cache.h"
 
 #include "arm/code-stubs-arm.h"
+#include "arm/macro-assembler-arm.h"
 
 namespace v8 {
 namespace internal {
@@ -46,11 +48,6 @@
 #define __ ACCESS_MASM(masm_)
 
 
-static unsigned GetPropertyId(Property* property) {
-  return property->id();
-}
-
-
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
 // method EmitPatchInfo to record a marker back to the patchable code. This
@@ -113,13 +110,20 @@
 };
 
 
+// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
+int FullCodeGenerator::self_optimization_header_size() {
+  UNREACHABLE();
+  return 24;
+}
+
+
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right.  The actual
 // argument count matches the formal parameter count expected by the
 // function.
 //
 // The live registers are:
-//   o r1: the JS function object being called (ie, ourselves)
+//   o r1: the JS function object being called (i.e., ourselves)
 //   o cp: our context
 //   o fp: our caller's frame pointer
 //   o sp: stack pointer
@@ -127,10 +131,12 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-arm.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
-  ASSERT(info_ == NULL);
-  info_ = info;
-  scope_ = info->scope();
+void FullCodeGenerator::Generate() {
+  CompilationInfo* info = info_;
+  handler_table_ =
+      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+  profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -145,7 +151,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). r5 is zero for method calls and non-zero for
   // function calls.
-  if (info->is_strict_mode() || info->is_native()) {
+  if (!info->is_classic_mode() || info->is_native()) {
     Label ok;
     __ cmp(r5, Operand(0));
     __ b(eq, &ok);
@@ -155,6 +161,11 @@
     __ bind(&ok);
   }
 
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   int locals_count = info->scope()->num_stack_slots();
 
   __ Push(lr, fp, cp, r1);
@@ -200,13 +211,12 @@
         // Load parameter from stack.
         __ ldr(r0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        __ mov(r1, Operand(Context::SlotOffset(var->index())));
-        __ str(r0, MemOperand(cp, r1));
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have to use two more registers to avoid
-        // clobbering cp.
-        __ mov(r2, Operand(cp));
-        __ RecordWrite(r2, Operand(r1), r3, r0);
+        MemOperand target = ContextOperand(cp, var->index());
+        __ str(r0, target);
+
+        // Update the write barrier.
+        __ RecordWriteContextSlot(
+            cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
       }
     }
   }
@@ -234,7 +244,7 @@
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
     ArgumentsAccessStub::Type type;
-    if (is_strict_mode()) {
+    if (!is_classic_mode()) {
       type = ArgumentsAccessStub::NEW_STRICT;
     } else if (function()->has_duplicate_parameters()) {
       type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -263,8 +273,11 @@
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        int ignored = 0;
-        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+        VariableProxy* proxy = scope()->function();
+        ASSERT(proxy->var()->mode() == CONST ||
+               proxy->var()->mode() == CONST_HARMONY);
+        ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
+        EmitDeclaration(proxy, proxy->var()->mode(), NULL);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -305,19 +318,68 @@
 }
 
 
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+  __ mov(r2, Operand(profiling_counter_));
+  __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+  __ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
+  __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+  int reset_value = FLAG_interrupt_budget;
+  if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
+    // Self-optimization is a one-off thing: if it fails, don't try again.
+    reset_value = Smi::kMaxValue;
+  }
+  if (isolate()->IsDebuggerActive()) {
+    // Detect debug break requests as soon as possible.
+    reset_value = 10;
+  }
+  __ mov(r2, Operand(profiling_counter_));
+  __ mov(r3, Operand(Smi::FromInt(reset_value)));
+  __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+static const int kMaxBackEdgeWeight = 127;
+static const int kBackEdgeDistanceDivisor = 142;
+
+
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+                                       Label* back_edge_target) {
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
-  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-  __ cmp(sp, Operand(ip));
-  __ b(hs, &ok);
-  StackCheckStub stub;
-  __ CallStub(&stub);
+
+  if (FLAG_count_based_interrupts) {
+    int weight = 1;
+    if (FLAG_weighted_back_edges) {
+      ASSERT(back_edge_target->is_bound());
+      int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+      weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kBackEdgeDistanceDivisor));
+    }
+    EmitProfilingCounterDecrement(weight);
+    __ b(pl, &ok);
+    InterruptStub stub;
+    __ CallStub(&stub);
+  } else {
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmp(sp, Operand(ip));
+    __ b(hs, &ok);
+    StackCheckStub stub;
+    __ CallStub(&stub);
+  }
+
   // Record a mapping of this PC offset to the OSR id.  This is used to find
   // the AST id from the unoptimized code in order to use it as a key into
   // the deoptimization input data found in the optimized code.
   RecordStackCheck(stmt->OsrEntryId());
 
+  if (FLAG_count_based_interrupts) {
+    EmitProfilingCounterReset();
+  }
+
   __ bind(&ok);
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
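
Aside (illustration, not part of the patch): with count-based interrupts, the back-edge check above subtracts a weight that grows with the code distance back to the loop header, clamped to kMaxBackEdgeWeight. A standalone restatement using the constants from this hunk:

#include <algorithm>

const int kMaxBackEdgeWeight = 127;
const int kBackEdgeDistanceDivisor = 142;

// Weight subtracted from the profiling counter at a loop back edge.
int BackEdgeWeight(int distance_in_bytes) {
  return std::min(kMaxBackEdgeWeight,
                  std::max(1, distance_in_bytes / kBackEdgeDistanceDivisor));
}
// e.g. BackEdgeWeight(100) == 1, BackEdgeWeight(1420) == 10,
//      BackEdgeWeight(100000) == 127.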
@@ -339,6 +401,32 @@
       __ push(r0);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
+    if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
+      // Pretend that the exit is a backwards jump to the entry.
+      int weight = 1;
+      if (info_->ShouldSelfOptimize()) {
+        weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+      } else if (FLAG_weighted_back_edges) {
+        int distance = masm_->pc_offset();
+        weight = Min(kMaxBackEdgeWeight,
+                     Max(1, distance / kBackEdgeDistanceDivisor));
+      }
+      EmitProfilingCounterDecrement(weight);
+      Label ok;
+      __ b(pl, &ok);
+      __ push(r0);
+      if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
+        __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+        __ push(r2);
+        __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
+      } else {
+        InterruptStub stub;
+        __ CallStub(&stub);
+      }
+      __ pop(r0);
+      EmitProfilingCounterReset();
+      __ bind(&ok);
+    }
 
 #ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
@@ -391,7 +479,7 @@
   ASSERT(var->IsStackAllocated() || var->IsContextSlot());
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -414,7 +502,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -449,7 +537,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -508,7 +596,7 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -575,7 +663,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -665,17 +753,20 @@
   ASSERT(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ str(src, location);
+
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
-    __ RecordWrite(scratch0,
-                   Operand(Context::SlotOffset(var->index())),
-                   scratch1,
-                   src);
+    __ RecordWriteContextSlot(scratch0,
+                              location.offset(),
+                              src,
+                              scratch1,
+                              kLRHasBeenSaved,
+                              kDontSaveFPRegs);
   }
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -686,13 +777,7 @@
 
   Label skip;
   if (should_normalize) __ b(&skip);
-
-  ForwardBailoutStack* current = forward_bailout_stack_;
-  while (current != NULL) {
-    PrepareForBailout(current->expr(), state);
-    current = current->parent();
-  }
-
+  PrepareForBailout(expr, TOS_REG);
   if (should_normalize) {
     __ LoadRoot(ip, Heap::kTrueValueRootIndex);
     __ cmp(r0, ip);
@@ -703,16 +788,17 @@
 
 
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        Variable::Mode mode,
-                                        FunctionLiteral* function,
-                                        int* global_count) {
+                                        VariableMode mode,
+                                        FunctionLiteral* function) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++(*global_count);
+      ++global_count_;
       break;
 
     case Variable::PARAMETER:
@@ -721,7 +807,7 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ str(result_register(), StackOperand(variable));
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         Comment cmnt(masm_, "[ Declaration");
         __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
         __ str(ip, StackOperand(variable));
@@ -746,10 +832,16 @@
         __ str(result_register(), ContextOperand(cp, variable->index()));
         int offset = Context::SlotOffset(variable->index());
         // We know that we have written a function, which is not a smi.
-        __ mov(r1, Operand(cp));
-        __ RecordWrite(r1, Operand(offset), r2, result_register());
+        __ RecordWriteContextSlot(cp,
+                                  offset,
+                                  result_register(),
+                                  r2,
+                                  kLRHasBeenSaved,
+                                  kDontSaveFPRegs,
+                                  EMIT_REMEMBERED_SET,
+                                  OMIT_SMI_CHECK);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         Comment cmnt(masm_, "[ Declaration");
         __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
         __ str(ip, ContextOperand(cp, variable->index()));
@@ -761,11 +853,13 @@
     case Variable::LOOKUP: {
       Comment cmnt(masm_, "[ Declaration");
       __ mov(r2, Operand(variable->name()));
-      // Declaration nodes are always introduced in one of three modes.
-      ASSERT(mode == Variable::VAR ||
-             mode == Variable::CONST ||
-             mode == Variable::LET);
-      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      // Declaration nodes are always introduced in one of four modes.
+      ASSERT(mode == VAR ||
+             mode == CONST ||
+             mode == CONST_HARMONY ||
+             mode == LET);
+      PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
+          ? READ_ONLY : NONE;
       __ mov(r1, Operand(Smi::FromInt(attr)));
       // Push initial value, if any.
       // Note: For variables we must not push an initial value (such as
@@ -775,7 +869,7 @@
         __ Push(cp, r2, r1);
         // Push initial value for function declaration.
         VisitForStackValue(function);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
         __ Push(cp, r2, r1, r0);
       } else {
@@ -789,9 +883,6 @@
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
-
-
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
@@ -853,7 +944,7 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
     patch_site.EmitPatchInfo();
 
     __ cmp(r0, Operand(0));
@@ -906,6 +997,8 @@
   __ cmp(r0, null_value);
   __ b(eq, &exit);
 
+  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
   // Convert the object to a JS object.
   Label convert, done_convert;
   __ JumpIfSmi(r0, &convert);
@@ -917,52 +1010,17 @@
   __ bind(&done_convert);
   __ push(r0);
 
+  // Check for proxies.
+  Label call_runtime;
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
+  __ b(le, &call_runtime);
+
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  Label next, call_runtime;
-  // Preload a couple of values used in the loop.
-  Register  empty_fixed_array_value = r6;
-  __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
-  Register empty_descriptor_array_value = r7;
-  __ LoadRoot(empty_descriptor_array_value,
-              Heap::kEmptyDescriptorArrayRootIndex);
-  __ mov(r1, r0);
-  __ bind(&next);
-
-  // Check that there are no elements.  Register r1 contains the
-  // current JS object we've reached through the prototype chain.
-  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ cmp(r2, empty_fixed_array_value);
-  __ b(ne, &call_runtime);
-
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in r2 for the subsequent
-  // prototype load.
-  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
-  __ JumpIfSmi(r3, &call_runtime);
-
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors (r3).  This is the case if the next enumeration
-  // index field does not contain a smi.
-  __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
-  __ JumpIfSmi(r3, &call_runtime);
-
-  // For all objects but the receiver, check that the cache is empty.
-  Label check_prototype;
-  __ cmp(r1, r0);
-  __ b(eq, &check_prototype);
-  __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ cmp(r3, empty_fixed_array_value);
-  __ b(ne, &call_runtime);
-
-  // Load the prototype from the map and loop if non-null.
-  __ bind(&check_prototype);
-  __ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
-  __ cmp(r1, null_value);
-  __ b(ne, &next);
+  __ CheckEnumCache(null_value, &call_runtime);
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
@@ -991,7 +1049,7 @@
   __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
   __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
-  // Setup the four remaining stack slots.
+  // Set up the four remaining stack slots.
   __ push(r0);  // Map.
   __ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
   __ mov(r0, Operand(Smi::FromInt(0)));
@@ -1000,14 +1058,32 @@
   __ jmp(&loop);
 
   // We got a fixed array in register r0. Iterate through that.
+  Label non_proxy;
   __ bind(&fixed_array);
-  __ mov(r1, Operand(Smi::FromInt(0)));  // Map (0) - force slow check.
-  __ Push(r1, r0);
+
+  Handle<JSGlobalPropertyCell> cell =
+      isolate()->factory()->NewJSGlobalPropertyCell(
+          Handle<Object>(
+              Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+  RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+  __ LoadHeapObject(r1, cell);
+  __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
+  __ str(r2, FieldMemOperand(r1, JSGlobalPropertyCell::kValueOffset));
+
+  __ mov(r1, Operand(Smi::FromInt(1)));  // Smi indicates slow check
+  __ ldr(r2, MemOperand(sp, 0 * kPointerSize));  // Get enumerated object
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE);
+  __ b(gt, &non_proxy);
+  __ mov(r1, Operand(Smi::FromInt(0)));  // Zero indicates proxy
+  __ bind(&non_proxy);
+  __ Push(r1, r0);  // Smi and array
   __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
   __ mov(r0, Operand(Smi::FromInt(0)));
   __ Push(r1, r0);  // Fixed array length (as smi) and initial index.
 
   // Generate code for doing the condition check.
+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
   // Load the current count to r0, load the length to r1.
   __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
@@ -1019,18 +1095,23 @@
   __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
 
-  // Get the expected map from the stack or a zero map in the
+  // Get the expected map from the stack or a smi in the
   // permanent slow case into register r2.
   __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
 
   // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
+  // If not, we may have to filter the key.
   Label update_each;
   __ ldr(r1, MemOperand(sp, 4 * kPointerSize));
   __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
   __ cmp(r4, Operand(r2));
   __ b(eq, &update_each);
 
+  // For proxies, no filtering is done.
+  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+  __ cmp(r2, Operand(Smi::FromInt(0)));
+  __ b(eq, &update_each);
+
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
   // just skip it.
@@ -1046,7 +1127,7 @@
   __ mov(result_register(), r3);
   // Perform the assignment as if via '='.
   { EffectContext context(this);
-    EmitAssignment(stmt->each(), stmt->AssignmentId());
+    EmitAssignment(stmt->each());
   }
 
   // Generate code for the body of the loop.
@@ -1059,7 +1140,7 @@
   __ add(r0, r0, Operand(Smi::FromInt(1)));
   __ push(r0);
 
-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &loop);
   __ b(&loop);
 
   // Remove the pointers stored on the stack.
@@ -1067,6 +1148,7 @@
   __ Drop(5);
 
   // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1085,7 +1167,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(info->language_mode());
     __ mov(r0, Operand(info));
     __ push(r0);
     __ CallStub(&stub);
@@ -1116,7 +1198,7 @@
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
         __ tst(temp, temp);
@@ -1129,7 +1211,7 @@
     }
     // If no outer scope calls eval, we do not need to check more
     // context extensions.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
     s = s->outer_scope();
   }
 
@@ -1160,7 +1242,7 @@
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ Call(ic, mode);
+  CallIC(ic, mode);
 }
 
 
@@ -1173,7 +1255,7 @@
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
         __ tst(temp, temp);
@@ -1205,15 +1287,24 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+  if (var->mode() == DYNAMIC_GLOBAL) {
     EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ jmp(done);
-  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+  } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == Variable::CONST) {
+    if (local->mode() == CONST ||
+        local->mode() == CONST_HARMONY ||
+        local->mode() == LET) {
       __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
-      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+      if (local->mode() == CONST) {
+        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+      } else {  // LET || CONST_HARMONY
+        __ b(ne, done);
+        __ mov(r0, Operand(var->name()));
+        __ push(r0);
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      }
     }
     __ jmp(done);
   }
@@ -1235,7 +1326,7 @@
       __ ldr(r0, GlobalObjectOperand());
       __ mov(r2, Operand(var->name()));
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+      CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(r0);
       break;
     }
@@ -1246,24 +1337,64 @@
       Comment cmnt(masm_, var->IsContextSlot()
                               ? "Context variable"
                               : "Stack variable");
-      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
-        context()->Plug(var);
-      } else {
-        // Let and const need a read barrier.
-        GetVar(r0, var);
-        __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
-        if (var->mode() == Variable::LET) {
-          Label done;
-          __ b(ne, &done);
-          __ mov(r0, Operand(var->name()));
-          __ push(r0);
-          __ CallRuntime(Runtime::kThrowReferenceError, 1);
-          __ bind(&done);
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP
+        // always holds.
+        ASSERT(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is in
+        // the source physically located after the initializer of the variable.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        bool skip_init_check;
+        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+          skip_init_check = false;
         } else {
-          __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+          // Check that we always have a valid source position.
+          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          skip_init_check = var->mode() != CONST &&
+              var->initializer_position() < proxy->position();
         }
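+        // For example, in function f() { let x = 1; return x; } the read of
+        // x follows its initializer in the same scope, so the check can be
+        // skipped.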
-        context()->Plug(r0);
+
+        if (!skip_init_check) {
+          // Let and const need a read barrier.
+          GetVar(r0, var);
+          __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            Label done;
+            __ b(ne, &done);
+            __ mov(r0, Operand(var->name()));
+            __ push(r0);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+            __ bind(&done);
+          } else {
+            // Uninitialized const bindings outside of harmony mode are
+            // unholed: the hole reads as undefined.
+            ASSERT(var->mode() == CONST);
+            __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+          }
+          context()->Plug(r0);
+          break;
+        }
       }
+      context()->Plug(var);
       break;
     }
 
@@ -1335,12 +1466,23 @@
 }
 
 
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+  if (expression == NULL) {
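+    // Pushing null signals to kDefineOrRedefineAccessorProperty that this
+    // accessor was not provided in the literal (assumed).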
+    __ LoadRoot(r1, Heap::kNullValueRootIndex);
+    __ push(r1);
+  } else {
+    VisitForStackValue(expression);
+  }
+}
+
+
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
+  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ ldr(r3, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
-  __ mov(r1, Operand(expr->constant_properties()));
+  __ mov(r1, Operand(constant_properties));
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1349,10 +1491,15 @@
       : ObjectLiteral::kNoFlags;
   __ mov(r0, Operand(Smi::FromInt(flags)));
   __ Push(r3, r2, r1, r0);
+  int properties_count = constant_properties->length() / 2;
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    __ CallStub(&stub);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1364,6 +1511,7 @@
   // marked expressions, no store code is emitted.
   expr->CalculateEmitStore();
 
+  AccessorTable accessor_table(isolate()->zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
     ObjectLiteral::Property* property = expr->properties()->at(i);
     if (property->IsCompileTimeValue()) continue;
@@ -1386,10 +1534,10 @@
             VisitForAccumulatorValue(value);
             __ mov(r2, Operand(key->handle()));
             __ ldr(r1, MemOperand(sp));
-            Handle<Code> ic = is_strict_mode()
-                ? isolate()->builtins()->StoreIC_Initialize_Strict()
-                : isolate()->builtins()->StoreIC_Initialize();
-            __ Call(ic, RelocInfo::CODE_TARGET, key->id());
+            Handle<Code> ic = is_classic_mode()
+                ? isolate()->builtins()->StoreIC_Initialize()
+                : isolate()->builtins()->StoreIC_Initialize_Strict();
+            CallIC(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1412,21 +1560,29 @@
         }
         break;
       case ObjectLiteral::Property::GETTER:
+        accessor_table.lookup(key)->second->getter = value;
+        break;
       case ObjectLiteral::Property::SETTER:
-        // Duplicate receiver on stack.
-        __ ldr(r0, MemOperand(sp));
-        __ push(r0);
-        VisitForStackValue(key);
-        __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
-                           Smi::FromInt(1) :
-                           Smi::FromInt(0)));
-        __ push(r1);
-        VisitForStackValue(value);
-        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        accessor_table.lookup(key)->second->setter = value;
         break;
     }
   }
 
+  // Emit code to define accessors, using only a single call to the runtime for
+  // each pair of corresponding getters and setters.
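+  // For a literal like { get x() {}, set x(v) {} } both accessors share a
+  // single table entry under the key "x" and thus a single runtime call.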
+  for (AccessorTable::Iterator it = accessor_table.begin();
+       it != accessor_table.end();
+       ++it) {
+    __ ldr(r0, MemOperand(sp));  // Duplicate receiver.
+    __ push(r0);
+    VisitForStackValue(it->first);
+    EmitAccessor(it->second->getter);
+    EmitAccessor(it->second->setter);
+    __ mov(r0, Operand(Smi::FromInt(NONE)));
+    __ push(r0);
+    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+  }
+
   if (expr->has_function()) {
     ASSERT(result_saved);
     __ ldr(r0, MemOperand(sp));
@@ -1447,13 +1603,20 @@
 
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
+  Handle<FixedArray> constant_elements = expr->constant_elements();
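+  // constant_elements is a pair: element 0 holds the ElementsKind as a smi,
+  // element 1 the FixedArrayBase with the literal's constant values.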
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(constant_elements->get(1)));
 
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
-  __ mov(r1, Operand(expr->constant_elements()));
+  __ mov(r1, Operand(constant_elements));
   __ Push(r3, r2, r1);
-  if (expr->constant_elements()->map() ==
+  if (has_fast_elements && constant_elements_values->map() ==
       isolate()->heap()->fixed_cow_array_map()) {
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1465,8 +1628,13 @@
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           FLAG_smi_only_arrays);
+    FastCloneShallowArrayStub::Mode mode = has_fast_elements
+      ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+      : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
     __ CallStub(&stub);
   }
 
@@ -1489,15 +1657,23 @@
     }
     VisitForAccumulatorValue(subexpr);
 
-    // Store the subexpression value in the array's elements.
-    __ ldr(r1, MemOperand(sp));  // Copy of array literal.
-    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
-    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-    __ str(result_register(), FieldMemOperand(r1, offset));
-
-    // Update the write barrier for the array store with r0 as the scratch
-    // register.
-    __ RecordWrite(r1, Operand(offset), r2, result_register());
+    if (constant_elements_kind == FAST_ELEMENTS) {
+      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+      __ ldr(r6, MemOperand(sp));  // Copy of array literal.
+      __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
+      __ str(result_register(), FieldMemOperand(r1, offset));
+      // Update the write barrier for the array store.
+      __ RecordWriteField(r1, offset, result_register(), r2,
+                          kLRHasBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+    } else {
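+      // For other kinds (e.g. FAST_SMI_ONLY_ELEMENTS) go through the stub,
+      // since storing a non-smi value may require an elements-kind
+      // transition (assumed rationale).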
+      __ ldr(r1, MemOperand(sp));  // Copy of array literal.
+      __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
+      __ mov(r3, Operand(Smi::FromInt(i)));
+      __ mov(r4, Operand(Smi::FromInt(expr->literal_index())));
+      StoreArrayLiteralElementStub stub;
+      __ CallStub(&stub);
+    }
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
@@ -1629,7 +1805,7 @@
   __ mov(r2, Operand(key->handle()));
   // Call load IC. It has arguments receiver and property name r0 and r2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1637,7 +1813,7 @@
   SetSourcePosition(prop->position());
   // Call keyed load IC. It has arguments key and receiver in r0 and r1.
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1664,7 +1840,7 @@
 
   __ bind(&stub_call);
   BinaryOpStub stub(op, mode);
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   __ jmp(&done);
 
@@ -1715,7 +1891,7 @@
       __ mov(ip, Operand(scratch1, ASR, 31));
       __ cmp(ip, Operand(scratch2));
       __ b(ne, &stub_call);
-      __ tst(scratch1, Operand(scratch1));
+      __ cmp(scratch1, Operand(0));
       __ mov(right, Operand(scratch1), LeaveCC, ne);
       __ b(ne, &done);
       __ add(scratch2, right, Operand(left), SetCC);
@@ -1747,13 +1923,13 @@
   __ pop(r1);
   BinaryOpStub stub(op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   context()->Plug(r0);
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
@@ -1785,10 +1961,10 @@
       __ mov(r1, r0);
       __ pop(r0);  // Restore value.
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
-      __ Call(ic);
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
+      CallIC(ic);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1798,14 +1974,13 @@
       __ mov(r1, r0);
       __ pop(r2);
       __ pop(r0);  // Restore value.
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
-      __ Call(ic);
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      CallIC(ic);
       break;
     }
   }
-  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
   context()->Plug(r0);
 }
 
@@ -1816,10 +1991,10 @@
     // Global var, const, or let.
     __ mov(r2, Operand(var->name()));
     __ ldr(r1, GlobalObjectOperand());
-    Handle<Code> ic = is_strict_mode()
-        ? isolate()->builtins()->StoreIC_Initialize_Strict()
-        : isolate()->builtins()->StoreIC_Initialize();
-    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    Handle<Code> ic = is_classic_mode()
+        ? isolate()->builtins()->StoreIC_Initialize()
+        : isolate()->builtins()->StoreIC_Initialize_Strict();
+    CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
     // Const initializers need a write barrier.
@@ -1844,12 +2019,12 @@
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
 
-  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+  } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
     if (var->IsLookupSlot()) {
       __ push(r0);  // Value.
       __ mov(r1, Operand(var->name()));
-      __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+      __ mov(r0, Operand(Smi::FromInt(language_mode())));
       __ Push(cp, r1, r0);  // Context, name, language mode.
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     } else {
@@ -1869,12 +2044,14 @@
         // RecordWrite may destroy all its register arguments.
         __ mov(r3, result_register());
         int offset = Context::SlotOffset(var->index());
-        __ RecordWrite(r1, Operand(offset), r2, r3);
+        __ RecordWriteContextSlot(
+            r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
       }
     }
 
-  } else if (var->mode() != Variable::CONST) {
-    // Assignment to var or initializing assignment to let.
+  } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+    // Assignment to var or initializing assignment to let/const
+    // in harmony mode.
     if (var->IsStackAllocated() || var->IsContextSlot()) {
       MemOperand location = VarOperand(var, r1);
       if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1887,13 +2064,15 @@
       __ str(r0, location);
       if (var->IsContextSlot()) {
         __ mov(r3, r0);
-        __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);
+        int offset = Context::SlotOffset(var->index());
+        __ RecordWriteContextSlot(
+            r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
       }
     } else {
       ASSERT(var->IsLookupSlot());
       __ push(r0);  // Value.
       __ mov(r1, Operand(var->name()));
-      __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+      __ mov(r0, Operand(Smi::FromInt(language_mode())));
       __ Push(cp, r1, r0);  // Context, name, language mode.
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
@@ -1930,10 +2109,10 @@
     __ pop(r1);
   }
 
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->StoreIC_Initialize_Strict()
-      : isolate()->builtins()->StoreIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->StoreIC_Initialize()
+      : isolate()->builtins()->StoreIC_Initialize_Strict();
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1976,10 +2155,10 @@
     __ pop(r2);
   }
 
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize()
+      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2013,6 +2192,14 @@
   }
 }
 
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+                               RelocInfo::Mode rmode,
+                               unsigned ast_id) {
+  ic_total_count_++;
+  __ Call(code, rmode, ast_id);
+}
+
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
@@ -2030,7 +2217,7 @@
   // Call the IC initialization code.
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-  __ Call(ic, mode, expr->id());
+  CallIC(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2063,7 +2250,7 @@
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
   __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize));  // Key.
-  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2083,6 +2270,7 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   CallFunctionStub stub(arg_count, flags);
+  __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2091,8 +2279,7 @@
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
-                                                      int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
@@ -2101,22 +2288,20 @@
   }
   __ push(r1);
 
-  // Push the receiver of the enclosing function and do runtime call.
+  // Push the receiver of the enclosing function.
   int receiver_offset = 2 + info_->scope()->num_parameters();
   __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
   __ push(r1);
-  // Push the strict mode flag. In harmony mode every eval call
-  // is a strict mode eval call.
-  StrictModeFlag strict_mode = strict_mode_flag();
-  if (FLAG_harmony_block_scoping) {
-    strict_mode = kStrictMode;
-  }
-  __ mov(r1, Operand(Smi::FromInt(strict_mode)));
+  // Push the language mode.
+  __ mov(r1, Operand(Smi::FromInt(language_mode())));
   __ push(r1);
 
-  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
-                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
-                 : Runtime::kResolvePossiblyDirectEval, 4);
+  // Push the start position of the scope the call resides in.
+  __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
+  __ push(r1);
+
+  // Do the runtime call.
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
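+  // The five arguments are the function (pushed by the caller), the copy of
+  // the first argument, the enclosing receiver, the language mode, and the
+  // scope start position.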
 }
 
 
@@ -2150,28 +2335,11 @@
         VisitForStackValue(args->at(i));
       }
 
-      // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly
-      // in generated code. If we succeed, there is no need to perform a
-      // context lookup in the runtime system.
-      Label done;
-      Variable* var = proxy->var();
-      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
-        Label slow;
-        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
-        // Push the function and resolve eval.
-        __ push(r0);
-        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
-        __ jmp(&done);
-        __ bind(&slow);
-      }
-
       // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
       __ push(r1);
-      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      __ bind(&done);
+      EmitResolvePossiblyDirectEval(arg_count);
 
       // The runtime call returns a pair of values in r0 (function) and
       // r1 (receiver). Touch up the stack with the right values.
@@ -2182,6 +2350,7 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+    __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
@@ -2288,14 +2457,29 @@
   __ mov(r0, Operand(arg_count));
   __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
 
-  Handle<Code> construct_builtin =
-      isolate()->builtins()->JSConstructCall();
-  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+  // Record call targets in unoptimized code, but not in the snapshot.
+  CallFunctionFlags flags;
+  if (!Serializer::enabled()) {
+    flags = RECORD_CALL_TARGET;
+    Handle<Object> uninitialized =
+        TypeFeedbackCells::UninitializedSentinel(isolate());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+    RecordTypeFeedbackCell(expr->id(), cell);
+    __ mov(r2, Operand(cell));
+  } else {
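+    // When generating the snapshot, no feedback cell can be embedded, so the
+    // call target is not recorded (assumed).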
+    flags = NO_CALL_FUNCTION_FLAGS;
+  }
+
+  CallConstructStub stub(flags);
+  __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   context()->Plug(r0);
 }
 
 
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2307,7 +2491,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ tst(r0, Operand(kSmiTagMask));
   Split(eq, if_true, if_false, fall_through);
 
@@ -2315,7 +2499,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2327,7 +2512,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ tst(r0, Operand(kSmiTagMask | 0x80000000));
   Split(eq, if_true, if_false, fall_through);
 
@@ -2335,7 +2520,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2360,14 +2546,15 @@
   __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ b(lt, if_false);
   __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(le, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2381,14 +2568,15 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ge, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2404,7 +2592,7 @@
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
   __ tst(r1, Operand(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ne, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2412,8 +2600,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2492,12 +2680,13 @@
   __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2511,14 +2700,15 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2532,14 +2722,15 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2553,7 +2744,7 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2561,8 +2752,8 @@
 
 
 
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2585,14 +2776,15 @@
   __ bind(&check_frame_marker);
   __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
   __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2608,14 +2800,15 @@
 
   __ pop(r1);
   __ cmp(r0, r1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in edx and the formal
@@ -2629,9 +2822,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
   Label exit;
   // Get the number of formal parameters.
   __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2651,7 +2843,8 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2662,20 +2855,24 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, and one of them is at
+  // either end of the type range for JS object types. Saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
   // Map is now in r0.
   __ b(lt, &null);
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ b(eq, &function);
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-  __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
-  __ b(ge, &function);
+  __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ b(eq, &function);
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
 
-  // Check if the constructor in the map is a function.
+  // Check if the constructor in the map is a JS function.
   __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
   __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
   __ b(ne, &non_function_constructor);
@@ -2707,7 +2904,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2715,6 +2912,7 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
@@ -2728,9 +2926,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
 
@@ -2750,7 +2947,8 @@
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
   if (CpuFeatures::IsSupported(VFP3)) {
     __ PrepareCallCFunction(1, r0);
-    __ mov(r0, Operand(ExternalReference::isolate_address()));
+    __ ldr(r0, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
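+    // The global context is passed instead of the isolate address, making
+    // the random state per-context (assumed from the changed argument).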
     __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
     CpuFeatures::Scope scope(VFP3);
@@ -2770,8 +2968,9 @@
     __ mov(r0, r4);
   } else {
     __ PrepareCallCFunction(2, r0);
+    __ ldr(r1, ContextOperand(context_register(), Context::GLOBAL_INDEX));
     __ mov(r0, Operand(r4));
-    __ mov(r1, Operand(ExternalReference::isolate_address()));
+    __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
     __ CallCFunction(
         ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
   }
@@ -2780,9 +2979,10 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2792,9 +2992,10 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2805,9 +3006,9 @@
 }
 
 
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
-
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
   Label done;
@@ -2823,20 +3024,69 @@
 }
 
 
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
-  // Load the arguments on the stack and call the runtime function.
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-  MathPowStub stub;
-  __ CallStub(&stub);
+  ASSERT_NE(NULL, args->at(1)->AsLiteral());
+  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label runtime, done;
+  Register object = r0;
+  Register result = r0;
+  Register scratch0 = r9;
+  Register scratch1 = r1;
+
+#ifdef DEBUG
+  __ AbortIfSmi(object);
+  __ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
+  __ Assert(eq, "Trying to get date field from non-date.");
+#endif
+
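+  // Field 0 is the date value itself.  Fields below kFirstUncachedField are
+  // read from the object's cache while its cache stamp matches the isolate's
+  // date cache stamp; otherwise the C function is called.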
+  if (index->value() == 0) {
+    __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ mov(scratch1, Operand(stamp));
+      __ ldr(scratch1, MemOperand(scratch1));
+      __ ldr(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
+      __ cmp(scratch1, scratch0);
+      __ b(ne, &runtime);
+      __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+                                             kPointerSize * index->value()));
+      __ jmp(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2, scratch1);
+    __ mov(r1, Operand(index));
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ bind(&done);
+  }
   context()->Plug(r0);
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+  // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  if (CpuFeatures::IsSupported(VFP3)) {
+    MathPowStub stub(MathPowStub::ON_STACK);
+    __ CallStub(&stub);
+  } else {
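+    // Without VFP3 the ON_STACK stub has no hardware FP path; fall back to
+    // the runtime function (assumed rationale).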
+    __ CallRuntime(Runtime::kMath_pow, 2);
+  }
+  context()->Plug(r0);
+}
 
+
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(r1);  // r0 = value. r1 = object.
@@ -2853,16 +3103,18 @@
   __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
-  __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
+  __ mov(r2, r0);
+  __ RecordWriteField(
+      r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(r0);
 }
 
 
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 1);
-
   // Load the argument on the stack and call the stub.
   VisitForStackValue(args->at(0));
 
@@ -2872,9 +3124,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
-
   VisitForAccumulatorValue(args->at(0));
 
   Label done;
@@ -2890,15 +3142,14 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
-
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
   Register object = r1;
   Register index = r0;
-  Register scratch = r2;
   Register result = r3;
 
   __ pop(object);
@@ -2908,7 +3159,6 @@
   Label done;
   StringCharCodeAtGenerator generator(object,
                                       index,
-                                      scratch,
                                       result,
                                       &need_conversion,
                                       &need_conversion,
@@ -2937,16 +3187,15 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
-
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
   Register object = r1;
   Register index = r0;
-  Register scratch1 = r2;
-  Register scratch2 = r3;
+  Register scratch = r3;
   Register result = r0;
 
   __ pop(object);
@@ -2956,8 +3205,7 @@
   Label done;
   StringCharAtGenerator generator(object,
                                   index,
-                                  scratch1,
-                                  scratch2,
+                                  scratch,
                                   result,
                                   &need_conversion,
                                   &need_conversion,
@@ -2986,9 +3234,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
-
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -2998,9 +3246,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
-
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3010,10 +3258,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3021,10 +3270,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3032,10 +3282,23 @@
 }
 
 
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallStub(&stub);
+  context()->Plug(r0);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3043,8 +3306,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
   // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3052,7 +3316,8 @@
 }
 
 
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3061,18 +3326,31 @@
   }
   VisitForAccumulatorValue(args->last());  // Function.
 
+  // Check for proxy.
+  Label proxy, done;
+  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_PROXY_TYPE);
+  __ b(eq, &proxy);
+
   // InvokeFunction requires the function in r1. Move it in there.
   __ mov(r1, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(r1, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ jmp(&done);
+
+  __ bind(&proxy);
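+  // Function proxies cannot be invoked via InvokeFunction; Runtime::kCall
+  // dispatches to the proxy's call trap instead (assumed).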
+  __ push(r0);
+  __ CallRuntime(Runtime::kCall, args->length());
+  __ bind(&done);
+
   context()->Plug(r0);
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3082,7 +3360,8 @@
 }
 
 
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3141,16 +3420,31 @@
   __ str(scratch1, MemOperand(index2, 0));
   __ str(scratch2, MemOperand(index1, 0));
 
-  Label new_space;
-  __ InNewSpace(elements, scratch1, eq, &new_space);
+  Label no_remembered_set;
+  __ CheckPageFlag(elements,
+                   scratch1,
+                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                   ne,
+                   &no_remembered_set);
   // Possible optimization: do a check that both values are Smis
   // (or them and test against Smi mask.)
 
-  __ mov(scratch1, elements);
-  __ RecordWriteHelper(elements, index1, scratch2);
-  __ RecordWriteHelper(scratch1, index2, scratch2);  // scratch1 holds elements.
+  // We are swapping two objects in an array and the incremental marker never
+  // pauses in the middle of scanning a single object.  Therefore the
+  // incremental marker is not disturbed, so we don't need to call the
+  // RecordWrite stub that notifies the incremental marker.
+  __ RememberedSetHelper(elements,
+                         index1,
+                         scratch2,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
+  __ RememberedSetHelper(elements,
+                         index2,
+                         scratch2,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
 
-  __ bind(&new_space);
+  __ bind(&no_remembered_set);
   // We are done. Drop elements from the stack, and return undefined.
   __ Drop(3);
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -3164,9 +3458,9 @@
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
-
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
   int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
 
@@ -3215,7 +3509,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   Register right = r0;
@@ -3255,7 +3550,8 @@
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
@@ -3267,14 +3563,15 @@
 
   __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
   __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3289,12 +3586,12 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       empty_separator_loop, one_char_separator_loop,
       one_char_separator_loop_entry, long_separator_loop;
-
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(0));
@@ -3457,7 +3754,7 @@
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Replace separator with its ascii character value.
+  // Replace separator with its ASCII character value.
   __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
   // Jump into the loop after the code that copies the separator, so the first
   // element is not preceded by a separator
@@ -3468,7 +3765,7 @@
   //   result_pos: the position to which we are currently copying characters.
   //   element: Current array element.
   //   elements_end: Array end.
-  //   separator: Single separator ascii char (in lower byte).
+  //   separator: Single separator ASCII char (in lower byte).
 
   // Copy the separator character to the result.
   __ strb(separator, MemOperand(result_pos, 1, PostIndex));
@@ -3550,7 +3847,7 @@
     RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
         isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-    __ Call(ic, mode, expr->id());
+    CallIC(ic, mode, expr->id());
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3571,7 +3868,9 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+            ? kNonStrictMode : kStrictMode;
+        __ mov(r1, Operand(Smi::FromInt(strict_mode_flag)));
         __ push(r1);
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
         context()->Plug(r0);
@@ -3579,7 +3878,7 @@
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
           __ ldr(r2, GlobalObjectOperand());
           __ mov(r1, Operand(var->name()));
@@ -3622,18 +3921,35 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
       } else {
-        Label materialize_true, materialize_false;
-        Label* if_true = NULL;
-        Label* if_false = NULL;
-        Label* fall_through = NULL;
-
-        // Notice that the labels are swapped.
-        context()->PrepareTest(&materialize_true, &materialize_false,
-                               &if_false, &if_true, &fall_through);
-        if (context()->IsTest()) ForwardBailoutToChild(expr);
-        VisitForControl(expr->expression(), if_true, if_false, fall_through);
-        context()->Plug(if_false, if_true);  // Labels swapped.
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+        if (context()->IsStackValue()) __ push(r0);
+        __ jmp(&done);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+        if (context()->IsStackValue()) __ push(r0);
+        __ bind(&done);
       }
       break;
     }
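
The rewritten unary NOT case never negates a value: in a test context it simply recompiles the operand with the true and false labels exchanged, and in value contexts it materializes true/false on the swapped branches so the optimizer gets its bailout points. A minimal sketch of the label-swapping idea, using a hypothetical Expr type and a printf "emitter" rather than V8's full codegen:

#include <cstdio>

// Hypothetical AST node: either a leaf test or a logical NOT of another node.
struct Expr {
  bool is_not;
  const Expr* operand;
  const char* name;  // leaf condition name (leaves only)
};

// Compile `e` for control flow: jump to true_label or false_label.
// NOT costs no code at all - it just swaps the jump targets.
void CompileForControl(const Expr* e, const char* true_label,
                       const char* false_label) {
  if (e->is_not) {
    CompileForControl(e->operand, false_label, true_label);  // targets swapped
    return;
  }
  std::printf("  test %s; jnz %s; jmp %s\n", e->name, true_label, false_label);
}

int main() {
  Expr leaf{false, nullptr, "x"};
  Expr once{true, &leaf, nullptr};
  Expr twice{true, &once, nullptr};
  CompileForControl(&twice, "L_true", "L_false");  // !!x tests x directly
}
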
@@ -3686,7 +4002,7 @@
   // accumulator register r0.
   VisitForAccumulatorValue(expr->expression());
   SetSourcePosition(expr->position());
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   context()->Plug(r0);
 }
 
@@ -3797,7 +4113,7 @@
   SetSourcePosition(expr->position());
 
   BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -3826,10 +4142,10 @@
     case NAMED_PROPERTY: {
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
       __ pop(r1);
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
-      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3843,10 +4159,10 @@
     case KEYED_PROPERTY: {
       __ pop(r1);  // Key.
       __ pop(r2);  // Receiver.
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
-      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3872,7 +4188,7 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    __ Call(ic);
+    CallIC(ic);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(r0);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -3892,20 +4208,25 @@
     context()->Plug(r0);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInCurrentContext(expr);
+    VisitInDuplicateContext(expr);
   }
 }
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check,
-                                                 Label* if_true,
-                                                 Label* if_false,
-                                                 Label* fall_through) {
+                                                 Expression* sub_expr,
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(expr);
+    VisitForTypeofValue(sub_expr);
   }
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(r0, if_true);
@@ -3942,9 +4263,11 @@
 
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(r0, if_false);
-    __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
-    Split(ge, if_true, if_false, fall_through);
-
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
+    __ b(eq, if_true);
+    __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
+    Split(eq, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(r0, if_false);
     if (!FLAG_harmony_typeof) {
@@ -3963,18 +4286,7 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
-                                                    Label* if_true,
-                                                    Label* if_false,
-                                                    Label* fall_through) {
-  VisitForAccumulatorValue(expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
-  Split(eq, if_true, if_false, fall_through);
+  context()->Plug(if_true, if_false);
 }
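
EmitLiteralCompareTypeof now owns its whole test setup and compiles `typeof x == "literal"` by dispatching on the literal at compile time, emitting only the checks for that one answer. A rough model of the dispatch, with made-up tags standing in for V8 instance types:

#include <cstdio>
#include <string>

enum Tag { kSmi, kHeapNumber, kString, kBooleanOddball,
           kFunction, kFunctionProxy, kPlainObject };

// The literal from `typeof x == "..."` is known at compile time, so one
// predicate is selected up front instead of materializing a string and
// running a generic comparison.
bool TypeofMatches(const std::string& check, Tag tag) {
  if (check == "number")  return tag == kSmi || tag == kHeapNumber;
  if (check == "string")  return tag == kString;
  if (check == "boolean") return tag == kBooleanOddball;
  // Two checks for "function": ordinary functions and function proxies
  // both answer "function", which is why the ARM code tests two types.
  if (check == "function") return tag == kFunction || tag == kFunctionProxy;
  if (check == "object")  return tag == kPlainObject;
  return false;  // unknown literal: statically false, jump straight to if_false
}

int main() {
  std::printf("%d %d\n", TypeofMatches("function", kFunctionProxy),  // 1
                         TypeofMatches("object", kFunction));        // 0
}
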
 
 
@@ -3982,9 +4294,12 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -3992,20 +4307,13 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
-    context()->Plug(if_true, if_false);
-    return;
-  }
-
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(ip, Heap::kTrueValueRootIndex);
       __ cmp(r0, ip);
       Split(eq, if_true, if_false, fall_through);
@@ -4015,7 +4323,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       // The stub returns 0 for true.
       __ tst(r0, r0);
       Split(eq, if_true, if_false, fall_through);
@@ -4029,33 +4337,25 @@
         case Token::EQ_STRICT:
         case Token::EQ:
           cond = eq;
-          __ pop(r1);
           break;
         case Token::LT:
           cond = lt;
-          __ pop(r1);
           break;
         case Token::GT:
-          // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cond = lt;
-          __ mov(r1, result_register());
-          __ pop(r0);
+          cond = gt;
           break;
         case Token::LTE:
-          // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cond = ge;
-          __ mov(r1, result_register());
-          __ pop(r0);
+          cond = le;
           break;
         case Token::GTE:
           cond = ge;
-          __ pop(r1);
           break;
         case Token::IN:
         case Token::INSTANCEOF:
         default:
           UNREACHABLE();
       }
+      __ pop(r1);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4071,9 +4371,9 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       __ cmp(r0, Operand(0));
       Split(cond, if_true, if_false, fall_through);
     }
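
The old code expressed GT and LTE by swapping the operands into the opposite registers and reusing the lt/ge conditions; the rewritten code keeps the operands in source order (a single pop of r1) and uses gt/le directly, so each token maps 1:1 onto a machine condition. The mapping in isolation (illustrative enums, not V8's):

#include <cassert>

enum Token { LT, GT, LTE, GTE };
enum Condition { lt, gt, le, ge };

// With left in r1 and right in r0 after the one pop, no register
// shuffling is needed; the condition alone encodes the operator.
Condition TokenToCondition(Token op) {
  switch (op) {
    case LT:  return lt;
    case GT:  return gt;
    case LTE: return le;
    case GTE: return ge;
  }
  assert(false && "unreachable");
  return lt;
}

int main() {
  assert(TokenToCondition(GT) == gt);
  assert(TokenToCondition(LTE) == le);
}
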
@@ -4085,8 +4385,9 @@
 }
 
 
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
-  Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4094,15 +4395,21 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(expr->expression());
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ LoadRoot(r1, Heap::kNullValueRootIndex);
+  VisitForAccumulatorValue(sub_expr);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Heap::RootListIndex nil_value = nil == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ LoadRoot(r1, nil_value);
   __ cmp(r0, r1);
-  if (expr->is_strict()) {
+  if (expr->op() == Token::EQ_STRICT) {
     Split(eq, if_true, if_false, fall_through);
   } else {
+    Heap::RootListIndex other_nil_value = nil == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     __ b(eq, if_true);
-    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(r1, other_nil_value);
     __ cmp(r0, r1);
     __ b(eq, if_true);
     __ JumpIfSmi(r0, if_false);
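
EmitLiteralCompareNil folds `x == null`, `x === null`, and the undefined variants into one helper: strict equality tests a single root value, while loose equality also accepts the other nil, since null == undefined in ECMA-262. A small model of those semantics, with an illustrative Value enum:

#include <cstdio>

enum Value { kNull, kUndefined, kSmiZero, kPlainObject };

// Strict equality accepts exactly one nil; loose equality also accepts
// the other one. (The full code additionally lets undetectable objects
// compare loosely equal to nil; that case is omitted here.)
bool CompareNil(Value v, Value nil, bool strict) {
  if (v == nil) return true;
  if (strict) return false;
  Value other_nil = (nil == kNull) ? kUndefined : kNull;
  return v == other_nil;
}

int main() {
  std::printf("%d %d %d\n",
              CompareNil(kUndefined, kNull, false),  // 1: undefined == null
              CompareNil(kUndefined, kNull, true),   // 0: undefined !== null
              CompareNil(kSmiZero, kNull, false));   // 0: 0 != null
}
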
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 2e49cae..e843657 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -208,7 +208,8 @@
 
   // Update the write barrier. Make sure not to clobber the value.
   __ mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1);
+  __ RecordWrite(
+      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
 }
 
 
@@ -381,10 +382,10 @@
 
 // The generated code does not accept smi keys.
 // The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                          int argc,
-                                          Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                               int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- r1    : receiver
   //  -- r2    : name
@@ -394,11 +395,11 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          MONOMORPHIC,
-                                         extra_ic_state,
+                                         extra_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, r1, r2, r3, r4, r5);
+      masm, flags, r1, r2, r3, r4, r5, r6);
 
   // If the stub cache probing failed, the receiver might be a value.
   // For value objects, we use the map of the prototype objects for
@@ -437,7 +438,7 @@
   // Probe the stub cache for the value object.
   __ bind(&probe);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, r1, r2, r3, r4, r5);
+      masm, flags, r1, r2, r3, r4, r5, r6);
 
   __ bind(&miss);
 }
@@ -463,7 +464,7 @@
 }
 
 
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -485,10 +486,10 @@
 }
 
 
-static void GenerateCallMiss(MacroAssembler* masm,
-                             int argc,
-                             IC::UtilityId id,
-                             Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+                              int argc,
+                              IC::UtilityId id,
+                              Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -504,21 +505,22 @@
   // Get the receiver of the function from the stack.
   __ ldr(r3, MemOperand(sp, argc * kPointerSize));
 
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the receiver and the name of the function.
-  __ Push(r3, r2);
+    // Push the receiver and the name of the function.
+    __ Push(r3, r2);
 
-  // Call the entry.
-  __ mov(r0, Operand(2));
-  __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
+    // Call the entry.
+    __ mov(r0, Operand(2));
+    __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
 
-  CEntryStub stub(1);
-  __ CallStub(&stub);
+    CEntryStub stub(1);
+    __ CallStub(&stub);
 
-  // Move result to r1 and leave the internal frame.
-  __ mov(r1, Operand(r0));
-  __ LeaveInternalFrame();
+    // Move result to r1 and leave the internal frame.
+    __ mov(r1, Operand(r0));
+  }
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
@@ -539,7 +541,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   ParameterCount actual(argc);
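
The EnterInternalFrame/LeaveInternalFrame pairs throughout this file are replaced by a block-scoped FrameScope, so the frame teardown is tied to the C++ scope instead of a manually matched call that can be forgotten on some path. A sketch of the RAII pattern with a hypothetical Assembler, not V8's MacroAssembler:

#include <cstdio>

struct Assembler {
  void EnterFrame() { std::printf("  enter internal frame\n"); }
  void LeaveFrame() { std::printf("  leave internal frame\n"); }
  void Emit(const char* s) { std::printf("  %s\n", s); }
};

// Constructing the scope emits the frame prologue; the destructor emits
// the epilogue when the block closes, so enter/leave can never mismatch.
class FrameScope {
 public:
  explicit FrameScope(Assembler* masm) : masm_(masm) { masm_->EnterFrame(); }
  ~FrameScope() { masm_->LeaveFrame(); }
 private:
  Assembler* masm_;
};

int main() {
  Assembler masm;
  {
    FrameScope scope(&masm);
    masm.Emit("push r3, r2");
    masm.Emit("call entry");
  }  // frame epilogue emitted here
  masm.Emit("ret");
}
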
@@ -551,18 +553,6 @@
 }
 
 
-void CallIC::GenerateMiss(MacroAssembler* masm,
-                          int argc,
-                          Code::ExtraICState extra_ic_state) {
-  // ----------- S t a t e -------------
-  //  -- r2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
-}
-
-
 void CallIC::GenerateMegamorphic(MacroAssembler* masm,
                                  int argc,
                                  Code::ExtraICState extra_ic_state) {
@@ -578,27 +568,6 @@
 }
 
 
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  //  -- r2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-
-  GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  //  -- r2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- r2    : name
@@ -650,12 +619,13 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
-  __ EnterInternalFrame();
-  __ push(r2);  // save the key
-  __ Push(r1, r2);  // pass the receiver and the key
-  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-  __ pop(r2);  // restore the key
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(r2);  // Save the key.
+    __ Push(r1, r2);  // Pass the receiver and the key.
+    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+    __ pop(r2);  // Restore the key.
+  }
   __ mov(r1, r0);
   __ jmp(&do_call);
 
@@ -715,7 +685,7 @@
   __ JumpIfSmi(r2, &miss);
   __ IsObjectJSStringType(r2, r0, &miss);
 
-  GenerateCallNormal(masm, argc);
+  CallICBase::GenerateNormal(masm, argc);
   __ bind(&miss);
   GenerateMiss(masm, argc);
 }
@@ -736,7 +706,7 @@
   Code::Flags flags =
       Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, r0, r2, r3, r4, r5);
+      masm, flags, r0, r2, r3, r4, r5, r6);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
@@ -908,7 +878,8 @@
       GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
   __ str(r0, mapped_location);
   __ add(r6, r3, r5);
-  __ RecordWrite(r3, r6, r9);
+  __ mov(r9, r0);
+  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in r3.
@@ -916,7 +887,8 @@
       GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
   __ str(r0, unmapped_location);
   __ add(r6, r3, r4);
-  __ RecordWrite(r3, r6, r9);
+  __ mov(r9, r0);
+  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
@@ -1059,15 +1031,34 @@
   __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
   __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
   __ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
-  __ And(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
+  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+  __ And(r3, r3, Operand(mask));
 
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
   ExternalReference cache_keys =
       ExternalReference::keyed_lookup_cache_keys(isolate);
+
   __ mov(r4, Operand(cache_keys));
   __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
-  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));  // Move r4 to symbol.
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    // Load map and move r4 to next entry.
+    __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
+    __ cmp(r2, r5);
+    __ b(ne, &try_next_entry);
+    __ ldr(r5, MemOperand(r4, -kPointerSize));  // Load symbol.
+    __ cmp(r0, r5);
+    __ b(eq, &hit_on_nth_entry[i]);
+    __ bind(&try_next_entry);
+  }
+
+  // Last entry: Load map and move r4 to symbol.
+  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
   __ cmp(r2, r5);
   __ b(ne, &slow);
   __ ldr(r5, MemOperand(r4));
@@ -1081,13 +1072,25 @@
   // r3     : lookup cache index
   ExternalReference cache_field_offsets =
       ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-  __ mov(r4, Operand(cache_field_offsets));
-  __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
-  __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
-  __ sub(r5, r5, r6, SetCC);
-  __ b(ge, &property_array_property);
+
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    __ mov(r4, Operand(cache_field_offsets));
+    if (i != 0) {
+      __ add(r3, r3, Operand(i));
+    }
+    __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
+    __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
+    __ sub(r5, r5, r6, SetCC);
+    __ b(ge, &property_array_property);
+    if (i != 0) {
+      __ jmp(&load_in_object_property);
+    }
+  }
 
   // Load in-object property.
+  __ bind(&load_in_object_property);
   __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
   __ add(r6, r6, r5);  // Index from start of object.
   __ sub(r1, r1, Operand(kHeapObjectTag));  // Remove the heap tag.
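
The keyed lookup cache probe is unrolled: the hash now selects a bucket of kEntriesPerBucket (map, symbol) pairs, each entry is compared in turn, and a hit at entry i indexes the parallel field-offset table at the same position, which is what the `add(r3, r3, Operand(i))` accounts for. A miniature two-way cache with the same shape (sizes and hash are illustrative, not V8's):

#include <cstdint>
#include <cstdio>

constexpr int kEntriesPerBucket = 2;
constexpr int kBuckets = 64;
struct Key { const void* map; const void* symbol; };
Key keys[kBuckets * kEntriesPerBucket];       // (map, symbol) pairs
int field_offsets[kBuckets * kEntriesPerBucket];  // parallel array

int Lookup(const void* map, const void* symbol) {
  uintptr_t hash = (reinterpret_cast<uintptr_t>(map) ^
                    reinterpret_cast<uintptr_t>(symbol)) % kBuckets;
  int base = static_cast<int>(hash) * kEntriesPerBucket;
  // Probe every entry in the bucket; a hit at entry i reads the
  // matching slot (base + i) of the parallel offsets array.
  for (int i = 0; i < kEntriesPerBucket; i++) {
    if (keys[base + i].map == map && keys[base + i].symbol == symbol) {
      return field_offsets[base + i];
    }
  }
  return -1;  // miss: fall through to the slow path
}

int main() {
  int map_marker = 0, symbol_marker = 0;
  uintptr_t hash = (reinterpret_cast<uintptr_t>(&map_marker) ^
                    reinterpret_cast<uintptr_t>(&symbol_marker)) % kBuckets;
  int base = static_cast<int>(hash) * kEntriesPerBucket;
  keys[base + 1] = {&map_marker, &symbol_marker};  // hit on the 2nd entry
  field_offsets[base + 1] = 12;
  std::printf("%d\n", Lookup(&map_marker, &symbol_marker));  // prints 12
}
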
@@ -1137,14 +1140,12 @@
 
   Register receiver = r1;
   Register index = r0;
-  Register scratch1 = r2;
-  Register scratch2 = r3;
+  Register scratch = r3;
   Register result = r0;
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
-                                          scratch1,
-                                          scratch2,
+                                          scratch,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -1239,6 +1240,47 @@
 }
 
 
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- r2     : receiver
+  //  -- r3     : target map
+  //  -- lr     : return address
+  // -----------------------------------
+  // Must return the modified receiver in r0.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+    __ mov(r0, r2);
+    __ Ret();
+    __ bind(&fail);
+  }
+
+  __ push(r2);
+  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+    MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- r2     : receiver
+  //  -- r3     : target map
+  //  -- lr     : return address
+  // -----------------------------------
+  // Must return the modified receiver in r0.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+    __ mov(r0, r2);
+    __ Ret();
+    __ bind(&fail);
+  }
+
+  __ push(r2);
+  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
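The two new stubs implement one-way elements-kind transitions: FAST_SMI_ONLY_ELEMENTS can become FAST_DOUBLE_ELEMENTS or FAST_ELEMENTS, and FAST_DOUBLE_ELEMENTS can become FAST_ELEMENTS, with the target chosen from the incoming value, as the generic store below does. A sketch of that decision using the same kind names:

#include <cstdio>

enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS,
                    FAST_ELEMENTS };
enum ValueKind { kSmiValue, kHeapNumberValue, kOtherHeapObject };

// Chooses the elements kind an array must transition to before `value`
// can be stored; kinds only move "up" the smi-only -> double -> object
// chain, never back.
ElementsKind TargetKind(ElementsKind current, ValueKind value) {
  if (value == kSmiValue) return current;  // a smi fits in any kind
  if (current == FAST_SMI_ONLY_ELEMENTS) {
    return value == kHeapNumberValue ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
  }
  if (current == FAST_DOUBLE_ELEMENTS && value == kOtherHeapObject) {
    return FAST_ELEMENTS;  // can't unbox a non-number: go to tagged elements
  }
  return current;
}

int main() {
  std::printf("%d %d\n",
              TargetKind(FAST_SMI_ONLY_ELEMENTS, kHeapNumberValue),  // 1
              TargetKind(FAST_DOUBLE_ELEMENTS, kOtherHeapObject));   // 2
}
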
 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                               StrictModeFlag strict_mode) {
   // ---------- S t a t e --------------
@@ -1267,13 +1309,19 @@
   //  -- r2     : receiver
   //  -- lr     : return address
   // -----------------------------------
-  Label slow, fast, array, extra;
+  Label slow, array, extra, check_if_double_array;
+  Label fast_object_with_map_check, fast_object_without_map_check;
+  Label fast_double_with_map_check, fast_double_without_map_check;
+  Label transition_smi_elements, finish_object_store, non_double_value;
+  Label transition_double_elements;
 
   // Register usage.
   Register value = r0;
   Register key = r1;
   Register receiver = r2;
-  Register elements = r3;  // Elements array of the receiver.
+  Register receiver_map = r3;
+  Register elements_map = r6;
+  Register elements = r7;  // Elements array of the receiver.
   // r4 and r5 are used as general scratch registers.
 
   // Check that the key is a smi.
@@ -1281,35 +1329,26 @@
   // Check that the object isn't a smi.
   __ JumpIfSmi(receiver, &slow);
   // Get the map of the object.
-  __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
+  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
   __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
   __ b(ne, &slow);
   // Check if the object is a JS array or not.
-  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
   __ cmp(r4, Operand(JS_ARRAY_TYPE));
   __ b(eq, &array);
   // Check that the object is some kind of JSObject.
-  __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
+  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
   __ b(lt, &slow);
-  __ cmp(r4, Operand(JS_PROXY_TYPE));
-  __ b(eq, &slow);
-  __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
-  __ b(eq, &slow);
 
   // Object case: Check key against length in the elements array.
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check that the object is in fast mode and writable.
-  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(r4, ip);
-  __ b(ne, &slow);
   // Check array bounds. Both the key and the length of FixedArray are smis.
   __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
   __ cmp(key, Operand(ip));
-  __ b(lo, &fast);
+  __ b(lo, &fast_object_with_map_check);
 
   // Slow case, handle jump to runtime.
   __ bind(&slow);
@@ -1330,21 +1369,31 @@
   __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
   __ cmp(key, Operand(ip));
   __ b(hs, &slow);
+  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ cmp(elements_map,
+         Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ b(ne, &check_if_double_array);
   // Calculate key + 1 as smi.
   STATIC_ASSERT(kSmiTag == 0);
   __ add(r4, key, Operand(Smi::FromInt(1)));
   __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ b(&fast);
+  __ b(&fast_object_without_map_check);
+
+  __ bind(&check_if_double_array);
+  __ cmp(elements_map,
+         Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ b(ne, &slow);
+  // Add 1 to key, and go to common element store code for doubles.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ add(r4, key, Operand(Smi::FromInt(1)));
+  __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ jmp(&fast_double_without_map_check);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
   // is the length is always a smi.
   __ bind(&array);
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(r4, ip);
-  __ b(ne, &slow);
 
   // Check the key against the length in the array.
   __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1352,19 +1401,104 @@
   __ b(hs, &extra);
   // Fall through to fast case.
 
-  __ bind(&fast);
-  // Fast case, store the value to the elements backing store.
-  __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ str(value, MemOperand(r5));
-  // Skip write barrier if the written value is a smi.
-  __ tst(value, Operand(kSmiTagMask));
-  __ Ret(eq);
-  // Update write barrier for the elements array address.
-  __ sub(r4, r5, Operand(elements));
-  __ RecordWrite(elements, Operand(r4), r5, r6);
-
+  __ bind(&fast_object_with_map_check);
+  Register scratch_value = r4;
+  Register address = r5;
+  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ cmp(elements_map,
+         Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ b(ne, &fast_double_with_map_check);
+  __ bind(&fast_object_without_map_check);
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+  // It's irrelevant whether the array is smi-only when writing a smi.
+  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(value, MemOperand(address));
   __ Ret();
+
+  __ bind(&non_smi_value);
+  // Escape to elements kind transition case.
+  __ CheckFastObjectElements(receiver_map, scratch_value,
+                             &transition_smi_elements);
+  // Fast elements array, store the value to the elements backing store.
+  __ bind(&finish_object_store);
+  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(value, MemOperand(address));
+  // Update write barrier for the elements array address.
+  __ mov(scratch_value, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements,
+                 address,
+                 scratch_value,
+                 kLRHasNotBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ Ret();
+
+  __ bind(&fast_double_with_map_check);
+  // Check for fast double array case. If this fails, call through to the
+  // runtime.
+  __ cmp(elements_map,
+         Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ b(ne, &slow);
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value,
+                                 key,
+                                 receiver,
+                                 elements,
+                                 r3,
+                                 r4,
+                                 r5,
+                                 r6,
+                                 &transition_double_elements);
+  __ Ret();
+
+  __ bind(&transition_smi_elements);
+  // Transition the array appropriately depending on the value type.
+  __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
+  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
+  __ b(ne, &non_double_value);
+
+  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_DOUBLE_ELEMENTS,
+                                         receiver_map,
+                                         r4,
+                                         &slow);
+  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3.
+  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         receiver_map,
+                                         r4,
+                                         &slow);
+  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3.
+  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         receiver_map,
+                                         r4,
+                                         &slow);
+  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3.
+  ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
 }
 
 
@@ -1382,7 +1516,7 @@
       Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
 
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, r1, r2, r3, r4, r5);
+      masm, flags, r1, r2, r3, r4, r5, r6);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
@@ -1414,11 +1548,10 @@
   //  -- lr    : return address
   // -----------------------------------
   //
-  // This accepts as a receiver anything JSObject::SetElementsLength accepts
-  // (currently anything except for external and pixel arrays which means
-  // anything with elements of FixedArray type.), but currently is restricted
-  // to JSArray.
-  // Value must be a number, but only smis are accepted as the most common case.
+  // This accepts as a receiver anything JSArray::SetElementsLength accepts
+  // (currently anything except for external arrays which means anything with
+  // elements of FixedArray type).  Value must be a number, but only smis are
+  // accepted as the most common case.
 
   Label miss;
 
@@ -1440,6 +1573,13 @@
   __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
   __ b(ne, &miss);
 
+  // Check that the array has fast properties, otherwise the length
+  // property might have been redefined.
+  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
+  __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
+  __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
+  __ b(eq, &miss);
+
   // Check that value is a smi.
   __ JumpIfNotSmi(value, &miss);
 
@@ -1510,11 +1650,9 @@
     case Token::LT:
       return lt;
     case Token::GT:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return lt;
+      return gt;
     case Token::LTE:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return ge;
+      return le;
     case Token::GTE:
       return ge;
     default:
@@ -1534,6 +1672,9 @@
     rewritten = stub.GetCode();
   } else {
     ICCompareStub stub(op_, state);
+    if (state == KNOWN_OBJECTS) {
+      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+    }
     rewritten = stub.GetCode();
   }
   set_target(*rewritten);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 30ccd05..c3dd1cb 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -212,10 +212,11 @@
 }
 
 
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
@@ -227,6 +228,13 @@
 }
 
 
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   InputAt(0)->PrintTo(stream);
@@ -241,6 +249,14 @@
 }
 
 
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  InputAt(0)->PrintTo(stream);
+  InputAt(1)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -390,6 +406,12 @@
 }
 
 
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
 LChunk::LChunk(CompilationInfo* info, HGraph* graph)
     : spill_slot_count_(0),
       info_(info),
@@ -418,7 +440,7 @@
 
 
 void LChunk::MarkEmptyBlocks() {
-  HPhase phase("Mark empty blocks", this);
+  HPhase phase("L_Mark empty blocks", this);
   for (int i = 0; i < graph()->blocks()->length(); ++i) {
     HBasicBlock* block = graph()->blocks()->at(i);
     int first = block->first_instruction_index();
@@ -454,7 +476,7 @@
 
 
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LInstructionGap* gap = new LInstructionGap(block);
+  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -529,8 +551,8 @@
 
 LChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
-  chunk_ = new LChunk(info(), graph());
-  HPhase phase("Building chunk", chunk_);
+  chunk_ = new(zone()) LChunk(info(), graph());
+  HPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
   for (int i = 0; i < blocks->length(); i++) {
@@ -559,20 +581,15 @@
 }
 
 
-LRegister* LChunkBuilder::ToOperand(Register reg) {
-  return LRegister::Create(Register::ToAllocationIndex(reg));
-}
-
-
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
-  return new LUnallocated(LUnallocated::FIXED_REGISTER,
-                          Register::ToAllocationIndex(reg));
+  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+                                  Register::ToAllocationIndex(reg));
 }
 
 
 LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
-  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-                          DoubleRegister::ToAllocationIndex(reg));
+  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                                  DoubleRegister::ToAllocationIndex(reg));
 }
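
The pervasive `new LFoo(...)` to `new(zone()) LFoo(...)` churn in this file moves all Lithium objects into zone (arena) allocation: a placement operator new bumps a pointer in a compilation-owned buffer, individual objects are never freed, and the whole zone is released at once when compilation ends. A minimal sketch of the pattern; V8's Zone is chunked and more careful about alignment and large objects than this:

#include <cstddef>
#include <cstdio>
#include <vector>

class Zone {
 public:
  void* Allocate(std::size_t size) {
    size = (size + 7) & ~static_cast<std::size_t>(7);  // 8-byte align
    if (chunks_.empty() || pos_ + size > kChunkSize) {
      chunks_.push_back(new char[kChunkSize]);
      pos_ = 0;
    }
    void* result = chunks_.back() + pos_;
    pos_ += size;
    return result;
  }
  ~Zone() {
    for (char* c : chunks_) delete[] c;  // everything is freed at once
  }
 private:
  static constexpr std::size_t kChunkSize = 8 * 1024;
  std::vector<char*> chunks_;
  std::size_t pos_ = 0;
};

struct ZoneObject {
  void* operator new(std::size_t size, Zone* zone) {
    return zone->Allocate(size);
  }
  void operator delete(void*, Zone*) {}  // only used if a ctor throws
};

struct LInstructionGap : ZoneObject {
  explicit LInstructionGap(int id) : block_id(id) {}
  int block_id;
};

int main() {
  Zone zone;
  LInstructionGap* gap = new (&zone) LInstructionGap(7);  // no delete, ever
  std::printf("%d\n", gap->block_id);
}  // the Zone destructor reclaims gap's memory here
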
 
 
@@ -587,30 +604,30 @@
 
 
 LOperand* LChunkBuilder::UseRegister(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
   return Use(value,
-             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
-                              LUnallocated::USED_AT_START));
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
 }
 
 
 LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::Use(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::NONE));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
 }
 
 
 LOperand* LChunkBuilder::UseAtStart(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::NONE,
-                                     LUnallocated::USED_AT_START));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+                                             LUnallocated::USED_AT_START));
 }
 
 
@@ -645,7 +662,7 @@
 LOperand* LChunkBuilder::UseAny(HValue* value) {
   return value->IsConstant()
       ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      :  Use(value, new LUnallocated(LUnallocated::ANY));
+      :  Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
 }
 
 
@@ -654,7 +671,7 @@
     HInstruction* instr = HInstruction::cast(value);
     VisitInstruction(instr);
   }
-  allocator_->RecordUse(value, operand);
+  operand->set_virtual_register(value->id());
   return operand;
 }
 
@@ -662,36 +679,33 @@
 template<int I, int T>
 LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
                                     LUnallocated* result) {
-  allocator_->RecordDefinition(current_instruction_, result);
+  result->set_virtual_register(current_instruction_->id());
   instr->set_result(result);
   return instr;
 }
 
 
 template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::NONE));
-}
-
-
-template<int I, int T>
 LInstruction* LChunkBuilder::DefineAsRegister(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
 template<int I, int T>
 LInstruction* LChunkBuilder::DefineAsSpilled(
     LTemplateInstruction<1, I, T>* instr, int index) {
-  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
 }
 
 
 template<int I, int T>
 LInstruction* LChunkBuilder::DefineSameAsFirst(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
 }
 
 
@@ -711,7 +725,9 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  instr->set_environment(CreateEnvironment(hydrogen_env));
+  int argument_index_accumulator = 0;
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator));
   return instr;
 }
 
@@ -741,7 +757,7 @@
   instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
-  if (hinstr->HasSideEffects()) {
+  if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
     instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -753,7 +769,8 @@
   // Thus we still need to attach environment to this call even if
   // call sequence can not deoptimize eagerly.
   bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
@@ -770,66 +787,46 @@
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
-  instr->set_pointer_map(new LPointerMap(position_));
+  instr->set_pointer_map(new(zone()) LPointerMap(position_));
   return instr;
 }
 
 
 LUnallocated* LChunkBuilder::TempRegister() {
-  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
-  allocator_->RecordTemporary(operand);
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  operand->set_virtual_register(allocator_->GetVirtualRegister());
+  if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(Register reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  allocator_->RecordTemporary(operand);
+  ASSERT(operand->HasFixedPolicy());
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  allocator_->RecordTemporary(operand);
+  ASSERT(operand->HasFixedPolicy());
   return operand;
 }
 
 
 LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
-  return new LLabel(instr->block());
+  return new(zone()) LLabel(instr->block());
 }
 
 
 LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new LDeoptimize);
+  return AssignEnvironment(new(zone()) LDeoptimize);
 }
 
 
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
-  return AssignEnvironment(new LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
-                                   HBitwiseBinaryOperation* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineAsRegister(new LBitI(op, left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* left = UseFixed(instr->left(), r1);
-    LOperand* right = UseFixed(instr->right(), r0);
-    LArithmeticT* result = new LArithmeticT(op, left, right);
-    return MarkAsCall(DefineFixed(result, r0), instr);
-  }
+  return AssignEnvironment(new(zone()) LDeoptimize);
 }
 
 
@@ -841,7 +838,7 @@
 
     LOperand* left = UseFixed(instr->left(), r1);
     LOperand* right = UseFixed(instr->right(), r0);
-    LArithmeticT* result = new LArithmeticT(op, left, right);
+    LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
     return MarkAsCall(DefineFixed(result, r0), instr);
   }
 
@@ -875,7 +872,7 @@
   }
 
   LInstruction* result =
-      DefineAsRegister(new LShiftI(op, left, right, does_deopt));
+      DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
   return does_deopt ? AssignEnvironment(result) : result;
 }
 
@@ -888,7 +885,7 @@
   ASSERT(op != Token::MOD);
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  LArithmeticD* result = new LArithmeticD(op, left, right);
+  LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
   return DefineAsRegister(result);
 }
 
@@ -906,7 +903,8 @@
   ASSERT(right->representation().IsTagged());
   LOperand* left_operand = UseFixed(left, r1);
   LOperand* right_operand = UseFixed(right, r0);
-  LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
+  LArithmeticT* result =
+      new(zone()) LArithmeticT(op, left_operand, right_operand);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
@@ -994,20 +992,26 @@
 }
 
 
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+    HEnvironment* hydrogen_env,
+    int* argument_index_accumulator) {
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
   int ast_id = hydrogen_env->ast_id();
-  ASSERT(ast_id != AstNode::kNoNumber);
+  ASSERT(ast_id != AstNode::kNoNumber ||
+         hydrogen_env->frame_type() != JS_FUNCTION);
   int value_count = hydrogen_env->length();
-  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
-                                          ast_id,
-                                          hydrogen_env->parameter_count(),
-                                          argument_count_,
-                                          value_count,
-                                          outer);
-  int argument_index = 0;
+  LEnvironment* result = new(zone()) LEnvironment(
+      hydrogen_env->closure(),
+      hydrogen_env->frame_type(),
+      ast_id,
+      hydrogen_env->parameter_count(),
+      argument_count_,
+      value_count,
+      outer);
+  int argument_index = *argument_index_accumulator;
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1016,31 +1020,44 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new LArgument(argument_index++);
+      op = new(zone()) LArgument(argument_index++);
     } else {
       op = UseAny(value);
     }
     result->AddValue(op, value->representation());
   }
 
+  if (hydrogen_env->frame_type() == JS_FUNCTION) {
+    *argument_index_accumulator = argument_index;
+  }
+
   return result;
 }
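
CreateEnvironment now recurses to the outermost environment first and threads argument_index_accumulator through the recursion, so arguments pushed by inlined outer frames and the current frame share one numbering; only JS_FUNCTION frames publish their running count back. The accumulator-threading shape in isolation, with a hypothetical HEnv:

#include <cstdio>

struct HEnv {
  const HEnv* outer;
  int frame_id;
  int pushed_args;      // arguments this frame pushes
  bool is_js_function;  // adaptor/construct frames would be false
};

// Outer environments are numbered first, and a single counter is
// threaded through the recursion by pointer.
void NumberArguments(const HEnv* env, int* acc) {
  if (env == nullptr) return;
  NumberArguments(env->outer, acc);
  int index = *acc;
  for (int i = 0; i < env->pushed_args; i++) {
    std::printf("frame %d: arg %d -> index %d\n", env->frame_id, i, index++);
  }
  if (env->is_js_function) *acc = index;  // publish only for JS frames
}

int main() {
  HEnv outer{nullptr, 0, 2, true};
  HEnv inner{&outer, 1, 3, true};
  int acc = 0;
  NumberArguments(&inner, &acc);  // indices 0..4 across both frames
}
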
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  return new LGoto(instr->FirstSuccessor()->block_id());
+  return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
 }
 
 
 LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
-  HValue* v = instr->value();
-  if (v->EmitAtUses()) {
-    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+  HValue* value = instr->value();
+  if (value->EmitAtUses()) {
+    HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
         ? instr->FirstSuccessor()
         : instr->SecondSuccessor();
-    return new LGoto(successor->block_id());
+    return new(zone()) LGoto(successor->block_id());
   }
-  return AssignEnvironment(new LBranch(UseRegister(v)));
+
+  LBranch* result = new(zone()) LBranch(UseRegister(value));
+  // Tagged values that are not known smis or booleans require a
+  // deoptimization environment.
+  Representation rep = value->representation();
+  HType type = value->type();
+  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+    return AssignEnvironment(result);
+  }
+  return result;
 }
 
 
@@ -1049,23 +1066,24 @@
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
-  return new LCmpMapAndBranch(value, temp);
+  return new(zone()) LCmpMapAndBranch(value, temp);
 }
 
 
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
-  return DefineAsRegister(new LArgumentsLength(UseRegister(length->value())));
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+  LOperand* value = UseRegister(instr->value());
+  return DefineAsRegister(new(zone()) LArgumentsLength(value));
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
-  return DefineAsRegister(new LArgumentsElements);
+  return DefineAsRegister(new(zone()) LArgumentsElements);
 }
 
 
 LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
   LInstanceOf* result =
-      new LInstanceOf(UseFixed(instr->left(), r0),
+      new(zone()) LInstanceOf(UseFixed(instr->left(), r0),
                       UseFixed(instr->right(), r1));
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
@@ -1074,17 +1092,26 @@
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new LInstanceOfKnownGlobal(UseFixed(instr->left(), r0), FixedTemp(r4));
+      new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), r0),
+                                         FixedTemp(r4));
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegisterAtStart(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
 LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
   LOperand* function = UseFixed(instr->function(), r1);
   LOperand* receiver = UseFixed(instr->receiver(), r0);
   LOperand* length = UseFixed(instr->length(), r2);
   LOperand* elements = UseFixed(instr->elements(), r3);
-  LApplyArguments* result = new LApplyArguments(function,
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
                                                 receiver,
                                                 length,
                                                 elements);
@@ -1095,63 +1122,75 @@
 LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
   ++argument_count_;
   LOperand* argument = Use(instr->argument());
-  return new LPushArgument(argument);
+  return new(zone()) LPushArgument(argument);
 }
 
 
 LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
 }
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
 }
 
 
 LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LOuterContext(context));
+  return DefineAsRegister(new(zone()) LOuterContext(context));
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  return MarkAsCall(new(zone()) LDeclareGlobals, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LGlobalObject(context));
+  return DefineAsRegister(new(zone()) LGlobalObject(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
   LOperand* global_object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LGlobalReceiver(global_object));
+  return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
 }
 
 
 LInstruction* LChunkBuilder::DoCallConstantFunction(
     HCallConstantFunction* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallConstantFunction, r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
   LOperand* function = UseFixed(instr->function(), r1);
   argument_count_ -= instr->argument_count();
-  LInvokeFunction* result = new LInvokeFunction(function);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(function);
   return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
 
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
-  if (op == kMathLog || op == kMathSin || op == kMathCos) {
+  if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
     LOperand* input = UseFixedDouble(instr->value(), d2);
-    LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
     return MarkAsCall(DefineFixedDouble(result, d2), instr);
+  } else if (op == kMathPowHalf) {
+    LOperand* input = UseFixedDouble(instr->value(), d2);
+    LOperand* temp = FixedTemp(d3);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
+    return DefineFixedDouble(result, d2);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
     LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
-    LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
     switch (op) {
       case kMathAbs:
         return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
@@ -1161,8 +1200,6 @@
         return DefineAsRegister(result);
       case kMathRound:
         return AssignEnvironment(DefineAsRegister(result));
-      case kMathPowHalf:
-        return DefineAsRegister(result);
       default:
         UNREACHABLE();
         return NULL;
@@ -1175,45 +1212,47 @@
   ASSERT(instr->key()->representation().IsTagged());
   argument_count_ -= instr->argument_count();
   LOperand* key = UseFixed(instr->key(), r2);
-  return MarkAsCall(DefineFixed(new LCallKeyed(key), r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallNamed, r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallNamed, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallGlobal, r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallKnownGlobal, r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
   LOperand* constructor = UseFixed(instr->constructor(), r1);
   argument_count_ -= instr->argument_count();
-  LCallNew* result = new LCallNew(constructor);
+  LCallNew* result = new(zone()) LCallNew(constructor);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), r1);
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallFunction, r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), r0),
+                    instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallRuntime, r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, r0), instr);
 }
 
 
@@ -1232,25 +1271,32 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
-  return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineAsRegister(new(zone()) LBitI(left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), r1);
+    LOperand* right = UseFixed(instr->right(), r0);
+    LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
+    return MarkAsCall(DefineFixed(result, r0), instr);
+  }
 }
 
 
 LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
-  return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
-  return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
-  return DoBit(Token::BIT_XOR, instr);
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LBitNotI(value));
 }
 
 
@@ -1266,7 +1312,7 @@
     LOperand* dividend = UseFixed(instr->left(), r0);
     LOperand* divisor = UseFixed(instr->right(), r1);
     return AssignEnvironment(AssignPointerMap(
-             DefineFixed(new LDivI(dividend, divisor), r0)));
+             DefineFixed(new(zone()) LDivI(dividend, divisor), r0)));
   } else {
     return DoArithmeticT(Token::DIV, instr);
   }
@@ -1282,15 +1328,15 @@
     if (instr->HasPowerOf2Divisor()) {
       ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
       LOperand* value = UseRegisterAtStart(instr->left());
-      mod = new LModI(value, UseOrConstant(instr->right()));
+      mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
     } else {
       LOperand* dividend = UseRegister(instr->left());
       LOperand* divisor = UseRegister(instr->right());
-      mod = new LModI(dividend,
-                      divisor,
-                      TempRegister(),
-                      FixedTemp(d10),
-                      FixedTemp(d11));
+      mod = new(zone()) LModI(dividend,
+                              divisor,
+                              TempRegister(),
+                              FixedTemp(d10),
+                              FixedTemp(d11));
     }
 
     if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
@@ -1308,7 +1354,7 @@
     // TODO(fschneider): Allow any register as input registers.
     LOperand* left = UseFixedDouble(instr->left(), d1);
     LOperand* right = UseFixedDouble(instr->right(), d2);
-    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
     return MarkAsCall(DefineFixedDouble(result, d1), instr);
   }
 }
@@ -1329,7 +1375,12 @@
     } else {
       left = UseRegisterAtStart(instr->LeastConstantOperand());
     }
-    return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
+    LMulI* mul = new(zone()) LMulI(left, right, temp);
+    if (instr->CheckFlag(HValue::kCanOverflow) ||
+        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      AssignEnvironment(mul);
+    }
+    return DefineAsRegister(mul);
 
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MUL, instr);
@@ -1346,7 +1397,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    LSubI* sub = new LSubI(left, right);
+    LSubI* sub = new(zone()) LSubI(left, right);
     LInstruction* result = DefineAsRegister(sub);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1366,7 +1417,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
     LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    LAddI* add = new LAddI(left, right);
+    LAddI* add = new(zone()) LAddI(left, right);
     LInstruction* result = DefineAsRegister(add);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1390,22 +1441,29 @@
   LOperand* left = UseFixedDouble(instr->left(), d1);
   LOperand* right = exponent_type.IsDouble() ?
       UseFixedDouble(instr->right(), d2) :
-      UseFixed(instr->right(), r0);
-  LPower* result = new LPower(left, right);
+      UseFixed(instr->right(), r2);
+  LPower* result = new(zone()) LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, d3),
                     instr,
                     CAN_DEOPTIMIZE_EAGERLY);
 }
 
 
+LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->global_object()->representation().IsTagged());
+  LOperand* global_object = UseFixed(instr->global_object(), r0);
+  LRandom* result = new(zone()) LRandom(global_object);
+  return MarkAsCall(DefineFixedDouble(result, d7), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  Token::Value op = instr->token();
   ASSERT(instr->left()->representation().IsTagged());
   ASSERT(instr->right()->representation().IsTagged());
-  bool reversed = (op == Token::GT || op == Token::LTE);
-  LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
-  LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
-  LCmpT* result = new LCmpT(left, right);
+  LOperand* left = UseFixed(instr->left(), r1);
+  LOperand* right = UseFixed(instr->right(), r0);
+  LCmpT* result = new(zone()) LCmpT(left, right);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
@@ -1416,16 +1474,16 @@
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterAtStart(instr->left());
-    LOperand* right = UseRegisterAtStart(instr->right());
-    return new LCmpIDAndBranch(left, right);
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new(zone()) LCmpIDAndBranch(left, right);
   } else {
     ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseRegisterAtStart(instr->right());
-    return new LCmpIDAndBranch(left, right);
+    return new(zone()) LCmpIDAndBranch(left, right);
   }
 }
 
@@ -1434,47 +1492,70 @@
     HCompareObjectEqAndBranch* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  return new LCmpObjectEqAndBranch(left, right);
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
 }
 
 
 LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
     HCompareConstantEqAndBranch* instr) {
-  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LCmpConstantEqAndBranch(value);
 }
 
 
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
+  return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
-  return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
+  return new(zone()) LIsObjectAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(value, temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsSmiAndBranch(Use(instr->value()));
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
-                                      TempRegister());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* left = UseFixed(instr->left(), r1);
+  LOperand* right = UseFixed(instr->right(), r0);
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(left, right);
+  return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LHasInstanceTypeAndBranch(value);
 }
 
 
@@ -1483,14 +1564,14 @@
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
-  return DefineAsRegister(new LGetCachedArrayIndex(value));
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
 }
 
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LHasCachedArrayIndexAndBranch(
+  return new(zone()) LHasCachedArrayIndexAndBranch(
       UseRegisterAtStart(instr->value()));
 }
 
@@ -1498,40 +1579,48 @@
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
-                                   TempRegister());
+  LOperand* value = UseRegister(instr->value());
+  return new(zone()) LClassOfTestAndBranch(value, TempRegister());
 }
 
 
 LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LJSArrayLength(array));
+  return DefineAsRegister(new(zone()) LJSArrayLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
     HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LFixedArrayBaseLength(array));
+  return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
   LOperand* object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LElementsKind(object));
+  return DefineAsRegister(new(zone()) LElementsKind(object));
 }
 
 
 LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
   LOperand* object = UseRegister(instr->value());
-  LValueOf* result = new LValueOf(object, TempRegister());
-  return AssignEnvironment(DefineAsRegister(result));
+  LValueOf* result = new(zone()) LValueOf(object, TempRegister());
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+  LOperand* object = UseFixed(instr->value(), r0);
+  LDateField* result =
+      new(zone()) LDateField(object, FixedTemp(r1), instr->index());
+  return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
-                                            UseRegister(instr->length())));
+  LOperand* value = UseRegisterAtStart(instr->index());
+  LOperand* length = UseRegister(instr->length());
+  return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
 }
 
 
@@ -1544,7 +1633,7 @@
 
 LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
   LOperand* value = UseFixed(instr->value(), r0);
-  return MarkAsCall(new LThrow(value), instr);
+  return MarkAsCall(new(zone()) LThrow(value), instr);
 }
 
 
@@ -1567,22 +1656,25 @@
   if (from.IsTagged()) {
     if (to.IsDouble()) {
       LOperand* value = UseRegister(instr->value());
-      LNumberUntagD* res = new LNumberUntagD(value);
+      LNumberUntagD* res = new(zone()) LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
     } else {
       ASSERT(to.IsInteger32());
-      LOperand* value = UseRegister(instr->value());
+      LOperand* value = UseRegisterAtStart(instr->value());
       bool needs_check = !instr->value()->type().IsSmi();
       LInstruction* res = NULL;
       if (!needs_check) {
-        res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
+        res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check));
       } else {
         LOperand* temp1 = TempRegister();
         LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
                                                       : NULL;
         LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d11)
                                                       : NULL;
-        res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
+        res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
+                                                       temp1,
+                                                       temp2,
+                                                       temp3));
         res = AssignEnvironment(res);
       }
       return res;
@@ -1596,32 +1688,31 @@
       // Make sure that the temp and result_temp registers are
       // different.
       LUnallocated* result_temp = TempRegister();
-      LNumberTagD* result = new LNumberTagD(value, temp1, temp2);
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
       Define(result, result_temp);
       return AssignPointerMap(result);
     } else {
       ASSERT(to.IsInteger32());
       LOperand* value = UseRegister(instr->value());
-      LDoubleToI* res =
-        new LDoubleToI(value,
-                       TempRegister(),
-                       instr->CanTruncateToInt32() ? TempRegister() : NULL);
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
+      LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
       return AssignEnvironment(DefineAsRegister(res));
     }
   } else if (from.IsInteger32()) {
     if (to.IsTagged()) {
       HValue* val = instr->value();
-      LOperand* value = UseRegister(val);
+      LOperand* value = UseRegisterAtStart(val);
       if (val->HasRange() && val->range()->IsInSmiRange()) {
-        return DefineSameAsFirst(new LSmiTag(value));
+        return DefineAsRegister(new(zone()) LSmiTag(value));
       } else {
-        LNumberTagI* result = new LNumberTagI(value);
-        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+        LNumberTagI* result = new(zone()) LNumberTagI(value);
+        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
       }
     } else {
       ASSERT(to.IsDouble());
       LOperand* value = Use(instr->value());
-      return DefineAsRegister(new LInteger32ToDouble(value));
+      return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
     }
   }
   UNREACHABLE();
@@ -1631,13 +1722,13 @@
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new LCheckNonSmi(value));
+  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LInstruction* result = new LCheckInstanceType(value);
+  LInstruction* result = new(zone()) LCheckInstanceType(value);
   return AssignEnvironment(result);
 }
 
@@ -1645,26 +1736,26 @@
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
-  LInstruction* result = new LCheckPrototypeMaps(temp1, temp2);
+  LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new LCheckSmi(value));
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new LCheckFunction(value));
+  return AssignEnvironment(new(zone()) LCheckFunction(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LInstruction* result = new LCheckMap(value);
+  LInstruction* result = new(zone()) LCheckMap(value);
   return AssignEnvironment(result);
 }
 
@@ -1674,57 +1765,32 @@
   Representation input_rep = value->representation();
   LOperand* reg = UseRegister(value);
   if (input_rep.IsDouble()) {
-    return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(d11)));
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(d11)));
   } else if (input_rep.IsInteger32()) {
-    return DefineAsRegister(new LClampIToUint8(reg));
+    return DefineAsRegister(new(zone()) LClampIToUint8(reg));
   } else {
     ASSERT(input_rep.IsTagged());
     // Register allocator doesn't (yet) support allocation of double
     // temps. Reserve d1 explicitly.
-    LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(d11));
+    LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11));
     return AssignEnvironment(DefineAsRegister(result));
   }
 }
 
 
-LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
-  HValue* value = instr->value();
-  Representation input_rep = value->representation();
-  LOperand* reg = UseRegister(value);
-  if (input_rep.IsDouble()) {
-    LOperand* temp1 = TempRegister();
-    LOperand* temp2 = TempRegister();
-    LDoubleToI* res = new LDoubleToI(reg, temp1, temp2);
-    return AssignEnvironment(DefineAsRegister(res));
-  } else if (input_rep.IsInteger32()) {
-    // Canonicalization should already have removed the hydrogen instruction in
-    // this case, since it is a noop.
-    UNREACHABLE();
-    return NULL;
-  } else {
-    ASSERT(input_rep.IsTagged());
-    LOperand* temp1 = TempRegister();
-    LOperand* temp2 = TempRegister();
-    LOperand* temp3 = FixedTemp(d11);
-    LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3);
-    return AssignEnvironment(DefineSameAsFirst(res));
-  }
-}
-
-
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
-  return new LReturn(UseFixed(instr->value(), r0));
+  return new(zone()) LReturn(UseFixed(instr->value(), r0));
 }
 
 
 LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
   Representation r = instr->representation();
   if (r.IsInteger32()) {
-    return DefineAsRegister(new LConstantI);
+    return DefineAsRegister(new(zone()) LConstantI);
   } else if (r.IsDouble()) {
-    return DefineAsRegister(new LConstantD);
+    return DefineAsRegister(new(zone()) LConstantD);
   } else if (r.IsTagged()) {
-    return DefineAsRegister(new LConstantT);
+    return DefineAsRegister(new(zone()) LConstantT);
   } else {
     UNREACHABLE();
     return NULL;
@@ -1733,8 +1799,8 @@
 
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
-  LLoadGlobalCell* result = new LLoadGlobalCell;
-  return instr->check_hole_value()
+  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
+  return instr->RequiresHoleCheck()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@@ -1742,20 +1808,18 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* global_object = UseFixed(instr->global_object(), r0);
-  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+  LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
-  if (instr->check_hole_value()) {
-    LOperand* temp = TempRegister();
-    LOperand* value = UseRegister(instr->value());
-    return AssignEnvironment(new LStoreGlobalCell(value, temp));
-  } else {
-    LOperand* value = UseRegisterAtStart(instr->value());
-    return new LStoreGlobalCell(value, NULL);
-  }
+  LOperand* value = UseRegister(instr->value());
+  // Use a temp to check the value in the cell in the case where we perform
+  // a hole check.
+  return instr->RequiresHoleCheck()
+      ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
+      : new(zone()) LStoreGlobalCell(value, NULL);
 }
 
 
@@ -1763,14 +1827,16 @@
   LOperand* global_object = UseFixed(instr->global_object(), r1);
   LOperand* value = UseFixed(instr->value(), r0);
   LStoreGlobalGeneric* result =
-      new LStoreGlobalGeneric(global_object, value);
+      new(zone()) LStoreGlobalGeneric(global_object, value);
   return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadContextSlot(context));
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
@@ -1784,13 +1850,14 @@
     context = UseRegister(instr->context());
     value = UseRegister(instr->value());
   }
-  return new LStoreContextSlot(context, value);
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value);
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
   return DefineAsRegister(
-      new LLoadNamedField(UseRegisterAtStart(instr->object())));
+      new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
 }
 
 
@@ -1799,11 +1866,13 @@
   ASSERT(instr->representation().IsTagged());
   if (instr->need_generic()) {
     LOperand* obj = UseFixed(instr->object(), r0);
-    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+    LLoadNamedFieldPolymorphic* result =
+        new(zone()) LLoadNamedFieldPolymorphic(obj);
     return MarkAsCall(DefineFixed(result, r0), instr);
   } else {
     LOperand* obj = UseRegisterAtStart(instr->object());
-    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+    LLoadNamedFieldPolymorphic* result =
+        new(zone()) LLoadNamedFieldPolymorphic(obj);
     return AssignEnvironment(DefineAsRegister(result));
   }
 }
@@ -1811,7 +1880,7 @@
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object = UseFixed(instr->object(), r0);
-  LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), r0);
+  LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
   return MarkAsCall(result, instr);
 }
 
@@ -1819,20 +1888,20 @@
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
-      new LLoadFunctionPrototype(UseRegister(instr->function()))));
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadElements(input));
+  return DefineAsRegister(new(zone()) LLoadElements(input));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
     HLoadExternalArrayPointer* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadExternalArrayPointer(input));
+  return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
 }
 
 
@@ -1842,8 +1911,9 @@
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* obj = UseRegisterAtStart(instr->object());
   LOperand* key = UseRegisterAtStart(instr->key());
-  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
-  return AssignEnvironment(DefineAsRegister(result));
+  LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
+  if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+  return DefineAsRegister(result);
 }
 
 
@@ -1854,7 +1924,7 @@
   LOperand* elements = UseTempRegister(instr->elements());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LLoadKeyedFastDoubleElement* result =
-      new LLoadKeyedFastDoubleElement(elements, key);
+      new(zone()) LLoadKeyedFastDoubleElement(elements, key);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
@@ -1862,19 +1932,18 @@
 LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
     HLoadKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
-  Representation representation(instr->representation());
   ASSERT(
-      (representation.IsInteger32() &&
+      (instr->representation().IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
        (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (representation.IsDouble() &&
+      (instr->representation().IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* external_pointer = UseRegister(instr->external_pointer());
   LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
-      new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt, make sure it
   // has an environment.
@@ -1888,7 +1957,7 @@
   LOperand* key = UseFixed(instr->key(), r0);
 
   LInstruction* result =
-      DefineFixed(new LLoadKeyedGeneric(object, key), r0);
+      DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), r0);
   return MarkAsCall(result, instr);
 }
 
@@ -1907,8 +1976,7 @@
   LOperand* key = needs_write_barrier
       ? UseTempRegister(instr->key())
       : UseRegisterOrConstantAtStart(instr->key());
-
-  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+  return new(zone()) LStoreKeyedFastElement(obj, key, val);
 }
 
 
@@ -1922,19 +1990,18 @@
   LOperand* val = UseTempRegister(instr->value());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
 
-  return new LStoreKeyedFastDoubleElement(elements, key, val);
+  return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
     HStoreKeyedSpecializedArrayElement* instr) {
-  Representation representation(instr->value()->representation());
   ElementsKind elements_kind = instr->elements_kind();
   ASSERT(
-      (representation.IsInteger32() &&
+      (instr->value()->representation().IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
        (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (representation.IsDouble() &&
+      (instr->value()->representation().IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->external_pointer()->representation().IsExternal());
@@ -1949,9 +2016,9 @@
       : UseRegister(instr->value());
   LOperand* key = UseRegisterOrConstant(instr->key());
 
-  return new LStoreKeyedSpecializedArrayElement(external_pointer,
-                                                key,
-                                                val);
+  return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+                                                        key,
+                                                        val);
 }
 
 
@@ -1964,7 +2031,29 @@
   ASSERT(instr->key()->representation().IsTagged());
   ASSERT(instr->value()->representation().IsTagged());
 
-  return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
+  return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
+    return DefineSameAsFirst(result);
+  } else {
+    LOperand* object = UseFixed(instr->object(), r0);
+    LOperand* fixed_object_reg = FixedTemp(r2);
+    LOperand* new_map_reg = FixedTemp(r3);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object,
+                                            new_map_reg,
+                                            fixed_object_reg);
+    return MarkAsCall(DefineFixed(result, r0), instr);
+  }
 }
 
 
@@ -1979,7 +2068,7 @@
       ? UseTempRegister(instr->value())
       : UseRegister(instr->value());
 
-  return new LStoreNamedField(obj, val);
+  return new(zone()) LStoreNamedField(obj, val);
 }
 
 
@@ -1987,7 +2076,7 @@
   LOperand* obj = UseFixed(instr->object(), r1);
   LOperand* val = UseFixed(instr->value(), r0);
 
-  LInstruction* result = new LStoreNamedGeneric(obj, val);
+  LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
   return MarkAsCall(result, instr);
 }
 
@@ -1995,55 +2084,67 @@
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  return MarkAsCall(DefineFixed(new LStringAdd(left, right), r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), r0),
+                    instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* string = UseTempRegister(instr->string());
   LOperand* index = UseTempRegister(instr->index());
-  LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+  LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
 
 
 LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
   LOperand* char_code = UseRegister(instr->value());
-  LStringCharFromCode* result = new LStringCharFromCode(char_code);
+  LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
   return AssignPointerMap(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
   LOperand* string = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LStringLength(string));
+  return DefineAsRegister(new(zone()) LStringLength(string));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+  LAllocateObject* result =
+      new(zone()) LAllocateObject(TempRegister(), TempRegister());
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
+  return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LObjectLiteral, r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LRegExpLiteral, r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LFunctionLiteral, r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
   LOperand* object = UseFixed(instr->object(), r0);
   LOperand* key = UseFixed(instr->key(), r1);
-  LDeleteProperty* result = new LDeleteProperty(object, key);
+  LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
@@ -2051,13 +2152,13 @@
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
-  return AssignEnvironment(new LOsrEntry);
+  return AssignEnvironment(new(zone()) LOsrEntry);
 }
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new LParameter, spill_index);
+  return DefineAsSpilled(new(zone()) LParameter, spill_index);
 }
 
 
@@ -2067,13 +2168,13 @@
     Abort("Too many spill slots needed for OSR");
     spill_index = 0;
   }
-  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
 }
 
 
 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallStub, r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallStub, r0), instr);
 }
 
 
@@ -2090,32 +2191,33 @@
   LOperand* arguments = UseRegister(instr->arguments());
   LOperand* length = UseTempRegister(instr->length());
   LOperand* index = UseRegister(instr->index());
-  LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+  LAccessArgumentsAt* result =
+      new(zone()) LAccessArgumentsAt(arguments, length, index);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
   LOperand* object = UseFixed(instr->value(), r0);
-  LToFastProperties* result = new LToFastProperties(object);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
-  LTypeof* result = new LTypeof(UseFixed(instr->value(), r0));
+  LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), r0));
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
-  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
     HIsConstructCallAndBranch* instr) {
-  return new LIsConstructCallAndBranch(TempRegister());
+  return new(zone()) LIsConstructCallAndBranch(TempRegister());
 }
 
 
@@ -2138,7 +2240,7 @@
   // If there is an instruction pending deoptimization environment create a
   // lazy bailout instruction to capture the environment.
   if (pending_deoptimization_ast_id_ == instr->ast_id()) {
-    LInstruction* result = new LLazyBailout;
+    LInstruction* result = new(zone()) LLazyBailout;
     result = AssignEnvironment(result);
     instruction_pending_deoptimization_environment_->
         set_deoptimization_environment(result->environment());
@@ -2152,10 +2254,10 @@
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
   if (instr->is_function_entry()) {
-    return MarkAsCall(new LStackCheck, instr);
+    return MarkAsCall(new(zone()) LStackCheck, instr);
   } else {
     ASSERT(instr->is_backwards_branch());
-    return AssignEnvironment(AssignPointerMap(new LStackCheck));
+    return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
   }
 }
 
@@ -2164,9 +2266,14 @@
   HEnvironment* outer = current_block_->last_environment();
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
                                                instr->function(),
                                                undefined,
-                                               instr->call_kind());
+                                               instr->call_kind(),
+                                               instr->is_construct());
+  if (instr->arguments() != NULL) {
+    inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+  }
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2174,7 +2281,8 @@
 
 
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
-  HEnvironment* outer = current_block_->last_environment()->outer();
+  HEnvironment* outer = current_block_->last_environment()->
+      DiscardInlined(false);
   current_block_->UpdateEnvironment(outer);
   return NULL;
 }
@@ -2183,9 +2291,37 @@
 LInstruction* LChunkBuilder::DoIn(HIn* instr) {
   LOperand* key = UseRegisterAtStart(instr->key());
   LOperand* object = UseRegisterAtStart(instr->object());
-  LIn* result = new LIn(key, object);
+  LIn* result = new(zone()) LIn(key, object);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* object = UseFixed(instr->enumerable(), r0);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+  return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseRegister(instr->index());
+  return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 8c18760..62cde6e 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,6 +49,7 @@
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
+  V(AllocateObject)                             \
   V(ApplyArguments)                             \
   V(ArgumentsElements)                          \
   V(ArgumentsLength)                            \
@@ -87,11 +88,13 @@
   V(ConstantI)                                  \
   V(ConstantT)                                  \
   V(Context)                                    \
+  V(DeclareGlobals)                             \
   V(DeleteProperty)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
   V(ElementsKind)                               \
+  V(FastLiteral)                                \
   V(FixedArrayBaseLength)                       \
   V(FunctionLiteral)                            \
   V(GetCachedArrayIndex)                        \
@@ -107,10 +110,12 @@
   V(Integer32ToDouble)                          \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
-  V(IsNullAndBranch)                            \
+  V(IsNilAndBranch)                             \
   V(IsObjectAndBranch)                          \
+  V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
+  V(StringCompareAndBranch)                     \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -138,6 +143,7 @@
   V(Parameter)                                  \
   V(Power)                                      \
   V(PushArgument)                               \
+  V(Random)                                     \
   V(RegExpLiteral)                              \
   V(Return)                                     \
   V(ShiftI)                                     \
@@ -162,11 +168,18 @@
   V(ThisFunction)                               \
   V(Throw)                                      \
   V(ToFastProperties)                           \
+  V(TransitionElementsKind)                     \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
   V(UnknownOSRValue)                            \
-  V(ValueOf)
+  V(ValueOf)                                    \
+  V(ForInPrepareMap)                            \
+  V(ForInCacheArray)                            \
+  V(CheckMapValue)                              \
+  V(LoadFieldByIndex)                           \
+  V(DateField)                                  \
+  V(WrapReceiver)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -456,6 +469,20 @@
 };
 
 
+class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+
 class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
  public:
   LApplyArguments(LOperand* function,
@@ -627,16 +654,17 @@
 };
 
 
-class LIsNullAndBranch: public LControlInstruction<1, 0> {
+class LIsNilAndBranch: public LControlInstruction<1, 0> {
  public:
-  explicit LIsNullAndBranch(LOperand* value) {
+  explicit LIsNilAndBranch(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
 
-  bool is_strict() const { return hydrogen()->is_strict(); }
+  EqualityKind kind() const { return hydrogen()->kind(); }
+  NilValue nil() const { return hydrogen()->nil(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -656,6 +684,20 @@
 };
 
 
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -684,6 +726,23 @@
 };
 
 
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -794,18 +853,15 @@
 
 class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
+  LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  Token::Value op() const { return op_; }
+  Token::Value op() const { return hydrogen()->op(); }
 
   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
-  Token::Value op_;
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
 };
 
 
@@ -949,6 +1005,41 @@
 };
 
 
+class LDateField: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
+    inputs_[0] = date;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
+  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+  Smi* index() const { return index_; }
+
+ private:
+  Smi* index_;
+};
+
+
+class LSetDateField: public LTemplateInstruction<1, 2, 1> {
+ public:
+  LSetDateField(LOperand* date, LOperand* value, LOperand* temp, int index)
+      : index_(index) {
+    inputs_[0] = date;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DateField, "date-set-field")
+  DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+  int index() const { return index_; }
+
+ private:
+  int index_;
+};
+
+
 class LThrow: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LThrow(LOperand* value) {
@@ -993,6 +1084,17 @@
 };
 
 
+class LRandom: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LRandom(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Random, "random")
+  DECLARE_HYDROGEN_ACCESSOR(Random)
+};
+
+
 class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1209,6 +1311,8 @@
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
   DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+
+  LOperand* value() { return inputs_[0]; }
 };
 
 
@@ -1226,7 +1330,7 @@
   LOperand* global_object() { return InputAt(0); }
   Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() { return InputAt(1); }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 
@@ -1259,7 +1363,6 @@
   LOperand* context() { return InputAt(0); }
   LOperand* value() { return InputAt(1); }
   int slot_index() { return hydrogen()->slot_index(); }
-  int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1276,7 +1379,9 @@
 
 
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
@@ -1298,6 +1403,13 @@
 };
 
 
+class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
 class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LGlobalObject(LOperand* context) {
@@ -1379,12 +1491,17 @@
 };
 
 
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LCallFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
   DECLARE_HYDROGEN_ACCESSOR(CallFunction)
 
-  int arity() const { return hydrogen()->argument_count() - 2; }
+  LOperand* function() { return inputs_[0]; }
+  int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
@@ -1560,7 +1677,6 @@
   Handle<Object> name() const { return hydrogen()->name(); }
   bool is_in_object() { return hydrogen()->is_in_object(); }
   int offset() { return hydrogen()->offset(); }
-  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
   Handle<Map> transition() const { return hydrogen()->transition(); }
 };
 
@@ -1580,7 +1696,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* value() { return inputs_[1]; }
   Handle<Object> name() const { return hydrogen()->name(); }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 
@@ -1642,7 +1758,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
@@ -1668,6 +1784,30 @@
 };
 
 
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* new_map_temp,
+                          LOperand* temp_reg) {
+    inputs_[0] = object;
+    temps_[0] = new_map_temp;
+    temps_[1] = temp_reg;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_reg() { return temps_[0]; }
+  LOperand* temp_reg() { return temps_[1]; }
+  Handle<Map> original_map() { return hydrogen()->original_map(); }
+  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+};
+
+
 class LStringAdd: public LTemplateInstruction<1, 2, 0> {
  public:
   LStringAdd(LOperand* left, LOperand* right) {
@@ -1731,6 +1871,8 @@
     inputs_[0] = value;
   }
 
+  LOperand* value() { return InputAt(0); }
+
   DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
   DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
 };
@@ -1831,6 +1973,25 @@
 };
 
 
+class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
+ public:
+  LAllocateObject(LOperand* temp1, LOperand* temp2) {
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
+};
+
+
+class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
+};
+
+
 class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
@@ -1971,6 +2132,62 @@
 };
 
 
+class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInPrepareMap(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
 class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
@@ -2038,6 +2255,7 @@
       : chunk_(NULL),
         info_(info),
         graph_(graph),
+        zone_(graph->isolate()->zone()),
         status_(UNUSED),
         current_instruction_(NULL),
         current_block_(NULL),
@@ -2067,6 +2285,7 @@
   LChunk* chunk() const { return chunk_; }
   CompilationInfo* info() const { return info_; }
   HGraph* graph() const { return graph_; }
+  Zone* zone() const { return zone_; }
 
   bool is_unused() const { return status_ == UNUSED; }
   bool is_building() const { return status_ == BUILDING; }
@@ -2076,7 +2295,6 @@
   void Abort(const char* format, ...);
 
   // Methods for getting operands for Use / Define / Temp.
-  LRegister* ToOperand(Register reg);
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(DoubleRegister reg);
 
@@ -2127,8 +2345,6 @@
       LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
                            LUnallocated* result);
   template<int I, int T>
-      LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
-  template<int I, int T>
       LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
   template<int I, int T>
       LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
@@ -2159,12 +2375,12 @@
       LInstruction* instr, int ast_id);
   void ClearInstructionPendingDeoptimizationEnvironment();
 
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator);
 
   void VisitInstruction(HInstruction* current);
 
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
-  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
@@ -2174,6 +2390,7 @@
   LChunk* chunk_;
   CompilationInfo* info_;
   HGraph* const graph_;
+  Zone* zone_;
   Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 4a201ab..82b80a2 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -62,11 +62,19 @@
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
-  HPhase phase("Code generation", chunk());
+  HPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
   CpuFeatures::Scope scope1(VFP3);
   CpuFeatures::Scope scope2(ARMv7);
+
+  CodeStub::GenerateFPStubs();
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -135,7 +143,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). r5 is zero for method calls and non-zero for
   // function calls.
-  if (info_->is_strict_mode() || info_->is_native()) {
+  if (!info_->is_classic_mode() || info_->is_native()) {
     Label ok;
     __ cmp(r5, Operand(0));
     __ b(eq, &ok);
@@ -190,13 +198,11 @@
         // Load parameter from stack.
         __ ldr(r0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        __ mov(r1, Operand(Context::SlotOffset(var->index())));
-        __ str(r0, MemOperand(cp, r1));
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have to use two more registers to avoid
-        // clobbering cp.
-        __ mov(r2, Operand(cp));
-        __ RecordWrite(r2, Operand(r1), r3, r0);
+        MemOperand target = ContextOperand(cp, var->index());
+        __ str(r0, target);
+        // Update the write barrier. This clobbers r3 and r0.
+        __ RecordWriteContextSlot(
+            cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
       }
     }
     Comment(";;; End allocate local context");
@@ -238,6 +244,9 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      Comment(";;; Deferred code @%d: %s.",
+              code->instruction_index(),
+              code->instr()->Mnemonic());
       code->Generate();
       __ jmp(code->exit());
     }
@@ -253,7 +262,7 @@
 
 bool LCodeGen::GenerateDeoptJumpTable() {
   // Check that the jump table is accessible from everywhere in the function
-  // code, ie that offsets to the table can be encoded in the 24bit signed
+  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
   // immediate of a branch instruction.
   // To simplify we consider the code size from the first instruction to the
   // end of the jump table. We also don't consider the pc load delta.
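For context on the 24-bit constraint this function checks: an ARM branch
encodes a signed 24-bit word offset, giving a reach of roughly +/-32 MB. A
quick sanity check of that bound (illustrative only):

    #include <cstdint>

    // Signed 24-bit word offset, scaled by the 4-byte instruction size.
    constexpr int32_t kMaxForwardBranch = ((1 << 23) - 1) * 4;  // ~ +32 MB
    constexpr int32_t kMaxBackwardBranch = -(1 << 23) * 4;      // ~ -32 MB
    static_assert(kMaxForwardBranch == 33554428, "24-bit branch range");
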
@@ -312,7 +321,22 @@
   if (op->IsRegister()) {
     return ToRegister(op->index());
   } else if (op->IsConstantOperand()) {
-    __ mov(scratch, ToOperand(op));
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
+    } else if (r.IsDouble()) {
+      Abort("EmitLoadRegister: Unsupported double immediate.");
+    } else {
+      ASSERT(r.IsTagged());
+      if (literal->IsSmi()) {
+        __ mov(scratch, Operand(literal));
+      } else {
+        __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
+      }
+    }
     return scratch;
   } else if (op->IsStackSlot() || op->IsArgument()) {
     __ ldr(scratch, ToMemOperand(op));
@@ -361,6 +385,18 @@
 }
 
 
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  Handle<Object> literal = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+  return literal;
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
+}
+
+
 int LCodeGen::ToInteger32(LConstantOperand* op) const {
   Handle<Object> value = chunk_->LookupLiteral(op);
   ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
@@ -370,6 +406,12 @@
 }
 
 
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  return value->Number();
+}
+
+
 Operand LCodeGen::ToOperand(LOperand* op) {
   if (op->IsConstantOperand()) {
     LConstantOperand* const_op = LConstantOperand::cast(op);
@@ -437,7 +479,19 @@
 
   WriteTranslation(environment->outer(), translation);
   int closure_id = DefineDeoptimizationLiteral(environment->closure());
-  translation->BeginFrame(environment->ast_id(), closure_id, height);
+  switch (environment->frame_type()) {
+    case JS_FUNCTION:
+      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+      break;
+    case JS_CONSTRUCT:
+      translation->BeginConstructStubFrame(closure_id, translation_size);
+      break;
+    case ARGUMENTS_ADAPTOR:
+      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+      break;
+    default:
+      UNREACHABLE();
+  }
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
     // spilled_registers_ and spilled_double_registers_ are either
@@ -570,10 +624,14 @@
     // |>------------  translation_size ------------<|
 
     int frame_count = 0;
+    int jsframe_count = 0;
     for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
       ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
     }
-    Translation translation(&translations_, frame_count);
+    Translation translation(&translations_, frame_count, jsframe_count);
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
     int pc_offset = masm()->pc_offset();
@@ -590,7 +648,6 @@
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
   Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
-  ASSERT(entry != NULL);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -623,7 +680,6 @@
 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
-  ASSERT(FLAG_deopt);
   Handle<DeoptimizationInputData> data =
       factory()->NewDeoptimizationInputData(length, TENURED);
 
@@ -699,7 +755,7 @@
     Safepoint::DeoptMode deopt_mode) {
   ASSERT(expected_safepoint_kind_ == kind);
 
-  const ZoneList<LOperand*>* operands = pointers->operands();
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deopt_mode);
   for (int i = 0; i < operands->length(); i++) {
@@ -986,6 +1042,7 @@
     virtual void Generate() {
       codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
     }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LDivI* instr_;
   };
@@ -1321,8 +1378,13 @@
 
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
-  ASSERT(instr->result()->IsRegister());
-  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+  Handle<Object> value = instr->value();
+  if (value->IsSmi()) {
+    __ mov(ToRegister(instr->result()), Operand(value));
+  } else {
+    __ LoadHeapObject(ToRegister(instr->result()),
+                      Handle<HeapObject>::cast(value));
+  }
 }
 
 
@@ -1375,6 +1437,46 @@
 }
 
 
+void LCodeGen::DoDateField(LDateField* instr) {
+  Register object = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->TempAt(0));
+  Smi* index = instr->index();
+  Label runtime, done;
+  ASSERT(object.is(result));
+  ASSERT(object.is(r0));
+  ASSERT(!scratch.is(scratch0()));
+  ASSERT(!scratch.is(object));
+
+#ifdef DEBUG
+  __ AbortIfSmi(object);
+  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
+  __ Assert(eq, "Trying to get date field from non-date.");
+#endif
+
+  if (index->value() == 0) {
+    __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ mov(scratch, Operand(stamp));
+      __ ldr(scratch, MemOperand(scratch));
+      __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
+      __ cmp(scratch, scratch0());
+      __ b(ne, &runtime);
+      __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+                                             kPointerSize * index->value()));
+      __ jmp(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2, scratch);
+    __ mov(r1, Operand(index));
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ bind(&done);
+  }
+}
+
+
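The fast path in DoDateField relies on a cache-stamp protocol: cached date
fields are trusted only while the object's stamp matches the isolate-wide
date cache stamp, and anything else (including the uncached fields at
index >= JSDate::kFirstUncachedField, a distinction this sketch omits) goes
through the C function. A C++ sketch with an assumed JSDate-like layout
(names here are illustrative):

    struct DateCacheSketch { int stamp; };

    struct JSDateSketch {
      double value;       // field 0: the time value, never cached
      int cache_stamp;    // equals DateCacheSketch::stamp while caches hold
      double fields[8];   // lazily cached fields (year, month, day, ...)
    };

    double GetDateField(JSDateSketch* date, DateCacheSketch* cache, int index,
                        double (*runtime_call)(JSDateSketch*, int)) {
      if (index == 0) return date->value;       // always a direct load
      if (date->cache_stamp == cache->stamp) {  // caches still valid?
        return date->fields[index - 1];         // fast path: one load
      }
      return runtime_call(date, index);         // stale: recompute in C++
    }
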
 void LCodeGen::DoBitNotI(LBitNotI* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
@@ -1649,30 +1751,44 @@
 }
 
 
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
-  __ cmp(ToRegister(left), ToRegister(right));
-}
-
-
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
+  Condition cond = TokenToCondition(instr->op(), false);
 
-  if (instr->is_double()) {
-    // Compare left and right as doubles and load the
-    // resulting flags into the normal status register.
-    __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
-    // If a NaN is involved, i.e. the result is unordered (V set),
-    // jump to false block label.
-    __ b(vs, chunk_->GetAssemblyLabel(false_block));
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block =
+      EvalComparison(instr->op(), left_val, right_val) ? true_block
+                                                       : false_block;
+    EmitGoto(next_block);
   } else {
-    EmitCmpI(left, right);
+    if (instr->is_double()) {
+      // Compare left and right operands as doubles and load the
+      // resulting flags into the normal status register.
+      __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
+      // If a NaN is involved, i.e. the result is unordered (V set),
+      // jump to false block label.
+      __ b(vs, chunk_->GetAssemblyLabel(false_block));
+    } else {
+      if (right->IsConstantOperand()) {
+        __ cmp(ToRegister(left),
+               Operand(ToInteger32(LConstantOperand::cast(right))));
+      } else if (left->IsConstantOperand()) {
+        __ cmp(ToRegister(right),
+               Operand(ToInteger32(LConstantOperand::cast(left))));
+        // We transposed the operands. Reverse the condition.
+        cond = ReverseCondition(cond);
+      } else {
+        __ cmp(ToRegister(left), ToRegister(right));
+      }
+    }
+    EmitBranch(true_block, false_block, cond);
   }
-
-  Condition cc = TokenToCondition(instr->op(), instr->is_double());
-  EmitBranch(true_block, false_block, cc);
 }
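Note the subtlety the comment above calls out: when only the left operand is
constant, the compare is emitted with its operands swapped, so the condition
must be mirrored, not negated. A sketch of the distinction:

    enum CondSketch { lt, le, gt, ge, eq_cond, ne_cond };

    // Transposing cmp(a, b) into cmp(b, a) mirrors the relation:
    // a < b  <=>  b > a.  Negating instead (lt -> ge) would be wrong.
    CondSketch ReverseConditionSketch(CondSketch cond) {
      switch (cond) {
        case lt: return gt;
        case le: return ge;
        case gt: return lt;
        case ge: return le;
        default: return cond;  // eq/ne are symmetric under transposition
      }
    }
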
 
 
@@ -1697,25 +1813,35 @@
 }
 
 
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
   Register scratch = scratch0();
   Register reg = ToRegister(instr->InputAt(0));
-
-  // TODO(fsc): If the expression is known to be a smi, then it's
-  // definitely not null. Jump to the false block.
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  // If the expression is known to be untagged or a smi, then it's definitely
+  // not null, and it can't be an undetectable object.
+  if (instr->hydrogen()->representation().IsSpecialization() ||
+      instr->hydrogen()->type().IsSmi()) {
+    EmitGoto(false_block);
+    return;
+  }
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ LoadRoot(ip, nil_value);
   __ cmp(reg, ip);
-  if (instr->is_strict()) {
+  if (instr->kind() == kStrictEquality) {
     EmitBranch(true_block, false_block, eq);
   } else {
+    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ b(eq, true_label);
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(ip, other_nil_value);
     __ cmp(reg, ip);
     __ b(eq, true_label);
     __ JumpIfSmi(reg, false_label);
@@ -1772,6 +1898,31 @@
 }
 
 
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string) {
+  __ JumpIfSmi(input, is_not_string);
+  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+  return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp1 = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond =
+      EmitIsString(reg, temp1, false_label);
+
+  EmitBranch(true_block, false_block, true_cond);
+}
+
+
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1797,6 +1948,41 @@
 }
 
 
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ cmp(r0, Operand(0));  // This instruction also signals no smi code inlined.
+
+  Condition condition = ComputeCompareCondition(op);
+
+  EmitBranch(true_block, false_block, condition);
+}
+
+
 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -1862,7 +2048,7 @@
 
 
 // Branches to a label or falls through with the answer in flags.  Trashes
-// the temp registers, but not the input.  Only input and temp2 may alias.
+// the temp registers, but not the input.
 void LCodeGen::EmitClassOfTest(Label* is_true,
                                Label* is_false,
                                Handle<String> class_name,
@@ -1870,30 +2056,40 @@
                                Register temp,
                                Register temp2) {
   ASSERT(!input.is(temp));
-  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
-  __ JumpIfSmi(input, is_false);
-  __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
-  __ b(lt, is_false);
+  ASSERT(!input.is(temp2));
+  ASSERT(!temp.is(temp2));
 
-  // Map is now in temp.
-  // Functions have class 'Function'.
-  __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+  __ JumpIfSmi(input, is_false);
+
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    __ b(ge, is_true);
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
+    __ b(lt, is_false);
+    __ b(eq, is_true);
+    __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+    __ b(eq, is_true);
   } else {
-    __ b(ge, is_false);
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do a signed compare with the width of the type range.
+    __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+    __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                          FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ b(gt, is_false);
   }
 
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   // Check if the constructor in the map is a function.
   __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
   // Objects with a non-function constructor have class 'Object'.
   __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
   if (class_name->IsEqualTo(CStrVector("Object"))) {
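The subtract-then-compare sequence in the non-Function branch above is the
standard single-compare range check. In C++ terms (a sketch, with plain ints
standing in for instance types):

    // (type >= first && type <= last) collapses into one compare once the
    // lower bound is subtracted out; instance types are small non-negative
    // values, so the signed compare used by the ARM code above is safe too.
    bool InRangeSketch(int type, int first, int last) {
      return static_cast<unsigned>(type - first) <=
             static_cast<unsigned>(last - first);
    }
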
@@ -1970,9 +2166,8 @@
     virtual void Generate() {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-
+    virtual LInstruction* instr() { return instr_; }
     Label* map_check() { return &map_check_; }
-
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
@@ -2002,7 +2197,10 @@
   // We use Factory::the_hole_value() on purpose instead of loading from the
   // root array to force relocation to be able to later patch with
   // the cached map.
-  __ mov(ip, Operand(factory()->the_hole_value()));
+  Handle<JSGlobalPropertyCell> cell =
+      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+  __ mov(ip, Operand(Handle<Object>(cell)));
+  __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
   __ cmp(map, Operand(ip));
   __ b(ne, &cache_miss);
   // We use Factory::the_hole_value() on purpose instead of loading from the
@@ -2057,7 +2255,7 @@
   // offset to the location of the map check.
   Register temp = ToRegister(instr->TempAt(0));
   ASSERT(temp.is(r4));
-  __ mov(InstanceofStub::right(), Operand(instr->function()));
+  __ LoadHeapObject(InstanceofStub::right(), instr->function());
   static const int kAdditionalDelta = 4;
   int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
   Label before_push_delta;
@@ -2078,26 +2276,6 @@
 }
 
 
-static Condition ComputeCompareCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return eq;
-    case Token::LT:
-      return lt;
-    case Token::GT:
-      return gt;
-    case Token::LTE:
-      return le;
-    case Token::GTE:
-      return ge;
-    default:
-      UNREACHABLE();
-      return kNoCondition;
-  }
-}
-
-
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
@@ -2106,9 +2284,6 @@
   __ cmp(r0, Operand(0));  // This instruction also signals no smi code inlined.
 
   Condition condition = ComputeCompareCondition(op);
-  if (op == Token::GT || op == Token::LTE) {
-    condition = ReverseCondition(condition);
-  }
   __ LoadRoot(ToRegister(instr->result()),
               Heap::kTrueValueRootIndex,
               condition);
@@ -2137,7 +2312,7 @@
   Register result = ToRegister(instr->result());
   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
   __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
-  if (instr->hydrogen()->check_hole_value()) {
+  if (instr->hydrogen()->RequiresHoleCheck()) {
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
     __ cmp(result, ip);
     DeoptimizeIf(eq, instr->environment());
@@ -2158,27 +2333,27 @@
 
 
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
-  Register value = ToRegister(instr->InputAt(0));
-  Register scratch = scratch0();
+  Register value = ToRegister(instr->value());
+  Register cell = scratch0();
 
   // Load the cell.
-  __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
+  __ mov(cell, Operand(instr->hydrogen()->cell()));
 
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
   // to update the property details in the property dictionary to mark
   // it as no longer deleted.
-  if (instr->hydrogen()->check_hole_value()) {
-    Register scratch2 = ToRegister(instr->TempAt(0));
-    __ ldr(scratch2,
-           FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
-    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-    __ cmp(scratch2, ip);
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    // We use a temp to check the payload (CompareRoot might clobber ip).
+    Register payload = ToRegister(instr->TempAt(0));
+    __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+    __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
     DeoptimizeIf(eq, instr->environment());
   }
 
   // Store the value.
-  __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+  __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+  // Cells are always rescanned, so no write barrier here.
 }
 
 
@@ -2187,7 +2362,7 @@
   ASSERT(ToRegister(instr->value()).is(r0));
 
   __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -2198,17 +2373,53 @@
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ ldr(result, ContextOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, ip);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr->environment());
+    } else {
+      __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
+    }
+  }
 }
 
 
 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
-  __ str(value, ContextOperand(context, instr->slot_index()));
-  if (instr->needs_write_barrier()) {
-    int offset = Context::SlotOffset(instr->slot_index());
-    __ RecordWrite(context, Operand(offset), value, scratch0());
+  Register scratch = scratch0();
+  MemOperand target = ContextOperand(context, instr->slot_index());
+
+  Label skip_assignment;
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ ldr(scratch, target);
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(scratch, ip);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr->environment());
+    } else {
+      __ b(ne, &skip_assignment);
+    }
   }
+
+  __ str(value, target);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context,
+                              target.offset(),
+                              value,
+                              scratch,
+                              kLRHasBeenSaved,
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
+  }
+
+  __ bind(&skip_assignment);
 }
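The INLINE_SMI_CHECK / OMIT_SMI_CHECK choice above works because the write
barrier only matters for heap pointers: on 32-bit ARM a smi carries a clear
low tag bit (this file asserts kHeapObjectTag == 1 elsewhere), so smi stores
can never create pointers the collector needs to track. A sketch of the
dynamic filter:

    #include <cstdint>

    // Values with the low bit clear are smis, not heap pointers, so the
    // barrier can skip them; when the type is statically known to be a
    // heap object, even this test is omitted (OMIT_SMI_CHECK).
    inline bool MayNeedWriteBarrierSketch(uintptr_t tagged_value) {
      return (tagged_value & 1) != 0;
    }
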
 
 
@@ -2228,9 +2439,9 @@
                                                Register object,
                                                Handle<Map> type,
                                                Handle<String> name) {
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsProperty() &&
+  ASSERT(lookup.IsFound() &&
          (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
   if (lookup.type() == FIELD) {
     int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2246,7 +2457,7 @@
     }
   } else {
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    LoadHeapObject(result, Handle<HeapObject>::cast(function));
+    __ LoadHeapObject(result, function);
   }
 }
 
@@ -2457,13 +2668,9 @@
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   }
 
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    // TODO(danno): If no hole check is required, there is no need to allocate
-    // elements into a temporary register, instead scratch can be used.
-    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-    __ cmp(scratch, Operand(kHoleNanUpper32));
-    DeoptimizeIf(eq, instr->environment());
-  }
+  __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+  __ cmp(scratch, Operand(kHoleNanUpper32));
+  DeoptimizeIf(eq, instr->environment());
 
   __ vldr(result, elements, 0);
 }
@@ -2534,6 +2741,7 @@
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -2591,15 +2799,10 @@
 }
 
 
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   Register receiver = ToRegister(instr->receiver());
   Register function = ToRegister(instr->function());
-  Register length = ToRegister(instr->length());
-  Register elements = ToRegister(instr->elements());
   Register scratch = scratch0();
-  ASSERT(receiver.is(r0));  // Used for parameter count.
-  ASSERT(function.is(r1));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(r0));
 
   // If the receiver is null or undefined, we have to pass the global
   // object as a receiver to normal functions. Values have to be
@@ -2640,6 +2843,18 @@
   __ ldr(receiver,
          FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  ASSERT(receiver.is(r0));  // Used for parameter count.
+  ASSERT(function.is(r1));  // Required by InvokeFunction.
+  ASSERT(ToRegister(instr->result()).is(r0));
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.
@@ -2674,7 +2889,7 @@
       this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in receiver which is r0, as expected
   // by InvokeFunction.
-  v8::internal::ParameterCount actual(receiver);
+  ParameterCount actual(receiver);
   __ InvokeFunction(function, actual, CALL_FUNCTION,
                     safepoint_generator, CALL_AS_METHOD);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2694,7 +2909,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadHeapObject(result, instr->hydrogen()->closure());
 }
 
 
@@ -2712,6 +2927,16 @@
 }
 
 
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  __ push(cp);  // The context is the first argument.
+  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
+  __ push(scratch0());
+  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
+  __ push(scratch0());
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register result = ToRegister(instr->result());
   __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
@@ -2729,31 +2954,41 @@
                                  int arity,
                                  LInstruction* instr,
                                  CallKind call_kind) {
-  // Change context if needed.
-  bool change_context =
-      (info()->closure()->context() != function->context()) ||
-      scope()->contains_with() ||
-      (scope()->num_heap_slots() > 0);
-  if (change_context) {
-    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-  }
-
-  // Set r0 to arguments count if adaption is not needed. Assumes that r0
-  // is available to write to at this point.
-  if (!function->NeedsArgumentsAdaption()) {
-    __ mov(r0, Operand(arity));
-  }
+  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+      function->shared()->formal_parameter_count() == arity;
 
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  // Invoke function.
-  __ SetCallKind(r5, call_kind);
-  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-  __ Call(ip);
+  if (can_invoke_directly) {
+    __ LoadHeapObject(r1, function);
+    // Change context if needed.
+    bool change_context =
+        (info()->closure()->context() != function->context()) ||
+        scope()->contains_with() ||
+        (scope()->num_heap_slots() > 0);
+    if (change_context) {
+      __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+    }
 
-  // Setup deoptimization.
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    // Set r0 to arguments count if adaptation is not needed. Assumes that r0
+    // is available to write to at this point.
+    if (!function->NeedsArgumentsAdaption()) {
+      __ mov(r0, Operand(arity));
+    }
+
+    // Invoke function.
+    __ SetCallKind(r5, call_kind);
+    __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+    __ Call(ip);
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+  }
 
   // Restore context.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2762,7 +2997,6 @@
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  __ mov(r1, Operand(instr->function()));
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
@@ -2860,6 +3094,7 @@
     virtual void Generate() {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LUnaryMathOperation* instr_;
   };
@@ -2990,68 +3225,131 @@
 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
   DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
   DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done;
+  __ vmov(temp, -V8_INFINITY);
+  __ VFPCompareAndSetFlags(input, temp);
+  __ vneg(result, temp, eq);
+  __ b(&done, eq);
+
   // Add +0 to convert -0 to +0.
   __ vadd(result, input, kDoubleRegZero);
   __ vsqrt(result, result);
+  __ bind(&done);
 }
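The extra compare exists because IEEE sqrt and ES5 Math.pow disagree on
-Infinity, and the +0 addition normalizes -0 (whose square root would
otherwise be -0, not the +0 that pow requires). These identities can be
checked on the host:

    #include <cmath>
    #include <cstdio>

    int main() {
      std::printf("%f\n", std::sqrt(-INFINITY));      // nan
      std::printf("%f\n", std::pow(-INFINITY, 0.5));  // inf (ES5 15.8.2.13)
      std::printf("%f\n", std::sqrt(-0.0));           // -0 (IEEE 754)
      std::printf("%f\n", std::sqrt(-0.0 + 0.0));     // 0, as pow requires
      return 0;
    }
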
 
 
 void LCodeGen::DoPower(LPower* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  Register scratch = scratch0();
-  DoubleRegister result_reg = ToDoubleRegister(instr->result());
   Representation exponent_type = instr->hydrogen()->right()->representation();
-  if (exponent_type.IsDouble()) {
-    // Prepare arguments and call C function.
-    __ PrepareCallCFunction(0, 2, scratch);
-    __ SetCallCDoubleArguments(ToDoubleRegister(left),
-                               ToDoubleRegister(right));
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 0, 2);
-  } else if (exponent_type.IsInteger32()) {
-    ASSERT(ToRegister(right).is(r0));
-    // Prepare arguments and call C function.
-    __ PrepareCallCFunction(1, 1, scratch);
-    __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
-    __ CallCFunction(
-        ExternalReference::power_double_int_function(isolate()), 1, 1);
-  } else {
-    ASSERT(exponent_type.IsTagged());
-    ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+         ToDoubleRegister(instr->InputAt(1)).is(d2));
+  ASSERT(!instr->InputAt(1)->IsRegister() ||
+         ToRegister(instr->InputAt(1)).is(r2));
+  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
+  ASSERT(ToDoubleRegister(instr->result()).is(d3));
 
-    Register right_reg = ToRegister(right);
-
-    // Check for smi on the right hand side.
-    Label non_smi, call;
-    __ JumpIfNotSmi(right_reg, &non_smi);
-
-    // Untag smi and convert it to a double.
-    __ SmiUntag(right_reg);
-    SwVfpRegister single_scratch = double_scratch0().low();
-    __ vmov(single_scratch, right_reg);
-    __ vcvt_f64_s32(result_reg, single_scratch);
-    __ jmp(&call);
-
-    // Heap number map check.
-    __ bind(&non_smi);
-    __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
+  if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(r2, &no_deopt);
+    __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-    __ cmp(scratch, Operand(ip));
+    __ cmp(r7, Operand(ip));
     DeoptimizeIf(ne, instr->environment());
-    int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
-    __ add(scratch, right_reg, Operand(value_offset));
-    __ vldr(result_reg, scratch, 0);
-
-    // Prepare arguments and call C function.
-    __ bind(&call);
-    __ PrepareCallCFunction(0, 2, scratch);
-    __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 0, 2);
+    __ bind(&no_deopt);
+    MathPowStub stub(MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    ASSERT(exponent_type.IsDouble());
+    MathPowStub stub(MathPowStub::DOUBLE);
+    __ CallStub(&stub);
   }
-  // Store the result in the result register.
-  __ GetCFunctionDoubleResult(result_reg);
+}
+
+
+void LCodeGen::DoRandom(LRandom* instr) {
+  class DeferredDoRandom: public LDeferredCode {
+   public:
+    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LRandom* instr_;
+  };
+
+  DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+
+  // Having marked this instruction as a call, we can use any
+  // registers.
+  ASSERT(ToDoubleRegister(instr->result()).is(d7));
+  ASSERT(ToRegister(instr->InputAt(0)).is(r0));
+
+  static const int kSeedSize = sizeof(uint32_t);
+  STATIC_ASSERT(kPointerSize == kSeedSize);
+
+  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+  static const int kRandomSeedOffset =
+      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
+  __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
+  // r2: FixedArray of the global context's random seeds
+
+  // Load state[0].
+  __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
+  __ cmp(r1, Operand(0));
+  __ b(eq, deferred->entry());
+  // Load state[1].
+  __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
+  // r1: state[0].
+  // r0: state[1].
+
+  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
+  __ and_(r3, r1, Operand(0xFFFF));
+  __ mov(r4, Operand(18273));
+  __ mul(r3, r3, r4);
+  __ add(r1, r3, Operand(r1, LSR, 16));
+  // Save state[0].
+  __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
+
+  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
+  __ and_(r3, r0, Operand(0xFFFF));
+  __ mov(r4, Operand(36969));
+  __ mul(r3, r3, r4);
+  __ add(r0, r3, Operand(r0, LSR, 16));
+  // Save state[1].
+  __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
+
+  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
+  __ and_(r0, r0, Operand(0x3FFFF));
+  __ add(r0, r0, Operand(r1, LSL, 14));
+
+  __ bind(deferred->exit());
+  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+  // Create this constant using mov/orr to avoid PC relative load.
+  __ mov(r1, Operand(0x41000000));
+  __ orr(r1, r1, Operand(0x300000));
+  // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
+  __ vmov(d7, r0, r1);
+  // Move 0x4130000000000000 to VFP.
+  __ mov(r0, Operand(0, RelocInfo::NONE));
+  __ vmov(d8, r0, r1);
+  // Subtract and store the result in the heap number.
+  __ vsub(d7, d7, d8);
+}
+
+
+void LCodeGen::DoDeferredRandom(LRandom* instr) {
+  __ PrepareCallCFunction(1, scratch0());
+  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+  // Return value is in r0.
 }
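Taken together, DoRandom and its deferred slow path implement two 16-bit
multiply-with-carry streams plus an exponent bit-trick to turn 32 random bits
into a double in [0, 1). A C++ model of the fast path (a sketch of the
emitted logic, not of the generated assembly; the real seeds live in the
global context):

    #include <cstdint>
    #include <cstring>

    static uint32_t state0 = 1, state1 = 2;  // stand-ins for the seeds

    double RandomSketch() {
      // Two multiply-with-carry steps, as in the generated code.
      state0 = 18273u * (state0 & 0xFFFF) + (state0 >> 16);
      state1 = 36969u * (state1 & 0xFFFF) + (state1 >> 16);
      uint32_t bits = (state0 << 14) + (state1 & 0x3FFFF);

      // Splicing the random bits into the mantissa of 2^20 yields the
      // double 2^20 + bits * 2^-32; subtracting 2^20 leaves a uniform
      // value in [0, 1).
      uint64_t raw = (uint64_t{0x41300000} << 32) | bits;
      double result;
      std::memcpy(&result, &raw, sizeof result);
      return result - 1048576.0;  // 1048576.0 == 2^20
    }
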
 
 
@@ -3063,6 +3361,14 @@
 }
 
 
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(d2));
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(d2));
   TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -3102,6 +3408,9 @@
     case kMathSin:
       DoMathSin(instr);
       break;
+    case kMathTan:
+      DoMathTan(instr);
+      break;
     case kMathLog:
       DoMathLog(instr);
       break;
@@ -3151,12 +3460,12 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(r1));
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ Drop(1);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
@@ -3176,7 +3485,6 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  __ mov(r1, Operand(instr->target()));
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
@@ -3185,9 +3493,9 @@
   ASSERT(ToRegister(instr->InputAt(0)).is(r1));
   ASSERT(ToRegister(instr->result()).is(r0));
 
-  Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
+  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   __ mov(r0, Operand(instr->arity()));
-  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
 }
 
 
@@ -3210,19 +3518,36 @@
   }
 
   // Do the store.
+  HType type = instr->hydrogen()->value()->type();
+  SmiCheck check_needed =
+      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (instr->is_in_object()) {
     __ str(value, FieldMemOperand(object, offset));
-    if (instr->needs_write_barrier()) {
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
       // Update the write barrier for the object for in-object properties.
-      __ RecordWrite(object, Operand(offset), value, scratch);
+      __ RecordWriteField(object,
+                          offset,
+                          value,
+                          scratch,
+                          kLRHasBeenSaved,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
     }
   } else {
     __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
     __ str(value, FieldMemOperand(scratch, offset));
-    if (instr->needs_write_barrier()) {
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
-      __ RecordWrite(scratch, Operand(offset), value, object);
+      __ RecordWriteField(scratch,
+                          offset,
+                          value,
+                          object,
+                          kLRHasBeenSaved,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
     }
   }
 }
@@ -3234,7 +3559,7 @@
 
   // Name is always in r2.
   __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3266,9 +3591,18 @@
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
-    __ add(key, scratch, Operand(FixedArray::kHeaderSize));
-    __ RecordWrite(elements, key, value);
+    __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   kLRHasBeenSaved,
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed);
   }
 }
 
@@ -3369,6 +3703,7 @@
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -3383,13 +3718,55 @@
   ASSERT(ToRegister(instr->key()).is(r1));
   ASSERT(ToRegister(instr->value()).is(r0));
 
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register new_map_reg = ToRegister(instr->new_map_reg());
+  Register scratch = scratch0();
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = from_map->elements_kind();
+  ElementsKind to_kind = to_map->elements_kind();
+
+  Label not_applicable;
+  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(from_map));
+  __ b(ne, &not_applicable);
+  __ mov(new_map_reg, Operand(to_map));
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    // Write barrier.
+    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+                        scratch, kLRHasBeenSaved, kDontSaveFPRegs);
+  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+      to_kind == FAST_DOUBLE_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(r2));
+    ASSERT(new_map_reg.is(r3));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+             RelocInfo::CODE_TARGET, instr);
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(r2));
+    ASSERT(new_map_reg.is(r3));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+             RelocInfo::CODE_TARGET, instr);
+  } else {
+    UNREACHABLE();
+  }
+  __ bind(&not_applicable);
+}
+
+
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   __ push(ToRegister(instr->left()));
   __ push(ToRegister(instr->right()));
@@ -3404,87 +3781,19 @@
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
 
-  Register string = ToRegister(instr->string());
-  Register index = ToRegister(instr->index());
-  Register result = ToRegister(instr->result());
-
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  // Fetch the instance type of the receiver into result register.
-  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
-  // We need special handling for indirect strings.
-  Label check_sequential;
-  __ tst(result, Operand(kIsIndirectStringMask));
-  __ b(eq, &check_sequential);
-
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ tst(result, Operand(kSlicedNotConsMask));
-  __ b(eq, &cons_string);
-
-  // Handle slices.
-  Label indirect_string_loaded;
-  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
-  __ add(index, index, Operand(result, ASR, kSmiTagSize));
-  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded);
-
-  // Handle conses.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ bind(&cons_string);
-  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
-  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
-  __ cmp(result, ip);
-  __ b(ne, deferred->entry());
-  // Get the first of the two strings and load its instance type.
-  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
-  // Check whether the string is sequential. The only non-sequential
-  // shapes we support have just been unwrapped above.
-  __ bind(&check_sequential);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(result, Operand(kStringRepresentationMask));
-  __ b(ne, deferred->entry());
-
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii_string;
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ tst(result, Operand(kStringEncodingMask));
-  __ b(ne, &ascii_string);
-
-  // Two-byte string.
-  // Load the two-byte character code into the result register.
-  Label done;
-  __ add(result,
-         string,
-         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  __ ldrh(result, MemOperand(result, index, LSL, 1));
-  __ jmp(&done);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ add(result,
-         string,
-         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ ldrb(result, MemOperand(result, index));
-
-  __ bind(&done);
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
   __ bind(deferred->exit());
 }
 
@@ -3527,6 +3836,7 @@
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -3598,16 +3908,16 @@
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagI* instr_;
   };
 
-  LOperand* input = instr->InputAt(0);
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
-  Register reg = ToRegister(input);
+  Register src = ToRegister(instr->InputAt(0));
+  Register dst = ToRegister(instr->result());
 
   DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
-  __ SmiTag(reg, SetCC);
+  __ SmiTag(dst, src, SetCC);
   __ b(vs, deferred->entry());
   __ bind(deferred->exit());
 }
@@ -3615,7 +3925,8 @@
 
 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
   Label slow;
-  Register reg = ToRegister(instr->InputAt(0));
+  Register src = ToRegister(instr->InputAt(0));
+  Register dst = ToRegister(instr->result());
   DoubleRegister dbl_scratch = double_scratch0();
   SwVfpRegister flt_scratch = dbl_scratch.low();
 
@@ -3626,14 +3937,16 @@
   // disagree. Try to allocate a heap number in new space and store
   // the value in there. If that fails, call the runtime system.
   Label done;
-  __ SmiUntag(reg);
-  __ eor(reg, reg, Operand(0x80000000));
-  __ vmov(flt_scratch, reg);
+  if (dst.is(src)) {
+    __ SmiUntag(src, dst);
+    __ eor(src, src, Operand(0x80000000));
+  }
+  __ vmov(flt_scratch, src);
   __ vcvt_f64_s32(dbl_scratch, flt_scratch);
   if (FLAG_inline_new) {
     __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
-    if (!reg.is(r5)) __ mov(reg, r5);
+    __ Move(dst, r5);
     __ b(&done);
   }
 
@@ -3644,16 +3957,16 @@
   // register is stored, as this register is in the pointer map, but contains an
   // integer value.
   __ mov(ip, Operand(0));
-  __ StoreToSafepointRegisterSlot(ip, reg);
+  __ StoreToSafepointRegisterSlot(ip, dst);
   CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
-  if (!reg.is(r0)) __ mov(reg, r0);
+  __ Move(dst, r0);
 
   // Done. Put the value in dbl_scratch into the value of the allocated heap
   // number.
   __ bind(&done);
-  __ sub(ip, reg, Operand(kHeapObjectTag));
+  __ sub(ip, dst, Operand(kHeapObjectTag));
   __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
-  __ StoreToSafepointRegisterSlot(reg, reg);
+  __ StoreToSafepointRegisterSlot(dst, dst);
 }
 
 
@@ -3663,6 +3976,7 @@
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -3700,23 +4014,21 @@
 
 
 void LCodeGen::DoSmiTag(LSmiTag* instr) {
-  LOperand* input = instr->InputAt(0);
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
   ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
-  __ SmiTag(ToRegister(input));
+  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
 }
 
 
 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
-  LOperand* input = instr->InputAt(0);
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
   if (instr->needs_check()) {
     STATIC_ASSERT(kHeapObjectTag == 1);
     // If the input is a HeapObject, SmiUntag will set the carry flag.
-    __ SmiUntag(ToRegister(input), SetCC);
+    __ SmiUntag(result, input, SetCC);
     DeoptimizeIf(cs, instr->environment());
   } else {
-    __ SmiUntag(ToRegister(input));
+    __ SmiUntag(result, input);
   }
 }
 
@@ -3724,6 +4036,7 @@
 void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 DoubleRegister result_reg,
                                 bool deoptimize_on_undefined,
+                                bool deoptimize_on_minus_zero,
                                 LEnvironment* env) {
   Register scratch = scratch0();
   SwVfpRegister flt_scratch = double_scratch0().low();
@@ -3732,7 +4045,7 @@
   Label load_smi, heap_number, done;
 
   // Smi check.
-  __ JumpIfSmi(input_reg, &load_smi);
+  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
 
   // Heap number map check.
   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -3759,28 +4072,25 @@
   // Heap number to double register conversion.
   __ sub(ip, input_reg, Operand(kHeapObjectTag));
   __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+  if (deoptimize_on_minus_zero) {
+    __ vmov(ip, result_reg.low());
+    __ cmp(ip, Operand(0));
+    __ b(ne, &done);
+    __ vmov(ip, result_reg.high());
+    __ cmp(ip, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(eq, env);
+  }
   __ jmp(&done);
 
   // Smi to double register conversion
   __ bind(&load_smi);
-  __ SmiUntag(input_reg);  // Untag smi before converting to float.
-  __ vmov(flt_scratch, input_reg);
+  // scratch: untagged value of input_reg
+  __ vmov(flt_scratch, scratch);
   __ vcvt_f64_s32(result_reg, flt_scratch);
-  __ SmiTag(input_reg);  // Retag smi.
   __ bind(&done);
 }
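The two-word compare added for deoptimize_on_minus_zero singles out the one
bit pattern that is -0.0. The equivalent host-side test:

    #include <cstdint>
    #include <cstring>

    // -0.0 is the unique double whose low word is zero and whose high word
    // is exactly the sign bit (HeapNumber::kSignMask, i.e. 0x80000000).
    bool IsMinusZeroSketch(double d) {
      uint64_t raw;
      std::memcpy(&raw, &d, sizeof raw);
      return raw == (uint64_t{0x80000000} << 32);
    }
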
 
 
-class DeferredTaggedToI: public LDeferredCode {
- public:
-  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-      : LDeferredCode(codegen), instr_(instr) { }
-  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
-  LTaggedToI* instr_;
-};
-
-
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Register input_reg = ToRegister(instr->InputAt(0));
   Register scratch1 = scratch0();
@@ -3863,6 +4173,16 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
@@ -3892,6 +4212,7 @@
 
   EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
+                   instr->hydrogen()->deoptimize_on_minus_zero(),
                    instr->environment());
 }
 
@@ -3989,21 +4310,42 @@
 
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  ASSERT(instr->InputAt(0)->IsRegister());
-  Register reg = ToRegister(instr->InputAt(0));
-  __ cmp(reg, Operand(instr->hydrogen()->target()));
+  Register reg = ToRegister(instr->value());
+  Handle<JSFunction> target = instr->hydrogen()->target();
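+  // A target in new space may move, so its address cannot be embedded in the
+  // code directly; compare against the value of a global property cell, which
+  // the GC keeps up to date, instead.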
+  if (isolate()->heap()->InNewSpace(*target)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(target);
+    __ mov(ip, Operand(Handle<Object>(cell)));
+    __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+    __ cmp(reg, ip);
+  } else {
+    __ cmp(reg, Operand(target));
+  }
   DeoptimizeIf(ne, instr->environment());
 }
 
 
+void LCodeGen::DoCheckMapCommon(Register reg,
+                                Register scratch,
+                                Handle<Map> map,
+                                CompareMapMode mode,
+                                LEnvironment* env) {
+  Label success;
+  __ CompareMap(reg, scratch, map, &success, mode);
+  DeoptimizeIf(ne, env);
+  __ bind(&success);
+}
+
+
 void LCodeGen::DoCheckMap(LCheckMap* instr) {
   Register scratch = scratch0();
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
-  __ cmp(scratch, Operand(instr->hydrogen()->map()));
-  DeoptimizeIf(ne, instr->environment());
+  Handle<Map> map = instr->hydrogen()->map();
+  DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
+                   instr->environment());
 }
 
 
@@ -4030,7 +4372,7 @@
   Label is_smi, done, heap_number;
 
   // Both smi and heap number cases are handled.
-  __ JumpIfSmi(input_reg, &is_smi);
+  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
 
   // Check for heap number
   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -4053,26 +4395,12 @@
 
   // smi
   __ bind(&is_smi);
-  __ SmiUntag(result_reg, input_reg);
   __ ClampUint8(result_reg, result_reg);
 
   __ bind(&done);
 }
 
 
-void LCodeGen::LoadHeapObject(Register result,
-                              Handle<HeapObject> object) {
-  if (heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        factory()->NewJSGlobalPropertyCell(object);
-    __ mov(result, Operand(cell));
-    __ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
-  } else {
-    __ mov(result, Operand(object));
-  }
-}
-
-
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   Register temp1 = ToRegister(instr->TempAt(0));
   Register temp2 = ToRegister(instr->TempAt(1));
@@ -4081,31 +4409,127 @@
   Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  LoadHeapObject(temp1, current_prototype);
+  __ LoadHeapObject(temp1, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
-    __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
-    __ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
-    DeoptimizeIf(ne, instr->environment());
+    DoCheckMapCommon(temp1, temp2,
+                     Handle<Map>(current_prototype->map()),
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    LoadHeapObject(temp1, current_prototype);
+    __ LoadHeapObject(temp1, current_prototype);
   }
 
   // Check the holder map.
-  __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
-  __ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
+  DoCheckMapCommon(temp1, temp2,
+                   Handle<Map>(current_prototype->map()),
+                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
   DeoptimizeIf(ne, instr->environment());
 }
 
 
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+  class DeferredAllocateObject: public LDeferredCode {
+   public:
+    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocateObject* instr_;
+  };
+
+  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->TempAt(0));
+  Register scratch2 = ToRegister(instr->TempAt(1));
+  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+  Handle<Map> initial_map(constructor->initial_map());
+  int instance_size = initial_map->instance_size();
+  ASSERT(initial_map->pre_allocated_property_fields() +
+         initial_map->unused_property_fields() -
+         initial_map->inobject_properties() == 0);
+
+  // Allocate memory for the object.  The initial map might change when
+  // the constructor's prototype changes, but instance size and property
+  // counts remain unchanged (if slack tracking finished).
+  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
+  __ AllocateInNewSpace(instance_size,
+                        result,
+                        scratch,
+                        scratch2,
+                        deferred->entry(),
+                        TAG_OBJECT);
+
+  // Load the initial map.
+  Register map = scratch;
+  __ LoadHeapObject(map, constructor);
+  __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Initialize map and fields of the newly allocated object.
+  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+  __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+  __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
+  __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
+  if (initial_map->inobject_properties() != 0) {
+    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+    for (int i = 0; i < initial_map->inobject_properties(); i++) {
+      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+      __ str(scratch, FieldMemOperand(result, property_offset));
+    }
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+  Register result = ToRegister(instr->result());
+  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, Operand(0));
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ LoadHeapObject(r0, constructor);
+  __ push(r0);
+  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
+  __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  Heap* heap = isolate()->heap();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate_elements_kind();
+
+  // Deopt if the array literal boilerplate ElementsKind is of a type different
+  // from the expected one. The check isn't necessary if the boilerplate has
+  // already been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
+    // Load map into r2.
+    __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+    // Load the map's "bit field 2".
+    __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
+    __ cmp(r2, Operand(boilerplate_elements_kind));
+    DeoptimizeIf(ne, instr->environment());
+  }
+
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ mov(r1, Operand(instr->hydrogen()->constant_elements()));
+  // The boilerplate already exists, so its constant elements are never
+  // accessed.  Pass an empty fixed array instead.
+  __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
   __ Push(r3, r2, r1);
 
   // Pick the right runtime function or stub to call.
@@ -4122,26 +4546,164 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
 
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+                            Register result,
+                            Register source,
+                            int* offset) {
+  ASSERT(!source.is(r2));
+  ASSERT(!result.is(r2));
+
+  // Only elements backing stores for non-COW arrays need to be copied.
+  Handle<FixedArrayBase> elements(object->elements());
+  bool has_elements = elements->length() > 0 &&
+      elements->map() != isolate()->heap()->fixed_cow_array_map();
+
+  // Increase the offset so that subsequent objects end up right after
+  // this object and its backing store.
+  int object_offset = *offset;
+  int object_size = object->map()->instance_size();
+  int elements_offset = *offset + object_size;
+  int elements_size = has_elements ? elements->Size() : 0;
+  *offset += object_size + elements_size;
+
+  // Copy object header.
+  ASSERT(object->properties()->length() == 0);
+  int inobject_properties = object->map()->inobject_properties();
+  int header_size = object_size - inobject_properties * kPointerSize;
+  for (int i = 0; i < header_size; i += kPointerSize) {
+    if (has_elements && i == JSObject::kElementsOffset) {
+      __ add(r2, result, Operand(elements_offset));
+    } else {
+      __ ldr(r2, FieldMemOperand(source, i));
+    }
+    __ str(r2, FieldMemOperand(result, object_offset + i));
+  }
+
+  // Copy in-object properties.
+  for (int i = 0; i < inobject_properties; i++) {
+    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
+    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      __ add(r2, result, Operand(*offset));
+      __ str(r2, FieldMemOperand(result, total_offset));
+      __ LoadHeapObject(source, value_object);
+      EmitDeepCopy(value_object, result, source, offset);
+    } else if (value->IsHeapObject()) {
+      __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
+      __ str(r2, FieldMemOperand(result, total_offset));
+    } else {
+      __ mov(r2, Operand(value));
+      __ str(r2, FieldMemOperand(result, total_offset));
+    }
+  }
+
+  if (has_elements) {
+    // Copy elements backing store header.
+    __ LoadHeapObject(source, elements);
+    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
+      __ ldr(r2, FieldMemOperand(source, i));
+      __ str(r2, FieldMemOperand(result, elements_offset + i));
+    }
+
+    // Copy elements backing store content.
+    int elements_length = elements->length();
+    if (elements->IsFixedDoubleArray()) {
+      Handle<FixedDoubleArray> double_array =
+          Handle<FixedDoubleArray>::cast(elements);
+      for (int i = 0; i < elements_length; i++) {
+        int64_t value = double_array->get_representation(i);
+        // We only support little-endian mode, so the low word is stored first.
+        int32_t value_low = value & 0xFFFFFFFF;
+        int32_t value_high = value >> 32;
+        int total_offset =
+            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+        __ mov(r2, Operand(value_low));
+        __ str(r2, FieldMemOperand(result, total_offset));
+        __ mov(r2, Operand(value_high));
+        __ str(r2, FieldMemOperand(result, total_offset + 4));
+      }
+    } else if (elements->IsFixedArray()) {
+      for (int i = 0; i < elements_length; i++) {
+        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+        Handle<Object> value = JSObject::GetElement(object, i);
+        if (value->IsJSObject()) {
+          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+          __ add(r2, result, Operand(*offset));
+          __ str(r2, FieldMemOperand(result, total_offset));
+          __ LoadHeapObject(source, value_object);
+          EmitDeepCopy(value_object, result, source, offset);
+        } else if (value->IsHeapObject()) {
+          __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
+          __ str(r2, FieldMemOperand(result, total_offset));
+        } else {
+          __ mov(r2, Operand(value));
+          __ str(r2, FieldMemOperand(result, total_offset));
+        }
+      }
+    } else {
+      UNREACHABLE();
+    }
+  }
+}
+
+
+void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
+  int size = instr->hydrogen()->total_size();
+
+  // Allocate all objects that are part of the literal in one big
+  // allocation. This avoids multiple limit checks.
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ mov(r0, Operand(Smi::FromInt(size)));
+  __ push(r0);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+  __ bind(&allocated);
+  int offset = 0;
+  __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
+  EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
+  ASSERT_EQ(size, offset);
+}
+
+
 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
-  __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
+  Handle<FixedArray> literals(instr->environment()->closure()->literals());
+  Handle<FixedArray> constant_properties =
+      instr->hydrogen()->constant_properties();
+
+  // Set up the parameters to the stub/runtime call.
+  __ LoadHeapObject(r4, literals);
   __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ mov(r2, Operand(instr->hydrogen()->constant_properties()));
-  __ mov(r1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+  __ mov(r2, Operand(constant_properties));
+  int flags = instr->hydrogen()->fast_elements()
+      ? ObjectLiteral::kFastElements
+      : ObjectLiteral::kNoFlags;
+  __ mov(r1, Operand(Smi::FromInt(flags)));
   __ Push(r4, r3, r2, r1);
 
-  // Pick the right runtime function to call.
+  // Pick the right runtime function or stub to call.
+  int properties_count = constant_properties->length() / 2;
   if (instr->hydrogen()->depth() > 1) {
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
@@ -4214,8 +4776,7 @@
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && shared_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(shared_info->language_mode());
     __ mov(r1, Operand(shared_info));
     __ push(r1);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -4248,8 +4809,9 @@
                                                   false_label,
                                                   input,
                                                   instr->type_literal());
-
-  EmitBranch(true_block, false_block, final_branch_condition);
+  if (final_branch_condition != kNoCondition) {
+    EmitBranch(true_block, false_block, final_branch_condition);
+  }
 }
 
 
@@ -4295,10 +4857,12 @@
     final_branch_condition = ne;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
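+    // typeof returns "function" for both JSFunction and JSFunctionProxy.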
     __ JumpIfSmi(input, false_label);
-    __ CompareObjectType(input, input, scratch,
-                         FIRST_CALLABLE_SPEC_OBJECT_TYPE);
-    final_branch_condition = ge;
+    __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
+    __ b(eq, true_label);
+    __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
+    final_branch_condition = eq;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@@ -4317,9 +4881,7 @@
     final_branch_condition = eq;
 
   } else {
-    final_branch_condition = ne;
     __ b(false_label);
-    // A dead branch instruction will be generated after this point.
   }
 
   return final_branch_condition;
@@ -4430,6 +4992,7 @@
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStackCheck* instr_;
   };
@@ -4486,6 +5049,88 @@
 }
 
 
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
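+  // The object to enumerate is in r0.  Deoptimize if it is undefined, null,
+  // a smi, or a JS proxy.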
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
+  DeoptimizeIf(eq, instr->environment());
+
+  Register null_value = r5;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ cmp(r0, null_value);
+  DeoptimizeIf(eq, instr->environment());
+
+  __ tst(r0, Operand(kSmiTagMask));
+  DeoptimizeIf(eq, instr->environment());
+
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
+  DeoptimizeIf(le, instr->environment());
+
+  Label use_cache, call_runtime;
+  __ CheckEnumCache(null_value, &call_runtime);
+
+  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ b(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(r0);
+  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+  __ cmp(r1, ip);
+  DeoptimizeIf(ne, instr->environment());
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
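+  // Load the enum cache for the given map from its instance descriptors and
+  // deoptimize if there is no cache to use.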
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  __ LoadInstanceDescriptors(map, result);
+  __ ldr(result,
+         FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
+  __ ldr(result,
+         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+  __ cmp(result, Operand(0));
+  DeoptimizeIf(eq, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  Register map = ToRegister(instr->map());
+  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  __ cmp(map, scratch0());
+  DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  Label out_of_object, done;
+  __ cmp(index, Operand(0));
+  __ b(lt, &out_of_object);
+
+  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
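+  // The index is a smi, so an extra shift of kPointerSizeLog2 - kSmiTagSize
+  // turns it into a byte offset.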
+  __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+  __ b(&done);
+
+  __ bind(&out_of_object);
+  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // A negative index encodes an out-of-object property: index == -(p + 1),
+  // so the property index p is -index - 1.
+  __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(result, FieldMemOperand(scratch,
+                                 FixedArray::kHeaderSize - kPointerSize));
+  __ bind(&done);
+}
 
 
 #undef __
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 0e34c9f..adb6e1b 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -87,11 +87,15 @@
                                         SwVfpRegister flt_scratch,
                                         DoubleRegister dbl_scratch);
   int ToInteger32(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
   Operand ToOperand(LOperand* op);
   MemOperand ToMemOperand(LOperand* op) const;
   // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
   MemOperand ToHighMemOperand(LOperand* op) const;
 
+  bool IsInteger32(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+
   // Try to generate code for the entire chunk, but it may fail if the
   // chunk contains constructs we cannot handle. Returns true if the
   // code generation attempt succeeded.
@@ -110,11 +114,16 @@
   void DoDeferredTaggedToI(LTaggedToI* instr);
   void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
   void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredRandom(LRandom* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocateObject(LAllocateObject* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
 
+  void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
+                        CompareMapMode mode, LEnvironment* env);
+
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
   void DoGap(LGap* instr);
@@ -140,8 +149,8 @@
   bool is_done() const { return status_ == DONE; }
   bool is_aborted() const { return status_ == ABORTED; }
 
-  int strict_mode_flag() const {
-    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+  StrictModeFlag strict_mode_flag() const {
+    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -207,7 +216,7 @@
                                LInstruction* instr);
 
   // Generate a direct call to a known function.  Expects the function
-  // to be in edi.
+  // to be in r1.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
                          LInstruction* instr,
@@ -241,6 +250,7 @@
   void DoMathSqrt(LUnaryMathOperation* instr);
   void DoMathPowHalf(LUnaryMathOperation* instr);
   void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathTan(LUnaryMathOperation* instr);
   void DoMathCos(LUnaryMathOperation* instr);
   void DoMathSin(LUnaryMathOperation* instr);
 
@@ -262,17 +272,19 @@
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
-  void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
                         DoubleRegister result,
                         bool deoptimize_on_undefined,
+                        bool deoptimize_on_minus_zero,
                         LEnvironment* env);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(Label* true_label, Label* false_label,
-                         Register input, Handle<String> type_name);
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name);
 
   // Emits optimized code for %_IsObject(x).  Preserves input register.
   // Returns the condition on which a final split to
@@ -282,6 +294,13 @@
                          Label* is_not_object,
                          Label* is_object);
 
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string);
+
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp1, Register temp2);
@@ -291,6 +310,13 @@
                                        Handle<Map> type,
                                        Handle<String> name);
 
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset);
+
   struct JumpTableEntry {
     explicit inline JumpTableEntry(Address entry)
         : label(),
@@ -378,16 +404,20 @@
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen), external_exit_(NULL) {
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
     codegen->AddDeferredCode(this);
   }
 
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
 
-  void SetExit(Label *exit) { external_exit_ = exit; }
+  void SetExit(Label* exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
 
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -398,6 +428,7 @@
   Label entry_;
   Label exit_;
   Label* external_exit_;
+  int instruction_index_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc
index 1cfdc79..cefca47 100644
--- a/src/arm/lithium-gap-resolver-arm.cc
+++ b/src/arm/lithium-gap-resolver-arm.cc
@@ -245,13 +245,24 @@
     }
 
   } else if (source->IsConstantOperand()) {
-    Operand source_operand = cgen_->ToOperand(source);
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
     if (destination->IsRegister()) {
-      __ mov(cgen_->ToRegister(destination), source_operand);
+      Register dst = cgen_->ToRegister(destination);
+      if (cgen_->IsInteger32(constant_source)) {
+        __ mov(dst, Operand(cgen_->ToInteger32(constant_source)));
+      } else {
+        __ LoadObject(dst, cgen_->ToHandle(constant_source));
+      }
     } else {
       ASSERT(destination->IsStackSlot());
       ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
-      __ mov(kSavedValueRegister, source_operand);
+      if (cgen_->IsInteger32(constant_source)) {
+        __ mov(kSavedValueRegister,
+               Operand(cgen_->ToInteger32(constant_source)));
+      } else {
+        __ LoadObject(kSavedValueRegister,
+                      cgen_->ToHandle(constant_source));
+      }
       __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
     }
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 7a1f802..857c2bf 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,7 +42,8 @@
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true) {
+      allow_stub_calls_(true),
+      has_frame_(false) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -406,29 +407,16 @@
 }
 
 
-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register address,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, ne, &not_in_new_space);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
+void MacroAssembler::LoadHeapObject(Register result,
+                                    Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    mov(result, Operand(cell));
+    ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+  } else {
+    mov(result, Operand(object));
   }
-
-  // Calculate page address.
-  Bfc(object, 0, kPageSizeBits);
-
-  // Calculate region number.
-  Ubfx(address, address, Page::kRegionSizeLog2,
-       kPageSizeBits - Page::kRegionSizeLog2);
-
-  // Mark region dirty.
-  ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-  mov(ip, Operand(1));
-  orr(scratch, scratch, Operand(ip, LSL, address));
-  str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
 }
 
 
@@ -443,38 +431,52 @@
 }
 
 
-// Will clobber 4 registers: object, offset, scratch, ip.  The
-// register 'object' contains a heap object pointer.  The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
-                                 Operand offset,
-                                 Register scratch0,
-                                 Register scratch1) {
-  // The compiled code assumes that record write doesn't change the
-  // context register, so we check that none of the clobbered
-  // registers are cp.
-  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
-
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    LinkRegisterStatus lr_status,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
   Label done;
 
-  // First, test that the object is not in the new space.  We cannot set
-  // region marks for new space pages.
-  InNewSpace(object, scratch0, eq, &done);
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
 
-  // Add offset into the object.
-  add(scratch0, object, offset);
+  // Although the object register is tagged, the offset is relative to the start
+  // of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
 
-  // Record the actual write.
-  RecordWriteHelper(object, scratch0, scratch1);
+  add(dst, object, Operand(offset - kHeapObjectTag));
+  if (emit_debug_code()) {
+    Label ok;
+    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
+    b(eq, &ok);
+    stop("Unaligned cell in write barrier");
+    bind(&ok);
+  }
+
+  RecordWrite(object,
+              dst,
+              value,
+              lr_status,
+              save_fp,
+              remembered_set_action,
+              OMIT_SMI_CHECK);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Operand(BitCast<int32_t>(kZapValue)));
-    mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
-    mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+    mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
+    mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
   }
 }
 
@@ -484,29 +486,100 @@
 // tag is shifted away.
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register scratch) {
+                                 Register value,
+                                 LinkRegisterStatus lr_status,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are cp.
-  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+  ASSERT(!address.is(cp) && !value.is(cp));
+
+  if (emit_debug_code()) {
+    ldr(ip, MemOperand(address));
+    cmp(ip, value);
+    Check(eq, "Wrong address or value passed to RecordWrite");
+  }
 
   Label done;
 
-  // First, test that the object is not in the new space.  We cannot set
-  // region marks for new space pages.
-  InNewSpace(object, scratch, eq, &done);
+  if (smi_check == INLINE_SMI_CHECK) {
+    ASSERT_EQ(0, kSmiTag);
+    tst(value, Operand(kSmiTagMask));
+    b(eq, &done);
+  }
+
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                eq,
+                &done);
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                eq,
+                &done);
 
   // Record the actual write.
-  RecordWriteHelper(object, address, scratch);
+  if (lr_status == kLRHasNotBeenSaved) {
+    push(lr);
+  }
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);
+  if (lr_status == kLRHasNotBeenSaved) {
+    pop(lr);
+  }
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Operand(BitCast<int32_t>(kZapValue)));
-    mov(address, Operand(BitCast<int32_t>(kZapValue)));
-    mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+    mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
+    mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+  }
+}
+
+
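+// Records the given address in the store buffer, calling the store buffer
+// overflow stub when the buffer fills up.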
+void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
+                                         Register address,
+                                         Register scratch,
+                                         SaveFPRegsMode fp_mode,
+                                         RememberedSetFinalAction and_then) {
+  Label done;
+  if (emit_debug_code()) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok);
+    stop("Remembered set pointer is in new space");
+    bind(&ok);
+  }
+  // Load store buffer top.
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  mov(ip, Operand(store_buffer));
+  ldr(scratch, MemOperand(ip));
+  // Store pointer to buffer and increment buffer top.
+  str(address, MemOperand(scratch, kPointerSize, PostIndex));
+  // Write back new top of buffer.
+  str(scratch, MemOperand(ip));
+  // Check whether the end of the buffer has been reached; if so, the
+  // overflow stub is called below.
+  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kFallThroughAtEnd) {
+    b(eq, &done);
+  } else {
+    ASSERT(and_then == kReturnAtEnd);
+    Ret(eq);
+  }
+  push(lr);
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(fp_mode);
+  CallStub(&store_buffer_overflow);
+  pop(lr);
+  bind(&done);
+  if (and_then == kReturnAtEnd) {
+    Ret();
   }
 }
 
@@ -744,12 +817,12 @@
 
 
 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
-  // Setup the frame structure on the stack.
+  // Set up the frame structure on the stack.
   ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
   ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
   ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
   Push(lr, fp);
-  mov(fp, Operand(sp));  // Setup new frame pointer.
+  mov(fp, Operand(sp));  // Set up new frame pointer.
   // Reserve room for saved entry sp and code object.
   sub(sp, sp, Operand(2 * kPointerSize));
   if (emit_debug_code()) {
@@ -884,10 +957,12 @@
                                     Handle<Code> code_constant,
                                     Register code_reg,
                                     Label* done,
+                                    bool* definitely_mismatches,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
   bool definitely_matches = false;
+  *definitely_mismatches = false;
   Label regular_invoke;
 
   // Check whether the expected and actual arguments count match. If not,
@@ -918,6 +993,7 @@
         // arguments.
         definitely_matches = true;
       } else {
+        *definitely_mismatches = true;
         mov(r2, Operand(expected.immediate()));
       }
     }
@@ -945,7 +1021,9 @@
       SetCallKind(r5, call_kind);
       Call(adaptor);
       call_wrapper.AfterCall();
-      b(done);
+      if (!*definitely_mismatches) {
+        b(done);
+      }
     } else {
       SetCallKind(r5, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -961,24 +1039,30 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
-
-  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+  bool definitely_mismatches = false;
+  InvokePrologue(expected, actual, Handle<Code>::null(), code,
+                 &done, &definitely_mismatches, flag,
                  call_wrapper, call_kind);
-  if (flag == CALL_FUNCTION) {
-    call_wrapper.BeforeCall(CallSize(code));
-    SetCallKind(r5, call_kind);
-    Call(code);
-    call_wrapper.AfterCall();
-  } else {
-    ASSERT(flag == JUMP_FUNCTION);
-    SetCallKind(r5, call_kind);
-    Jump(code);
-  }
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      SetCallKind(r5, call_kind);
+      Call(code);
+      call_wrapper.AfterCall();
+    } else {
+      ASSERT(flag == JUMP_FUNCTION);
+      SetCallKind(r5, call_kind);
+      Jump(code);
+    }
 
-  // Continue here if InvokePrologue does handle the invocation due to
-  // mismatched parameter counts.
-  bind(&done);
+    // Continue here if InvokePrologue does handle the invocation due to
+    // mismatched parameter counts.
+    bind(&done);
+  }
 }
 
 
@@ -988,21 +1072,27 @@
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
-
-  InvokePrologue(expected, actual, code, no_reg, &done, flag,
+  bool definitely_mismatches = false;
+  InvokePrologue(expected, actual, code, no_reg,
+                 &done, &definitely_mismatches, flag,
                  NullCallWrapper(), call_kind);
-  if (flag == CALL_FUNCTION) {
-    SetCallKind(r5, call_kind);
-    Call(code, rmode);
-  } else {
-    SetCallKind(r5, call_kind);
-    Jump(code, rmode);
-  }
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      SetCallKind(r5, call_kind);
+      Call(code, rmode);
+    } else {
+      SetCallKind(r5, call_kind);
+      Jump(code, rmode);
+    }
 
-  // Continue here if InvokePrologue does handle the invocation due to
-  // mismatched parameter counts.
-  bind(&done);
+    // Continue here if InvokePrologue does handle the invocation due to
+    // mismatched parameter counts.
+    bind(&done);
+  }
 }
 
 
@@ -1011,6 +1101,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   // Contract with called JS functions requires that function is passed in r1.
   ASSERT(fun.is(r1));
 
@@ -1031,28 +1124,24 @@
 }
 
 
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
+                                    const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  ASSERT(function->is_compiled());
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  mov(r1, Operand(Handle<JSFunction>(function)));
+  LoadHeapObject(r1, function);
   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
-  // Invoke the cached code.
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  if (V8::UseCrankshaft()) {
-    // TODO(kasperl): For now, we always call indirectly through the
-    // code field in the function to allow recompilation to take effect
-    // without changing any of the call sites.
-    ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-    InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
-  } else {
-    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
 }
 
 
@@ -1090,56 +1179,49 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
-  ASSERT(allow_stub_calls());
   mov(r0, Operand(0, RelocInfo::NONE));
   mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
   CEntryStub ces(1);
+  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 #endif
 
 
-void MacroAssembler::PushTryHandler(CodeLocation try_location,
-                                    HandlerType type) {
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+                                    int handler_index) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // The pc (return address) is passed in register lr.
-  if (try_location == IN_JAVASCRIPT) {
-    if (type == TRY_CATCH_HANDLER) {
-      mov(r3, Operand(StackHandler::TRY_CATCH));
-    } else {
-      mov(r3, Operand(StackHandler::TRY_FINALLY));
-    }
-    stm(db_w, sp, r3.bit() | cp.bit() | fp.bit() | lr.bit());
-    // Save the current handler as the next handler.
-    mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
-    ldr(r1, MemOperand(r3));
-    push(r1);
-    // Link this handler as the new current one.
-    str(sp, MemOperand(r3));
+  // For the JSEntry handler, we must preserve r0-r4; r5-r7 are available.
+  // We will build up the handler from the bottom by pushing on the stack.
+  // Set up the code object (r5) and the state (r6) for pushing.
+  unsigned state =
+      StackHandler::IndexField::encode(handler_index) |
+      StackHandler::KindField::encode(kind);
+  mov(r5, Operand(CodeObject()));
+  mov(r6, Operand(state));
+
+  // Push the frame pointer, context, state, and code object.
+  if (kind == StackHandler::JS_ENTRY) {
+    mov(r7, Operand(Smi::FromInt(0)));  // Indicates no context.
+    mov(ip, Operand(0, RelocInfo::NONE));  // NULL frame pointer.
+    stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
   } else {
-    // Must preserve r0-r4, r5-r7 are available.
-    ASSERT(try_location == IN_JS_ENTRY);
-    // The frame pointer does not point to a JS frame so we save NULL
-    // for fp. We expect the code throwing an exception to check fp
-    // before dereferencing it to restore the context.
-    mov(r5, Operand(StackHandler::ENTRY));  // State.
-    mov(r6, Operand(Smi::FromInt(0)));  // Indicates no context.
-    mov(r7, Operand(0, RelocInfo::NONE));  // NULL frame pointer.
-    stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | lr.bit());
-    // Save the current handler as the next handler.
-    mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
-    ldr(r6, MemOperand(r7));
-    push(r6);
-    // Link this handler as the new current one.
-    str(sp, MemOperand(r7));
+    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
   }
+
+  // Link the current handler as the next handler.
+  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  ldr(r5, MemOperand(r6));
+  push(r5);
+  // Set this new handler as the current one.
+  str(sp, MemOperand(r6));
 }
 
 
@@ -1152,111 +1234,90 @@
 }
 
 
+void MacroAssembler::JumpToHandlerEntry() {
+  // Compute the handler entry address and jump to it.  The handler table is
+  // a fixed array of (smi-tagged) code offsets.
+  // r0 = exception, r1 = code object, r2 = state.
+  ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset));  // Handler table.
+  add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));  // Handler index.
+  ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));  // Smi-tagged offset.
+  add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
+  add(pc, r1, Operand(r2, ASR, kSmiTagSize));  // Jump.
+}
+
+
 void MacroAssembler::Throw(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-  // r0 is expected to hold the exception.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // The exception is expected in r0.
   if (!value.is(r0)) {
     mov(r0, value);
   }
-
-  // Drop the sp to the top of the handler.
+  // Drop the stack pointer to the top of the top handler.
   mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   ldr(sp, MemOperand(r3));
-
   // Restore the next handler.
   pop(r2);
   str(r2, MemOperand(r3));
 
-  // Restore context and frame pointer, discard state (r3).
-  ldm(ia_w, sp, r3.bit() | cp.bit() | fp.bit());
+  // Get the code object (r1) and state (r2).  Restore the context and frame
+  // pointer.
+  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
 
   // If the handler is a JS frame, restore the context to the frame.
-  // (r3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
-  // of them.
-  cmp(r3, Operand(StackHandler::ENTRY));
+  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+  // or cp.
+  tst(cp, cp);
   str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
 
-#ifdef DEBUG
-  if (emit_debug_code()) {
-    mov(lr, Operand(pc));
-  }
-#endif
-  pop(pc);
+  JumpToHandlerEntry();
 }
 
 
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
-                                      Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-  // r0 is expected to hold the exception.
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // The exception is expected in r0.
   if (!value.is(r0)) {
     mov(r0, value);
   }
-
-  // Drop sp to the top stack handler.
+  // Drop the stack pointer to the top of the top stack handler.
   mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   ldr(sp, MemOperand(r3));
 
   // Unwind the handlers until the ENTRY handler is found.
-  Label loop, done;
-  bind(&loop);
-  // Load the type of the current stack handler.
-  const int kStateOffset = StackHandlerConstants::kStateOffset;
-  ldr(r2, MemOperand(sp, kStateOffset));
-  cmp(r2, Operand(StackHandler::ENTRY));
-  b(eq, &done);
-  // Fetch the next handler in the list.
-  const int kNextOffset = StackHandlerConstants::kNextOffset;
-  ldr(sp, MemOperand(sp, kNextOffset));
-  jmp(&loop);
-  bind(&done);
+  Label fetch_next, check_kind;
+  jmp(&check_kind);
+  bind(&fetch_next);
+  ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
 
-  // Set the top handler address to next handler past the current ENTRY handler.
+  bind(&check_kind);
+  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+  ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
+  tst(r2, Operand(StackHandler::KindField::kMask));
+  b(ne, &fetch_next);
+
+  // Set the top handler address to next handler past the top ENTRY handler.
   pop(r2);
   str(r2, MemOperand(r3));
+  // Get the code object (r1) and state (r2).  Clear the context and frame
+  // pointer (0 was saved in the handler).
+  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
 
-  if (type == OUT_OF_MEMORY) {
-    // Set external caught exception to false.
-    ExternalReference external_caught(
-        Isolate::kExternalCaughtExceptionAddress, isolate());
-    mov(r0, Operand(false, RelocInfo::NONE));
-    mov(r2, Operand(external_caught));
-    str(r0, MemOperand(r2));
-
-    // Set pending exception and r0 to out of memory exception.
-    Failure* out_of_memory = Failure::OutOfMemoryException();
-    mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
-    mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      isolate())));
-    str(r0, MemOperand(r2));
-  }
-
-  // Stack layout at this point. See also StackHandlerConstants.
-  // sp ->   state (ENTRY)
-  //         cp
-  //         fp
-  //         lr
-
-  // Restore context and frame pointer, discard state (r2).
-  ldm(ia_w, sp, r2.bit() | cp.bit() | fp.bit());
-#ifdef DEBUG
-  if (emit_debug_code()) {
-    mov(lr, Operand(pc));
-  }
-#endif
-  pop(pc);
+  JumpToHandlerEntry();
 }
 
 
@@ -1358,8 +1419,9 @@
   // hash = hash ^ (hash >> 4);
   eor(t0, t0, Operand(t0, LSR, 4));
   // hash = hash * 2057;
-  mov(scratch, Operand(2057));
-  mul(t0, t0, scratch);
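+  // 2057 == (1 << 11) + (1 << 3) + 1, so multiply via shifts and adds.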
+  mov(scratch, Operand(t0, LSL, 11));
+  add(t0, t0, Operand(t0, LSL, 3));
+  add(t0, t0, scratch);
   // hash = hash ^ (hash >> 16);
   eor(t0, t0, Operand(t0, LSR, 16));
 }
@@ -1548,6 +1610,7 @@
   ASSERT(!result.is(scratch1));
   ASSERT(!result.is(scratch2));
   ASSERT(!scratch1.is(scratch2));
+  ASSERT(!object_size.is(ip));
   ASSERT(!result.is(ip));
   ASSERT(!scratch1.is(ip));
   ASSERT(!scratch2.is(ip));
@@ -1805,25 +1868,170 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Register scratch,
                                        Label* fail) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
   b(hi, fail);
 }
 
 
+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Register scratch,
+                                             Label* fail) {
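+  // Accept only FAST_ELEMENTS: fail for smi-only element maps and below, and
+  // for anything past the fast-element range.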
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  b(ls, fail);
+  cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+  b(hi, fail);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Register scratch,
+                                              Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  b(hi, fail);
+}
+
+
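+// Stores value_reg, which must hold a smi or a heap number, into the
+// FixedDoubleArray in elements_reg at index key_reg (a smi), canonicalizing
+// NaNs; jumps to fail for any other kind of value.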
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+                                                 Register key_reg,
+                                                 Register receiver_reg,
+                                                 Register elements_reg,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Register scratch3,
+                                                 Register scratch4,
+                                                 Label* fail) {
+  Label smi_value, maybe_nan, have_double_value, is_nan, done;
+  Register mantissa_reg = scratch2;
+  Register exponent_reg = scratch3;
+
+  // Handle smi values specially.
+  JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number
+  CheckMap(value_reg,
+           scratch1,
+           isolate()->factory()->heap_number_map(),
+           fail,
+           DONT_DO_SMI_CHECK);
+
+  // Check for NaN or Infinity: both have an exponent word that is at least
+  // 0x7ff00000 when compared as signed integers.
+  mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+  ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  cmp(exponent_reg, scratch1);
+  b(ge, &maybe_nan);
+
+  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  bind(&have_double_value);
+  add(scratch1, elements_reg,
+      Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  str(exponent_reg, FieldMemOperand(scratch1, offset));
+  jmp(&done);
+
+  bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  b(gt, &is_nan);
+  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+  cmp(mantissa_reg, Operand(0));
+  b(eq, &have_double_value);
+  bind(&is_nan);
+  // Load canonical NaN for storing into the double array.
+  uint64_t nan_int64 = BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+  mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+  mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+  jmp(&have_double_value);
+
+  bind(&smi_value);
+  add(scratch1, elements_reg,
+      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  add(scratch1, scratch1,
+      Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  // scratch1 is now the effective address of the double element.
+
+  FloatingPointHelper::Destination destination;
+  if (CpuFeatures::IsSupported(VFP3)) {
+    destination = FloatingPointHelper::kVFPRegisters;
+  } else {
+    destination = FloatingPointHelper::kCoreRegisters;
+  }
+
+  Register untagged_value = receiver_reg;
+  SmiUntag(untagged_value, value_reg);
+  FloatingPointHelper::ConvertIntToDouble(this,
+                                          untagged_value,
+                                          destination,
+                                          d0,
+                                          mantissa_reg,
+                                          exponent_reg,
+                                          scratch4,
+                                          s2);
+  if (destination == FloatingPointHelper::kVFPRegisters) {
+    CpuFeatures::Scope scope(VFP3);
+    vstr(d0, scratch1, 0);
+  } else {
+    str(mantissa_reg, MemOperand(scratch1, 0));
+    str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
+  }
+  bind(&done);
+}
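
StoreNumberToDoubleElements never touches a 64-bit register: a heap number is handled as two 32-bit words, NaN/Infinity is detected on the exponent word, and a canonical NaN is substituted before the store. A hedged sketch of that word split, assuming the little-endian word order V8 configures on ARM:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    // Split a double into the words loaded from HeapNumber::kMantissaOffset
    // (low) and kExponentOffset (high). An exponent word of the form
    // 0x7ffxxxxx means NaN or Infinity; Infinity has all fraction bits
    // zero, which is the mantissa_reg test in the maybe_nan path above.
    void SplitDouble(double d, uint32_t* mantissa_word,
                     uint32_t* exponent_word) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      *mantissa_word = static_cast<uint32_t>(bits);
      *exponent_word = static_cast<uint32_t>(bits >> 32);
    }

    int main() {
      uint32_t lo, hi;
      SplitDouble(std::numeric_limits<double>::quiet_NaN(), &lo, &hi);
      printf("NaN      %08x %08x\n", hi, lo);
      SplitDouble(std::numeric_limits<double>::infinity(), &lo, &hi);
      printf("Infinity %08x %08x\n", hi, lo);
      return 0;
    }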
+
+
+void MacroAssembler::CompareMap(Register obj,
+                                Register scratch,
+                                Handle<Map> map,
+                                Label* early_success,
+                                CompareMapMode mode) {
+  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  cmp(scratch, Operand(map));
+  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
+    Map* transitioned_fast_element_map(
+        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+    ASSERT(transitioned_fast_element_map == NULL ||
+           map->elements_kind() != FAST_ELEMENTS);
+    if (transitioned_fast_element_map != NULL) {
+      b(eq, early_success);
+      cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map)));
+    }
+
+    Map* transitioned_double_map(
+        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+    ASSERT(transitioned_double_map == NULL ||
+           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+    if (transitioned_double_map != NULL) {
+      b(eq, early_success);
+      cmp(scratch, Operand(Handle<Map>(transitioned_double_map)));
+    }
+  }
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Handle<Map> map,
                               Label* fail,
-                              SmiCheckType smi_check_type) {
+                              SmiCheckType smi_check_type,
+                              CompareMapMode mode) {
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
-  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
-  mov(ip, Operand(map));
-  cmp(scratch, ip);
+
+  Label success;
+  CompareMap(obj, scratch, map, &success, mode);
   b(ne, fail);
+  bind(&success);
 }
 
 
@@ -1862,7 +2070,8 @@
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
-                                             Label* miss) {
+                                             Label* miss,
+                                             bool miss_on_bound_function) {
   // Check that the receiver isn't a smi.
   JumpIfSmi(function, miss);
 
@@ -1870,6 +2079,16 @@
   CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
   b(ne, miss);
 
+  if (miss_on_bound_function) {
+    ldr(scratch,
+        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    ldr(scratch,
+        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+    tst(scratch,
+        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
+    b(ne, miss);
+  }
+
   // Make sure that the function has an instance prototype.
   Label non_instance;
   ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
@@ -1907,47 +2126,24 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
 }
 
 
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Handle<Code> code(Code::cast(result));
-  Call(code, RelocInfo::CODE_TARGET, kNoASTId, cond);
-  return result;
-}
-
-
 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
-  return result;
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
 
 
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
-    ExternalReference function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+                                              int stack_space) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -2010,14 +2206,10 @@
   mov(pc, lr);
 
   bind(&promote_scheduled_exception);
-  MaybeObject* result
-      = TryTailCallExternalReference(
-          ExternalReference(Runtime::kPromoteScheduledException, isolate()),
-          0,
-          1);
-  if (result->IsFailure()) {
-    return result;
-  }
+  TailCallExternalReference(
+      ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+      0,
+      1);
 
   // HandleScope limit has changed. Delete allocated extensions.
   bind(&delete_allocated_handles);
@@ -2029,8 +2221,12 @@
       ExternalReference::delete_handle_scope_extensions(isolate()), 1);
   mov(r0, r4);
   jmp(&leave_exit_frame);
+}
 
-  return result;
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
 }
 
 
@@ -2178,7 +2374,7 @@
     b(gt, not_int32);
 
     // We know the exponent is smaller than 30 (biased).  If it is less than
-    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
     // it rounds to zero.
     const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
     sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
@@ -2429,8 +2625,7 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   mov(r0, Operand(function->nargs));
   mov(r1, Operand(ExternalReference(function, isolate())));
-  CEntryStub stub(1);
-  stub.SaveDoubles();
+  CEntryStub stub(1, kSaveFPRegs);
   CallStub(&stub);
 }
 
@@ -2457,17 +2652,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
-    const ExternalReference& ext, int num_arguments, int result_size) {
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  mov(r0, Operand(num_arguments));
-  return TryJumpToExternalReference(ext);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -2488,21 +2672,12 @@
 }
 
 
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
-    const ExternalReference& builtin) {
-#if defined(__thumb__)
-  // Thumb mode builtin.
-  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
-#endif
-  mov(r1, Operand(builtin));
-  CEntryStub stub(1);
-  return TryTailCallStub(&stub);
-}
-
-
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   GetBuiltinEntry(r2, id);
   if (flag == CALL_FUNCTION) {
     call_wrapper.BeforeCall(CallSize(r2));
@@ -2634,14 +2809,20 @@
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
 
   mov(r0, Operand(p0));
   push(r0);
   mov(r0, Operand(Smi::FromInt(p1 - p0)));
   push(r0);
-  CallRuntime(Runtime::kAbort, 2);
+  // Disable stub call restrictions to always allow calls to abort.
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
   // will not return here
   if (is_const_pool_blocked()) {
     // If the calling code cares about the exact number of
@@ -2673,6 +2854,47 @@
 }
 
 
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+    ElementsKind expected_kind,
+    ElementsKind transitioned_kind,
+    Register map_in_out,
+    Register scratch,
+    Label* no_map_match) {
+  // Load the global or builtins object from the current context.
+  ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+  // Check that the function's map is the same as the expected cached map.
+  int expected_index =
+      Context::GetContextMapIndexFromElementsKind(expected_kind);
+  ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
+  cmp(map_in_out, ip);
+  b(ne, no_map_match);
+
+  // Use the transitioned cached map.
+  int trans_index =
+      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+  ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
+}
+
+
+void MacroAssembler::LoadInitialArrayMap(
+    Register function_in, Register scratch, Register map_out) {
+  ASSERT(!function_in.is(map_out));
+  Label done;
+  ldr(map_out, FieldMemOperand(function_in,
+                               JSFunction::kPrototypeOrInitialMapOffset));
+  if (!FLAG_smi_only_arrays) {
+    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                        FAST_ELEMENTS,
+                                        map_out,
+                                        scratch,
+                                        &done);
+  }
+  bind(&done);
+}
+
+
 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
   // Load the global or builtins object from the current context.
   ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -2733,6 +2955,22 @@
 }
 
 
+void MacroAssembler::UntagAndJumpIfSmi(
+    Register dst, Register src, Label* smi_case) {
+  STATIC_ASSERT(kSmiTag == 0);
+  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+  b(cc, smi_case);  // Shifter carry is not set for a smi.
+}
+
+
+void MacroAssembler::UntagAndJumpIfNotSmi(
+    Register dst, Register src, Label* non_smi_case) {
+  STATIC_ASSERT(kSmiTag == 0);
+  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+  b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
+}
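
Both helpers lean on the ARM barrel shifter: `mov dst, src, ASR #1` with SetCC untags and moves the tag bit into the carry flag in one instruction, so the following branch needs no separate test. A sketch of the tagging scheme this relies on (kSmiTag == 0, one tag bit):

    #include <cstdint>
    #include <cstdio>

    // A smi stores its 31-bit payload shifted left once with tag bit 0 == 0;
    // heap object pointers carry tag bit 0 == 1. Shifting right by one
    // untags, and the bit shifted out (the "shifter carry") is the tag.
    bool UntagIfSmi(int32_t tagged, int32_t* untagged) {
      bool carry = (tagged & 1) != 0;  // the bit ASR #1 shifts out
      *untagged = tagged >> 1;         // arithmetic shift, like ASR
      return !carry;                   // carry clear <=> value was a smi
    }

    int main() {
      int32_t out;
      printf("%d\n", UntagIfSmi(42 << 1, &out));  // 1, out == 42
      printf("%d\n", UntagIfSmi(0x1001, &out));   // 0, a tagged pointer
      return 0;
    }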
+
+
 void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                      Register reg2,
                                      Label* on_either_smi) {
@@ -2942,6 +3180,19 @@
 }
 
 
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
+  b(&entry);
+  bind(&loop);
+  str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
+  bind(&entry);
+  cmp(start_offset, end_offset);
+  b(lt, &loop);
+}
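
InitializeFieldsWithFiller is the post-indexed-store rendering of a plain word fill; start_offset advances with each store and finishes equal to end_offset. Equivalent C++, for reference:

    #include <cstdint>

    // Writes |filler| to every pointer-sized slot in [start, end), just as
    // the str/cmp/blt loop above does with PostIndex addressing.
    void FillFields(uintptr_t* start, uintptr_t* end, uintptr_t filler) {
      while (start < end) {
        *start++ = filler;  // str filler, [start_offset], #kPointerSize
      }
    }

    int main() {
      uintptr_t slots[4] = {1, 2, 3, 4};
      FillFields(slots, slots + 4, 0);  // slots are now all zero
      return 0;
    }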
+
+
 void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
                                        Register source,  // Input.
                                        Register scratch) {
@@ -2953,8 +3204,10 @@
 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
   clz(zeros, source);  // This instruction is only supported after ARM5.
 #else
-  mov(zeros, Operand(0, RelocInfo::NONE));
+  // Order of the next two lines is important: zeros register
+  // can be the same as source register.
   Move(scratch, source);
+  mov(zeros, Operand(0, RelocInfo::NONE));
   // Top 16.
   tst(scratch, Operand(0xffff0000));
   add(zeros, zeros, Operand(16), LeaveCC, eq);
@@ -3101,23 +3354,15 @@
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_reg_arguments,
                                    int num_double_arguments) {
-  CallCFunctionHelper(no_reg,
-                      function,
-                      ip,
-                      num_reg_arguments,
-                      num_double_arguments);
+  mov(ip, Operand(function));
+  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
-                                     Register scratch,
-                                     int num_reg_arguments,
-                                     int num_double_arguments) {
-  CallCFunctionHelper(function,
-                      ExternalReference::the_hole_value_location(isolate()),
-                      scratch,
-                      num_reg_arguments,
-                      num_double_arguments);
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
 }
 
 
@@ -3128,17 +3373,15 @@
 
 
 void MacroAssembler::CallCFunction(Register function,
-                                   Register scratch,
                                    int num_arguments) {
-  CallCFunction(function, scratch, num_arguments, 0);
+  CallCFunction(function, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunctionHelper(Register function,
-                                         ExternalReference function_reference,
-                                         Register scratch,
                                          int num_reg_arguments,
                                          int num_double_arguments) {
+  ASSERT(has_frame());
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -3162,10 +3405,6 @@
   // Just call directly. The function called cannot cause a GC, or
   // allow preemption, so the return address in the link register
   // stays correct.
-  if (function.is(no_reg)) {
-    mov(scratch, Operand(function_reference));
-    function = scratch;
-  }
   Call(function);
   int stack_passed_arguments = CalculateStackPassedWords(
       num_reg_arguments, num_double_arguments);
@@ -3197,6 +3436,185 @@
 }
 
 
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met) {
+  and_(scratch, object, Operand(~Page::kPageAlignmentMask));
+  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+  tst(scratch, Operand(mask));
+  b(cc, condition_met);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register scratch0,
+                                 Register scratch1,
+                                 Label* on_black) {
+  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+                              Register bitmap_scratch,
+                              Register mask_scratch,
+                              Label* has_color,
+                              int first_bit,
+                              int second_bit) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
+
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  Label other_color, word_boundary;
+  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  tst(ip, Operand(mask_scratch));
+  b(first_bit == 1 ? eq : ne, &other_color);
+  // Shift left 1 by adding.
+  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
+  b(eq, &word_boundary);
+  tst(ip, Operand(mask_scratch));
+  b(second_bit == 1 ? ne : eq, has_color);
+  jmp(&other_color);
+
+  bind(&word_boundary);
+  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
+  tst(ip, Operand(1));
+  b(second_bit == 1 ? ne : eq, has_color);
+  bind(&other_color);
+}
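
HasColor reads a two-bit color per object out of the marking bitmap: white is 00, black 10, grey 11 (01 cannot occur), and the second bit of a pair can spill into the next 32-bit cell, which is the word_boundary path above. A hedged sketch of the lookup, with plain arrays standing in for the bitmap:

    #include <cstdint>
    #include <cstdio>

    // Each object owns two consecutive bits in the mark bitmap: white=00,
    // black=10, grey=11 ("01" is impossible). The pair may straddle a
    // cell boundary, handled above under word_boundary.
    bool HasColor(const uint32_t* bitmap, int bit_index, int first,
                  int second) {
      int cell = bit_index >> 5;
      int bit = bit_index & 31;
      int b0 = (bitmap[cell] >> bit) & 1;
      int b1 = (bit == 31) ? (bitmap[cell + 1] & 1)           // next cell
                           : ((bitmap[cell] >> (bit + 1)) & 1);
      return b0 == first && b1 == second;
    }

    int main() {
      uint32_t bitmap[2] = {0, 0};
      bitmap[0] |= 1u << 4;                       // mark pair "10": black
      printf("%d\n", HasColor(bitmap, 4, 1, 0));  // 1 (black)
      printf("%d\n", HasColor(bitmap, 4, 0, 0));  // 0 (not white)
      return 0;
    }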
+
+
+// Detect some, but not all, common pointer-free objects.  This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+                                      Register scratch,
+                                      Label* not_data_object) {
+  Label is_data_object;
+  ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+  b(eq, &is_data_object);
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not an indirect (cons or sliced) string then
+  // it's an object containing no GC pointers.
+  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  b(ne, not_data_object);
+  bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
+  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
+  mov(ip, Operand(1));
+  mov(mask_reg, Operand(ip, LSL, mask_reg));
+}
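
GetMarkBits turns an object address into a bitmap cell address plus a single-bit mask. A hedged C++ rendering of the same arithmetic; the constants are assumptions matching this snapshot (32-bit pointers, 32 mark bits per cell, 1MB pages), and the real loads additionally add MemoryChunk::kHeaderSize to reach the bitmap inside the page:

    #include <cstdint>
    #include <cstdio>

    const int kPointerSizeLog2 = 2;   // assumed: 32-bit pointers
    const int kBitsPerCellLog2 = 5;   // assumed: 32 bits per bitmap cell
    const int kPageSizeBits = 20;     // assumed: 1MB pages
    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << kPageSizeBits) - 1;

    // One mark bit covers one pointer-sized word in the page.
    void GetMarkBits(uintptr_t addr, uintptr_t* bitmap, uint32_t* mask) {
      uintptr_t page = addr & ~kPageAlignmentMask;  // and_(bitmap_reg, ...)
      uint32_t bit = (addr >> kPointerSizeLog2) &   // Ubfx(mask_reg, ...)
                     ((1u << kBitsPerCellLog2) - 1);
      const int kLowBits = kPointerSizeLog2 + kBitsPerCellLog2;
      uintptr_t cell =
          (addr & kPageAlignmentMask) >> kLowBits;  // Ubfx(ip, ...)
      *bitmap = page + (cell << kPointerSizeLog2);  // cell address
      *mask = 1u << bit;                            // single mark bit
    }

    int main() {
      uintptr_t bitmap;
      uint32_t mask;
      GetMarkBits(0x12345678, &bitmap, &mask);
      printf("cell at %08lx, mask %08x\n", (unsigned long)bitmap, mask);
      return 0;
    }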
+
+
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Register load_scratch,
+    Label* value_is_white_and_not_data) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there we only need to check one bit.
+  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  tst(mask_scratch, load_scratch);
+  b(ne, &done);
+
+  if (emit_debug_code()) {
+    // Check for impossible bit pattern.
+    Label ok;
+    // LSL may overflow, making the check conservative.
+    tst(load_scratch, Operand(mask_scratch, LSL, 1));
+    b(eq, &ok);
+    stop("Impossible marking bit pattern");
+    bind(&ok);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = load_scratch;  // Holds map while checking type.
+  Register length = load_scratch;  // Holds length of object after testing type.
+  Label is_data_object;
+
+  // Check for heap-number
+  ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+  mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
+  b(eq, &is_data_object);
+
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not an indirect (cons or sliced) string then
+  // it's an object containing no GC pointers.
+  Register instance_type = load_scratch;
+  ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  b(ne, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  tst(instance_type, Operand(kExternalStringTag));
+  mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
+  b(ne, &is_data_object);
+
+  // Sequential string, either ASCII or UC16.
+  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
+  // getting the length multiplied by 2.
+  ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  ldr(ip, FieldMemOperand(value, String::kLengthOffset));
+  tst(instance_type, Operand(kStringEncodingMask));
+  mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
+  add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+  and_(length, length, Operand(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  orr(ip, ip, Operand(mask_scratch));
+  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+  and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
+  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+  add(ip, ip, Operand(length));
+  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+  bind(&done);
+}
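
The tail of EnsureNotWhite computes the byte size of a sequential string from its smi-tagged length: the tag doubles the length for free, which is exactly what a two-byte string needs, and ASCII shifts once to undo it; the result is header plus data rounded up to object alignment. A sketch with assumed constants (8-byte alignment, a 12-byte sequential string header, both for illustration only):

    #include <cstdint>
    #include <cstdio>

    // Assumed layout constants, not taken from the patch.
    const uint32_t kObjectAlignmentMask = 8 - 1;
    const uint32_t kSeqStringHeaderSize = 12;

    uint32_t SeqStringSize(uint32_t length, bool is_ascii) {
      uint32_t smi_length = length << 1;  // smi tag doubles the length
      uint32_t byte_length =
          is_ascii ? (smi_length >> 1)    // one byte per character
                   : smi_length;          // two bytes per character
      return (kSeqStringHeaderSize + byte_length + kObjectAlignmentMask) &
             ~kObjectAlignmentMask;
    }

    int main() {
      printf("%u\n", SeqStringSize(5, true));   // 24: 12 + 5, rounded to 8
      printf("%u\n", SeqStringSize(5, false));  // 24: 12 + 10, rounded to 8
      return 0;
    }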
+
+
 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
   Usat(output_reg, 8, Operand(input_reg));
 }
@@ -3229,8 +3647,8 @@
   bind(&in_bounds);
   Vmov(temp_double_reg, 0.5);
   vadd(temp_double_reg, input_reg, temp_double_reg);
-  vcvt_u32_f64(s0, temp_double_reg);
-  vmov(result_reg, s0);
+  vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
+  vmov(result_reg, temp_double_reg.low());
   bind(&done);
 }
 
@@ -3246,6 +3664,63 @@
 }
 
 
+void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+  Label next;
+  // Preload a couple of values used in the loop.
+  Register  empty_fixed_array_value = r6;
+  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+  Register empty_descriptor_array_value = r7;
+  LoadRoot(empty_descriptor_array_value,
+           Heap::kEmptyDescriptorArrayRootIndex);
+  mov(r1, r0);
+  bind(&next);
+
+  // Check that there are no elements.  Register r1 contains the
+  // current JS object we've reached through the prototype chain.
+  ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+  cmp(r2, empty_fixed_array_value);
+  b(ne, call_runtime);
+
+  // Check that instance descriptors are not empty so that we can
+  // check for an enum cache.  Leave the map in r2 for the subsequent
+  // prototype load.
+  ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+  ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
+  JumpIfSmi(r3, call_runtime);
+
+  // Check that there is an enum cache in the non-empty instance
+  // descriptors (r3).  This is the case if the next enumeration
+  // index field does not contain a smi.
+  ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
+  JumpIfSmi(r3, call_runtime);
+
+  // For all objects but the receiver, check that the cache is empty.
+  Label check_prototype;
+  cmp(r1, r0);
+  b(eq, &check_prototype);
+  ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+  cmp(r3, empty_fixed_array_value);
+  b(ne, call_runtime);
+
+  // Load the prototype from the map and loop if non-null.
+  bind(&check_prototype);
+  ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
+  cmp(r1, null_value);
+  b(ne, &next);
+}
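
CheckEnumCache walks the prototype chain and bails out to the runtime on the first object that has elements or lacks a usable enum cache; only the receiver itself may have a non-empty cache. A hedged structural sketch, with a hypothetical struct standing in for the tagged object layout:

    #include <cstddef>

    // Hypothetical stand-in for the object shapes the assembly checks.
    struct Obj {
      bool has_elements;         // elements != empty fixed array
      bool has_enum_cache;       // enumeration index field is not a smi
      bool enum_cache_is_empty;  // bridge cache == empty fixed array
      Obj* prototype;            // NULL terminates the chain
    };

    bool CanUseEnumCache(const Obj* receiver) {
      for (const Obj* o = receiver; o != NULL; o = o->prototype) {
        if (o->has_elements) return false;     // b(ne, call_runtime)
        if (!o->has_enum_cache) return false;  // JumpIfSmi(r3, call_runtime)
        // Every object but the receiver must have an empty cache.
        if (o != receiver && !o->enum_cache_is_empty) return false;
      }
      return true;  // the fast for-in path is safe
    }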
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}
+
+
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 0546e6a..47afa93 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
 
 #include "assembler.h"
+#include "frames.h"
 #include "v8globals.h"
 
 namespace v8 {
@@ -38,12 +39,12 @@
 // Static helper functions
 
 // Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
+inline MemOperand FieldMemOperand(Register object, int offset) {
   return MemOperand(object, offset - kHeapObjectTag);
 }
 
 
-static inline Operand SmiUntagOperand(Register object) {
+inline Operand SmiUntagOperand(Register object) {
   return Operand(object, ASR, kSmiTagSize);
 }
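
Both helpers fold V8's pointer tagging into an operand: heap object references carry kHeapObjectTag (== 1) in the low bit, so a field at byte offset N inside the object lives at tagged_pointer + N - 1, and smis untag with a single arithmetic shift. In plain arithmetic:

    #include <cstdint>

    const intptr_t kHeapObjectTag = 1;

    // FieldMemOperand: the -1 cancels the tag bit, so the CPU sees the
    // real field address even though the register holds a tagged pointer.
    uintptr_t FieldAddress(uintptr_t tagged_pointer, int offset) {
      return tagged_pointer + offset - kHeapObjectTag;
    }

    // SmiUntagOperand: one arithmetic shift removes the smi tag bit.
    int32_t SmiUntag(int32_t tagged_smi) {
      return tagged_smi >> 1;
    }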
 
@@ -79,6 +80,14 @@
 };
 
 
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -157,40 +166,136 @@
                  Heap::RootListIndex index,
                  Condition cond = al);
 
+  void LoadHeapObject(Register dst, Handle<HeapObject> object);
 
-  // Check if object is in new space.
-  // scratch can be object itself, but it will be clobbered.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cond,  // eq for new space, ne otherwise
-                  Label* branch);
+  void LoadObject(Register result, Handle<Object> object) {
+    if (object->IsHeapObject()) {
+      LoadHeapObject(result, Handle<HeapObject>::cast(object));
+    } else {
+      Move(result, object);
+    }
+  }
 
+  // ---------------------------------------------------------------------------
+  // GC Support
 
-  // For the page containing |object| mark the region covering [address]
-  // dirty. The object address must be in the first 8K of an allocated page.
-  void RecordWriteHelper(Register object,
-                         Register address,
-                         Register scratch);
+  void IncrementalMarkingRecordWriteHelper(Register object,
+                                           Register value,
+                                           Register address);
 
-  // For the page containing |object| mark the region covering
-  // [object+offset] dirty. The object address must be in the first 8K
-  // of an allocated page.  The 'scratch' registers are used in the
-  // implementation and all 3 registers are clobbered by the
-  // operation, as well as the ip register. RecordWrite updates the
-  // write barrier even when storing smis.
-  void RecordWrite(Register object,
-                   Operand offset,
+  enum RememberedSetFinalAction {
+    kReturnAtEnd,
+    kFallThroughAtEnd
+  };
+
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr,
+                           Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
+
+  void CheckPageFlag(Register object,
+                     Register scratch,
+                     int mask,
+                     Condition cc,
+                     Label* condition_met);
+
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfNotInNewSpace(Register object,
+                           Register scratch,
+                           Label* branch) {
+    InNewSpace(object, scratch, ne, branch);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfInNewSpace(Register object,
+                        Register scratch,
+                        Label* branch) {
+    InNewSpace(object, scratch, eq, branch);
+  }
+
+  // Check if an object has a given incremental marking color.
+  void HasColor(Register object,
+                Register scratch0,
+                Register scratch1,
+                Label* has_color,
+                int first_bit,
+                int second_bit);
+
+  void JumpIfBlack(Register object,
                    Register scratch0,
-                   Register scratch1);
+                   Register scratch1,
+                   Label* on_black);
 
-  // For the page containing |object| mark the region covering
-  // [address] dirty. The object address must be in the first 8K of an
-  // allocated page.  All 3 registers are clobbered by the operation,
-  // as well as the ip register. RecordWrite updates the write barrier
-  // even when storing smis.
-  void RecordWrite(Register object,
-                   Register address,
-                   Register scratch);
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object,
+                      Register scratch1,
+                      Register scratch2,
+                      Register scratch3,
+                      Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+  void JumpIfDataObject(Register value,
+                        Register scratch,
+                        Label* not_data_object);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
+  void RecordWriteField(
+      Register object,
+      int offset,
+      Register value,
+      Register scratch,
+      LinkRegisterStatus lr_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // MemOperand(reg, off).
+  inline void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      LinkRegisterStatus lr_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK) {
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     lr_status,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check);
+  }
+
+  // For a given |object| notify the garbage collector that the slot |address|
+  // has been written.  |value| is the object being stored. The value and
+  // address registers are clobbered by the operation.
+  void RecordWrite(
+      Register object,
+      Register address,
+      Register value,
+      LinkRegisterStatus lr_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
   // Push a handle.
   void Push(Handle<Object> handle);
@@ -225,8 +330,11 @@
   }
 
   // Push four registers.  Pushes leftmost register first (to highest address).
-  void Push(Register src1, Register src2,
-            Register src3, Register src4, Condition cond = al) {
+  void Push(Register src1,
+            Register src2,
+            Register src3,
+            Register src4,
+            Condition cond = al) {
     ASSERT(!src1.is(src2));
     ASSERT(!src2.is(src3));
     ASSERT(!src1.is(src3));
@@ -265,6 +373,57 @@
     }
   }
 
+  // Pop three registers.  Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
+    ASSERT(!src1.is(src2));
+    ASSERT(!src2.is(src3));
+    ASSERT(!src1.is(src3));
+    if (src1.code() > src2.code()) {
+      if (src2.code() > src3.code()) {
+        ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+      } else {
+        ldr(src3, MemOperand(sp, 4, PostIndex), cond);
+        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+      }
+    } else {
+      Pop(src2, src3, cond);
+      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
+    }
+  }
+
+  // Pop four registers.  Pops rightmost register first (from lower address).
+  void Pop(Register src1,
+           Register src2,
+           Register src3,
+           Register src4,
+           Condition cond = al) {
+    ASSERT(!src1.is(src2));
+    ASSERT(!src2.is(src3));
+    ASSERT(!src1.is(src3));
+    ASSERT(!src1.is(src4));
+    ASSERT(!src2.is(src4));
+    ASSERT(!src3.is(src4));
+    if (src1.code() > src2.code()) {
+      if (src2.code() > src3.code()) {
+        if (src3.code() > src4.code()) {
+          ldm(ia_w,
+              sp,
+              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
+              cond);
+        } else {
+          ldr(src4, MemOperand(sp, 4, PostIndex), cond);
+          ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+        }
+      } else {
+        Pop(src3, src4, cond);
+        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+      }
+    } else {
+      Pop(src2, src3, src4, cond);
+      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
+    }
+  }
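
The nesting above exists because ldm always transfers the lowest-numbered register from the lowest address: a single ldm can pop a group only when register codes decrease left to right (the leftmost register lives at the highest address), so any out-of-order register is peeled off with a separate single-word load. A sketch of the splitting decision, with integer codes standing in for Register values:

    #include <cstdio>

    // Prints the instruction sequence the splitting logic would emit.
    void Pop2(int src1, int src2) {
      if (src1 > src2) {
        printf("ldm sp!, {r%d, r%d}\n", src2, src1);  // one instruction
      } else {
        printf("ldr r%d, [sp], #4\n", src2);          // rightmost first
        printf("ldr r%d, [sp], #4\n", src1);
      }
    }

    void Pop3(int src1, int src2, int src3) {
      if (src1 > src2) {
        if (src2 > src3) {
          printf("ldm sp!, {r%d, r%d, r%d}\n", src3, src2, src1);
        } else {
          printf("ldr r%d, [sp], #4\n", src3);  // out of order: pop alone
          Pop2(src1, src2);
        }
      } else {
        Pop2(src2, src3);
        printf("ldr r%d, [sp], #4\n", src1);
      }
    }

    int main() {
      Pop3(3, 2, 1);  // collapses to a single ldm
      Pop3(1, 2, 3);  // falls back to single loads
      return 0;
    }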
+
   // Push and pop the registers that can hold pointers, as defined by the
   // RegList constant kSafepointSavedRegisters.
   void PushSafepointRegisters();
@@ -318,16 +477,6 @@
             const double imm,
             const Condition cond = al);
 
-
-  // ---------------------------------------------------------------------------
-  // Activation frames
-
-  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
-  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
-  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
-  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
   // Enter exit frame.
   // stack_space - extra stack space, used for alignment before call to C.
   void EnterExitFrame(bool save_doubles, int stack_space = 0);
@@ -342,6 +491,22 @@
 
   void LoadContext(Register dst, int context_chain_length);
 
+  // Conditionally load the cached Array transitioned map of type
+  // transitioned_kind from the global context if the map in register
+  // map_in_out is the cached Array map in the global context of
+  // expected_kind.
+  void LoadTransitionedArrayMapConditional(
+      ElementsKind expected_kind,
+      ElementsKind transitioned_kind,
+      Register map_in_out,
+      Register scratch,
+      Label* no_map_match);
+
+  // Load the initial map for new Arrays from a JSFunction.
+  void LoadInitialArrayMap(Register function_in,
+                           Register scratch,
+                           Register map_out);
+
   void LoadGlobalFunction(int index, Register function);
 
   // Load the initial map from the global function. The registers
@@ -351,15 +516,15 @@
                                     Register scratch);
 
   void InitializeRootRegister() {
-    ExternalReference roots_address =
-        ExternalReference::roots_address(isolate());
-    mov(kRootRegister, Operand(roots_address));
+    ExternalReference roots_array_start =
+        ExternalReference::roots_array_start(isolate());
+    mov(kRootRegister, Operand(roots_array_start));
   }
 
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
-  // Setup call kind marking in ecx. The method takes ecx as an
+  // Set up call kind marking in ecx. The method takes ecx as an
   // explicit first parameter to make the code more readable at the
   // call sites.
   void SetCallKind(Register dst, CallKind kind);
@@ -387,9 +552,10 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(JSFunction* function,
+  void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
+                      const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
   void IsObjectJSObjectType(Register heap_object,
@@ -416,20 +582,18 @@
   // Exception handling
 
   // Push a new try handler and link into try handler chain.
-  // The return address must be passed in register lr.
-  // On exit, r0 contains TOS (code slot).
-  void PushTryHandler(CodeLocation try_location, HandlerType type);
+  void PushTryHandler(StackHandler::Kind kind, int handler_index);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   // Must preserve the result register.
   void PopTryHandler();
 
-  // Passes thrown value (in r0) to the handler of top of the try handler chain.
+  // Passes thrown value to the handler at the top of the try handler chain.
   void Throw(Register value);
 
   // Propagates an uncatchable exception to the top of the current JS stack's
   // handler chain.
-  void ThrowUncatchable(UncatchableExceptionType type, Register value);
+  void ThrowUncatchable(Register value);
 
   // ---------------------------------------------------------------------------
   // Inline caching support
@@ -457,7 +621,7 @@
   }
 
   // Check if the given instruction is a 'type' marker.
-  // ie. check if is is a mov r<type>, r<type> (referenced as nop(type))
+  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type)).
   // These instructions are generated to mark special location in the code,
   // like some special IC code.
   static inline bool IsMarkedCode(Instr instr, int type) {
@@ -576,6 +740,13 @@
                  Register length,
                  Register scratch);
 
+  // Initialize fields with filler values.  Fields starting at |start_offset|
+  // up to but not including |end_offset| are overwritten with the value in
+  // |filler|.  At the end of the loop, |start_offset| takes the value of
+  // |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset,
+                                  Register end_offset,
+                                  Register filler);
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
@@ -587,7 +758,8 @@
   void TryGetFunctionPrototype(Register function,
                                Register result,
                                Register scratch,
-                               Label* miss);
+                               Label* miss,
+                               bool miss_on_bound_function = false);
 
   // Compare object type for heap object.  heap_object contains a non-Smi
   // whose object type should be compared with the given type.  This both
@@ -615,15 +787,52 @@
                          Register scratch,
                          Label* fail);
 
-  // Check if the map of an object is equal to a specified map (either
-  // given directly or as an index into the root list) and branch to
-  // label if not. Skip the smi check if not required (object is known
-  // to be a heap object)
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map,
+                               Register scratch,
+                               Label* fail);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiOnlyElements(Register map,
+                                Register scratch,
+                                Label* fail);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by key in
+  // the FastDoubleElements array elements. Otherwise jump to fail, in which
+  // case scratch2, scratch3 and scratch4 are unmodified.
+  void StoreNumberToDoubleElements(Register value_reg,
+                                   Register key_reg,
+                                   Register receiver_reg,
+                                   Register elements_reg,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Register scratch3,
+                                   Register scratch4,
+                                   Label* fail);
+
+  // Compare an object's map with the specified map and its transitioned
+  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
+  // set with the result of the map compare. If multiple map compares are
+  // required, the compare sequence branches to early_success on a match.
+  void CompareMap(Register obj,
+                  Register scratch,
+                  Handle<Map> map,
+                  Label* early_success,
+                  CompareMapMode mode = REQUIRE_EXACT_MAP);
+
+  // Check if the map of an object is equal to a specified map and branch to
+  // label if not. Skip the smi check if not required (object is known to be a
+  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+  // against maps that are ElementsKind transition maps of the specified map.
   void CheckMap(Register obj,
                 Register scratch,
                 Handle<Map> map,
                 Label* fail,
-                SmiCheckType smi_check_type);
+                SmiCheckType smi_check_type,
+                CompareMapMode mode = REQUIRE_EXACT_MAP);
 
 
   void CheckMap(Register obj,
@@ -715,7 +924,7 @@
   // Truncates a double using a specific rounding mode.
   // Clears the z flag (ne condition) if an overflow occurs.
   // If exact_conversion is true, the z flag is also cleared if the conversion
-  // was inexact, ie. if the double value could not be converted exactly
+  // was inexact, i.e. if the double value could not be converted exactly
   // to a 32bit integer.
   void EmitVFPTruncate(VFPRoundingMode rounding_mode,
                        SwVfpRegister result,
@@ -761,20 +970,9 @@
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = al);
 
-  // Call a code stub and return the code object called.  Try to generate
-  // the code if necessary.  Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);
-
   // Call a code stub.
   void TailCallStub(CodeStub* stub, Condition cond = al);
 
-  // Tail call a code stub (jump) and return the code object called.  Try to
-  // generate the code if necessary.  Do not perform a GC but instead return
-  // a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
-                                               Condition cond = al);
-
   // Call a runtime routine.
   void CallRuntime(const Runtime::Function* f, int num_arguments);
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -793,12 +991,6 @@
                                  int num_arguments,
                                  int result_size);
 
-  // Tail call of a runtime routine (jump). Try to generate the code if
-  // necessary. Do not perform a GC but instead return a retry after GC
-  // failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
-      const ExternalReference& ext, int num_arguments, int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
@@ -837,28 +1029,25 @@
   // return address (unless this is somehow accounted for by the called
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
-  void CallCFunction(Register function, Register scratch, int num_arguments);
+  void CallCFunction(Register function, int num_arguments);
   void CallCFunction(ExternalReference function,
                      int num_reg_arguments,
                      int num_double_arguments);
-  void CallCFunction(Register function, Register scratch,
+  void CallCFunction(Register function,
                      int num_reg_arguments,
                      int num_double_arguments);
 
   void GetCFunctionDoubleResult(const DoubleRegister dst);
 
-  // Calls an API function. Allocates HandleScope, extracts returned value
-  // from handle and propagates exceptions. Restores context.
-  // stack_space - space to be unwound on exit (includes the call js
-  // arguments space and the additional space allocated for the fast call).
-  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
-                                           int stack_space);
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Restores context.  stack_space
+  // - space to be unwound on exit (includes the call JS arguments space and
+  // the additional space allocated for the fast call).
+  void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
 
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
-  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
@@ -909,6 +1098,9 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
 
   // EABI variant for double arguments in use.
   bool use_eabi_hardfloat() {
@@ -967,6 +1159,14 @@
     mov(dst, Operand(src, ASR, kSmiTagSize), s);
   }
 
+  // Untag the source value into destination and jump if source is a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+  // Untag the source value into destination and jump if source is not a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+
   // Jump if the register contains a smi.
   inline void JumpIfSmi(Register value, Label* smi_label) {
     tst(value, Operand(kSmiTagMask));
@@ -1055,10 +1255,16 @@
 
   void LoadInstanceDescriptors(Register map, Register descriptors);
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
+  // Expects object in r0 and returns map with validated enum cache
+  // in r0.  Assumes that any other register can be used as a scratch.
+  void CheckEnumCache(Register null_value, Label* call_runtime);
+
  private:
   void CallCFunctionHelper(Register function,
-                           ExternalReference function_reference,
-                           Register scratch,
                            int num_reg_arguments,
                            int num_double_arguments);
 
@@ -1070,20 +1276,34 @@
                       Handle<Code> code_constant,
                       Register code_reg,
                       Label* done,
+                      bool* definitely_mismatches,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
   void InitializeNewString(Register string,
                            Register length,
                            Heap::RootListIndex map_index,
                            Register scratch1,
                            Register scratch2);
 
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cond,  // eq for new space, ne otherwise.
+                  Label* branch);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds the position of the first bit.  Leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg,
+                          Register bitmap_reg,
+                          Register mask_reg);
+
+  // Helper for throwing exceptions.  Compute a handler address and jump to
+  // it.  See the implementation for register usage.
+  void JumpToHandlerEntry();
+
   // Compute memory operands for safepoint stack slots.
   static int SafepointRegisterStackIndex(int reg_code);
   MemOperand SafepointRegisterSlot(Register reg);
@@ -1091,6 +1311,7 @@
 
   bool generating_stub_;
   bool allow_stub_calls_;
+  bool has_frame_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
@@ -1136,12 +1357,12 @@
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
-static MemOperand ContextOperand(Register context, int index) {
+inline MemOperand ContextOperand(Register context, int index) {
   return MemOperand(context, Context::SlotOffset(index));
 }
 
 
-static inline MemOperand GlobalObjectOperand()  {
+inline MemOperand GlobalObjectOperand()  {
   return ContextOperand(cp, Context::GLOBAL_INDEX);
 }
 
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index cd76edb..10ff2dd 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -371,9 +371,12 @@
     // Isolate.
     __ mov(r3, Operand(ExternalReference::isolate_address()));
 
-    ExternalReference function =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
-    __ CallCFunction(function, argument_count);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
+      ExternalReference function =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+      __ CallCFunction(function, argument_count);
+    }
 
     // Check if function returned non-zero for success or zero for failure.
     __ cmp(r0, Operand(0, RelocInfo::NONE));
@@ -469,7 +472,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  ASSERT(minus < String::kMaxUC16CharCode);
+  ASSERT(minus < String::kMaxUtf16CodeUnit);
   __ sub(r0, current_character(), Operand(minus));
   __ and_(r0, r0, Operand(mask));
   __ cmp(r0, Operand(c));
@@ -568,7 +571,7 @@
     ExternalReference map = ExternalReference::re_word_character_map();
     __ mov(r0, Operand(map));
     __ ldrb(r0, MemOperand(r0, current_character()));
-    __ tst(r0, Operand(r0));
+    __ cmp(r0, Operand(0));
     BranchOrBacktrack(eq, on_no_match);
     return true;
   }
@@ -582,7 +585,7 @@
     ExternalReference map = ExternalReference::re_word_character_map();
     __ mov(r0, Operand(map));
     __ ldrb(r0, MemOperand(r0, current_character()));
-    __ tst(r0, Operand(r0));
+    __ cmp(r0, Operand(0));
     BranchOrBacktrack(ne, on_no_match);
     if (mode_ != ASCII) {
       __ bind(&done);
@@ -611,6 +614,12 @@
 
   // Entry code:
   __ bind(&entry_label_);
+
+  // Tell the system that we have a stack frame.  Because the type is MANUAL,
+  // no code is generated.
+  FrameScope scope(masm_, StackFrame::MANUAL);
+
+  // Actually emit code to start a new stack frame.
   // Push arguments
   // Save callee-save registers.
   // Start new stack frame.
@@ -672,7 +681,7 @@
 
   // Determine whether the start index is zero, that is at the start of the
   // string, and store that value in a local variable.
-  __ tst(r1, Operand(r1));
+  __ cmp(r1, Operand(0));
   __ mov(r1, Operand(1), LeaveCC, eq);
   __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
   __ str(r1, MemOperand(frame_pointer(), kAtStart));
@@ -1046,7 +1055,7 @@
   ASSERT(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
-  MaybeObject* result = Execution::HandleStackGuardInterrupt();
+  MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
 
   if (*code_handle != re_code) {  // Return address no longer valid
     int delta = code_handle->address() - re_code->address();
@@ -1102,6 +1111,11 @@
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+    // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC. That will not change start_address but
+    // will change pointer inside the subject handle.
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
   }
 
   return 0;
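
The new branch above guards against ConsString short-circuiting: a GC can rewrite references to a ConsString whose right half is empty so that they point directly at its left child. The flat character data (and hence start_address) is unchanged, but the String pointer cached in the regexp frame is now stale. Conceptually:

    // During GC, for cons = ConsString(first, second) with second empty:
    //   *subject : cons --> first     // the handle is updated by the GC
    //   character data does not move  // so start_address still matches
    // Only the cached String* in the native frame needs re-storing.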
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 6af5355..629c209 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -53,7 +53,7 @@
 // code.
 class ArmDebugger {
  public:
-  explicit ArmDebugger(Simulator* sim);
+  explicit ArmDebugger(Simulator* sim) : sim_(sim) { }
   ~ArmDebugger();
 
   void Stop(Instruction* instr);
@@ -84,11 +84,6 @@
 };
 
 
-ArmDebugger::ArmDebugger(Simulator* sim) {
-  sim_ = sim;
-}
-
-
 ArmDebugger::~ArmDebugger() {
 }
 
@@ -296,6 +291,13 @@
     if (line == NULL) {
       break;
     } else {
+      char* last_input = sim_->last_debugger_input();
+      if (strcmp(line, "\n") == 0 && last_input != NULL) {
+        line = last_input;
+      } else {
+        // Ownership is transferred to sim_.
+        sim_->set_last_debugger_input(line);
+      }
       // Use sscanf to parse the individual parts of the command line. At the
       // moment no command expects more than two parameters.
       int argc = SScanF(line,
@@ -611,7 +613,6 @@
         PrintF("Unknown command: %s\n", cmd);
       }
     }
-    DeleteArray(line);
   }
 
   // Add all the breakpoints back to stop execution and enter the debugger
@@ -645,6 +646,12 @@
 }
 
 
+void Simulator::set_last_debugger_input(char* input) {
+  DeleteArray(last_debugger_input_);
+  last_debugger_input_ = input;
+}
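
The setter owns the cached buffer: it frees the previous command before storing the new one, which is why the unconditional DeleteArray(line) was removed from the debugger loop above (deleting there would now be a double free). The observable effect at the prompt (illustrative):

    // sim> disasm
    // ...output...
    // sim> <Enter>   // empty input is replaced by last_debugger_input(),
    //                // so "disasm" runs again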
+
+
 void Simulator::FlushICache(v8::internal::HashMap* i_cache,
                             void* start_addr,
                             size_t size) {
@@ -734,7 +741,7 @@
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
-  // Setup simulator support first. Some of this information is needed to
+  // Set up simulator support first. Some of this information is needed to
   // setup the architecture state.
   size_t stack_size = 1 * 1024*1024;  // allocate 1MB for stack
   stack_ = reinterpret_cast<char*>(malloc(stack_size));
@@ -743,7 +750,7 @@
   break_pc_ = NULL;
   break_instr_ = 0;
 
-  // Setup architecture state.
+  // Set up architecture state.
   // All registers are initialized to zero to start with.
   for (int i = 0; i < num_registers; i++) {
     registers_[i] = 0;
@@ -781,6 +788,8 @@
   registers_[pc] = bad_lr;
   registers_[lr] = bad_lr;
   InitializeCoverage();
+
+  last_debugger_input_ = NULL;
 }
 
 
@@ -1268,9 +1277,9 @@
 
 // Returns the limit of the stack area to enable checking for stack overflows.
 uintptr_t Simulator::StackLimit() const {
-  // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+  // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
   // pushing values.
-  return reinterpret_cast<uintptr_t>(stack_) + 256;
+  return reinterpret_cast<uintptr_t>(stack_) + 1024;
 }
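
The limit handed to generated code sits a margin above the low end of the simulator stack, so the stack check fires while that many bytes are still available for whatever the slow path pushes; 256 bytes proved too tight, hence 1024. A hedged sketch of the check this margin supports:

    // Generated code effectively does:
    //   if (sp < simulator->StackLimit())  // take the stack-overflow path
    // Pushes performed between the check and the handler must fit in the
    // 1024-byte margin.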
 
 
@@ -1618,6 +1627,8 @@
   ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
 
   intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+  // Catch null pointers a little earlier.
+  ASSERT(start_address > 8191 || start_address < 0);
   int reg = 0;
   while (rlist != 0) {
     if ((rlist & 1) != 0) {
@@ -3313,7 +3324,7 @@
 int32_t Simulator::Call(byte* entry, int argument_count, ...) {
   va_list parameters;
   va_start(parameters, argument_count);
-  // Setup arguments
+  // Set up arguments
 
   // First four arguments passed in registers.
   ASSERT(argument_count >= 4);
@@ -3356,7 +3367,7 @@
   int32_t r10_val = get_register(r10);
   int32_t r11_val = get_register(r11);
 
-  // Setup the callee-saved registers with a known value. To be able to check
+  // Set up the callee-saved registers with a known value. To be able to check
   // that they are preserved properly across JS execution.
   int32_t callee_saved_value = icount_;
   set_register(r4, callee_saved_value);
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 391ef69..585f1e0 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -194,6 +194,10 @@
   // Pop an address from the JS stack.
   uintptr_t PopAddress();
 
+  // Debugger input.
+  void set_last_debugger_input(char* input);
+  char* last_debugger_input() { return last_debugger_input_; }
+
   // ICache checking.
   static void FlushICache(v8::internal::HashMap* i_cache, void* start,
                           size_t size);
@@ -360,6 +364,9 @@
   bool pc_modified_;
   int icount_;
 
+  // Debugger input.
+  char* last_debugger_input_;
+
   // Icache simulation
   v8::internal::HashMap* i_cache_;
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index f856592..06f8385 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,47 +43,83 @@
                        MacroAssembler* masm,
                        Code::Flags flags,
                        StubCache::Table table,
+                       Register receiver,
                        Register name,
+                       // Number of the cache entry, not scaled.
                        Register offset,
                        Register scratch,
-                       Register scratch2) {
+                       Register scratch2,
+                       Register offset_scratch) {
   ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
   ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
 
   uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
   uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
 
   // Check the relative positions of the address fields.
   ASSERT(value_off_addr > key_off_addr);
   ASSERT((value_off_addr - key_off_addr) % 4 == 0);
   ASSERT((value_off_addr - key_off_addr) < (256 * 4));
+  ASSERT(map_off_addr > key_off_addr);
+  ASSERT((map_off_addr - key_off_addr) % 4 == 0);
+  ASSERT((map_off_addr - key_off_addr) < (256 * 4));
 
   Label miss;
-  Register offsets_base_addr = scratch;
+  Register base_addr = scratch;
+  scratch = no_reg;
+
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ add(offset_scratch, offset, Operand(offset, LSL, 1));
+
+  // Calculate the base address of the entry.
+  __ mov(base_addr, Operand(key_offset));
+  __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
 
   // Check that the key in the entry matches the name.
-  __ mov(offsets_base_addr, Operand(key_offset));
-  __ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1));
+  __ ldr(ip, MemOperand(base_addr, 0));
   __ cmp(name, ip);
   __ b(ne, &miss);
 
-  // Get the code entry from the cache.
-  __ add(offsets_base_addr, offsets_base_addr,
-         Operand(value_off_addr - key_off_addr));
-  __ ldr(scratch2, MemOperand(offsets_base_addr, offset, LSL, 1));
-
-  // Check that the flags match what we're looking for.
-  __ ldr(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
-  __ bic(scratch2, scratch2, Operand(Code::kFlagsNotUsedInLookup));
-  __ cmp(scratch2, Operand(flags));
+  // Check the map matches.
+  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
+  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ cmp(ip, scratch2);
   __ b(ne, &miss);
 
-  // Re-load code entry from cache.
-  __ ldr(offset, MemOperand(offsets_base_addr, offset, LSL, 1));
+  // Get the code entry from the cache.
+  Register code = scratch2;
+  scratch2 = no_reg;
+  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+  // Check that the flags match what we're looking for.
+  Register flags_reg = base_addr;
+  base_addr = no_reg;
+  __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+  // It's a nice optimization if this constant is encodable in the bic insn.
+  uint32_t mask = Code::kFlagsNotUsedInLookup;
+  ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
+  __ bic(flags_reg, flags_reg, Operand(mask));
+  // Using cmn and the negative instead of cmp means we can use movw.
+  if (flags < 0) {
+    __ cmn(flags_reg, Operand(-flags));
+  } else {
+    __ cmp(flags_reg, Operand(flags));
+  }
+  __ b(ne, &miss);
+
+#ifdef DEBUG
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    __ jmp(&miss);
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    __ jmp(&miss);
+  }
+#endif
 
   // Jump to the first instruction in the code stub.
-  __ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(offset);
+  __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
 
   // Miss: fall through.
   __ bind(&miss);
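
With the receiver map added to each entry, the probe now indexes a table of three-word entries. The two instructions at the top of ProbeTable implement the scaling; a hedged restatement of the arithmetic:

    // offset is the entry number, not scaled (see the parameter comment).
    // offset_scratch = offset + (offset << 1)             == offset * 3
    // base_addr      = key_offset + (offset_scratch << kPointerSizeLog2)
    //                == table base + entry * 3 * kPointerSize
    // The map and code words are then read at the fixed displacements
    // map_off_addr - key_off_addr and value_off_addr - key_off_addr.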
@@ -95,13 +131,12 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss_label,
-    Register receiver,
-    String* name,
-    Register scratch0,
-    Register scratch1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                             Label* miss_label,
+                                             Register receiver,
+                                             Handle<String> name,
+                                             Register scratch0,
+                                             Register scratch1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
@@ -138,20 +173,15 @@
   __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
 
 
-  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
-      masm,
-      miss_label,
-      &done,
-      receiver,
-      properties,
-      name,
-      scratch1);
-  if (result->IsFailure()) return result;
-
+  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     receiver,
+                                                     properties,
+                                                     name,
+                                                     scratch1);
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
-  return result;
 }
 
 
@@ -161,13 +191,14 @@
                               Register name,
                               Register scratch,
                               Register extra,
-                              Register extra2) {
+                              Register extra2,
+                              Register extra3) {
   Isolate* isolate = masm->isolate();
   Label miss;
 
-  // Make sure that code is valid. The shifting code relies on the
-  // entry size being 8.
-  ASSERT(sizeof(Entry) == 8);
+  // Make sure that code is valid. The multiplying code relies on the
+  // entry size being 12.
+  ASSERT(sizeof(Entry) == 12);
 
   // Make sure the flags does not name a specific type.
   ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
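
The 8 -> 12 change follows from the extra map word per entry. A sketch of the layout the probe arithmetic assumes, per the "name, code, map" comment in ProbeTable (field names are illustrative, not the real declaration):

    struct Entry {    // 3 * kPointerSize == 12 bytes on 32-bit ARM
      String* key;    // name
      Code* value;    // code
      Map* map;       // receiver map
    };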
@@ -187,6 +218,11 @@
   ASSERT(!scratch.is(no_reg));
   ASSERT(!extra.is(no_reg));
   ASSERT(!extra2.is(no_reg));
+  ASSERT(!extra3.is(no_reg));
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
+                      extra2, extra3);
 
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
@@ -195,27 +231,51 @@
   __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
   __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ add(scratch, scratch, Operand(ip));
-  __ eor(scratch, scratch, Operand(flags));
-  __ and_(scratch,
-          scratch,
-          Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+  uint32_t mask = kPrimaryTableSize - 1;
+  // We shift out the last two bits because they are not part of the hash and
+  // they are always 01 for maps.
+  __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
+  // Mask down the eor argument to the minimum to keep the immediate
+  // ARM-encodable.
+  __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
+  // Prefer and_ to ubfx here because ubfx takes 2 cycles.
+  __ and_(scratch, scratch, Operand(mask));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
+  ProbeTable(isolate,
+             masm,
+             flags,
+             kPrimary,
+             receiver,
+             name,
+             scratch,
+             extra,
+             extra2,
+             extra3);
 
   // Primary miss: Compute hash for secondary probe.
-  __ sub(scratch, scratch, Operand(name));
-  __ add(scratch, scratch, Operand(flags));
-  __ and_(scratch,
-          scratch,
-          Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+  __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
+  uint32_t mask2 = kSecondaryTableSize - 1;
+  __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
+  __ and_(scratch, scratch, Operand(mask2));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
+  ProbeTable(isolate,
+             masm,
+             flags,
+             kSecondary,
+             receiver,
+             name,
+             scratch,
+             extra,
+             extra2,
+             extra3);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
   __ bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
+                      extra2, extra3);
 }
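
A C-level sketch of the two probe hashes computed above, treating the hash field, map address, name address, and flags as uint32_t (kHeapObjectTagSize is 2, which is why the always-01 tag bits are shifted out):

    uint32_t primary_mask = kPrimaryTableSize - 1;
    uint32_t primary =
        (((hash + map) >> kHeapObjectTagSize) ^
         ((flags >> kHeapObjectTagSize) & primary_mask)) & primary_mask;
    uint32_t secondary_mask = kSecondaryTableSize - 1;
    uint32_t secondary =
        ((primary - (name >> kHeapObjectTagSize)) +
         ((flags >> kHeapObjectTagSize) & secondary_mask)) & secondary_mask;

Masking the flags operand before the eor/add keeps the immediates ARM-encodable, as the comments note.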
 
 
@@ -238,7 +298,10 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
   Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -246,8 +309,8 @@
   __ cmp(prototype, ip);
   __ b(ne, miss);
   // Get the global function with the given index.
-  JSFunction* function =
-      JSFunction::cast(isolate->global_context()->get(index));
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->global_context()->get(index)));
   // Load its initial map. The global functions all have initial maps.
   __ Move(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
@@ -259,8 +322,10 @@
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst, Register src,
-                                            JSObject* holder, int index) {
+                                            Register dst,
+                                            Register src,
+                                            Handle<JSObject> holder,
+                                            int index) {
   // Adjust for the number of properties stored in the holder.
   index -= holder->map()->inobject_properties();
   if (index < 0) {
@@ -367,9 +432,9 @@
 // may be clobbered.  Upon branch to miss_label, the receiver and name
 // registers have their original values.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      JSObject* object,
+                                      Handle<JSObject> object,
                                       int index,
-                                      Map* transition,
+                                      Handle<Map> transition,
                                       Register receiver_reg,
                                       Register name_reg,
                                       Register scratch,
@@ -377,13 +442,9 @@
   // r0 : value
   Label exit;
 
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver_reg, miss_label);
-
-  // Check that the map of the receiver hasn't changed.
-  __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-  __ cmp(scratch, Operand(Handle<Map>(object->map())));
-  __ b(ne, miss_label);
+  // Check that the map of the object hasn't changed.
+  __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
+              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -395,11 +456,11 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   // Perform map transition for the receiver if necessary.
-  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ push(receiver_reg);
-    __ mov(r2, Operand(Handle<Map>(transition)));
+    __ mov(r2, Operand(transition));
     __ Push(r2, r0);
     __ TailCallExternalReference(
         ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -409,10 +470,10 @@
     return;
   }
 
-  if (transition != NULL) {
+  if (!transition.is_null()) {
     // Update the map of the object; no write barrier updating is
     // needed because the map is never in new space.
-    __ mov(ip, Operand(Handle<Map>(transition)));
+    __ mov(ip, Operand(transition));
     __ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
   }
 
@@ -431,7 +492,13 @@
 
     // Update the write barrier for the array address.
     // Pass the now unused name_reg as a scratch register.
-    __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+    __ mov(name_reg, r0);
+    __ RecordWriteField(receiver_reg,
+                        offset,
+                        name_reg,
+                        scratch,
+                        kLRHasNotBeenSaved,
+                        kDontSaveFPRegs);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -444,7 +511,13 @@
 
     // Update the write barrier for the array address.
     // Ok to clobber receiver_reg and name_reg, since we return.
-    __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+    __ mov(name_reg, r0);
+    __ RecordWriteField(scratch,
+                        offset,
+                        name_reg,
+                        receiver_reg,
+                        kLRHasNotBeenSaved,
+                        kDontSaveFPRegs);
   }
 
   // Return the value (register r0).
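
RecordWrite is replaced here by RecordWriteField, the incremental-marking-aware barrier. It takes the stored value in a register and may clobber it, so the value is first copied out of r0 into the now-dead name_reg (r0 must survive as the return value). The pattern, hedged:

    __ str(r0, FieldMemOperand(receiver_reg, offset));  // the actual store
    __ mov(name_reg, r0);                                // barrier may clobber
    __ RecordWriteField(receiver_reg, offset, name_reg, scratch,
                        kLRHasNotBeenSaved,  // lr not spilled at this point
                        kDontSaveFPRegs);    // no FP values live here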
@@ -455,20 +528,15 @@
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Code* code = NULL;
-  if (kind == Code::LOAD_IC) {
-    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
-  } else {
-    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
-  }
-
-  Handle<Code> ic(code);
-  __ Jump(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code = (kind == Code::LOAD_IC)
+      ? masm->isolate()->builtins()->LoadIC_Miss()
+      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
 static void GenerateCallFunction(MacroAssembler* masm,
-                                 Object* object,
+                                 Handle<Object> object,
                                  const ParameterCount& arguments,
                                  Label* miss,
                                  Code::ExtraICState extra_ic_state) {
@@ -501,12 +569,12 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     JSObject* holder_obj) {
+                                     Handle<JSObject> holder_obj) {
   __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
   Register scratch = name;
-  __ mov(scratch, Operand(Handle<Object>(interceptor)));
+  __ mov(scratch, Operand(interceptor));
   __ push(scratch);
   __ push(receiver);
   __ push(holder);
@@ -515,11 +583,12 @@
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
-                                                   Register receiver,
-                                                   Register holder,
-                                                   Register name,
-                                                   JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
   ExternalReference ref =
@@ -532,6 +601,7 @@
   __ CallStub(&stub);
 }
 
+
 static const int kFastApiCallArguments = 3;
 
 // Reserves space for the extra arguments to FastHandleApiCall in the
@@ -553,44 +623,42 @@
 }
 
 
-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+static void GenerateFastApiDirectCall(MacroAssembler* masm,
                                       const CallOptimization& optimization,
                                       int argc) {
   // ----------- S t a t e -------------
   //  -- sp[0]              : holder (set by CheckPrototypes)
-  //  -- sp[4]              : callee js function
+  //  -- sp[4]              : callee JS function
   //  -- sp[8]              : call data
-  //  -- sp[12]             : last js argument
+  //  -- sp[12]             : last JS argument
   //  -- ...
-  //  -- sp[(argc + 3) * 4] : first js argument
+  //  -- sp[(argc + 3) * 4] : first JS argument
   //  -- sp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  JSFunction* function = optimization.constant_function();
-  __ mov(r5, Operand(Handle<JSFunction>(function)));
+  Handle<JSFunction> function = optimization.constant_function();
+  __ LoadHeapObject(r5, function);
   __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
 
   // Pass the additional arguments FastHandleApiCall expects.
-  Object* call_data = optimization.api_call_info()->data();
-  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
-  if (masm->isolate()->heap()->InNewSpace(call_data)) {
-    __ Move(r0, api_call_info_handle);
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data(api_call_info->data());
+  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+    __ Move(r0, api_call_info);
     __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
   } else {
-    __ Move(r6, Handle<Object>(call_data));
+    __ Move(r6, call_data);
   }
-  // Store js function and call data.
+  // Store JS function and call data.
   __ stm(ib, sp, r5.bit() | r6.bit());
 
   // r2 points to call data as expected by Arguments
   // (refer to layout above).
   __ add(r2, sp, Operand(2 * kPointerSize));
 
-  Object* callback = optimization.api_call_info()->callback();
-  Address api_function_address = v8::ToCData<Address>(callback);
-  ApiFunction fun(api_function_address);
-
   const int kApiStackSpace = 4;
+
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
   // r0 = v8::Arguments&
@@ -608,17 +676,18 @@
   __ mov(ip, Operand(0));
   __ str(ip, MemOperand(r0, 3 * kPointerSize));
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated). Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
   const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
   ExternalReference ref = ExternalReference(&fun,
                                             ExternalReference::DIRECT_API_CALL,
                                             masm->isolate());
-  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+  AllowExternalCallThatCantCauseGC scope(masm);
+
+  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
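
The Try* variants existed to thread allocation failures (MaybeObject*) out of the code generator. Now that stub compilation works on handles and may simply GC, GenerateFastApiDirectCall returns void and calls the plain CallApiFunctionAndReturn. The contract change, sketched:

    // Before: MaybeObject* r = masm->TryCallApiFunctionAndReturn(ref, space);
    //         if (r->IsFailure()) return r;   // propagate by hand
    // After:  __ CallApiFunctionAndReturn(ref, space);
    //         // allocation inside can GC; no failure plumbing needed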
 
+
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -630,86 +699,63 @@
         name_(name),
         extra_ic_state_(extra_ic_state) {}
 
-  MaybeObject* Compile(MacroAssembler* masm,
-                       JSObject* object,
-                       JSObject* holder,
-                       String* name,
-                       LookupResult* lookup,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       Label* miss) {
+  void Compile(MacroAssembler* masm,
+               Handle<JSObject> object,
+               Handle<JSObject> holder,
+               Handle<String> name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
     // Check that the receiver isn't a smi.
     __ JumpIfSmi(receiver, miss);
-
     CallOptimization optimization(lookup);
-
     if (optimization.is_constant_call()) {
-      return CompileCacheable(masm,
-                              object,
-                              receiver,
-                              scratch1,
-                              scratch2,
-                              scratch3,
-                              holder,
-                              lookup,
-                              name,
-                              optimization,
-                              miss);
+      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+                       holder, lookup, name, optimization, miss);
     } else {
-      CompileRegular(masm,
-                     object,
-                     receiver,
-                     scratch1,
-                     scratch2,
-                     scratch3,
-                     name,
-                     holder,
-                     miss);
-      return masm->isolate()->heap()->undefined_value();
+      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+                     name, holder, miss);
     }
   }
 
  private:
-  MaybeObject* CompileCacheable(MacroAssembler* masm,
-                                JSObject* object,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                JSObject* interceptor_holder,
-                                LookupResult* lookup,
-                                String* name,
-                                const CallOptimization& optimization,
-                                Label* miss_label) {
+  void CompileCacheable(MacroAssembler* masm,
+                        Handle<JSObject> object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        Register scratch3,
+                        Handle<JSObject> interceptor_holder,
+                        LookupResult* lookup,
+                        Handle<String> name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
-
     Counters* counters = masm->isolate()->counters();
-
     int depth1 = kInvalidProtoDepth;
     int depth2 = kInvalidProtoDepth;
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
-       !lookup->holder()->IsGlobalObject()) {
-     depth1 =
-         optimization.GetPrototypeDepthOfExpectedType(object,
-                                                      interceptor_holder);
-     if (depth1 == kInvalidProtoDepth) {
-       depth2 =
-           optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
-                                                        lookup->holder());
-     }
-     can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
-                            (depth2 != kInvalidProtoDepth);
+        !lookup->holder()->IsGlobalObject()) {
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(
+          object, interceptor_holder);
+      if (depth1 == kInvalidProtoDepth) {
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            interceptor_holder, Handle<JSObject>(lookup->holder()));
+      }
+      can_do_fast_api_call =
+          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
     }
 
     __ IncrementCounter(counters->call_const_interceptor(), 1,
-                      scratch1, scratch2);
+                        scratch1, scratch2);
 
     if (can_do_fast_api_call) {
       __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -722,9 +768,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver,
-                                        interceptor_holder, scratch1,
-                                        scratch2, scratch3, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3,
+                                        name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -737,10 +783,11 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      lookup->holder(), scratch1,
-                                      scratch2, scratch3, name, depth2, miss);
+                                      Handle<JSObject>(lookup->holder()),
+                                      scratch1, scratch2, scratch3,
+                                      name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -751,16 +798,13 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      MaybeObject* result = GenerateFastApiDirectCall(masm,
-                                                      optimization,
-                                                      arguments_.immediate());
-      if (result->IsFailure()) return result;
+      GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
           : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
-                        JUMP_FUNCTION, call_kind);
+                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
     }
 
     // Deferred code for fast API call case---clean preallocated space.
@@ -775,64 +819,53 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
     }
-
-    return masm->isolate()->heap()->undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      JSObject* object,
+                      Handle<JSObject> object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      String* name,
-                      JSObject* interceptor_holder,
+                      Handle<String> name,
+                      Handle<JSObject> interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3, name,
-                                        miss_label);
+                                        scratch1, scratch2, scratch3,
+                                        name, miss_label);
 
     // Call a runtime function to load the interceptor property.
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
-
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             interceptor_holder);
-
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
                           masm->isolate()),
         5);
-
     // Restore the name_ register.
     __ pop(name_);
-    __ LeaveInternalFrame();
+    // Leave the internal frame.
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           JSObject* holder_obj,
+                           Handle<JSObject> holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
-    __ Push(holder, name_);
-
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
-
-    __ pop(name_);  // Restore the name.
-    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
-
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(holder, name_);
+      CompileCallLoadPropertyWithInterceptor(masm,
+                                             receiver,
+                                             holder,
+                                             name_,
+                                             holder_obj);
+      __ pop(name_);  // Restore the name.
+      __ pop(receiver);  // Restore the holder.
+    }
     // If interceptor returns no-result sentinel, call the constant function.
     __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
     __ cmp(r0, scratch);
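
Throughout this file, EnterInternalFrame()/LeaveInternalFrame() pairs become block-scoped FrameScopes, so the frame is torn down wherever the C++ block ends and stub-call assertions can verify a frame is active. A minimal sketch of the idea, not the real implementation (it assumes the has-frame flag and the EnterFrame/LeaveFrame helpers this patch relies on):

    class FrameScope {
     public:
      FrameScope(MacroAssembler* masm, StackFrame::Type type)
          : masm_(masm), type_(type) {
        masm_->set_has_frame(true);
        if (type != StackFrame::MANUAL) masm_->EnterFrame(type);
      }
      ~FrameScope() {
        if (type_ != StackFrame::MANUAL) masm_->LeaveFrame(type_);
        masm_->set_has_frame(false);
      }
      // For paths that return early while the scope is still open
      // (see GenerateLoadInterceptor below).
      void GenerateLeaveFrame() { masm_->LeaveFrame(type_); }
     private:
      MacroAssembler* masm_;
      StackFrame::Type type_;
    };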
@@ -849,52 +882,42 @@
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
-    MacroAssembler* masm,
-    GlobalObject* global,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  Object* probe;
-  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+                                      Handle<GlobalObject> global,
+                                      Handle<String> name,
+                                      Register scratch,
+                                      Label* miss) {
+  Handle<JSGlobalPropertyCell> cell =
+      GlobalObject::EnsurePropertyCell(global, name);
   ASSERT(cell->value()->IsTheHole());
-  __ mov(scratch, Operand(Handle<Object>(cell)));
+  __ mov(scratch, Operand(cell));
   __ ldr(scratch,
          FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   __ cmp(scratch, ip);
   __ b(ne, miss);
-  return cell;
 }
 
+
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
-    MacroAssembler* masm,
-    JSObject* object,
-    JSObject* holder,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  JSObject* current = object;
-  while (current != holder) {
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+                                       Handle<JSObject> object,
+                                       Handle<JSObject> holder,
+                                       Handle<String> name,
+                                       Register scratch,
+                                       Label* miss) {
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
     if (current->IsGlobalObject()) {
-      // Returns a cell or a failure.
-      MaybeObject* result = GenerateCheckPropertyCell(
-          masm,
-          GlobalObject::cast(current),
-          name,
-          scratch,
-          miss);
-      if (result->IsFailure()) return result;
+      GenerateCheckPropertyCell(masm,
+                                Handle<GlobalObject>::cast(current),
+                                name,
+                                scratch,
+                                miss);
     }
-    ASSERT(current->IsJSObject());
-    current = JSObject::cast(current->GetPrototype());
+    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
   }
-  return NULL;
 }
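
This is the recurring handlification pattern of this merge: helpers that returned MUST_USE_RESULT MaybeObject* so callers could bail out on allocation failure become void functions taking Handle<> arguments, because handle-based allocation (here GlobalObject::EnsurePropertyCell) can trigger GC instead of failing. Sketched:

    // Before:
    //   MaybeObject* maybe = global->EnsurePropertyCell(name);
    //   Object* probe;
    //   if (!maybe->ToObject(&probe)) return maybe;          // bail out
    // After:
    //   Handle<JSGlobalPropertyCell> cell =
    //       GlobalObject::EnsurePropertyCell(global, name);  // may GC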
 
 
@@ -1008,13 +1031,13 @@
 #define __ ACCESS_MASM(masm())
 
 
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                        Register object_reg,
-                                       JSObject* holder,
+                                       Handle<JSObject> holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
-                                       String* name,
+                                       Handle<String> name,
                                        int save_at_depth,
                                        Label* miss) {
   // Make sure there's no overlap between holder and object registers.
@@ -1032,83 +1055,51 @@
 
   // Check the maps in the prototype chain.
   // Traverse the prototype chain from the object and do map checks.
-  JSObject* current = object;
-  while (current != holder) {
-    depth++;
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
+    ++depth;
 
     // Only global objects and objects that do not require access
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
-    ASSERT(current->GetPrototype()->IsJSObject());
-    JSObject* prototype = JSObject::cast(current->GetPrototype());
+    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
-        Object* lookup_result = NULL;  // Initialization to please compiler.
-        if (!maybe_lookup_result->ToObject(&lookup_result)) {
-          set_failure(Failure::cast(maybe_lookup_result));
-          return reg;
-        }
-        name = String::cast(lookup_result);
+        name = factory()->LookupSymbol(name);
       }
-      ASSERT(current->property_dictionary()->FindEntry(name) ==
+      ASSERT(current->property_dictionary()->FindEntry(*name) ==
              StringDictionary::kNotFound);
 
-      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
-                                                                      miss,
-                                                                      reg,
-                                                                      name,
-                                                                      scratch1,
-                                                                      scratch2);
-      if (negative_lookup->IsFailure()) {
-        set_failure(Failure::cast(negative_lookup));
-        return reg;
-      }
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+                                       scratch1, scratch2);
 
       __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else if (heap()->InNewSpace(prototype)) {
-      // Get the map of the current object.
-      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      __ cmp(scratch1, Operand(Handle<Map>(current->map())));
-
-      // Branch on the result of the map check.
-      __ b(ne, miss);
-
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-        // Restore scratch register to be the map of the object.  In the
-        // new space case below, we load the prototype from the map in
-        // the scratch register.
-        __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-
-      reg = holder_reg;  // from now the object is in holder_reg
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
+      reg = holder_reg;  // From now on the object will be in holder_reg.
       __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      // Check the map of the current object.
-      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      __ cmp(scratch1, Operand(Handle<Map>(current->map())));
-      // Branch on the result of the map check.
-      __ b(ne, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
+      Handle<Map> current_map(current->map());
+      __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
+                  ALLOW_ELEMENT_TRANSITION_MAPS);
+
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
       if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
       }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ mov(reg, Operand(Handle<JSObject>(prototype)));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (heap()->InNewSpace(*prototype)) {
+        // The prototype is in new space; we cannot store a reference to it
+        // in the code.  Load it from the map.
+        __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+      } else {
+        // The prototype is in old space; load it directly.
+        __ mov(reg, Operand(prototype));
+      }
     }
 
     if (save_at_depth == depth) {
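
The rewrite folds the old new-space and old-space branches together: CheckMap (with DONT_DO_SMI_CHECK, since smi receivers were rejected earlier) now performs the map comparison and miss branch for every prototype, and the InNewSpace test only decides how the next prototype gets into the register, as sketched:

    // if (heap()->InNewSpace(*prototype))
    //   reload it from the holder's map (new-space objects move too often
    //   to embed their address in code):
    //     __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
    // else
    //   embed the pointer directly:
    //     __ mov(reg, Operand(prototype));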
@@ -1119,143 +1110,130 @@
     current = prototype;
   }
 
-  // Check the holder map.
-  __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-  __ cmp(scratch1, Operand(Handle<Map>(current->map())));
-  __ b(ne, miss);
-
   // Log the check depth.
   LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
 
+  // Check the holder map.
+  __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
+              DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+
   // Perform security check for access to the global object.
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
   if (holder->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  };
+  }
 
-  // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.  We also need to check that the
-  // property cell for the property is still empty.
-  MaybeObject* result = GenerateCheckPropertyCells(masm(),
-                                                   object,
-                                                   holder,
-                                                   name,
-                                                   scratch1,
-                                                   miss);
-  if (result->IsFailure()) set_failure(Failure::cast(result));
+  // If we've skipped any global objects, it's not enough to verify that
+  // their maps haven't changed.  We also need to check that the property
+  // cell for the property is still empty.
+  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
 
   // Return the register containing the holder.
   return reg;
 }
 
 
-void StubCompiler::GenerateLoadField(JSObject* object,
-                                     JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+                                     Handle<JSObject> holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      int index,
-                                     String* name,
+                                     Handle<String> name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
-                      name, miss);
+  Register reg = CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
   GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
   __ Ret();
 }
 
 
-void StubCompiler::GenerateLoadConstant(JSObject* object,
-                                        JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Object* value,
-                                        String* name,
+                                        Handle<JSFunction> value,
+                                        Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, name,
-                  miss);
+  CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ mov(r0, Operand(Handle<Object>(value)));
+  __ LoadHeapObject(r0, value);
   __ Ret();
 }
 
 
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
-                                                JSObject* holder,
-                                                Register receiver,
-                                                Register name_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                AccessorInfo* callback,
-                                                String* name,
-                                                Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Handle<AccessorInfo> callback,
+                                        Handle<String> name,
+                                        Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
-                      name, miss);
+  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+                                 scratch2, scratch3, name, miss);
 
   // Build AccessorInfo::args_ list on the stack and push property name below
   // the exit frame to make GC aware of them and store pointers to them.
   __ push(receiver);
   __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
-  Handle<AccessorInfo> callback_handle(callback);
-  if (heap()->InNewSpace(callback_handle->data())) {
-    __ Move(scratch3, callback_handle);
+  if (heap()->InNewSpace(callback->data())) {
+    __ Move(scratch3, callback);
     __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
   } else {
-    __ Move(scratch3, Handle<Object>(callback_handle->data()));
+    __ Move(scratch3, Handle<Object>(callback->data()));
   }
   __ Push(reg, scratch3, name_reg);
   __ mov(r0, sp);  // r0 = Handle<String>
 
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-
   const int kApiStackSpace = 1;
+  FrameScope frame_scope(masm(), StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
+
   // Create AccessorInfo instance on the stack above the exit frame with
-  // scratch2 (internal::Object **args_) as the data.
+  // scratch2 (internal::Object** args_) as the data.
   __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
   __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo&
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
   const int kStackUnwindSpace = 4;
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
   ExternalReference ref =
       ExternalReference(&fun,
                         ExternalReference::DIRECT_GETTER_CALL,
                         masm()->isolate());
-  return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+                                           Handle<JSObject> interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           String* name,
+                                           Handle<String> name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1267,13 +1245,13 @@
   // and CALLBACKS, so inline only them, other cases may be added
   // later.
   bool compile_followup_inline = false;
-  if (lookup->IsProperty() && lookup->IsCacheable()) {
+  if (lookup->IsFound() && lookup->IsCacheable()) {
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo() &&
-        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
-      compile_followup_inline = true;
+               lookup->GetCallbackObject()->IsAccessorInfo()) {
+      compile_followup_inline =
+          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
     }
   }
 
@@ -1288,48 +1266,45 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    __ EnterInternalFrame();
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        // CALLBACKS case needs a receiver to be passed into C++ callback.
+        __ Push(receiver, holder_reg, name_reg);
+      } else {
+        __ Push(holder_reg, name_reg);
+      }
+      // Invoke an interceptor.  Note: map checks from receiver to
+      // interceptor's holder has been compiled before (see a caller
+      // of this method.)
+      CompileCallLoadPropertyWithInterceptor(masm(),
+                                             receiver,
+                                             holder_reg,
+                                             name_reg,
+                                             interceptor_holder);
+      // Check if interceptor provided a value for property.  If it's
+      // the case, return immediately.
+      Label interceptor_failed;
+      __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+      __ cmp(r0, scratch1);
+      __ b(eq, &interceptor_failed);
+      frame_scope.GenerateLeaveFrame();
+      __ Ret();
 
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      // CALLBACKS case needs a receiver to be passed into C++ callback.
-      __ Push(receiver, holder_reg, name_reg);
-    } else {
-      __ Push(holder_reg, name_reg);
+      __ bind(&interceptor_failed);
+      __ pop(name_reg);
+      __ pop(holder_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        __ pop(receiver);
+      }
+      // Leave the internal frame.
     }
-
-    // Invoke an interceptor.  Note: map checks from receiver to
-    // interceptor's holder has been compiled before (see a caller
-    // of this method.)
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           interceptor_holder);
-
-    // Check if interceptor provided a value for property.  If it's
-    // the case, return immediately.
-    Label interceptor_failed;
-    __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
-    __ cmp(r0, scratch1);
-    __ b(eq, &interceptor_failed);
-    __ LeaveInternalFrame();
-    __ Ret();
-
-    __ bind(&interceptor_failed);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      __ pop(receiver);
-    }
-
-    __ LeaveInternalFrame();
-
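// A minimal sketch of the RAII pattern introduced above, using only names
// that appear in this patch: FrameScope's destructor emits the frame
// teardown on the fall-through exit, while GenerateLeaveFrame() emits it
// for an early return without ending the scope.
//
//   {
//     FrameScope frame_scope(masm(), StackFrame::INTERNAL);
//     // ... push pointers the GC must be able to see ...
//     frame_scope.GenerateLeaveFrame();  // teardown before an early Ret()
//     __ Ret();
//     // ... other exits fall through ...
//   }  // destructor emits the teardown here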
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   lookup->holder(),
+                                   Handle<JSObject>(lookup->holder()),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1341,21 +1316,21 @@
       // We found FIELD property in prototype chain of interceptor's holder.
       // Retrieve a field from field's holder.
       GenerateFastPropertyLoad(masm(), r0, holder_reg,
-                               lookup->holder(), lookup->GetFieldIndex());
+                               Handle<JSObject>(lookup->holder()),
+                               lookup->GetFieldIndex());
       __ Ret();
     } else {
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
-      ASSERT(callback != NULL);
+      Handle<AccessorInfo> callback(
+          AccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
       // Important invariant in CALLBACKS case: the code above must be
       // structured to never clobber |receiver| register.
-      __ Move(scratch2, Handle<AccessorInfo>(callback));
+      __ Move(scratch2, callback);
       // holder_reg is either receiver or scratch1.
       if (!receiver.is(holder_reg)) {
         ASSERT(scratch1.is(holder_reg));
@@ -1392,17 +1367,17 @@
 }
 
 
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ cmp(r2, Operand(Handle<String>(name)));
+    __ cmp(r2, Operand(name));
     __ b(ne, miss);
   }
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
-                                                   JSObject* holder,
-                                                   String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<String> name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1412,27 +1387,22 @@
   // Get the receiver from the stack.
   __ ldr(r0, MemOperand(sp, argc * kPointerSize));
 
-  // If the object is the holder then we know that it's a global
-  // object which can only happen for contextual calls. In this case,
-  // the receiver cannot be a smi.
-  if (object != holder) {
-    __ JumpIfSmi(r0, miss);
-  }
-
   // Check that the maps haven't changed.
+  __ JumpIfSmi(r0, miss);
   CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Label* miss) {
   // Get the value from the cell.
-  __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(r3, Operand(cell));
   __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
 
   // Check that the cell contains the same function.
-  if (heap()->InNewSpace(function)) {
+  if (heap()->InNewSpace(*function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1446,30 +1416,26 @@
     __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
     __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
     __ cmp(r4, r3);
-    __ b(ne, miss);
   } else {
-    __ cmp(r1, Operand(Handle<JSFunction>(function)));
-    __ b(ne, miss);
+    __ cmp(r1, Operand(function));
   }
+  __ b(ne, miss);
 }
 
 
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+  Handle<Code> code =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                kind_,
-                                               extra_ic_state_);
-  Object* obj;
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
-  return obj;
+                                               extra_state_);
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1489,23 +1455,23 @@
   Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
   GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
 
-  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(FIELD, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1515,14 +1481,12 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   Register receiver = r1;
-
   // Get the receiver from the stack
   const int argc = arguments().immediate();
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
@@ -1531,8 +1495,8 @@
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(JSObject::cast(object), receiver,
-                  holder, r3, r0, r4, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
+                  name, &miss);
 
   if (argc == 0) {
     // Nothing to do, just return the length.
@@ -1542,21 +1506,21 @@
   } else {
     Label call_builtin;
 
-    Register elements = r3;
-    Register end_elements = r5;
-
-    // Get the elements array of the object.
-    __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
-    // Check that the elements are in fast mode and writable.
-    __ CheckMap(elements,
-                r0,
-                Heap::kFixedArrayMapRootIndex,
-                &call_builtin,
-                DONT_DO_SMI_CHECK);
-
     if (argc == 1) {  // Otherwise fall through to call the builtin.
-      Label exit, with_write_barrier, attempt_to_grow_elements;
+      Label attempt_to_grow_elements;
+
+      Register elements = r6;
+      Register end_elements = r5;
+      // Get the elements array of the object.
+      __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+      // Check that the elements are in fast mode and writable.
+      __ CheckMap(elements,
+                  r0,
+                  Heap::kFixedArrayMapRootIndex,
+                  &call_builtin,
+                  DONT_DO_SMI_CHECK);
 
       // Get the array's length into r0 and calculate new length.
       __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1564,18 +1528,22 @@
       STATIC_ASSERT(kSmiTag == 0);
       __ add(r0, r0, Operand(Smi::FromInt(argc)));
 
-      // Get the element's length.
+      // Get the elements' length.
       __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
       __ cmp(r0, r4);
       __ b(gt, &attempt_to_grow_elements);
 
+      // Check if value is a smi.
+      Label with_write_barrier;
+      __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+      __ JumpIfNotSmi(r4, &with_write_barrier);
+
       // Save new length.
       __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
 
-      // Push the element.
-      __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+      // Store the value.
       // We may need a register containing the address end_elements below,
       // so write back the value in end_elements.
       __ add(end_elements, elements,
@@ -1585,14 +1553,51 @@
       __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
 
       // The new length is already a smi in r0; return it.
-      __ JumpIfNotSmi(r4, &with_write_barrier);
-      __ bind(&exit);
       __ Drop(argc + 1);
       __ Ret();
 
       __ bind(&with_write_barrier);
-      __ InNewSpace(elements, r4, eq, &exit);
-      __ RecordWriteHelper(elements, end_elements, r4);
+
+      __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+      if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+        Label fast_object, not_fast_object;
+        __ CheckFastObjectElements(r3, r7, &not_fast_object);
+        __ jmp(&fast_object);
+        // In case of fast smi-only, convert to fast object, otherwise bail out.
+        __ bind(&not_fast_object);
+        __ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
+        // r1: receiver
+        // r3: map
+        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                               FAST_ELEMENTS,
+                                               r3,
+                                               r7,
+                                               &call_builtin);
+        __ mov(r2, receiver);
+        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+        __ bind(&fast_object);
+      } else {
+        __ CheckFastObjectElements(r3, r3, &call_builtin);
+      }
+
+      // Save new length.
+      __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+      // Store the value.
+      // We may need a register containing the address end_elements below,
+      // so write back the value in end_elements.
+      __ add(end_elements, elements,
+             Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+      __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+
+      __ RecordWrite(elements,
+                     end_elements,
+                     r4,
+                     kLRHasNotBeenSaved,
+                     kDontSaveFPRegs,
+                     EMIT_REMEMBERED_SET,
+                     OMIT_SMI_CHECK);
       __ Drop(argc + 1);
       __ Ret();
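// The barrier above is the incremental-marking write barrier: it replaces
// the old InNewSpace()/RecordWriteHelper() pair and feeds the store buffer
// when the stored value requires it.  A minimal sketch of the call, with
// placeholder register names (|object| holds the written-into object,
// |slot| the updated address, |value| the stored value):
//
//   __ RecordWrite(object,              // object containing the slot
//                  slot,                // address that was just written
//                  value,               // value that was stored
//                  kLRHasNotBeenSaved,  // lr still holds the return address
//                  kDontSaveFPRegs,
//                  EMIT_REMEMBERED_SET,
//                  OMIT_SMI_CHECK);     // value was checked non-smi earlier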
 
@@ -1604,6 +1609,15 @@
         __ b(&call_builtin);
       }
 
+      __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
+      // Growing elements that are SMI-only requires special handling in case
+      // the new element is non-Smi. For now, delegate to the builtin.
+      Label no_fast_elements_check;
+      __ JumpIfSmi(r2, &no_fast_elements_check);
+      __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(r7, r7, &call_builtin);
+      __ bind(&no_fast_elements_check);
+
       Isolate* isolate = masm()->isolate();
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate);
@@ -1616,26 +1630,25 @@
              Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
       __ add(end_elements, end_elements, Operand(kEndElementsOffset));
       __ mov(r7, Operand(new_space_allocation_top));
-      __ ldr(r6, MemOperand(r7));
-      __ cmp(end_elements, r6);
+      __ ldr(r3, MemOperand(r7));
+      __ cmp(end_elements, r3);
       __ b(ne, &call_builtin);
 
       __ mov(r9, Operand(new_space_allocation_limit));
       __ ldr(r9, MemOperand(r9));
-      __ add(r6, r6, Operand(kAllocationDelta * kPointerSize));
-      __ cmp(r6, r9);
+      __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
+      __ cmp(r3, r9);
       __ b(hi, &call_builtin);
 
       // We fit and could grow elements.
       // Update new_space_allocation_top.
-      __ str(r6, MemOperand(r7));
+      __ str(r3, MemOperand(r7));
       // Push the argument.
-      __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
-      __ str(r6, MemOperand(end_elements));
+      __ str(r2, MemOperand(end_elements));
       // Fill the rest with holes.
-      __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
+      __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
-        __ str(r6, MemOperand(end_elements, i * kPointerSize));
+        __ str(r3, MemOperand(end_elements, i * kPointerSize));
       }
 
       // Update elements' and array's sizes.
@@ -1656,19 +1669,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
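// Throughout this patch the custom-call compilers return
// Handle<Code>::null() instead of heap()->undefined_value() to signal
// "no specialized stub".  A minimal sketch of the call-site pattern this
// implies (it mirrors the CompileCustomCall handling later in this file):
//
//   Handle<Code> code =
//       CompileArrayPushCall(object, holder, cell, function, name);
//   if (!code.is_null()) return code;  // a specialized stub was compiled
//   // null handle: fall back to the generic call stub compiler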
 
 
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                                   JSObject* holder,
-                                                   JSGlobalPropertyCell* cell,
-                                                   JSFunction* function,
-                                                   String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1678,25 +1691,22 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss, return_undefined, call_builtin;
-
   Register receiver = r1;
   Register elements = r3;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack
   const int argc = arguments().immediate();
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(JSObject::cast(object),
-                  receiver, holder, elements, r4, r0, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
+                  r4, r0, name, &miss);
 
   // Get the elements array of the object.
   __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1745,20 +1755,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -1768,21 +1777,19 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+      (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
   GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
@@ -1790,13 +1797,12 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             r0,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
-                  r1, r3, r4, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  r0, holder, r1, r3, r4, name, &miss);
 
   Register receiver = r1;
   Register index = r4;
-  Register scratch = r3;
   Register result = r0;
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
   if (argc > 0) {
@@ -1805,20 +1811,19 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharCodeAtGenerator char_code_at_generator(receiver,
-                                                   index,
-                                                   scratch,
-                                                   result,
-                                                   &miss,  // When not a string.
-                                                   &miss,  // When not a number.
-                                                   index_out_of_range_label,
-                                                   STRING_INDEX_IS_NUMBER);
-  char_code_at_generator.GenerateFast(masm());
+  StringCharCodeAtGenerator generator(receiver,
+                                      index,
+                                      result,
+                                      &miss,  // When not a string.
+                                      &miss,  // When not a number.
+                                      index_out_of_range_label,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_code_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1829,22 +1834,21 @@
 
   __ bind(&miss);
   // Restore function name in r2.
-  __ Move(r2, Handle<String>(name));
+  __ Move(r2, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -1854,21 +1858,18 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
-
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+      (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
   GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
@@ -1876,14 +1877,13 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             r0,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
-                  r1, r3, r4, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  r0, holder, r1, r3, r4, name, &miss);
 
   Register receiver = r0;
   Register index = r4;
-  Register scratch1 = r1;
-  Register scratch2 = r3;
+  Register scratch = r3;
   Register result = r0;
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
   if (argc > 0) {
@@ -1892,21 +1892,20 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch1,
-                                          scratch2,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          index_out_of_range_label,
-                                          STRING_INDEX_IS_NUMBER);
-  char_at_generator.GenerateFast(masm());
+  StringCharAtGenerator generator(receiver,
+                                  index,
+                                  scratch,
+                                  result,
+                                  &miss,  // When not a string.
+                                  &miss,  // When not a number.
+                                  index_out_of_range_label,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1917,22 +1916,21 @@
 
   __ bind(&miss);
   // Restore function name in r2.
-  __ Move(r2, Handle<String>(name));
+  __ Move(r2, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -1945,22 +1943,23 @@
 
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
 
-    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -1976,34 +1975,35 @@
   // Convert the smi code to uint16.
   __ and_(code, code, Operand(Smi::FromInt(0xffff)));
 
-  StringCharFromCodeGenerator char_from_code_generator(code, r0);
-  char_from_code_generator.GenerateFast(masm());
+  StringCharFromCodeGenerator generator(code, r0);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_from_code_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2013,31 +2013,28 @@
   // -----------------------------------
 
   if (!CpuFeatures::IsSupported(VFP3)) {
-      return heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   CpuFeatures::Scope scope_vfp3(VFP3);
-
   const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss, slow;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
-
-    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2069,7 +2066,7 @@
   __ vmrs(r3);
   // Set custom FPCSR:
   //  - Set rounding mode to "Round towards Minus Infinity"
-  //    (ie bits [23:22] = 0b10).
+  //    (i.e. bits [23:22] = 0b10).
   //  - Clear vfp cumulative exception flags (bits [3:0]).
   //  - Make sure Flush-to-zero mode control bit is unset (bit 24).
   __ bic(r9, r3,
@@ -2135,23 +2132,24 @@
   __ bind(&slow);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
-                                                  JSObject* holder,
-                                                  JSGlobalPropertyCell* cell,
-                                                  JSFunction* function,
-                                                  String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2161,25 +2159,22 @@
   // -----------------------------------
 
   const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
   GenerateNameCheck(name, &miss);
-
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
-
-    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2236,39 +2231,38 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   Counters* counters = isolate()->counters();
 
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to global receiver.
-  if (object->IsGlobalObject()) return heap()->undefined_value();
-  if (cell != NULL) return heap()->undefined_value();
-  if (!object->IsJSObject()) return heap()->undefined_value();
+  if (object->IsGlobalObject()) return Handle<Code>::null();
+  if (!cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSObject()) return Handle<Code>::null();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-            JSObject::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+      Handle<JSObject>::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
 
   Label miss, miss_before_stack_reserved;
-
   GenerateNameCheck(name, &miss_before_stack_reserved);
 
   // Get the receiver from the stack.
@@ -2284,44 +2278,40 @@
   ReserveSpaceForFastApiCall(masm(), r0);
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+  CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
                   depth, &miss);
 
-  MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
-  if (result->IsFailure()) return result;
+  GenerateFastApiDirectCall(masm(), optimization, argc);
 
   __ bind(&miss);
   FreeSpaceForFastApiCall(masm());
 
   __ bind(&miss_before_stack_reserved);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> function,
+                                                   Handle<String> name,
                                                    CheckType check) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, NULL, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack
@@ -2336,16 +2326,14 @@
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
-  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(masm()->isolate()->counters()->call_const(),
                           1, r0, r3);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
-                      &miss);
+      CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+                      name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2356,28 +2344,25 @@
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      } else {
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         // Check that the object is a two-byte string or a symbol.
         __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
         __ b(ge, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
-                        r1, r4, name, &miss);
-      }
-      break;
-
-    case NUMBER_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            r0, holder, r3, r1, r4, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case NUMBER_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(r1, &fast);
@@ -2387,18 +2372,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
-                        r1, r4, name, &miss);
-      }
-      break;
-    }
-
-    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            r0, holder, r3, r1, r4, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case BOOLEAN_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a boolean.
         __ LoadRoot(ip, Heap::kTrueValueRootIndex);
@@ -2411,112 +2396,92 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
-                        r1, r4, name, &miss);
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            r0, holder, r3, r1, r4, name, &miss);
+      } else {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
       }
       break;
-    }
-
-    default:
-      UNREACHABLE();
   }
 
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
-
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), r2, extra_ic_state_);
-  MaybeObject* result = compiler.Compile(masm(),
-                                         object,
-                                         holder,
-                                         name,
-                                         &lookup,
-                                         r1,
-                                         r3,
-                                         r4,
-                                         r0,
-                                         &miss);
-  if (result->IsFailure()) {
-      return result;
-  }
+  CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
+  compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
+                   &miss);
 
   // Move returned value, the function to call, to r1.
   __ mov(r1, r0);
   // Restore receiver.
   __ ldr(r0, MemOperand(sp, argc * kPointerSize));
 
-  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
-
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, cell, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
   // Patch the receiver on the stack with the global proxy if
@@ -2526,45 +2491,37 @@
     __ str(r3, MemOperand(sp, argc * kPointerSize));
   }
 
-  // Setup the context (function already in r1).
+  // Set up the context (function already in r1).
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
-  ASSERT(function->is_compiled());
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  if (V8::UseCrankshaft()) {
-    // TODO(kasperl): For now, we always call indirectly through the
-    // code field in the function to allow recompilation to take effect
-    // without changing any of the call sites.
-    __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-    __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
-                  NullCallWrapper(), call_kind);
-  } else {
-    __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
-                  JUMP_FUNCTION, call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
+                NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(NORMAL, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                   int index,
-                                                  Map* transition,
-                                                  String* name) {
+                                                  Handle<Map> transition,
+                                                  Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -2573,24 +2530,20 @@
   // -----------------------------------
   Label miss;
 
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     r1, r2, r3,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss);
   __ bind(&miss);
   Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
-                                                     AccessorInfo* callback,
-                                                     String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+    Handle<JSObject> object,
+    Handle<AccessorInfo> callback,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -2599,13 +2552,9 @@
   // -----------------------------------
   Label miss;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(r1, &miss);
-
   // Check that the map of the object hasn't changed.
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ cmp(r3, Operand(Handle<Map>(object->map())));
-  __ b(ne, &miss);
+  __ CheckMap(r1, r3, Handle<Map>(object->map()), &miss,
+              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -2617,7 +2566,7 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   __ push(r1);  // receiver
-  __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback info
+  __ mov(ip, Operand(callback));  // callback info
   __ Push(ip, r2, r0);
 
   // Do tail-call to the runtime system.
@@ -2636,8 +2585,9 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
-                                                        String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+    Handle<JSObject> receiver,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -2646,13 +2596,9 @@
   // -----------------------------------
   Label miss;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(r1, &miss);
-
   // Check that the map of the object hasn't changed.
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ cmp(r3, Operand(Handle<Map>(receiver->map())));
-  __ b(ne, &miss);
+  __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss,
+              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform global security token check if needed.
   if (receiver->IsJSGlobalProxy()) {
@@ -2684,9 +2630,10 @@
 }
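// The CheckMap() calls in the two store stubs above fold the removed
// explicit smi check into the map check: DO_SMI_CHECK first branches to
// |miss| on a smi receiver, then the receiver's map is compared (with
// ALLOW_ELEMENT_TRANSITION_MAPS also accepting maps reachable through
// elements-kind transitions).  A sketch of the open-coded sequence it
// replaces:
//
//   __ JumpIfSmi(r1, &miss);
//   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
//   __ cmp(r3, Operand(Handle<Map>(object->map())));
//   __ b(ne, &miss);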
 
 
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
-                                                   JSGlobalPropertyCell* cell,
-                                                   String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+    Handle<GlobalObject> object,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -2704,7 +2651,7 @@
   // cell could have been deleted and reintroducing the global needs
   // to update the property details in the property dictionary of the
   // global object. We bail out to the runtime system to do that.
-  __ mov(r4, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(r4, Operand(cell));
   __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
   __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
   __ cmp(r5, r6);
@@ -2712,6 +2659,7 @@
 
   // Store the value in the cell.
   __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
+  // Cells are always rescanned, so no write barrier here.
 
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
@@ -2728,9 +2676,9 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
-                                                      JSObject* object,
-                                                      JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+                                                      Handle<JSObject> object,
+                                                      Handle<JSObject> last) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- lr    : return address
@@ -2746,15 +2694,8 @@
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
-    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
-                                                  GlobalObject::cast(last),
-                                                  name,
-                                                  r1,
-                                                  &miss);
-    if (cell->IsFailure()) {
-      miss.Unuse();
-      return cell;
-    }
+    GenerateCheckPropertyCell(
+        masm(), Handle<GlobalObject>::cast(last), name, r1, &miss);
   }
 
   // Return undefined if maps of the full prototype chain are still the
@@ -2766,14 +2707,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NONEXISTENT, heap()->empty_string());
+  return GetCode(NONEXISTENT, factory()->empty_string());
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -2790,24 +2731,19 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
   Label miss;
-
-  MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
-                                             callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
+                       &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2816,10 +2752,10 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* value,
-                                                   String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> value,
+                                                   Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -2836,9 +2772,9 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -2846,17 +2782,9 @@
   // -----------------------------------
   Label miss;
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(object,
-                          holder,
-                          &lookup,
-                          r0,
-                          r2,
-                          r3,
-                          r1,
-                          r4,
-                          name,
+  GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2866,11 +2794,12 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 String* name,
-                                                 bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name,
+    bool is_dont_delete) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -2878,18 +2807,12 @@
   // -----------------------------------
   Label miss;
 
-  // If the object is the holder then we know that it's a global
-  // object which can only happen for contextual calls. In this case,
-  // the receiver cannot be a smi.
-  if (object != holder) {
-    __ JumpIfSmi(r0, &miss);
-  }
-
   // Check that the map of the global has not changed.
+  __ JumpIfSmi(r0, &miss);
   CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
 
   // Get the value from the cell.
-  __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(r3, Operand(cell));
   __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -2913,9 +2836,9 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
-                                                     JSObject* receiver,
-                                                     JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+                                                     Handle<JSObject> receiver,
+                                                     Handle<JSObject> holder,
                                                      int index) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
@@ -2925,7 +2848,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
   GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
@@ -2936,11 +2859,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
-    String* name,
-    JSObject* receiver,
-    JSObject* holder,
-    AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -2949,16 +2872,11 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
-  MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
-                                             r4, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
+                       &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -2966,10 +2884,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
-                                                        JSObject* receiver,
-                                                        JSObject* holder,
-                                                        Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<JSFunction> value) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -2978,7 +2897,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
   GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
@@ -2990,9 +2909,10 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                           JSObject* holder,
-                                                           String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3001,20 +2921,12 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          r1,
-                          r0,
-                          r2,
-                          r3,
-                          r4,
-                          name,
+  GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3023,7 +2935,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3032,7 +2945,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
   GenerateLoadArrayLength(masm(), r1, r2, &miss);
@@ -3043,7 +2956,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3055,7 +2969,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
   GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
@@ -3068,7 +2982,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3080,7 +2995,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
 
   // Check the name hasn't changed.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
   GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
@@ -3092,33 +3007,29 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
   //  -- r1    : receiver
   // -----------------------------------
-  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
-  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(r1,
-                 r2,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+  __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3130,11 +3041,9 @@
   int receiver_count = receiver_maps->length();
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
   for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map(receiver_maps->at(current));
-    Handle<Code> code(handler_ics->at(current));
-    __ mov(ip, Operand(map));
+    __ mov(ip, Operand(receiver_maps->at(current)));
     __ cmp(r2, ip);
-    __ Jump(code, RelocInfo::CODE_TARGET, eq);
+    __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq);
   }
 
   __ bind(&miss);
@@ -3142,14 +3051,14 @@
   __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                        int index,
-                                                       Map* transition,
-                                                       String* name) {
+                                                       Handle<Map> transition,
+                                                       Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : name
@@ -3162,17 +3071,12 @@
   __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
 
   // Check that the name has not changed.
-  __ cmp(r1, Operand(Handle<String>(name)));
+  __ cmp(r1, Operand(name));
   __ b(ne, &miss);
 
   // r3 is used as scratch register. r1 and r2 keep their values if a jump to
   // the miss label is generated.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     r2, r1, r3,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss);
   __ bind(&miss);
 
   __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
@@ -3180,11 +3084,12 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -3192,29 +3097,25 @@
   //  -- lr    : return address
   //  -- r3    : scratch
   // -----------------------------------
-  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  MaybeObject* maybe_stub =
-      KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(r2,
-                 r3,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub =
+      KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
+
+  __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -3227,12 +3128,18 @@
 
   int receiver_count = receiver_maps->length();
   __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map(receiver_maps->at(current));
-    Handle<Code> code(handler_ics->at(current));
-    __ mov(ip, Operand(map));
+  for (int i = 0; i < receiver_count; ++i) {
+    __ mov(ip, Operand(receiver_maps->at(i)));
     __ cmp(r3, ip);
-    __ Jump(code, RelocInfo::CODE_TARGET, eq);
+    if (transitioned_maps->at(i).is_null()) {
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
+    } else {
+      Label next_map;
+      __ b(ne, &next_map);
+      __ mov(r3, Operand(transitioned_maps->at(i)));
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
+      __ bind(&next_map);
+    }
   }
 
   __ bind(&miss);
@@ -3240,11 +3147,12 @@
   __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
 
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+    Handle<JSFunction> function) {
   // ----------- S t a t e -------------
   //  -- r0    : argc
   //  -- r1    : constructor
@@ -3290,12 +3198,7 @@
   // r2: initial map
   // r7: undefined
   __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-  __ AllocateInNewSpace(r3,
-                        r4,
-                        r5,
-                        r6,
-                        &generic_stub_call,
-                        SIZE_IN_WORDS);
+  __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
 
   // Allocated the JSObject; now initialize the fields. Map is set to initial
   // map and properties and elements are set to empty fixed array.
@@ -3327,7 +3230,7 @@
   // r7: undefined
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  SharedFunctionInfo* shared = function->shared();
+  Handle<SharedFunctionInfo> shared(function->shared());
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       Label not_passed, next;
@@ -3454,6 +3357,7 @@
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3540,6 +3444,7 @@
       }
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3784,9 +3689,9 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 
   __ bind(&miss_force_generic);
-  Code* stub = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
 }
 
 
@@ -3880,6 +3785,7 @@
       }
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3943,6 +3849,7 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
+          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4082,6 +3989,7 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
+          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4157,9 +4065,9 @@
   __ Ret();
 
   __ bind(&miss_force_generic);
-  Code* stub = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
 }
 
 
@@ -4234,8 +4142,11 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
-                                                      bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+    MacroAssembler* masm,
+    bool is_js_array,
+    ElementsKind elements_kind,
+    KeyedAccessGrowMode grow_mode) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -4244,13 +4155,16 @@
   //  -- r3    : scratch
   //  -- r4    : scratch (elements)
   // -----------------------------------
-  Label miss_force_generic;
+  Label miss_force_generic, transition_elements_kind, grow, slow;
+  Label finish_store, check_capacity;
 
   Register value_reg = r0;
   Register key_reg = r1;
   Register receiver_reg = r2;
-  Register scratch = r3;
-  Register elements_reg = r4;
+  Register scratch = r4;
+  Register elements_reg = r3;
+  Register length_reg = r5;
+  Register scratch2 = r6;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller not to be a smi.
@@ -4258,16 +4172,13 @@
   // Check that the key is a smi.
   __ JumpIfNotSmi(key_reg, &miss_force_generic);
 
-  // Get the elements array and make sure it is a fast element array, not 'cow'.
-  __ ldr(elements_reg,
-         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-  __ CheckMap(elements_reg,
-              scratch,
-              Heap::kFixedArrayMapRootIndex,
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+  }
 
   // Check that the key is within bounds.
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
   if (is_js_array) {
     __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
   } else {
@@ -4275,17 +4186,46 @@
   }
   // Compare smis.
   __ cmp(key_reg, scratch);
-  __ b(hs, &miss_force_generic);
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    __ b(hs, &grow);
+  } else {
+    __ b(hs, &miss_force_generic);
+  }
 
-  __ add(scratch,
-         elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ str(value_reg,
-         MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ RecordWrite(scratch,
-                 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
-                 receiver_reg , elements_reg);
+  // Make sure elements is a fast element array, not 'cow'.
+  __ CheckMap(elements_reg,
+              scratch,
+              Heap::kFixedArrayMapRootIndex,
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
 
+  __ bind(&finish_store);
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ add(scratch,
+           elements_reg,
+           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+    __ add(scratch,
+           scratch,
+           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ str(value_reg, MemOperand(scratch));
+  } else {
+    ASSERT(elements_kind == FAST_ELEMENTS);
+    __ add(scratch,
+           elements_reg,
+           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+    __ add(scratch,
+           scratch,
+           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ str(value_reg, MemOperand(scratch));
+    __ mov(receiver_reg, value_reg);
+    __ RecordWrite(elements_reg,  // Object.
+                   scratch,       // Address.
+                   receiver_reg,  // Value.
+                   kLRHasNotBeenSaved,
+                   kDontSaveFPRegs);
+  }
   // value_reg (r0) is preserved.
   // Done.
   __ Ret();
@@ -4294,12 +4234,84 @@
   Handle<Code> ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    // Grow the array by a single element if possible.
+    __ bind(&grow);
+
+    // Make sure the array is only growing by a single element; anything else
+    // must be handled by the runtime. Flags already set by previous compare.
+    __ b(ne, &miss_force_generic);
+
+    // Check for the empty array, and preallocate a small backing store if
+    // possible.
+    __ ldr(length_reg,
+           FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ ldr(elements_reg,
+           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
+    __ b(ne, &check_capacity);
+
+    int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+    __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
+                          TAG_OBJECT);
+
+    __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+    __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+    __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+    __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+      __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
+    }
+
+    // Store the element at index zero.
+    __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
+
+    // Install the new backing store in the JSArray.
+    __ str(elements_reg,
+           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+                        scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+    // Increment the length of the array.
+    __ mov(length_reg, Operand(Smi::FromInt(1)));
+    __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ Ret();
+
+    __ bind(&check_capacity);
+    // Check for cow elements; in general they are not handled by this stub.
+    __ CheckMap(elements_reg,
+                scratch,
+                Heap::kFixedCOWArrayMapRootIndex,
+                &miss_force_generic,
+                DONT_DO_SMI_CHECK);
+
+    __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+    __ cmp(length_reg, scratch);
+    __ b(hs, &slow);
+
+    // Grow the array and finish the store.
+    __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
+    __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ jmp(&finish_store);
+
+    __ bind(&slow);
+    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+  }
 }
 
 
 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     MacroAssembler* masm,
-    bool is_js_array) {
+    bool is_js_array,
+    KeyedAccessGrowMode grow_mode) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -4309,16 +4321,18 @@
   //  -- r4    : scratch
   //  -- r5    : scratch
   // -----------------------------------
-  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+  Label miss_force_generic, transition_elements_kind, grow, slow;
+  Label finish_store, check_capacity;
 
   Register value_reg = r0;
   Register key_reg = r1;
   Register receiver_reg = r2;
-  Register scratch = r3;
-  Register elements_reg = r4;
-  Register mantissa_reg = r5;
-  Register exponent_reg = r6;
+  Register elements_reg = r3;
+  Register scratch1 = r4;
+  Register scratch2 = r5;
+  Register scratch3 = r6;
   Register scratch4 = r7;
+  Register length_reg = r7;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller not to be a smi.
@@ -4329,90 +4343,30 @@
 
   // Check that the key is within bounds.
   if (is_js_array) {
-    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
   } else {
-    __ ldr(scratch,
+    __ ldr(scratch1,
            FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
   }
   // Compare smis; unsigned compare catches both negative and out-of-bounds
   // indexes.
-  __ cmp(key_reg, scratch);
-  __ b(hs, &miss_force_generic);
-
-  // Handle smi values specially.
-  __ JumpIfSmi(value_reg, &smi_value);
-
-  // Ensure that the object is a heap number
-  __ CheckMap(value_reg,
-              scratch,
-              masm->isolate()->factory()->heap_number_map(),
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
-
-  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
-  // in the exponent.
-  __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
-  __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
-  __ cmp(exponent_reg, scratch);
-  __ b(ge, &maybe_nan);
-
-  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
-  __ bind(&have_double_value);
-  __ add(scratch, elements_reg,
-         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
-  __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ str(exponent_reg, FieldMemOperand(scratch, offset));
-  __ Ret();
-
-  __ bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  __ b(gt, &is_nan);
-  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-  __ cmp(mantissa_reg, Operand(0));
-  __ b(eq, &have_double_value);
-  __ bind(&is_nan);
-  // Load canonical NaN for storing into the double array.
-  uint64_t nan_int64 = BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
-  __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
-  __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
-  __ jmp(&have_double_value);
-
-  __ bind(&smi_value);
-  __ add(scratch, elements_reg,
-         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  __ add(scratch, scratch,
-         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
-  // scratch is now effective address of the double element
-
-  FloatingPointHelper::Destination destination;
-  if (CpuFeatures::IsSupported(VFP3)) {
-    destination = FloatingPointHelper::kVFPRegisters;
+  __ cmp(key_reg, scratch1);
+  if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+    __ b(hs, &grow);
   } else {
-    destination = FloatingPointHelper::kCoreRegisters;
+    __ b(hs, &miss_force_generic);
   }
 
-  Register untagged_value = receiver_reg;
-  __ SmiUntag(untagged_value, value_reg);
-  FloatingPointHelper::ConvertIntToDouble(
-      masm,
-      untagged_value,
-      destination,
-      d0,
-      mantissa_reg,
-      exponent_reg,
-      scratch4,
-      s2);
-  if (destination == FloatingPointHelper::kVFPRegisters) {
-    CpuFeatures::Scope scope(VFP3);
-    __ vstr(d0, scratch, 0);
-  } else {
-    __ str(mantissa_reg, MemOperand(scratch, 0));
-    __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
-  }
+  __ bind(&finish_store);
+  __ StoreNumberToDoubleElements(value_reg,
+                                 key_reg,
+                                 receiver_reg,
+                                 elements_reg,
+                                 scratch1,
+                                 scratch2,
+                                 scratch3,
+                                 scratch4,
+                                 &transition_elements_kind);
   __ Ret();
 
   // Handle store cache miss, replacing the ic with the generic stub.
@@ -4420,6 +4374,77 @@
   Handle<Code> ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    // Grow the array by a single element if possible.
+    __ bind(&grow);
+
+    // Make sure the array is only growing by a single element; anything else
+    // must be handled by the runtime. Flags already set by previous compare.
+    __ b(ne, &miss_force_generic);
+
+    // Transition on values that can't be stored in a FixedDoubleArray.
+    Label value_is_smi;
+    __ JumpIfSmi(value_reg, &value_is_smi);
+    __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+    __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+    __ b(ne, &transition_elements_kind);
+    __ bind(&value_is_smi);
+
+    // Check for the empty array, and preallocate a small backing store if
+    // possible.
+    __ ldr(length_reg,
+           FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ ldr(elements_reg,
+           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
+    __ b(ne, &check_capacity);
+
+    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+    __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
+                          TAG_OBJECT);
+
+    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+    // efficiency; they are guaranteed to be initialized before use.
+    __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
+    __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+    __ mov(scratch1,
+           Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+    __ str(scratch1,
+           FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+
+    // Install the new backing store in the JSArray.
+    __ str(elements_reg,
+           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+                        scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+    // Increment the length of the array.
+    __ mov(length_reg, Operand(Smi::FromInt(1)));
+    __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ jmp(&finish_store);
+
+    __ bind(&check_capacity);
+    // Make sure that the backing store can hold additional elements.
+    __ ldr(scratch1,
+           FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+    __ cmp(length_reg, scratch1);
+    __ b(hs, &slow);
+
+    // Grow the array and finish the store.
+    __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
+    __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ jmp(&finish_store);
+
+    __ bind(&slow);
+    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+  }
 }
 
 
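Two themes run through the stub-cache-arm.cc hunks above. First, the stub
compilers are handlified: they take and return Handle wrappers (Handle<Code>
rather than MaybeObject*), so the explicit allocation-failure plumbing
(result->IsFailure(), miss.Unuse()) disappears. Second, the keyed-store stubs
gain a grow mode: under ALLOW_JSARRAY_GROWTH, a store exactly one element past
the end of a JSArray is handled inline by growing the backing store instead of
missing to the runtime. Below is a minimal standalone sketch of that grow-path
decision logic; it assumes a deliberately simplified element model, not V8's
real backing stores, and every name in it is illustrative.

    // Sketch only: models the control flow of GenerateStoreFastElement's
    // grow path, with a std::vector standing in for a FixedArray store.
    #include <cstdint>
    #include <iostream>
    #include <vector>

    enum class StoreResult { kStored, kGrown, kMissForceGeneric };

    StoreResult StoreFastElement(std::vector<int32_t>* elements,
                                 uint32_t key,
                                 int32_t value,
                                 bool allow_growth) {
      if (key < elements->size()) {
        (*elements)[key] = value;  // in-bounds fast path: plain indexed store
        return StoreResult::kStored;
      }
      // Out of bounds: only a store exactly one past the end may grow the
      // array; anything else falls back to the generic (runtime) path.
      if (!allow_growth || key != elements->size()) {
        return StoreResult::kMissForceGeneric;
      }
      elements->push_back(value);  // grow the backing store, bump the length
      return StoreResult::kGrown;
    }

    int main() {
      std::vector<int32_t> a = {1, 2, 3};
      std::cout << static_cast<int>(StoreFastElement(&a, 3, 4, true))   // grows
                << static_cast<int>(StoreFastElement(&a, 10, 9, true))  // generic
                << std::endl;
      return 0;
    }

In the generated stub the same split shows up as the grow, check_capacity, and
slow labels, with one extra wrinkle: storing to an empty array preallocates a
small backing store (JSArray::kPreallocatedArrayElements) filled with holes.
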
diff --git a/src/array.js b/src/array.js
index 98fe3ac..daa75d5 100644
--- a/src/array.js
+++ b/src/array.js
@@ -27,7 +27,7 @@
 
 // This file relies on the fact that the following declarations have been made
 // in runtime.js:
-// const $Array = global.Array;
+// var $Array = global.Array;
 
 // -------------------------------------------------------------------
 
@@ -201,17 +201,14 @@
 
 
 function ConvertToLocaleString(e) {
-  if (e == null) {
+  if (IS_NULL_OR_UNDEFINED(e)) {
     return '';
   } else {
-    // e_obj's toLocaleString might be overwritten, check if it is a function.
-    // Call ToString if toLocaleString is not a function.
-    // See issue 877615.
+    // According to ES5, section 15.4.4.3, the toLocaleString conversion
+    // must throw a TypeError if ToObject(e).toLocaleString isn't
+    // callable.
     var e_obj = ToObject(e);
-    if (IS_SPEC_FUNCTION(e_obj.toLocaleString))
-      return ToString(e_obj.toLocaleString());
-    else
-      return ToString(e);
+    return %ToString(e_obj.toLocaleString());
   }
 }
 
@@ -331,8 +328,9 @@
     // would be the appropriate test.  We follow KJS in consulting the
     // prototype.
     var current = array[index];
-    if (!IS_UNDEFINED(current) || index in array)
+    if (!IS_UNDEFINED(current) || index in array) {
       deleted_elements[i] = current;
+    }
   }
 }
 
@@ -381,18 +379,31 @@
 
 
 function ArrayToString() {
-  if (!IS_ARRAY(this)) {
-    throw new $TypeError('Array.prototype.toString is not generic');
+  var array;
+  var func;
+  if (IS_ARRAY(this)) {
+    func = this.join;
+    if (func === ArrayJoin) {
+      return Join(this, this.length, ',', ConvertToString);
+    }
+    array = this;
+  } else {
+    array = ToObject(this);
+    func = array.join;
   }
-  return Join(this, this.length, ',', ConvertToString);
+  if (!IS_SPEC_FUNCTION(func)) {
+    return %_CallFunction(array, ObjectToString);
+  }
+  return %_CallFunction(array, func);
 }
 
 
 function ArrayToLocaleString() {
-  if (!IS_ARRAY(this)) {
-    throw new $TypeError('Array.prototype.toString is not generic');
-  }
-  return Join(this, this.length, ',', ConvertToLocaleString);
+  var array = ToObject(this);
+  var arrayLen = array.length;
+  var len = TO_UINT32(arrayLen);
+  if (len === 0) return "";
+  return Join(array, len, ',', ConvertToLocaleString);
 }
 
 
@@ -485,12 +496,12 @@
 
     if (j_complement <= i) {
       high = j;
-      while (keys[--high_counter] == j);
+      while (keys[--high_counter] == j) { }
       low = j_complement;
     }
     if (j_complement >= i) {
       low = i;
-      while (keys[++low_counter] == i);
+      while (keys[++low_counter] == i) { }
       high = len - i - 1;
     }
 
@@ -566,10 +577,11 @@
 
   var first = this[0];
 
-  if (IS_ARRAY(this))
+  if (IS_ARRAY(this)) {
     SmartMove(this, 0, 1, len, 0);
-  else
+  } else {
     SimpleMove(this, 0, 1, len, 0);
+  }
 
   this.length = len - 1;
 
@@ -586,10 +598,11 @@
   var len = TO_UINT32(this.length);
   var num_arguments = %_ArgumentsLength();
 
-  if (IS_ARRAY(this))
+  if (IS_ARRAY(this)) {
     SmartMove(this, 0, 0, len, num_arguments);
-  else
+  } else {
     SimpleMove(this, 0, 0, len, num_arguments);
+  }
 
   for (var i = 0; i < num_arguments; i++) {
     this[i] = %_Arguments(i);
@@ -744,7 +757,7 @@
   }
   var receiver = %GetDefaultReceiver(comparefn);
 
-  function InsertionSort(a, from, to) {
+  var InsertionSort = function InsertionSort(a, from, to) {
     for (var i = from + 1; i < to; i++) {
       var element = a[i];
       for (var j = i - 1; j >= from; j--) {
@@ -758,9 +771,9 @@
       }
       a[j + 1] = element;
     }
-  }
+  };
 
-  function QuickSort(a, from, to) {
+  var QuickSort = function QuickSort(a, from, to) {
     // Insertion sort is faster for short arrays.
     if (to - from <= 10) {
       InsertionSort(a, from, to);
@@ -828,12 +841,12 @@
     }
     QuickSort(a, from, low_end);
     QuickSort(a, high_start, to);
-  }
+  };
 
   // Copy elements in the range 0..length from obj's prototype chain
   // to obj itself, if obj has holes. Return one more than the maximal index
   // of a prototype property.
-  function CopyFromPrototype(obj, length) {
+  var CopyFromPrototype = function CopyFromPrototype(obj, length) {
     var max = 0;
     for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
       var indices = %GetArrayKeys(proto, length);
@@ -860,12 +873,12 @@
       }
     }
     return max;
-  }
+  };
 
   // Set a value of "undefined" on all indices in the range from..to
   // where a prototype of obj has an element. I.e., shadow all prototype
   // elements in that range.
-  function ShadowPrototypeElements(obj, from, to) {
+  var ShadowPrototypeElements = function(obj, from, to) {
     for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
       var indices = %GetArrayKeys(proto, to);
       if (indices.length > 0) {
@@ -888,9 +901,9 @@
         }
       }
     }
-  }
+  };
 
-  function SafeRemoveArrayHoles(obj) {
+  var SafeRemoveArrayHoles = function SafeRemoveArrayHoles(obj) {
     // Copy defined elements from the end to fill in all holes and undefineds
     // in the beginning of the array.  Write undefineds and holes at the end
     // after loop is finished.
@@ -945,7 +958,7 @@
 
     // Return the number of defined elements.
     return first_undefined;
-  }
+  };
 
   var length = TO_UINT32(this.length);
   if (length < 2) return this;
@@ -993,25 +1006,32 @@
                         ["Array.prototype.filter"]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping and side effects are visible.
+  var array = ToObject(this);
+  var length = ToUint32(array.length);
+
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
+  } else if (!IS_SPEC_OBJECT(receiver)) {
+    receiver = ToObject(receiver);
   }
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping.
-  var length = ToUint32(this.length);
-  var result = [];
-  var result_length = 0;
+
+  var result = new $Array();
+  var accumulator = new InternalArray();
+  var accumulator_length = 0;
   for (var i = 0; i < length; i++) {
-    var current = this[i];
-    if (!IS_UNDEFINED(current) || i in this) {
-      if (%_CallFunction(receiver, current, i, this, f)) {
-        result[result_length++] = current;
+    if (i in array) {
+      var element = array[i];
+      if (%_CallFunction(receiver, element, i, array, f)) {
+        accumulator[accumulator_length++] = element;
       }
     }
   }
+  %MoveArrayContents(accumulator, result);
   return result;
 }
 
@@ -1022,19 +1042,24 @@
                         ["Array.prototype.forEach"]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping and side effects are visible.
+  var array = ToObject(this);
+  var length = TO_UINT32(array.length);
+
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
+  } else if (!IS_SPEC_OBJECT(receiver)) {
+    receiver = ToObject(receiver);
   }
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping.
-  var length =  TO_UINT32(this.length);
+
   for (var i = 0; i < length; i++) {
-    var current = this[i];
-    if (!IS_UNDEFINED(current) || i in this) {
-      %_CallFunction(receiver, current, i, this, f);
+    if (i in array) {
+      var element = array[i];
+      %_CallFunction(receiver, element, i, array, f);
     }
   }
 }
@@ -1048,19 +1073,24 @@
                         ["Array.prototype.some"]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping and side effects are visible.
+  var array = ToObject(this);
+  var length = TO_UINT32(array.length);
+
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
+  } else if (!IS_SPEC_OBJECT(receiver)) {
+    receiver = ToObject(receiver);
   }
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping.
-  var length = TO_UINT32(this.length);
+
   for (var i = 0; i < length; i++) {
-    var current = this[i];
-    if (!IS_UNDEFINED(current) || i in this) {
-      if (%_CallFunction(receiver, current, i, this, f)) return true;
+    if (i in array) {
+      var element = array[i];
+      if (%_CallFunction(receiver, element, i, array, f)) return true;
     }
   }
   return false;
@@ -1073,19 +1103,24 @@
                         ["Array.prototype.every"]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping and side effects are visible.
+  var array = ToObject(this);
+  var length = TO_UINT32(array.length);
+
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
+  } else if (!IS_SPEC_OBJECT(receiver)) {
+    receiver = ToObject(receiver);
   }
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping.
-  var length = TO_UINT32(this.length);
+
   for (var i = 0; i < length; i++) {
-    var current = this[i];
-    if (!IS_UNDEFINED(current) || i in this) {
-      if (!%_CallFunction(receiver, current, i, this, f)) return false;
+    if (i in array) {
+      var element = array[i];
+      if (!%_CallFunction(receiver, element, i, array, f)) return false;
     }
   }
   return true;
@@ -1097,21 +1132,26 @@
                         ["Array.prototype.map"]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping and side effects are visible.
+  var array = ToObject(this);
+  var length = TO_UINT32(array.length);
+
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
+  } else if (!IS_SPEC_OBJECT(receiver)) {
+    receiver = ToObject(receiver);
   }
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping.
-  var length = TO_UINT32(this.length);
+
   var result = new $Array();
   var accumulator = new InternalArray(length);
   for (var i = 0; i < length; i++) {
-    var current = this[i];
-    if (!IS_UNDEFINED(current) || i in this) {
-      accumulator[i] = %_CallFunction(receiver, current, i, this, f);
+    if (i in array) {
+      var element = array[i];
+      accumulator[i] = %_CallFunction(receiver, element, i, array, f);
     }
   }
   %MoveArrayContents(accumulator, result);
@@ -1245,19 +1285,20 @@
                         ["Array.prototype.reduce"]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping and side effects are visible.
+  var array = ToObject(this);
+  var length = ToUint32(array.length);
+
   if (!IS_SPEC_FUNCTION(callback)) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
 
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping.
-  var length = ToUint32(this.length);
   var i = 0;
-
   find_initial: if (%_ArgumentsLength() < 2) {
     for (; i < length; i++) {
-      current = this[i];
-      if (!IS_UNDEFINED(current) || i in this) {
+      current = array[i];
+      if (!IS_UNDEFINED(current) || i in array) {
         i++;
         break find_initial;
       }
@@ -1267,9 +1308,9 @@
 
   var receiver = %GetDefaultReceiver(callback);
   for (; i < length; i++) {
-    var element = this[i];
-    if (!IS_UNDEFINED(element) || i in this) {
-      current = %_CallFunction(receiver, current, element, i, this, callback);
+    if (i in array) {
+      var element = array[i];
+      current = %_CallFunction(receiver, current, element, i, array, callback);
     }
   }
   return current;
@@ -1281,15 +1322,20 @@
                         ["Array.prototype.reduceRight"]);
   }
 
+  // Pull out the length so that side effects are visible before the
+  // callback function is checked.
+  var array = ToObject(this);
+  var length = ToUint32(array.length);
+
   if (!IS_SPEC_FUNCTION(callback)) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
-  var i = ToUint32(this.length) - 1;
 
+  var i = length - 1;
   find_initial: if (%_ArgumentsLength() < 2) {
     for (; i >= 0; i--) {
-      current = this[i];
-      if (!IS_UNDEFINED(current) || i in this) {
+      current = array[i];
+      if (!IS_UNDEFINED(current) || i in array) {
         i--;
         break find_initial;
       }
@@ -1299,9 +1345,9 @@
 
   var receiver = %GetDefaultReceiver(callback);
   for (; i >= 0; i--) {
-    var element = this[i];
-    if (!IS_UNDEFINED(element) || i in this) {
-      current = %_CallFunction(receiver, current, element, i, this, callback);
+    if (i in array) {
+      var element = array[i];
+      current = %_CallFunction(receiver, current, element, i, array, callback);
     }
   }
   return current;
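
The hunks above rewrite the Array.prototype iteration builtins (filter,
forEach, some, every, map, reduce, reduceRight) to the ES5 contract: each now
begins with ToObject(this), samples the length once so later length mutations
cannot change the loop bounds, coerces a primitive receiver with ToObject, and
tests "i in array" so holes are skipped rather than sniffing for undefined
values. ArrayFilter additionally accumulates into an InternalArray and moves
the result over with %MoveArrayContents, so intermediate writes never touch a
user-visible array. A sketch of that iteration contract, assuming a std::map
as a deliberately simplified stand-in for a sparse JS array:

    // Sketch only: the ES5 iteration contract used by the rewritten builtins.
    #include <cstdint>
    #include <map>
    #include <vector>

    using SparseArray = std::map<uint32_t, double>;  // index -> element

    std::vector<double> FilterPositive(const SparseArray& array,
                                       uint32_t length) {
      // "length" was sampled once up front; later mutations of the array
      // must not change the loop bounds.
      std::vector<double> accumulator;  // plays the role of InternalArray
      for (uint32_t i = 0; i < length; ++i) {
        auto it = array.find(i);          // presence test, i.e. "i in array"
        if (it == array.end()) continue;  // holes are skipped entirely
        if (it->second > 0.0) accumulator.push_back(it->second);
      }
      return accumulator;
    }

    int main() {
      SparseArray a = {{0, -1.5}, {2, 3.0}};  // index 1 is a hole
      return static_cast<int>(FilterPositive(a, 3).size());  // one element passes
    }
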
@@ -1327,7 +1373,7 @@
 
   var specialFunctions = %SpecialArrayFunctions({});
 
-  function getFunction(name, jsBuiltin, len) {
+  var getFunction = function(name, jsBuiltin, len) {
     var f = jsBuiltin;
     if (specialFunctions.hasOwnProperty(name)) {
       f = specialFunctions[name];
@@ -1336,13 +1382,13 @@
       %FunctionSetLength(f, len);
     }
     return f;
-  }
+  };
 
   // Set up non-enumerable functions of the Array.prototype object and
   // set their names.
   // Manipulate the length of some of the functions to meet
   // expectations set by ECMA-262 or Mozilla.
-  InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
+  InstallFunctions($Array.prototype, DONT_ENUM, $Array(
     "toString", getFunction("toString", ArrayToString),
     "toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
     "join", getFunction("join", ArrayJoin),
diff --git a/src/assembler.cc b/src/assembler.cc
index ad5f350..4944202 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 Sun Microsystems Inc.
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
 // All Rights Reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -30,23 +30,43 @@
 
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
-#include "v8.h"
+#include "assembler.h"
 
-#include "arguments.h"
+#include <math.h>  // For cos, log, pow, sin, tan, etc.
+#include "api.h"
+#include "builtins.h"
+#include "counters.h"
+#include "cpu.h"
+#include "debug.h"
 #include "deoptimizer.h"
 #include "execution.h"
-#include "ic-inl.h"
-#include "factory.h"
-#include "runtime.h"
-#include "runtime-profiler.h"
-#include "serialize.h"
-#include "stub-cache.h"
-#include "regexp-stack.h"
-#include "ast.h"
-#include "regexp-macro-assembler.h"
+#include "ic.h"
+#include "isolate.h"
+#include "jsregexp.h"
+#include "lazy-instance.h"
 #include "platform.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-stack.h"
+#include "runtime.h"
+#include "serialize.h"
+#include "store-buffer-inl.h"
+#include "stub-cache.h"
+#include "token.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/assembler-mips-inl.h"
+#else
+#error "Unknown architecture."
+#endif
+
 // Include native regexp-macro-assembler.
 #ifndef V8_INTERPRETED_REGEXP
 #if V8_TARGET_ARCH_IA32
@@ -65,15 +85,36 @@
 namespace v8 {
 namespace internal {
 
+// -----------------------------------------------------------------------------
+// Common double constants.
 
-const double DoubleConstant::min_int = kMinInt;
-const double DoubleConstant::one_half = 0.5;
-const double DoubleConstant::minus_zero = -0.0;
-const double DoubleConstant::uint8_max_value = 255;
-const double DoubleConstant::zero = 0.0;
-const double DoubleConstant::canonical_non_hole_nan = OS::nan_value();
-const double DoubleConstant::the_hole_nan = BitCast<double>(kHoleNanInt64);
-const double DoubleConstant::negative_infinity = -V8_INFINITY;
+struct DoubleConstant BASE_EMBEDDED {
+  double min_int;
+  double one_half;
+  double minus_zero;
+  double zero;
+  double uint8_max_value;
+  double negative_infinity;
+  double canonical_non_hole_nan;
+  double the_hole_nan;
+};
+
+struct InitializeDoubleConstants {
+  static void Construct(DoubleConstant* double_constants) {
+    double_constants->min_int = kMinInt;
+    double_constants->one_half = 0.5;
+    double_constants->minus_zero = -0.0;
+    double_constants->uint8_max_value = 255;
+    double_constants->zero = 0.0;
+    double_constants->canonical_non_hole_nan = OS::nan_value();
+    double_constants->the_hole_nan = BitCast<double>(kHoleNanInt64);
+    double_constants->negative_infinity = -V8_INFINITY;
+  }
+};
+
+static LazyInstance<DoubleConstant, InitializeDoubleConstants>::type
+    double_constants = LAZY_INSTANCE_INITIALIZER;
+
 const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
 
 // -----------------------------------------------------------------------------
@@ -516,6 +557,7 @@
 
 
 RelocIterator::RelocIterator(Code* code, int mode_mask) {
+  rinfo_.host_ = code;
   rinfo_.pc_ = code->instruction_start();
   rinfo_.data_ = 0;
   // Relocation info is read backwards.
@@ -736,9 +778,38 @@
   : address_(table_ref.address()) {}
 
 
+ExternalReference ExternalReference::
+    incremental_marking_record_write_function(Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate,
+      FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
+}
+
+
+ExternalReference ExternalReference::
+    incremental_evacuation_record_write_function(Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate,
+      FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
+}
+
+
+ExternalReference ExternalReference::
+    store_buffer_overflow_function(Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate,
+      FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
+}
+
+
+ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
+}
+
+
 ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
-  return ExternalReference(Redirect(isolate,
-                                    FUNCTION_ADDR(Runtime::PerformGC)));
+  return
+      ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
 }
 
 
@@ -764,6 +835,17 @@
 }
 
 
+ExternalReference ExternalReference::get_date_field_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
+}
+
+
+ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
+  return ExternalReference(isolate->date_cache()->stamp_address());
+}
+
+
 ExternalReference ExternalReference::transcendental_cache_array_address(
     Isolate* isolate) {
   return ExternalReference(
@@ -785,11 +867,6 @@
 }
 
 
-ExternalReference ExternalReference::global_contexts_list(Isolate* isolate) {
-  return ExternalReference(isolate->heap()->global_contexts_list_address());
-}
-
-
 ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
   return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
 }
@@ -802,19 +879,8 @@
 }
 
 
-ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) {
-  return ExternalReference(isolate->factory()->the_hole_value().location());
-}
-
-
-ExternalReference ExternalReference::arguments_marker_location(
-    Isolate* isolate) {
-  return ExternalReference(isolate->factory()->arguments_marker().location());
-}
-
-
-ExternalReference ExternalReference::roots_address(Isolate* isolate) {
-  return ExternalReference(isolate->heap()->roots_address());
+ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
+  return ExternalReference(isolate->heap()->roots_array_start());
 }
 
 
@@ -840,9 +906,14 @@
 }
 
 
+ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
+  return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
+}
+
+
 ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
-  Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask());
-  return ExternalReference(mask);
+  return ExternalReference(reinterpret_cast<Address>(
+      isolate->heap()->NewSpaceMask()));
 }
 
 
@@ -888,49 +959,49 @@
 
 ExternalReference ExternalReference::address_of_min_int() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::min_int)));
+      &double_constants.Pointer()->min_int));
 }
 
 
 ExternalReference ExternalReference::address_of_one_half() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::one_half)));
+      &double_constants.Pointer()->one_half));
 }
 
 
 ExternalReference ExternalReference::address_of_minus_zero() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::minus_zero)));
+      &double_constants.Pointer()->minus_zero));
 }
 
 
 ExternalReference ExternalReference::address_of_zero() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::zero)));
+      &double_constants.Pointer()->zero));
 }
 
 
 ExternalReference ExternalReference::address_of_uint8_max_value() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::uint8_max_value)));
+      &double_constants.Pointer()->uint8_max_value));
 }
 
 
 ExternalReference ExternalReference::address_of_negative_infinity() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::negative_infinity)));
+      &double_constants.Pointer()->negative_infinity));
 }
 
 
 ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::canonical_non_hole_nan)));
+      &double_constants.Pointer()->canonical_non_hole_nan));
 }
 
 
 ExternalReference ExternalReference::address_of_the_hole_nan() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::the_hole_nan)));
+      &double_constants.Pointer()->the_hole_nan));
 }
 
 
@@ -1025,6 +1096,11 @@
 }
 
 
+static double math_tan_double(double x) {
+  return tan(x);
+}
+
+
 static double math_log_double(double x) {
   return log(x);
 }
@@ -1046,6 +1122,14 @@
 }
 
 
+ExternalReference ExternalReference::math_tan_double_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate,
+                                    FUNCTION_ADDR(math_tan_double),
+                                    BUILTIN_FP_CALL));
+}
+
+
 ExternalReference ExternalReference::math_log_double_function(
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
@@ -1074,17 +1158,9 @@
 
 
 double power_double_double(double x, double y) {
-  int y_int = static_cast<int>(y);
-  if (y == y_int) {
-    return power_double_int(x, y_int);  // Returns 1.0 for exponent 0.
-  }
-  if (!isinf(x)) {
-    if (y == 0.5) return sqrt(x + 0.0);  // -0 must be converted to +0.
-    if (y == -0.5) return 1.0 / sqrt(x + 0.0);
-  }
-  if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
-    return OS::nan_value();
-  }
+  // The checks for special cases can be dropped on ia32 because they have
+  // already been done in generated code before bailing out here.
+  if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
   return pow(x, y);
 }
 
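With those branches moved into generated code, only the NaN rules from ES5 15.8.2.13 remain in the runtime fallback. For reference, a self-contained sketch of the full set of checks the generated code is now expected to perform first; standard <cmath> stands in for V8's isinf/isnan and OS::nan_value(), and the function name here is ours.

#include <cmath>
#include <cstdio>
#include <limits>

static double PowerDoubleDouble(double x, double y) {
  int y_int = static_cast<int>(y);
  if (y == y_int) {
    return std::pow(x, y_int);  // Exact for integral exponents; 1.0 for 0.
  }
  if (!std::isinf(x)) {
    if (y == 0.5) return std::sqrt(x + 0.0);  // -0 must be converted to +0.
    if (y == -0.5) return 1.0 / std::sqrt(x + 0.0);
  }
  // ES5 15.8.2.13: NaN exponent, or base +/-1 with infinite exponent => NaN.
  if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
    return std::numeric_limits<double>::quiet_NaN();
  }
  return std::pow(x, y);
}

int main() {
  std::printf("%g\n", PowerDoubleDouble(4.0, 0.5));        // 2
  std::printf("%g\n", PowerDoubleDouble(-1.0, INFINITY));  // nan
}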
@@ -1111,6 +1187,23 @@
 }
 
 
+bool EvalComparison(Token::Value op, double op1, double op2) {
+  ASSERT(Token::IsCompareOp(op));
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT: return (op1 == op2);
+    case Token::NE: return (op1 != op2);
+    case Token::LT: return (op1 < op2);
+    case Token::GT: return (op1 > op2);
+    case Token::LTE: return (op1 <= op2);
+    case Token::GTE: return (op1 >= op2);
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
 ExternalReference ExternalReference::double_fp_operation(
     Token::Value operation, Isolate* isolate) {
   typedef double BinaryFPOperation(double x, double y);
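The new EvalComparison gives constant folding a single entry point for all six numeric comparison tokens, with IEEE semantics inherited directly from the C++ operators. A minimal standalone sketch; the Token enum here is a stand-in for the real one in token.h.

#include <cassert>
#include <cmath>

// Stand-in for v8::internal::Token; only the comparison members.
struct Token {
  enum Value { EQ, EQ_STRICT, NE, LT, GT, LTE, GTE };
};

static bool EvalComparison(Token::Value op, double op1, double op2) {
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT: return op1 == op2;
    case Token::NE:        return op1 != op2;
    case Token::LT:        return op1 < op2;
    case Token::GT:        return op1 > op2;
    case Token::LTE:       return op1 <= op2;
    case Token::GTE:       return op1 >= op2;
  }
  return false;  // Unreachable for valid comparison ops.
}

int main() {
  assert(EvalComparison(Token::LT, 1.0, 2.0));
  assert(!EvalComparison(Token::GTE, 1.0, 2.0));
  // IEEE semantics fall out of the C++ operators: NaN compares unequal.
  assert(!EvalComparison(Token::EQ, std::nan(""), std::nan("")));
}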
diff --git a/src/assembler.h b/src/assembler.h
index d58034d..918a2a6 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -30,19 +30,27 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 #ifndef V8_ASSEMBLER_H_
 #define V8_ASSEMBLER_H_
 
+#include "v8.h"
+
 #include "allocation.h"
+#include "builtins.h"
 #include "gdb-jit.h"
+#include "isolate.h"
 #include "runtime.h"
 #include "token.h"
 
 namespace v8 {
+
+class ApiFunction;
+
 namespace internal {
 
+struct StatsCounter;
 const unsigned kNoASTId = -1;
 // -----------------------------------------------------------------------------
 // Platform independent assembler base class.
@@ -59,21 +67,6 @@
   int jit_cookie_;
 };
 
-// -----------------------------------------------------------------------------
-// Common double constants.
-
-class DoubleConstant: public AllStatic {
- public:
-  static const double min_int;
-  static const double one_half;
-  static const double minus_zero;
-  static const double zero;
-  static const double uint8_max_value;
-  static const double negative_infinity;
-  static const double canonical_non_hole_nan;
-  static const double the_hole_nan;
-};
-
 
 // -----------------------------------------------------------------------------
 // Labels represent pc locations; they are typically jump or call targets.
@@ -143,6 +136,9 @@
 };
 
 
+enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
+
+
 // -----------------------------------------------------------------------------
 // Relocation information
 
@@ -216,8 +212,9 @@
 
 
   RelocInfo() {}
-  RelocInfo(byte* pc, Mode rmode, intptr_t data)
-      : pc_(pc), rmode_(rmode), data_(data) {
+
+  RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
+      : pc_(pc), rmode_(rmode), data_(data), host_(host) {
   }
 
   static inline bool IsConstructCall(Mode mode) {
@@ -226,6 +223,9 @@
   static inline bool IsCodeTarget(Mode mode) {
     return mode <= LAST_CODE_ENUM;
   }
+  static inline bool IsEmbeddedObject(Mode mode) {
+    return mode == EMBEDDED_OBJECT;
+  }
   // Is the relocation mode affected by GC?
   static inline bool IsGCRelocMode(Mode mode) {
     return mode <= LAST_GCED_ENUM;
@@ -258,12 +258,13 @@
   void set_pc(byte* pc) { pc_ = pc; }
   Mode rmode() const {  return rmode_; }
   intptr_t data() const { return data_; }
+  Code* host() const { return host_; }
 
   // Apply a relocation by delta bytes
   INLINE(void apply(intptr_t delta));
 
   // Is the pointer this relocation info refers to coded like a plain pointer
-  // or is it strange in some way (eg relative or patched into a series of
+  // or is it strange in some way (e.g. relative or patched into a series of
   // instructions).
   bool IsCodedSpecially();
 
@@ -271,14 +272,17 @@
   // this relocation applies to;
   // can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
   INLINE(Address target_address());
-  INLINE(void set_target_address(Address target));
+  INLINE(void set_target_address(Address target,
+                                 WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
   INLINE(Object* target_object());
   INLINE(Handle<Object> target_object_handle(Assembler* origin));
   INLINE(Object** target_object_address());
-  INLINE(void set_target_object(Object* target));
+  INLINE(void set_target_object(Object* target,
+                                WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
   INLINE(JSGlobalPropertyCell* target_cell());
   INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
-  INLINE(void set_target_cell(JSGlobalPropertyCell* cell));
+  INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
+                              WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
 
 
   // Read the address of the word containing the target_address in an
@@ -353,13 +357,14 @@
   byte* pc_;
   Mode rmode_;
   intptr_t data_;
+  Code* host_;
 #ifdef V8_TARGET_ARCH_MIPS
   // Code and Embedded Object pointers in mips are stored split
   // across two consecutive 32-bit instructions. Heap management
   // routines expect to access these pointers indirectly. The following
   // location provides a place for these pointers to exist naturally
   // when accessed via the Iterator.
-  Object *reconstructed_obj_ptr_;
+  Object* reconstructed_obj_ptr_;
   // External-reference pointers are also split across instruction-pairs
   // in mips, but are accessed via indirect pointers. This location
   // provides a place for that pointer to exist naturally. Its address
@@ -561,6 +566,13 @@
   // pattern. This means that they have to be added to the
   // ExternalReferenceTable in serialize.cc manually.
 
+  static ExternalReference incremental_marking_record_write_function(
+      Isolate* isolate);
+  static ExternalReference incremental_evacuation_record_write_function(
+      Isolate* isolate);
+  static ExternalReference store_buffer_overflow_function(
+      Isolate* isolate);
+  static ExternalReference flush_icache_function(Isolate* isolate);
   static ExternalReference perform_gc_function(Isolate* isolate);
   static ExternalReference fill_heap_number_with_random_function(
       Isolate* isolate);
@@ -568,23 +580,19 @@
   static ExternalReference transcendental_cache_array_address(Isolate* isolate);
   static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
 
+  static ExternalReference get_date_field_function(Isolate* isolate);
+  static ExternalReference date_cache_stamp(Isolate* isolate);
+
   // Deoptimization support.
   static ExternalReference new_deoptimizer_function(Isolate* isolate);
   static ExternalReference compute_output_frames_function(Isolate* isolate);
-  static ExternalReference global_contexts_list(Isolate* isolate);
 
   // Static data in the keyed lookup cache.
   static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
   static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
 
-  // Static variable Factory::the_hole_value.location()
-  static ExternalReference the_hole_value_location(Isolate* isolate);
-
-  // Static variable Factory::arguments_marker.location()
-  static ExternalReference arguments_marker_location(Isolate* isolate);
-
-  // Static variable Heap::roots_address()
-  static ExternalReference roots_address(Isolate* isolate);
+  // Static variable Heap::roots_array_start()
+  static ExternalReference roots_array_start(Isolate* isolate);
 
   // Static variable StackGuard::address_of_jslimit()
   static ExternalReference address_of_stack_limit(Isolate* isolate);
@@ -606,6 +614,10 @@
   static ExternalReference new_space_start(Isolate* isolate);
   static ExternalReference new_space_mask(Isolate* isolate);
   static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
+  static ExternalReference new_space_mark_bits(Isolate* isolate);
+
+  // Write barrier.
+  static ExternalReference store_buffer_top(Isolate* isolate);
 
   // Used for fast allocation in generated code.
   static ExternalReference new_space_allocation_top_address(Isolate* isolate);
@@ -635,6 +647,7 @@
 
   static ExternalReference math_sin_double_function(Isolate* isolate);
   static ExternalReference math_cos_double_function(Isolate* isolate);
+  static ExternalReference math_tan_double_function(Isolate* isolate);
   static ExternalReference math_log_double_function(Isolate* isolate);
 
   Address address() const {return reinterpret_cast<Address>(address_);}
@@ -799,33 +812,33 @@
 // -----------------------------------------------------------------------------
 // Utility functions
 
-static inline bool is_intn(int x, int n)  {
+inline bool is_intn(int x, int n)  {
   return -(1 << (n-1)) <= x && x < (1 << (n-1));
 }
 
-static inline bool is_int8(int x)  { return is_intn(x, 8); }
-static inline bool is_int16(int x)  { return is_intn(x, 16); }
-static inline bool is_int18(int x)  { return is_intn(x, 18); }
-static inline bool is_int24(int x)  { return is_intn(x, 24); }
+inline bool is_int8(int x)  { return is_intn(x, 8); }
+inline bool is_int16(int x)  { return is_intn(x, 16); }
+inline bool is_int18(int x)  { return is_intn(x, 18); }
+inline bool is_int24(int x)  { return is_intn(x, 24); }
 
-static inline bool is_uintn(int x, int n) {
+inline bool is_uintn(int x, int n) {
   return (x & -(1 << n)) == 0;
 }
 
-static inline bool is_uint2(int x)  { return is_uintn(x, 2); }
-static inline bool is_uint3(int x)  { return is_uintn(x, 3); }
-static inline bool is_uint4(int x)  { return is_uintn(x, 4); }
-static inline bool is_uint5(int x)  { return is_uintn(x, 5); }
-static inline bool is_uint6(int x)  { return is_uintn(x, 6); }
-static inline bool is_uint8(int x)  { return is_uintn(x, 8); }
-static inline bool is_uint10(int x)  { return is_uintn(x, 10); }
-static inline bool is_uint12(int x)  { return is_uintn(x, 12); }
-static inline bool is_uint16(int x)  { return is_uintn(x, 16); }
-static inline bool is_uint24(int x)  { return is_uintn(x, 24); }
-static inline bool is_uint26(int x)  { return is_uintn(x, 26); }
-static inline bool is_uint28(int x)  { return is_uintn(x, 28); }
+inline bool is_uint2(int x)  { return is_uintn(x, 2); }
+inline bool is_uint3(int x)  { return is_uintn(x, 3); }
+inline bool is_uint4(int x)  { return is_uintn(x, 4); }
+inline bool is_uint5(int x)  { return is_uintn(x, 5); }
+inline bool is_uint6(int x)  { return is_uintn(x, 6); }
+inline bool is_uint8(int x)  { return is_uintn(x, 8); }
+inline bool is_uint10(int x)  { return is_uintn(x, 10); }
+inline bool is_uint12(int x)  { return is_uintn(x, 12); }
+inline bool is_uint16(int x)  { return is_uintn(x, 16); }
+inline bool is_uint24(int x)  { return is_uintn(x, 24); }
+inline bool is_uint26(int x)  { return is_uintn(x, 26); }
+inline bool is_uint28(int x)  { return is_uintn(x, 28); }
 
-static inline int NumberOfBitsSet(uint32_t x) {
+inline int NumberOfBitsSet(uint32_t x) {
   unsigned int num_bits_set;
   for (num_bits_set = 0; x; x >>= 1) {
     num_bits_set += x & 1;
@@ -833,6 +846,8 @@
   return num_bits_set;
 }
 
+bool EvalComparison(Token::Value op, double op1, double op2);
+
 // Computes pow(x, y) with the special cases in the spec for Math.pow.
 double power_double_int(double x, int y);
 double power_double_double(double x, double y);
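Dropping static from these header-defined helpers relies on inline linkage alone, so the linker folds the per-translation-unit copies instead of every object file carrying its own. Their semantics, restated as a self-contained check of the two predicates:

#include <cassert>

inline bool is_intn(int x, int n) {
  // x fits an n-bit two's-complement field: -2^(n-1) <= x < 2^(n-1).
  return -(1 << (n - 1)) <= x && x < (1 << (n - 1));
}

inline bool is_uintn(int x, int n) {
  // x fits an n-bit unsigned field: every bit at or above bit n is clear.
  return (x & -(1 << n)) == 0;
}

int main() {
  assert(is_intn(-128, 8) && is_intn(127, 8) && !is_intn(128, 8));
  assert(is_uintn(255, 8) && !is_uintn(256, 8) && !is_uintn(-1, 8));
}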
diff --git a/src/ast-inl.h b/src/ast-inl.h
deleted file mode 100644
index 731ad2f..0000000
--- a/src/ast-inl.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_AST_INL_H_
-#define V8_AST_INL_H_
-
-#include "v8.h"
-
-#include "ast.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-
-SwitchStatement::SwitchStatement(Isolate* isolate,
-                                 ZoneStringList* labels)
-    : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
-      tag_(NULL), cases_(NULL) {
-}
-
-
-Block::Block(Isolate* isolate,
-             ZoneStringList* labels,
-             int capacity,
-             bool is_initializer_block)
-    : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
-      statements_(capacity),
-      is_initializer_block_(is_initializer_block),
-      block_scope_(NULL) {
-}
-
-
-BreakableStatement::BreakableStatement(Isolate* isolate,
-                                       ZoneStringList* labels,
-                                       Type type)
-    : labels_(labels),
-      type_(type),
-      entry_id_(GetNextId(isolate)),
-      exit_id_(GetNextId(isolate)) {
-  ASSERT(labels == NULL || labels->length() > 0);
-}
-
-
-IterationStatement::IterationStatement(Isolate* isolate, ZoneStringList* labels)
-    : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
-      body_(NULL),
-      continue_target_(),
-      osr_entry_id_(GetNextId(isolate)) {
-}
-
-
-DoWhileStatement::DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
-    : IterationStatement(isolate, labels),
-      cond_(NULL),
-      condition_position_(-1),
-      continue_id_(GetNextId(isolate)),
-      back_edge_id_(GetNextId(isolate)) {
-}
-
-
-WhileStatement::WhileStatement(Isolate* isolate, ZoneStringList* labels)
-    : IterationStatement(isolate, labels),
-      cond_(NULL),
-      may_have_function_literal_(true),
-      body_id_(GetNextId(isolate)) {
-}
-
-
-ForStatement::ForStatement(Isolate* isolate, ZoneStringList* labels)
-    : IterationStatement(isolate, labels),
-      init_(NULL),
-      cond_(NULL),
-      next_(NULL),
-      may_have_function_literal_(true),
-      loop_variable_(NULL),
-      continue_id_(GetNextId(isolate)),
-      body_id_(GetNextId(isolate)) {
-}
-
-
-ForInStatement::ForInStatement(Isolate* isolate, ZoneStringList* labels)
-    : IterationStatement(isolate, labels),
-      each_(NULL),
-      enumerable_(NULL),
-      assignment_id_(GetNextId(isolate)) {
-}
-
-
-bool FunctionLiteral::strict_mode() const {
-  return scope()->is_strict_mode();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_AST_INL_H_
diff --git a/src/ast.cc b/src/ast.cc
index 418cc43..4b6ae68 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
-
 #include "ast.h"
+
+#include <math.h>  // For isfinite.
+#include "builtins.h"
+#include "conversions.h"
+#include "hashmap.h"
 #include "parser.h"
+#include "property-details.h"
+#include "property.h"
 #include "scopes.h"
 #include "string-stream.h"
 #include "type-info.h"
@@ -48,16 +53,19 @@
 // ----------------------------------------------------------------------------
 // Implementation of other node functionality.
 
-Assignment* ExpressionStatement::StatementAsSimpleAssignment() {
-  return (expression()->AsAssignment() != NULL &&
-          !expression()->AsAssignment()->is_compound())
-      ? expression()->AsAssignment()
-      : NULL;
+
+bool Expression::IsSmiLiteral() {
+  return AsLiteral() != NULL && AsLiteral()->handle()->IsSmi();
 }
 
 
-CountOperation* ExpressionStatement::StatementAsCountOperation() {
-  return expression()->AsCountOperation();
+bool Expression::IsStringLiteral() {
+  return AsLiteral() != NULL && AsLiteral()->handle()->IsString();
+}
+
+
+bool Expression::IsNullLiteral() {
+  return AsLiteral() != NULL && AsLiteral()->handle()->IsNull();
 }
 
 
@@ -66,9 +74,10 @@
       name_(var->name()),
       var_(NULL),  // Will be set by the call to BindTo.
       is_this_(var->is_this()),
-      inside_with_(false),
       is_trivial_(false),
-      position_(RelocInfo::kNoPosition) {
+      is_lvalue_(false),
+      position_(RelocInfo::kNoPosition),
+      interface_(var->interface()) {
   BindTo(var);
 }
 
@@ -76,15 +85,16 @@
 VariableProxy::VariableProxy(Isolate* isolate,
                              Handle<String> name,
                              bool is_this,
-                             bool inside_with,
-                             int position)
+                             int position,
+                             Interface* interface)
     : Expression(isolate),
       name_(name),
       var_(NULL),
       is_this_(is_this),
-      inside_with_(inside_with),
       is_trivial_(false),
-      position_(position) {
+      is_lvalue_(false),
+      position_(position),
+      interface_(interface) {
   // Names must be canonicalized for fast equality checks.
   ASSERT(name->IsSymbol());
 }
@@ -119,18 +129,7 @@
       assignment_id_(GetNextId(isolate)),
       block_start_(false),
       block_end_(false),
-      is_monomorphic_(false) {
-  ASSERT(Token::IsAssignmentOp(op));
-  if (is_compound()) {
-    binary_operation_ =
-        new(isolate->zone()) BinaryOperation(isolate,
-                                             binary_op(),
-                                             target,
-                                             value,
-                                             pos + 1);
-    compound_load_id_ = GetNextId(isolate);
-  }
-}
+      is_monomorphic_(false) { }
 
 
 Token::Value Assignment::binary_op() const {
@@ -157,12 +156,30 @@
 }
 
 
-ObjectLiteral::Property::Property(Literal* key, Expression* value) {
+int FunctionLiteral::start_position() const {
+  return scope()->start_position();
+}
+
+
+int FunctionLiteral::end_position() const {
+  return scope()->end_position();
+}
+
+
+LanguageMode FunctionLiteral::language_mode() const {
+  return scope()->language_mode();
+}
+
+
+ObjectLiteral::Property::Property(Literal* key,
+                                  Expression* value,
+                                  Isolate* isolate) {
   emit_store_ = true;
   key_ = key;
   value_ = value;
   Object* k = *key->handle();
-  if (k->IsSymbol() && HEAP->Proto_symbol()->Equals(String::cast(k))) {
+  if (k->IsSymbol() &&
+      isolate->heap()->Proto_symbol()->Equals(String::cast(k))) {
     kind_ = PROTOTYPE;
   } else if (value_->AsMaterializedLiteral() != NULL) {
     kind_ = MATERIALIZED_LITERAL;
@@ -175,9 +192,7 @@
 
 
 ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
-  Isolate* isolate = Isolate::Current();
   emit_store_ = true;
-  key_ = new(isolate->zone()) Literal(isolate, value->name());
   value_ = value;
   kind_ = is_getter ? GETTER : SETTER;
 }
@@ -228,55 +243,21 @@
 
 
 void ObjectLiteral::CalculateEmitStore() {
-  HashMap properties(&IsEqualString);
-  HashMap elements(&IsEqualNumber);
-  for (int i = this->properties()->length() - 1; i >= 0; i--) {
-    ObjectLiteral::Property* property = this->properties()->at(i);
+  ZoneHashMap table(Literal::Match);
+  for (int i = properties()->length() - 1; i >= 0; i--) {
+    ObjectLiteral::Property* property = properties()->at(i);
     Literal* literal = property->key();
-    Handle<Object> handle = literal->handle();
-
-    if (handle->IsNull()) {
-      continue;
-    }
-
-    uint32_t hash;
-    HashMap* table;
-    void* key;
-    Factory* factory = Isolate::Current()->factory();
-    if (handle->IsSymbol()) {
-      Handle<String> name(String::cast(*handle));
-      if (name->AsArrayIndex(&hash)) {
-        Handle<Object> key_handle = factory->NewNumberFromUint(hash);
-        key = key_handle.location();
-        table = &elements;
-      } else {
-        key = name.location();
-        hash = name->Hash();
-        table = &properties;
-      }
-    } else if (handle->ToArrayIndex(&hash)) {
-      key = handle.location();
-      table = &elements;
-    } else {
-      ASSERT(handle->IsNumber());
-      double num = handle->Number();
-      char arr[100];
-      Vector<char> buffer(arr, ARRAY_SIZE(arr));
-      const char* str = DoubleToCString(num, buffer);
-      Handle<String> name = factory->NewStringFromAscii(CStrVector(str));
-      key = name.location();
-      hash = name->Hash();
-      table = &properties;
-    }
+    if (literal->handle()->IsNull()) continue;
+    uint32_t hash = literal->Hash();
     // If the key of a computed property is in the table, do not emit
     // a store for the property later.
-    if (property->kind() == ObjectLiteral::Property::COMPUTED) {
-      if (table->Lookup(key, hash, false) != NULL) {
-        property->set_emit_store(false);
-      }
+    if (property->kind() == ObjectLiteral::Property::COMPUTED &&
+        table.Lookup(literal, hash, false) != NULL) {
+      property->set_emit_store(false);
+    } else {
+      // Add key to the table.
+      table.Lookup(literal, hash, true);
     }
-    // Add key to the table.
-    table->Lookup(key, hash, true);
   }
 }
 
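The rewritten CalculateEmitStore keys the hash table on Literal itself (via Literal::Match and Literal::Hash) instead of hand-normalizing every key to a string or array index. The observable behavior is unchanged: scanning the properties back to front, the last write to each key wins and earlier duplicate stores are suppressed. A reduced model, using std::unordered_set in place of ZoneHashMap and ignoring the COMPUTED-kind restriction of the real code:

#include <cstdio>
#include <string>
#include <unordered_set>
#include <vector>

struct Prop {
  std::string key;
  int value;
  bool emit_store;
};

// Back-to-front scan: the first occurrence seen (i.e. the textually last
// write to a key) keeps its store; earlier duplicates are suppressed.
static void CalculateEmitStore(std::vector<Prop>* props) {
  std::unordered_set<std::string> seen;
  for (int i = static_cast<int>(props->size()) - 1; i >= 0; i--) {
    Prop* p = &(*props)[i];
    p->emit_store = seen.insert(p->key).second;
  }
}

int main() {
  // Models {a: 1, b: 2, a: 3}: only the a:3 store survives.
  std::vector<Prop> props = {{"a", 1, true}, {"b", 2, true}, {"a", 3, true}};
  CalculateEmitStore(&props);
  for (size_t i = 0; i < props.size(); i++) {
    std::printf("%s:%d emit=%d\n", props[i].key.c_str(), props[i].value,
                props[i].emit_store ? 1 : 0);
  }
}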
@@ -327,292 +308,100 @@
 }
 
 
+static bool IsTypeof(Expression* expr) {
+  UnaryOperation* maybe_unary = expr->AsUnaryOperation();
+  return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
+}
+
+
+// Check for the pattern: typeof <expression> equals <string literal>.
+static bool MatchLiteralCompareTypeof(Expression* left,
+                                      Token::Value op,
+                                      Expression* right,
+                                      Expression** expr,
+                                      Handle<String>* check) {
+  if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
+    *expr = left->AsUnaryOperation()->expression();
+    *check = Handle<String>::cast(right->AsLiteral()->handle());
+    return true;
+  }
+  return false;
+}
+
+
 bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
                                               Handle<String>* check) {
-  if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
+  return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) ||
+      MatchLiteralCompareTypeof(right_, op_, left_, expr, check);
+}
 
-  UnaryOperation* left_unary = left_->AsUnaryOperation();
-  UnaryOperation* right_unary = right_->AsUnaryOperation();
-  Literal* left_literal = left_->AsLiteral();
-  Literal* right_literal = right_->AsLiteral();
 
-  // Check for the pattern: typeof <expression> == <string literal>.
-  if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
-      right_literal != NULL && right_literal->handle()->IsString()) {
-    *expr = left_unary->expression();
-    *check = Handle<String>::cast(right_literal->handle());
+static bool IsVoidOfLiteral(Expression* expr) {
+  UnaryOperation* maybe_unary = expr->AsUnaryOperation();
+  return maybe_unary != NULL &&
+      maybe_unary->op() == Token::VOID &&
+      maybe_unary->expression()->AsLiteral() != NULL;
+}
+
+
+// Check for the pattern: void <literal> equals <expression>
+static bool MatchLiteralCompareUndefined(Expression* left,
+                                         Token::Value op,
+                                         Expression* right,
+                                         Expression** expr) {
+  if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
+    *expr = right;
     return true;
   }
-
-  // Check for the pattern: <string literal> == typeof <expression>.
-  if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
-      left_literal != NULL && left_literal->handle()->IsString()) {
-    *expr = right_unary->expression();
-    *check = Handle<String>::cast(left_literal->handle());
-    return true;
-  }
-
   return false;
 }
 
 
 bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
-  if (op_ != Token::EQ_STRICT) return false;
+  return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
+      MatchLiteralCompareUndefined(right_, op_, left_, expr);
+}
 
-  UnaryOperation* left_unary = left_->AsUnaryOperation();
-  UnaryOperation* right_unary = right_->AsUnaryOperation();
 
-  // Check for the pattern: <expression> === void <literal>.
-  if (right_unary != NULL && right_unary->op() == Token::VOID &&
-      right_unary->expression()->AsLiteral() != NULL) {
-    *expr = left_;
+// Check for the pattern: null equals <expression>
+static bool MatchLiteralCompareNull(Expression* left,
+                                    Token::Value op,
+                                    Expression* right,
+                                    Expression** expr) {
+  if (left->IsNullLiteral() && Token::IsEqualityOp(op)) {
+    *expr = right;
     return true;
   }
-
-  // Check for the pattern: void <literal> === <expression>.
-  if (left_unary != NULL && left_unary->op() == Token::VOID &&
-      left_unary->expression()->AsLiteral() != NULL) {
-    *expr = right_;
-    return true;
-  }
-
   return false;
 }
 
 
+bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
+  return MatchLiteralCompareNull(left_, op_, right_, expr) ||
+      MatchLiteralCompareNull(right_, op_, left_, expr);
+}
+
+
 // ----------------------------------------------------------------------------
 // Inlining support
 
 bool Declaration::IsInlineable() const {
-  return proxy()->var()->IsStackAllocated() && fun() == NULL;
+  return proxy()->var()->IsStackAllocated();
 }
 
-
-bool TargetCollector::IsInlineable() const {
-  UNREACHABLE();
+bool FunctionDeclaration::IsInlineable() const {
   return false;
 }
 
 
-bool ForInStatement::IsInlineable() const {
-  return false;
-}
-
-
-bool WithStatement::IsInlineable() const {
-  return false;
-}
-
-
-bool SwitchStatement::IsInlineable() const {
-  return false;
-}
-
-
-bool TryStatement::IsInlineable() const {
-  return false;
-}
-
-
-bool TryCatchStatement::IsInlineable() const {
-  return false;
-}
-
-
-bool TryFinallyStatement::IsInlineable() const {
-  return false;
-}
-
-
-bool DebuggerStatement::IsInlineable() const {
-  return false;
-}
-
-
-bool Throw::IsInlineable() const {
-  return exception()->IsInlineable();
-}
-
-
-bool MaterializedLiteral::IsInlineable() const {
-  // TODO(1322): Allow materialized literals.
-  return false;
-}
-
-
-bool FunctionLiteral::IsInlineable() const {
-  // TODO(1322): Allow materialized literals.
-  return false;
-}
-
-
-bool ThisFunction::IsInlineable() const {
-  return false;
-}
-
-
-bool SharedFunctionInfoLiteral::IsInlineable() const {
-  return false;
-}
-
-
-bool ForStatement::IsInlineable() const {
-  return (init() == NULL || init()->IsInlineable())
-      && (cond() == NULL || cond()->IsInlineable())
-      && (next() == NULL || next()->IsInlineable())
-      && body()->IsInlineable();
-}
-
-
-bool WhileStatement::IsInlineable() const {
-  return cond()->IsInlineable()
-      && body()->IsInlineable();
-}
-
-
-bool DoWhileStatement::IsInlineable() const {
-  return cond()->IsInlineable()
-      && body()->IsInlineable();
-}
-
-
-bool ContinueStatement::IsInlineable() const {
-  return true;
-}
-
-
-bool BreakStatement::IsInlineable() const {
-  return true;
-}
-
-
-bool EmptyStatement::IsInlineable() const {
-  return true;
-}
-
-
-bool Literal::IsInlineable() const {
-  return true;
-}
-
-
-bool Block::IsInlineable() const {
-  const int count = statements_.length();
-  for (int i = 0; i < count; ++i) {
-    if (!statements_[i]->IsInlineable()) return false;
-  }
-  return true;
-}
-
-
-bool ExpressionStatement::IsInlineable() const {
-  return expression()->IsInlineable();
-}
-
-
-bool IfStatement::IsInlineable() const {
-  return condition()->IsInlineable()
-      && then_statement()->IsInlineable()
-      && else_statement()->IsInlineable();
-}
-
-
-bool ReturnStatement::IsInlineable() const {
-  return expression()->IsInlineable();
-}
-
-
-bool Conditional::IsInlineable() const {
-  return condition()->IsInlineable() && then_expression()->IsInlineable() &&
-      else_expression()->IsInlineable();
-}
-
-
-bool VariableProxy::IsInlineable() const {
-  return var()->IsUnallocated() || var()->IsStackAllocated();
-}
-
-
-bool Assignment::IsInlineable() const {
-  return target()->IsInlineable() && value()->IsInlineable();
-}
-
-
-bool Property::IsInlineable() const {
-  return obj()->IsInlineable() && key()->IsInlineable();
-}
-
-
-bool Call::IsInlineable() const {
-  if (!expression()->IsInlineable()) return false;
-  const int count = arguments()->length();
-  for (int i = 0; i < count; ++i) {
-    if (!arguments()->at(i)->IsInlineable()) return false;
-  }
-  return true;
-}
-
-
-bool CallNew::IsInlineable() const {
-  if (!expression()->IsInlineable()) return false;
-  const int count = arguments()->length();
-  for (int i = 0; i < count; ++i) {
-    if (!arguments()->at(i)->IsInlineable()) return false;
-  }
-  return true;
-}
-
-
-bool CallRuntime::IsInlineable() const {
-  // Don't try to inline JS runtime calls because we don't (currently) even
-  // optimize them.
-  if (is_jsruntime()) return false;
-  // Don't inline the %_ArgumentsLength or %_Arguments because their
-  // implementation will not work.  There is no stack frame to get them
-  // from.
-  if (function()->intrinsic_type == Runtime::INLINE &&
-      (name()->IsEqualTo(CStrVector("_ArgumentsLength")) ||
-       name()->IsEqualTo(CStrVector("_Arguments")))) {
-    return false;
-  }
-  const int count = arguments()->length();
-  for (int i = 0; i < count; ++i) {
-    if (!arguments()->at(i)->IsInlineable()) return false;
-  }
-  return true;
-}
-
-
-bool UnaryOperation::IsInlineable() const {
-  return expression()->IsInlineable();
-}
-
-
-bool BinaryOperation::IsInlineable() const {
-  return left()->IsInlineable() && right()->IsInlineable();
-}
-
-
-bool CompareOperation::IsInlineable() const {
-  return left()->IsInlineable() && right()->IsInlineable();
-}
-
-
-bool CompareToNull::IsInlineable() const {
-  return expression()->IsInlineable();
-}
-
-
-bool CountOperation::IsInlineable() const {
-  return expression()->IsInlineable();
-}
-
-
 // ----------------------------------------------------------------------------
 // Recording of type feedback
 
 void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
   // Record type feedback from the oracle in the AST.
+  is_uninitialized_ = oracle->LoadIsUninitialized(this);
+  if (is_uninitialized_) return;
+
   is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
   receiver_types_.Clear();
   if (key()->IsPropertyName()) {
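Each IsLiteralCompare* predicate above now writes its pattern check once, in a direction-agnostic matcher, and calls it twice with the operands swapped; that replaces the duplicated left/right blocks of the old code. The idiom, reduced to its essentials for the null-comparison case (Expr and the operator enum are stand-ins):

#include <cassert>
#include <cstddef>

// Stand-ins: just enough structure to show the two-way matching idiom.
struct Expr { bool is_null_literal; };
enum Op { EQ, EQ_STRICT, LT };

static bool IsEqualityOp(Op op) { return op == EQ || op == EQ_STRICT; }

// The pattern is written once, for "null <op> <expression>" ...
static bool MatchLiteralCompareNull(Expr* left, Op op, Expr* right,
                                    Expr** expr) {
  if (left->is_null_literal && IsEqualityOp(op)) {
    *expr = right;
    return true;
  }
  return false;
}

// ... and the public predicate tries both operand orders.
static bool IsLiteralCompareNull(Expr* left, Op op, Expr* right, Expr** expr) {
  return MatchLiteralCompareNull(left, op, right, expr) ||
         MatchLiteralCompareNull(right, op, left, expr);
}

int main() {
  Expr null_lit = { true };
  Expr x = { false };
  Expr* out = NULL;
  assert(IsLiteralCompareNull(&x, EQ, &null_lit, &out) && out == &x);
  assert(!IsLiteralCompareNull(&x, LT, &null_lit, &out));
}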
@@ -677,6 +466,10 @@
   TypeInfo info = oracle->SwitchType(this);
   if (info.IsSmi()) {
     compare_type_ = SMI_ONLY;
+  } else if (info.IsSymbol()) {
+    compare_type_ = SYMBOL_ONLY;
+  } else if (info.IsNonSymbol()) {
+    compare_type_ = STRING_ONLY;
   } else if (info.IsNonPrimitive()) {
     compare_type_ = OBJECT_ONLY;
   } else {
@@ -685,39 +478,47 @@
 }
 
 
-static bool CanCallWithoutIC(Handle<JSFunction> target, int arity) {
-  SharedFunctionInfo* info = target->shared();
-  // If the number of formal parameters of the target function does
-  // not match the number of arguments we're passing, we don't want to
-  // deal with it. Otherwise, we can call it directly.
-  return !target->NeedsArgumentsAdaption() ||
-      info->formal_parameter_count() == arity;
-}
-
-
 bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
+  // If there is an interceptor, we can't compute the target for a direct call.
+  if (type->has_named_interceptor()) return false;
+
   if (check_type_ == RECEIVER_MAP_CHECK) {
-    // For primitive checks the holder is set up to point to the
-    // corresponding prototype object, i.e. one step of the algorithm
-    // below has been already performed.
-    // For non-primitive checks we clear it to allow computing targets
-    // for polymorphic calls.
+    // For primitive checks the holder is set up to point to the corresponding
+    // prototype object, i.e. one step of the algorithm below has been already
+    // performed. For non-primitive checks we clear it to allow computing
+    // targets for polymorphic calls.
     holder_ = Handle<JSObject>::null();
   }
+  LookupResult lookup(type->GetIsolate());
   while (true) {
-    LookupResult lookup;
     type->LookupInDescriptors(NULL, *name, &lookup);
-    // If the function wasn't found directly in the map, we start
-    // looking upwards through the prototype chain.
-    if (!lookup.IsFound() && type->prototype()->IsJSObject()) {
-      holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
-      type = Handle<Map>(holder()->map());
-    } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
-      target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
-      return CanCallWithoutIC(target_, arguments()->length());
-    } else {
-      return false;
+    if (lookup.IsFound()) {
+      switch (lookup.type()) {
+        case CONSTANT_FUNCTION:
+          // We surely know the target for a constant function.
+          target_ =
+              Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
+          return true;
+        case NORMAL:
+        case FIELD:
+        case CALLBACKS:
+        case HANDLER:
+        case INTERCEPTOR:
+          // We don't know the target.
+          return false;
+        case MAP_TRANSITION:
+        case ELEMENTS_TRANSITION:
+        case CONSTANT_TRANSITION:
+        case NULL_DESCRIPTOR:
+          // Perhaps something interesting is up in the prototype chain...
+          break;
+      }
     }
+    // If we reach the end of the prototype chain, we don't know the target.
+    if (!type->prototype()->IsJSObject()) return false;
+    // Go up the prototype chain, recording where we are currently.
+    holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
+    type = Handle<Map>(holder()->map());
   }
 }
 
@@ -726,7 +527,7 @@
                                LookupResult* lookup) {
   target_ = Handle<JSFunction>::null();
   cell_ = Handle<JSGlobalPropertyCell>::null();
-  ASSERT(lookup->IsProperty() &&
+  ASSERT(lookup->IsFound() &&
          lookup->type() == NORMAL &&
          lookup->holder() == *global);
   cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
@@ -734,8 +535,7 @@
     Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
     // If the function is in new space we assume it's more likely to
     // change and thus prefer the general IC code.
-    if (!HEAP->InNewSpace(*candidate) &&
-        CanCallWithoutIC(candidate, arguments()->length())) {
+    if (!HEAP->InNewSpace(*candidate)) {
       target_ = candidate;
       return true;
     }
@@ -746,37 +546,49 @@
 
 void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
                               CallKind call_kind) {
+  is_monomorphic_ = oracle->CallIsMonomorphic(this);
   Property* property = expression()->AsProperty();
-  ASSERT(property != NULL);
-  // Specialize for the receiver types seen at runtime.
-  Literal* key = property->key()->AsLiteral();
-  ASSERT(key != NULL && key->handle()->IsString());
-  Handle<String> name = Handle<String>::cast(key->handle());
-  receiver_types_.Clear();
-  oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
+  if (property == NULL) {
+    // Function call.  Specialize for monomorphic calls.
+    if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
+  } else {
+    // Method call.  Specialize for the receiver types seen at runtime.
+    Literal* key = property->key()->AsLiteral();
+    ASSERT(key != NULL && key->handle()->IsString());
+    Handle<String> name = Handle<String>::cast(key->handle());
+    receiver_types_.Clear();
+    oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
 #ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    int length = receiver_types_.length();
-    for (int i = 0; i < length; i++) {
-      Handle<Map> map = receiver_types_.at(i);
-      ASSERT(!map.is_null() && *map != NULL);
+    if (FLAG_enable_slow_asserts) {
+      int length = receiver_types_.length();
+      for (int i = 0; i < length; i++) {
+        Handle<Map> map = receiver_types_.at(i);
+        ASSERT(!map.is_null() && *map != NULL);
+      }
+    }
+#endif
+    check_type_ = oracle->GetCallCheckType(this);
+    if (is_monomorphic_) {
+      Handle<Map> map;
+      if (receiver_types_.length() > 0) {
+        ASSERT(check_type_ == RECEIVER_MAP_CHECK);
+        map = receiver_types_.at(0);
+      } else {
+        ASSERT(check_type_ != RECEIVER_MAP_CHECK);
+        holder_ = Handle<JSObject>(
+            oracle->GetPrototypeForPrimitiveCheck(check_type_));
+        map = Handle<Map>(holder_->map());
+      }
+      is_monomorphic_ = ComputeTarget(map, name);
     }
   }
-#endif
-  is_monomorphic_ = oracle->CallIsMonomorphic(this);
-  check_type_ = oracle->GetCallCheckType(this);
+}
+
+
+void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
   if (is_monomorphic_) {
-    Handle<Map> map;
-    if (receiver_types_.length() > 0) {
-      ASSERT(check_type_ == RECEIVER_MAP_CHECK);
-      map = receiver_types_.at(0);
-    } else {
-      ASSERT(check_type_ != RECEIVER_MAP_CHECK);
-      holder_ = Handle<JSObject>(
-          oracle->GetPrototypeForPrimitiveCheck(check_type_));
-      map = Handle<Map>(holder_->map());
-    }
-    is_monomorphic_ = ComputeTarget(map, name);
+    target_ = oracle->GetCallNewTarget(this);
   }
 }
 
@@ -793,6 +605,13 @@
 }
 
 
+void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this)
+      ? oracle->GetObjectLiteralStoreMap(this)
+      : Handle<Map>::null();
+}
+
+
 // ----------------------------------------------------------------------------
 // Implementation of AstVisitor
 
@@ -856,8 +675,6 @@
 FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
 #undef MAKE_TYPE_CASE
 
-RegExpEmpty RegExpEmpty::kInstance;
-
 
 static Interval ListCaptureRegisters(ZoneList<RegExpTree*>* children) {
   Interval result = Interval::Empty();
@@ -1175,4 +992,172 @@
       entry_id_(AstNode::GetNextId(isolate)) {
 }
 
+
+#define INCREASE_NODE_COUNT(NodeType) \
+  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+    increase_node_count(); \
+  }
+
+INCREASE_NODE_COUNT(VariableDeclaration)
+INCREASE_NODE_COUNT(FunctionDeclaration)
+INCREASE_NODE_COUNT(ModuleDeclaration)
+INCREASE_NODE_COUNT(ImportDeclaration)
+INCREASE_NODE_COUNT(ExportDeclaration)
+INCREASE_NODE_COUNT(ModuleLiteral)
+INCREASE_NODE_COUNT(ModuleVariable)
+INCREASE_NODE_COUNT(ModulePath)
+INCREASE_NODE_COUNT(ModuleUrl)
+INCREASE_NODE_COUNT(Block)
+INCREASE_NODE_COUNT(ExpressionStatement)
+INCREASE_NODE_COUNT(EmptyStatement)
+INCREASE_NODE_COUNT(IfStatement)
+INCREASE_NODE_COUNT(ContinueStatement)
+INCREASE_NODE_COUNT(BreakStatement)
+INCREASE_NODE_COUNT(ReturnStatement)
+INCREASE_NODE_COUNT(Conditional)
+INCREASE_NODE_COUNT(Literal)
+INCREASE_NODE_COUNT(ObjectLiteral)
+INCREASE_NODE_COUNT(Assignment)
+INCREASE_NODE_COUNT(Throw)
+INCREASE_NODE_COUNT(Property)
+INCREASE_NODE_COUNT(UnaryOperation)
+INCREASE_NODE_COUNT(CountOperation)
+INCREASE_NODE_COUNT(BinaryOperation)
+INCREASE_NODE_COUNT(CompareOperation)
+INCREASE_NODE_COUNT(ThisFunction)
+INCREASE_NODE_COUNT(Call)
+INCREASE_NODE_COUNT(CallNew)
+
+#undef INCREASE_NODE_COUNT
+
+
+void AstConstructionVisitor::VisitWithStatement(WithStatement* node) {
+  increase_node_count();
+  add_flag(kDontOptimize);
+  add_flag(kDontInline);
+}
+
+
+void AstConstructionVisitor::VisitSwitchStatement(SwitchStatement* node) {
+  increase_node_count();
+  add_flag(kDontInline);
+}
+
+
+void AstConstructionVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
+  increase_node_count();
+  add_flag(kDontSelfOptimize);
+}
+
+
+void AstConstructionVisitor::VisitWhileStatement(WhileStatement* node) {
+  increase_node_count();
+  add_flag(kDontSelfOptimize);
+}
+
+
+void AstConstructionVisitor::VisitForStatement(ForStatement* node) {
+  increase_node_count();
+  add_flag(kDontSelfOptimize);
+}
+
+
+void AstConstructionVisitor::VisitForInStatement(ForInStatement* node) {
+  increase_node_count();
+  add_flag(kDontSelfOptimize);
+}
+
+
+void AstConstructionVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
+  increase_node_count();
+  add_flag(kDontOptimize);
+  add_flag(kDontInline);
+}
+
+
+void AstConstructionVisitor::VisitTryFinallyStatement(
+    TryFinallyStatement* node) {
+  increase_node_count();
+  add_flag(kDontOptimize);
+  add_flag(kDontInline);
+}
+
+
+void AstConstructionVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
+  increase_node_count();
+  add_flag(kDontOptimize);
+  add_flag(kDontInline);
+}
+
+
+void AstConstructionVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
+  increase_node_count();
+  add_flag(kDontInline);
+}
+
+
+void AstConstructionVisitor::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
+  increase_node_count();
+  add_flag(kDontOptimize);
+  add_flag(kDontInline);
+}
+
+
+void AstConstructionVisitor::VisitVariableProxy(VariableProxy* node) {
+  increase_node_count();
+  // In theory, we'd have to add:
+  // if (node->var()->IsLookupSlot()) { add_flag(kDontInline); }
+  // However, node->var() is usually not bound yet at VariableProxy creation
+  // time, and LOOKUP variables only result from constructs that cannot
+  // be inlined anyway.
+}
+
+
+void AstConstructionVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
+  increase_node_count();
+  add_flag(kDontInline);  // TODO(1322): Allow materialized literals.
+}
+
+
+void AstConstructionVisitor::VisitArrayLiteral(ArrayLiteral* node) {
+  increase_node_count();
+  add_flag(kDontInline);  // TODO(1322): Allow materialized literals.
+}
+
+
+void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
+  increase_node_count();
+  if (node->is_jsruntime()) {
+    // Don't try to inline JS runtime calls because we don't (currently) even
+    // optimize them.
+    add_flag(kDontInline);
+  } else if (node->function()->intrinsic_type == Runtime::INLINE &&
+      (node->name()->IsEqualTo(CStrVector("_ArgumentsLength")) ||
+       node->name()->IsEqualTo(CStrVector("_Arguments")))) {
+    // Don't inline the %_ArgumentsLength or %_Arguments because their
+    // implementation will not work.  There is no stack frame to get them
+    // from.
+    add_flag(kDontInline);
+  }
+}
+
+
+Handle<String> Literal::ToString() {
+  if (handle_->IsString()) return Handle<String>::cast(handle_);
+  ASSERT(handle_->IsNumber());
+  char arr[100];
+  Vector<char> buffer(arr, ARRAY_SIZE(arr));
+  const char* str;
+  if (handle_->IsSmi()) {
+    // Optimization only; the heap number case would subsume this.
+    OS::SNPrintF(buffer, "%d", Smi::cast(*handle_)->value());
+    str = arr;
+  } else {
+    str = DoubleToCString(handle_->Number(), buffer);
+  }
+  return FACTORY->NewStringFromAscii(CStrVector(str));
+}
+
+
 } }  // namespace v8::internal
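The long list of per-node virtual IsInlineable() overrides is gone from ast.cc; instead AstConstructionVisitor accumulates a node count and "don't inline/optimize" flags while the tree is being built, so the answer is available later as a field read rather than another full tree walk. A reduced model of that accumulate-during-construction scheme, with a plain bitmask standing in for EnumSet:

#include <cstdio>

enum AstPropertiesFlag {
  kDontInline       = 1 << 0,
  kDontOptimize     = 1 << 1,
  kDontSelfOptimize = 1 << 2
};

// Flags and node counts accumulate as nodes are constructed; deciding
// inlineability afterwards needs no further traversal.
class ToyConstructionVisitor {
 public:
  ToyConstructionVisitor() : node_count_(0), flags_(0) {}

  void VisitLiteral()       { node_count_++; }
  void VisitForStatement()  { node_count_++; flags_ |= kDontSelfOptimize; }
  void VisitWithStatement() {
    node_count_++;
    flags_ |= kDontOptimize | kDontInline;
  }

  bool IsInlineable(int max_nodes) const {
    return (flags_ & kDontInline) == 0 && node_count_ <= max_nodes;
  }

 private:
  int node_count_;
  int flags_;
};

int main() {
  ToyConstructionVisitor v;
  v.VisitLiteral();
  v.VisitForStatement();
  std::printf("inlineable: %d\n", v.IsInlineable(100) ? 1 : 0);  // 1
  v.VisitWithStatement();
  std::printf("inlineable: %d\n", v.IsInlineable(100) ? 1 : 0);  // 0
}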
diff --git a/src/ast.h b/src/ast.h
index b56205f..b827302 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,14 +28,21 @@
 #ifndef V8_AST_H_
 #define V8_AST_H_
 
-#include "allocation.h"
-#include "execution.h"
+#include "v8.h"
+
+#include "assembler.h"
 #include "factory.h"
+#include "isolate.h"
 #include "jsregexp.h"
+#include "list-inl.h"
 #include "runtime.h"
 #include "small-pointer-list.h"
+#include "smart-array-pointer.h"
 #include "token.h"
+#include "utils.h"
 #include "variables.h"
+#include "interface.h"
+#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -53,6 +60,19 @@
 // Nodes of the abstract syntax tree. Only concrete classes are
 // enumerated here.
 
+#define DECLARATION_NODE_LIST(V)                \
+  V(VariableDeclaration)                        \
+  V(FunctionDeclaration)                        \
+  V(ModuleDeclaration)                          \
+  V(ImportDeclaration)                          \
+  V(ExportDeclaration)
+
+#define MODULE_NODE_LIST(V)                     \
+  V(ModuleLiteral)                              \
+  V(ModuleVariable)                             \
+  V(ModulePath)                                 \
+  V(ModuleUrl)
+
 #define STATEMENT_NODE_LIST(V)                  \
   V(Block)                                      \
   V(ExpressionStatement)                        \
@@ -90,21 +110,41 @@
   V(CountOperation)                             \
   V(BinaryOperation)                            \
   V(CompareOperation)                           \
-  V(CompareToNull)                              \
   V(ThisFunction)
 
 #define AST_NODE_LIST(V)                        \
-  V(Declaration)                                \
+  DECLARATION_NODE_LIST(V)                      \
+  MODULE_NODE_LIST(V)                           \
   STATEMENT_NODE_LIST(V)                        \
   EXPRESSION_NODE_LIST(V)
 
 // Forward declarations
-class BitVector;
-class DefinitionInfo;
+class AstConstructionVisitor;
+template<class> class AstNodeFactory;
+class AstVisitor;
+class Declaration;
+class Module;
+class BreakableStatement;
+class Expression;
+class IterationStatement;
 class MaterializedLiteral;
+class Statement;
 class TargetCollector;
 class TypeFeedbackOracle;
 
+class RegExpAlternative;
+class RegExpAssertion;
+class RegExpAtom;
+class RegExpBackReference;
+class RegExpCapture;
+class RegExpCharacterClass;
+class RegExpCompiler;
+class RegExpDisjunction;
+class RegExpEmpty;
+class RegExpLookahead;
+class RegExpQuantifier;
+class RegExpText;
+
 #define DEF_FORWARD_DECLARATION(type) class type;
 AST_NODE_LIST(DEF_FORWARD_DECLARATION)
 #undef DEF_FORWARD_DECLARATION
@@ -118,8 +158,31 @@
 
 #define DECLARE_NODE_TYPE(type)                                         \
   virtual void Accept(AstVisitor* v);                                   \
-  virtual AstNode::Type node_type() const { return AstNode::k##type; }  \
-  virtual type* As##type() { return this; }
+  virtual AstNode::Type node_type() const { return AstNode::k##type; }
+
+
+enum AstPropertiesFlag {
+  kDontInline,
+  kDontOptimize,
+  kDontSelfOptimize,
+  kDontSoftInline
+};
+
+
+class AstProperties BASE_EMBEDDED {
+ public:
+  class Flags : public EnumSet<AstPropertiesFlag, int> {};
+
+  AstProperties() : node_count_(0) { }
+
+  Flags* flags() { return &flags_; }
+  int node_count() { return node_count_; }
+  void add_node_count(int count) { node_count_ += count; }
+
+ private:
+  Flags flags_;
+  int node_count_;
+};
 
 
 class AstNode: public ZoneObject {
@@ -138,14 +201,11 @@
   // that emit code (function declarations).
   static const int kDeclarationsId = 3;
 
-  // Override ZoneObject's new to count allocated AST nodes.
   void* operator new(size_t size, Zone* zone) {
-    Isolate* isolate = zone->isolate();
-    isolate->set_ast_node_count(isolate->ast_node_count() + 1);
     return zone->New(static_cast<int>(size));
   }
 
-  AstNode() {}
+  AstNode() { }
 
   virtual ~AstNode() { }
 
@@ -154,10 +214,12 @@
 
   // Type testing & conversion functions overridden by concrete subclasses.
 #define DECLARE_NODE_FUNCTIONS(type)                  \
-  virtual type* As##type() { return NULL; }
+  bool Is##type() { return node_type() == AstNode::k##type; }          \
+  type* As##type() { return Is##type() ? reinterpret_cast<type*>(this) : NULL; }
   AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
 #undef DECLARE_NODE_FUNCTIONS
 
+  virtual Declaration* AsDeclaration() { return NULL; }
   virtual Statement* AsStatement() { return NULL; }
   virtual Expression* AsExpression() { return NULL; }
   virtual TargetCollector* AsTargetCollector() { return NULL; }
@@ -165,19 +227,13 @@
   virtual IterationStatement* AsIterationStatement() { return NULL; }
   virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
 
-  // True if the node is simple enough for us to inline calls containing it.
-  virtual bool IsInlineable() const = 0;
-
-  static int Count() { return Isolate::Current()->ast_node_count(); }
-  static void ResetIds() { Isolate::Current()->set_ast_node_id(0); }
-
  protected:
-  static unsigned GetNextId(Isolate* isolate) {
+  static int GetNextId(Isolate* isolate) {
     return ReserveIdRange(isolate, 1);
   }
 
-  static unsigned ReserveIdRange(Isolate* isolate, int n) {
-    unsigned tmp = isolate->ast_node_id();
+  static int ReserveIdRange(Isolate* isolate, int n) {
+    int tmp = isolate->ast_node_id();
     isolate->set_ast_node_id(tmp + n);
     return tmp;
   }
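DECLARE_NODE_FUNCTIONS now derives both Is##type and As##type from the single virtual node_type() tag instead of a virtual As##type override in every subclass, trading one vtable slot per node type for a tag comparison. The expanded pattern, shown outside the macro machinery for two node types:

#include <cassert>
#include <cstddef>

class AstNode {
 public:
  enum Type { kLiteral, kBlock };
  virtual ~AstNode() {}
  virtual Type node_type() const = 0;

  // What DECLARE_NODE_FUNCTIONS expands to: a tag check plus a checked
  // downcast, instead of a virtual As##type override in every subclass.
  bool IsLiteral() { return node_type() == kLiteral; }
  class Literal* AsLiteral();
};

class Literal : public AstNode {
 public:
  virtual Type node_type() const { return kLiteral; }
};

class Block : public AstNode {
 public:
  virtual Type node_type() const { return kBlock; }
};

Literal* AstNode::AsLiteral() {
  return IsLiteral() ? static_cast<Literal*>(this) : NULL;
}

int main() {
  Literal l;
  Block b;
  AstNode* n = &b;
  assert(n->AsLiteral() == NULL);
  n = &l;
  assert(n->AsLiteral() == &l);
}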
@@ -197,9 +253,6 @@
 
   virtual Statement* AsStatement()  { return this; }
 
-  virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
-  virtual CountOperation* StatementAsCountOperation() { return NULL; }
-
   bool IsEmpty() { return AsEmptyStatement() != NULL; }
 
   void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
@@ -254,10 +307,6 @@
     kTest
   };
 
-  explicit Expression(Isolate* isolate)
-      : id_(GetNextId(isolate)),
-        test_id_(GetNextId(isolate)) {}
-
   virtual int position() const {
     UNREACHABLE();
     return 0;
@@ -265,7 +314,6 @@
 
   virtual Expression* AsExpression()  { return this; }
 
-  virtual bool IsTrivial() { return false; }
   virtual bool IsValidLeftHandSide() { return false; }
 
   // Helpers for ToBoolean conversion.
@@ -277,27 +325,24 @@
   // names because [] for string objects is handled only by keyed ICs.
   virtual bool IsPropertyName() { return false; }
 
-  // Mark the expression as being compiled as an expression
-  // statement. This is used to transform postfix increments to
-  // (faster) prefix increments.
-  virtual void MarkAsStatement() { /* do nothing */ }
-
   // True iff the result can be safely overwritten (to avoid allocation).
   // False for operations that can return one of their operands.
   virtual bool ResultOverwriteAllowed() { return false; }
 
   // True iff the expression is a literal represented as a smi.
-  virtual bool IsSmiLiteral() { return false; }
+  bool IsSmiLiteral();
+
+  // True iff the expression is a string literal.
+  bool IsStringLiteral();
+
+  // True iff the expression is the null literal.
+  bool IsNullLiteral();
 
   // Type feedback information for assignments and properties.
   virtual bool IsMonomorphic() {
     UNREACHABLE();
     return false;
   }
-  virtual bool IsArrayLength() {
-    UNREACHABLE();
-    return false;
-  }
   virtual SmallMapList* GetReceiverTypes() {
     UNREACHABLE();
     return NULL;
@@ -312,9 +357,14 @@
   unsigned id() const { return id_; }
   unsigned test_id() const { return test_id_; }
 
+ protected:
+  explicit Expression(Isolate* isolate)
+      : id_(GetNextId(isolate)),
+        test_id_(GetNextId(isolate)) {}
+
  private:
-  unsigned id_;
-  unsigned test_id_;
+  int id_;
+  int test_id_;
 };
 
 
@@ -343,7 +393,14 @@
   int ExitId() const { return exit_id_; }
 
  protected:
-  BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type);
+  BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type)
+      : labels_(labels),
+        type_(type),
+        entry_id_(GetNextId(isolate)),
+        exit_id_(GetNextId(isolate)) {
+    ASSERT(labels == NULL || labels->length() > 0);
+  }
+
 
  private:
   ZoneStringList* labels_;
@@ -356,25 +413,8 @@
 
 class Block: public BreakableStatement {
  public:
-  inline Block(Isolate* isolate,
-               ZoneStringList* labels,
-               int capacity,
-               bool is_initializer_block);
-
   DECLARE_NODE_TYPE(Block)
 
-  virtual Assignment* StatementAsSimpleAssignment() {
-    if (statements_.length() != 1) return NULL;
-    return statements_[0]->StatementAsSimpleAssignment();
-  }
-
-  virtual CountOperation* StatementAsCountOperation() {
-    if (statements_.length() != 1) return NULL;
-    return statements_[0]->StatementAsCountOperation();
-  }
-
-  virtual bool IsInlineable() const;
-
   void AddStatement(Statement* statement) { statements_.Add(statement); }
 
   ZoneList<Statement*>* statements() { return &statements_; }
@@ -383,6 +423,19 @@
   Scope* block_scope() const { return block_scope_; }
   void set_block_scope(Scope* block_scope) { block_scope_ = block_scope; }
 
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  Block(Isolate* isolate,
+        ZoneStringList* labels,
+        int capacity,
+        bool is_initializer_block)
+      : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
+        statements_(capacity),
+        is_initializer_block_(is_initializer_block),
+        block_scope_(NULL) {
+  }
+
  private:
   ZoneList<Statement*> statements_;
   bool is_initializer_block_;
@@ -392,39 +445,236 @@
 
 class Declaration: public AstNode {
  public:
+  VariableProxy* proxy() const { return proxy_; }
+  VariableMode mode() const { return mode_; }
+  Scope* scope() const { return scope_; }
+  virtual InitializationFlag initialization() const = 0;
+  virtual bool IsInlineable() const;
+
+  virtual Declaration* AsDeclaration() { return this; }
+
+ protected:
   Declaration(VariableProxy* proxy,
-              Variable::Mode mode,
-              FunctionLiteral* fun,
+              VariableMode mode,
               Scope* scope)
       : proxy_(proxy),
         mode_(mode),
-        fun_(fun),
         scope_(scope) {
-    ASSERT(mode == Variable::VAR ||
-           mode == Variable::CONST ||
-           mode == Variable::LET);
-    // At the moment there are no "const functions"'s in JavaScript...
-    ASSERT(fun == NULL || mode == Variable::VAR || mode == Variable::LET);
+    ASSERT(mode == VAR ||
+           mode == CONST ||
+           mode == CONST_HARMONY ||
+           mode == LET);
   }
 
-  DECLARE_NODE_TYPE(Declaration)
-
-  VariableProxy* proxy() const { return proxy_; }
-  Variable::Mode mode() const { return mode_; }
-  FunctionLiteral* fun() const { return fun_; }  // may be NULL
-  virtual bool IsInlineable() const;
-  Scope* scope() const { return scope_; }
-
  private:
   VariableProxy* proxy_;
-  Variable::Mode mode_;
-  FunctionLiteral* fun_;
+  VariableMode mode_;
 
   // Nested scope from which the declaration originated.
   Scope* scope_;
 };
 
 
+class VariableDeclaration: public Declaration {
+ public:
+  DECLARE_NODE_TYPE(VariableDeclaration)
+
+  virtual InitializationFlag initialization() const {
+    return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
+  }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  VariableDeclaration(VariableProxy* proxy,
+                      VariableMode mode,
+                      Scope* scope)
+      : Declaration(proxy, mode, scope) {
+  }
+};
+
+
+class FunctionDeclaration: public Declaration {
+ public:
+  DECLARE_NODE_TYPE(FunctionDeclaration)
+
+  FunctionLiteral* fun() const { return fun_; }
+  virtual InitializationFlag initialization() const {
+    return kCreatedInitialized;
+  }
+  virtual bool IsInlineable() const;
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  FunctionDeclaration(VariableProxy* proxy,
+                      VariableMode mode,
+                      FunctionLiteral* fun,
+                      Scope* scope)
+      : Declaration(proxy, mode, scope),
+        fun_(fun) {
+    // At the moment there are no "const functions" in JavaScript...
+    ASSERT(mode == VAR || mode == LET);
+    ASSERT(fun != NULL);
+  }
+
+ private:
+  FunctionLiteral* fun_;
+};
+
+
+class ModuleDeclaration: public Declaration {
+ public:
+  DECLARE_NODE_TYPE(ModuleDeclaration)
+
+  Module* module() const { return module_; }
+  virtual InitializationFlag initialization() const {
+    return kCreatedInitialized;
+  }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  ModuleDeclaration(VariableProxy* proxy,
+                    Module* module,
+                    Scope* scope)
+      : Declaration(proxy, LET, scope),
+        module_(module) {
+  }
+
+ private:
+  Module* module_;
+};
+
+
+class ImportDeclaration: public Declaration {
+ public:
+  DECLARE_NODE_TYPE(ImportDeclaration)
+
+  Module* module() const { return module_; }
+  virtual InitializationFlag initialization() const {
+    return kCreatedInitialized;
+  }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  ImportDeclaration(VariableProxy* proxy,
+                    Module* module,
+                    Scope* scope)
+      : Declaration(proxy, LET, scope),
+        module_(module) {
+  }
+
+ private:
+  Module* module_;
+};
+
+
+class ExportDeclaration: public Declaration {
+ public:
+  DECLARE_NODE_TYPE(ExportDeclaration)
+
+  virtual InitializationFlag initialization() const {
+    return kCreatedInitialized;
+  }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  ExportDeclaration(VariableProxy* proxy,
+                    Scope* scope)
+      : Declaration(proxy, LET, scope) {
+  }
+};
+
+
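+// A module expression carries an Interface describing the names it exports;
+// the concrete forms (literal body, variable, path and URL) follow below.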
+class Module: public AstNode {
+ public:
+  Interface* interface() const { return interface_; }
+
+ protected:
+  Module() : interface_(Interface::NewModule()) {}
+  explicit Module(Interface* interface) : interface_(interface) {}
+
+ private:
+  Interface* interface_;
+};
+
+
+class ModuleLiteral: public Module {
+ public:
+  DECLARE_NODE_TYPE(ModuleLiteral)
+
+  Block* body() const { return body_; }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  ModuleLiteral(Block* body, Interface* interface)
+      : Module(interface),
+        body_(body) {
+  }
+
+ private:
+  Block* body_;
+};
+
+
+class ModuleVariable: public Module {
+ public:
+  DECLARE_NODE_TYPE(ModuleVariable)
+
+  VariableProxy* proxy() const { return proxy_; }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  inline explicit ModuleVariable(VariableProxy* proxy);
+
+ private:
+  VariableProxy* proxy_;
+};
+
+
+class ModulePath: public Module {
+ public:
+  DECLARE_NODE_TYPE(ModulePath)
+
+  Module* module() const { return module_; }
+  Handle<String> name() const { return name_; }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  ModulePath(Module* module, Handle<String> name)
+      : module_(module),
+        name_(name) {
+  }
+
+ private:
+  Module* module_;
+  Handle<String> name_;
+};
+
+
+class ModuleUrl: public Module {
+ public:
+  DECLARE_NODE_TYPE(ModuleUrl)
+
+  Handle<String> url() const { return url_; }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  explicit ModuleUrl(Handle<String> url) : url_(url) {
+  }
+
+ private:
+  Handle<String> url_;
+};
+
+
 class IterationStatement: public BreakableStatement {
  public:
   // Type testing & conversion.
@@ -441,7 +691,11 @@
   Label* continue_target()  { return &continue_target_; }
 
  protected:
-  inline IterationStatement(Isolate* isolate, ZoneStringList* labels);
+  IterationStatement(Isolate* isolate, ZoneStringList* labels)
+      : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+        body_(NULL),
+        osr_entry_id_(GetNextId(isolate)) {
+  }
 
   void Initialize(Statement* body) {
     body_ = body;
@@ -456,8 +710,6 @@
 
 class DoWhileStatement: public IterationStatement {
  public:
-  inline DoWhileStatement(Isolate* isolate, ZoneStringList* labels);
-
   DECLARE_NODE_TYPE(DoWhileStatement)
 
   void Initialize(Expression* cond, Statement* body) {
@@ -477,7 +729,16 @@
   virtual int StackCheckId() const { return back_edge_id_; }
   int BackEdgeId() const { return back_edge_id_; }
 
-  virtual bool IsInlineable() const;
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
+      : IterationStatement(isolate, labels),
+        cond_(NULL),
+        condition_position_(-1),
+        continue_id_(GetNextId(isolate)),
+        back_edge_id_(GetNextId(isolate)) {
+  }
 
  private:
   Expression* cond_;
@@ -489,8 +750,6 @@
 
 class WhileStatement: public IterationStatement {
  public:
-  inline WhileStatement(Isolate* isolate, ZoneStringList* labels);
-
   DECLARE_NODE_TYPE(WhileStatement)
 
   void Initialize(Expression* cond, Statement* body) {
@@ -505,13 +764,22 @@
   void set_may_have_function_literal(bool value) {
     may_have_function_literal_ = value;
   }
-  virtual bool IsInlineable() const;
 
   // Bailout support.
   virtual int ContinueId() const { return EntryId(); }
   virtual int StackCheckId() const { return body_id_; }
   int BodyId() const { return body_id_; }
 
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  WhileStatement(Isolate* isolate, ZoneStringList* labels)
+      : IterationStatement(isolate, labels),
+        cond_(NULL),
+        may_have_function_literal_(true),
+        body_id_(GetNextId(isolate)) {
+  }
+
  private:
   Expression* cond_;
   // True if there is a function literal subexpression in the condition.
@@ -522,8 +790,6 @@
 
 class ForStatement: public IterationStatement {
  public:
-  inline ForStatement(Isolate* isolate, ZoneStringList* labels);
-
   DECLARE_NODE_TYPE(ForStatement)
 
   void Initialize(Statement* init,
@@ -555,7 +821,20 @@
   bool is_fast_smi_loop() { return loop_variable_ != NULL; }
   Variable* loop_variable() { return loop_variable_; }
   void set_loop_variable(Variable* var) { loop_variable_ = var; }
-  virtual bool IsInlineable() const;
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  ForStatement(Isolate* isolate, ZoneStringList* labels)
+      : IterationStatement(isolate, labels),
+        init_(NULL),
+        cond_(NULL),
+        next_(NULL),
+        may_have_function_literal_(true),
+        loop_variable_(NULL),
+        continue_id_(GetNextId(isolate)),
+        body_id_(GetNextId(isolate)) {
+  }
 
  private:
   Statement* init_;
@@ -571,8 +850,6 @@
 
 class ForInStatement: public IterationStatement {
  public:
-  inline ForInStatement(Isolate* isolate, ZoneStringList* labels);
-
   DECLARE_NODE_TYPE(ForInStatement)
 
   void Initialize(Expression* each, Expression* enumerable, Statement* body) {
@@ -583,35 +860,44 @@
 
   Expression* each() const { return each_; }
   Expression* enumerable() const { return enumerable_; }
-  virtual bool IsInlineable() const;
 
-  // Bailout support.
-  int AssignmentId() const { return assignment_id_; }
   virtual int ContinueId() const { return EntryId(); }
-  virtual int StackCheckId() const { return EntryId(); }
+  virtual int StackCheckId() const { return body_id_; }
+  int BodyId() const { return body_id_; }
+  int PrepareId() const { return prepare_id_; }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  ForInStatement(Isolate* isolate, ZoneStringList* labels)
+      : IterationStatement(isolate, labels),
+        each_(NULL),
+        enumerable_(NULL),
+        body_id_(GetNextId(isolate)),
+        prepare_id_(GetNextId(isolate)) {
+  }
 
  private:
   Expression* each_;
   Expression* enumerable_;
-  int assignment_id_;
+  int body_id_;
+  int prepare_id_;
 };
 
 
 class ExpressionStatement: public Statement {
  public:
-  explicit ExpressionStatement(Expression* expression)
-      : expression_(expression) { }
-
   DECLARE_NODE_TYPE(ExpressionStatement)
 
-  virtual bool IsInlineable() const;
-
-  virtual Assignment* StatementAsSimpleAssignment();
-  virtual CountOperation* StatementAsCountOperation();
-
   void set_expression(Expression* e) { expression_ = e; }
   Expression* expression() const { return expression_; }
 
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  explicit ExpressionStatement(Expression* expression)
+      : expression_(expression) { }
+
  private:
   Expression* expression_;
 };
@@ -619,13 +905,15 @@
 
 class ContinueStatement: public Statement {
  public:
-  explicit ContinueStatement(IterationStatement* target)
-      : target_(target) { }
-
   DECLARE_NODE_TYPE(ContinueStatement)
 
   IterationStatement* target() const { return target_; }
-  virtual bool IsInlineable() const;
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  explicit ContinueStatement(IterationStatement* target)
+      : target_(target) { }
 
  private:
   IterationStatement* target_;
@@ -634,13 +922,15 @@
 
 class BreakStatement: public Statement {
  public:
-  explicit BreakStatement(BreakableStatement* target)
-      : target_(target) { }
-
   DECLARE_NODE_TYPE(BreakStatement)
 
   BreakableStatement* target() const { return target_; }
-  virtual bool IsInlineable() const;
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  explicit BreakStatement(BreakableStatement* target)
+      : target_(target) { }
 
  private:
   BreakableStatement* target_;
@@ -649,13 +939,15 @@
 
 class ReturnStatement: public Statement {
  public:
-  explicit ReturnStatement(Expression* expression)
-      : expression_(expression) { }
-
   DECLARE_NODE_TYPE(ReturnStatement)
 
   Expression* expression() const { return expression_; }
-  virtual bool IsInlineable() const;
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  explicit ReturnStatement(Expression* expression)
+      : expression_(expression) { }
 
  private:
   Expression* expression_;
@@ -664,15 +956,17 @@
 
 class WithStatement: public Statement {
  public:
-  WithStatement(Expression* expression, Statement* statement)
-      : expression_(expression), statement_(statement) { }
-
   DECLARE_NODE_TYPE(WithStatement)
 
   Expression* expression() const { return expression_; }
   Statement* statement() const { return statement_; }
 
-  virtual bool IsInlineable() const;
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  WithStatement(Expression* expression, Statement* statement)
+      : expression_(expression),
+        statement_(statement) { }
 
  private:
   Expression* expression_;
@@ -704,6 +998,8 @@
   // Type feedback information.
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
+  bool IsSymbolCompare() { return compare_type_ == SYMBOL_ONLY; }
+  bool IsStringCompare() { return compare_type_ == STRING_ONLY; }
   bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
 
  private:
@@ -711,7 +1007,13 @@
   Label body_target_;
   ZoneList<Statement*>* statements_;
   int position_;
-  enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
+  enum CompareTypeFeedback {
+    NONE,
+    SMI_ONLY,
+    SYMBOL_ONLY,
+    STRING_ONLY,
+    OBJECT_ONLY
+  };
   CompareTypeFeedback compare_type_;
   int compare_id_;
   int entry_id_;
@@ -720,8 +1022,6 @@
 
 class SwitchStatement: public BreakableStatement {
  public:
-  inline SwitchStatement(Isolate* isolate, ZoneStringList* labels);
-
   DECLARE_NODE_TYPE(SwitchStatement)
 
   void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
@@ -731,7 +1031,14 @@
 
   Expression* tag() const { return tag_; }
   ZoneList<CaseClause*>* cases() const { return cases_; }
-  virtual bool IsInlineable() const;
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  SwitchStatement(Isolate* isolate, ZoneStringList* labels)
+      : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+        tag_(NULL),
+        cases_(NULL) { }
 
  private:
   Expression* tag_;
@@ -746,22 +1053,8 @@
 // given if-statement has a then- or an else-part containing code.
 class IfStatement: public Statement {
  public:
-  IfStatement(Isolate* isolate,
-              Expression* condition,
-              Statement* then_statement,
-              Statement* else_statement)
-      : condition_(condition),
-        then_statement_(then_statement),
-        else_statement_(else_statement),
-        if_id_(GetNextId(isolate)),
-        then_id_(GetNextId(isolate)),
-        else_id_(GetNextId(isolate)) {
-  }
-
   DECLARE_NODE_TYPE(IfStatement)
 
-  virtual bool IsInlineable() const;
-
   bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
   bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
 
@@ -773,6 +1066,21 @@
   int ThenId() const { return then_id_; }
   int ElseId() const { return else_id_; }
 
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  IfStatement(Isolate* isolate,
+              Expression* condition,
+              Statement* then_statement,
+              Statement* else_statement)
+      : condition_(condition),
+        then_statement_(then_statement),
+        else_statement_(else_statement),
+        if_id_(GetNextId(isolate)),
+        then_id_(GetNextId(isolate)),
+        else_id_(GetNextId(isolate)) {
+  }
+
  private:
   Expression* condition_;
   Statement* then_statement_;
@@ -787,7 +1095,7 @@
 // stack in the compiler; this should probably be reworked.
 class TargetCollector: public AstNode {
  public:
-  TargetCollector(): targets_(0) { }
+  TargetCollector() : targets_(0) { }
 
   // Adds a jump target to the collector. The collector stores a pointer not
   // a copy of the target to make binding work, so make sure not to pass in
@@ -799,7 +1107,6 @@
   virtual TargetCollector* AsTargetCollector() { return this; }
 
   ZoneList<Label*>* targets() { return &targets_; }
-  virtual bool IsInlineable() const;
 
  private:
   ZoneList<Label*> targets_;
@@ -808,18 +1115,24 @@
 
 class TryStatement: public Statement {
  public:
-  explicit TryStatement(Block* try_block)
-      : try_block_(try_block), escaping_targets_(NULL) { }
-
   void set_escaping_targets(ZoneList<Label*>* targets) {
     escaping_targets_ = targets;
   }
 
+  int index() const { return index_; }
   Block* try_block() const { return try_block_; }
   ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
-  virtual bool IsInlineable() const;
+
+ protected:
+  TryStatement(int index, Block* try_block)
+      : index_(index),
+        try_block_(try_block),
+        escaping_targets_(NULL) { }
 
  private:
+  // Unique (per-function) index of this handler.  This is not an AST ID.
+  int index_;
+
   Block* try_block_;
   ZoneList<Label*>* escaping_targets_;
 };
@@ -827,22 +1140,25 @@
 
 class TryCatchStatement: public TryStatement {
  public:
-  TryCatchStatement(Block* try_block,
-                    Scope* scope,
-                    Variable* variable,
-                    Block* catch_block)
-      : TryStatement(try_block),
-        scope_(scope),
-        variable_(variable),
-        catch_block_(catch_block) {
-  }
-
   DECLARE_NODE_TYPE(TryCatchStatement)
 
   Scope* scope() { return scope_; }
   Variable* variable() { return variable_; }
   Block* catch_block() const { return catch_block_; }
-  virtual bool IsInlineable() const;
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  TryCatchStatement(int index,
+                    Block* try_block,
+                    Scope* scope,
+                    Variable* variable,
+                    Block* catch_block)
+      : TryStatement(index, try_block),
+        scope_(scope),
+        variable_(variable),
+        catch_block_(catch_block) {
+  }
 
  private:
   Scope* scope_;
@@ -853,14 +1169,16 @@
 
 class TryFinallyStatement: public TryStatement {
  public:
-  TryFinallyStatement(Block* try_block, Block* finally_block)
-      : TryStatement(try_block),
-        finally_block_(finally_block) { }
-
   DECLARE_NODE_TYPE(TryFinallyStatement)
 
   Block* finally_block() const { return finally_block_; }
-  virtual bool IsInlineable() const;
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  TryFinallyStatement(int index, Block* try_block, Block* finally_block)
+      : TryStatement(index, try_block),
+        finally_block_(finally_block) { }
 
  private:
   Block* finally_block_;
@@ -870,7 +1188,11 @@
 class DebuggerStatement: public Statement {
  public:
   DECLARE_NODE_TYPE(DebuggerStatement)
-  virtual bool IsInlineable() const;
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  DebuggerStatement() {}
 };
 
 
@@ -878,25 +1200,17 @@
  public:
   DECLARE_NODE_TYPE(EmptyStatement)
 
-  virtual bool IsInlineable() const;
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  EmptyStatement() {}
 };
 
 
 class Literal: public Expression {
  public:
-  Literal(Isolate* isolate, Handle<Object> handle)
-      : Expression(isolate), handle_(handle) { }
-
   DECLARE_NODE_TYPE(Literal)
 
-  virtual bool IsTrivial() { return true; }
-  virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
-
-  // Check if this literal is identical to the other literal.
-  bool IsIdenticalTo(const Literal* other) const {
-    return handle_.is_identical_to(other->handle_);
-  }
-
   virtual bool IsPropertyName() {
     if (handle_->IsSymbol()) {
       uint32_t ignored;
@@ -928,9 +1242,27 @@
   }
 
   Handle<Object> handle() const { return handle_; }
-  virtual bool IsInlineable() const;
+
+  // Support for using Literal as a HashMap key. NOTE: Currently, this works
+  // only for string and number literals!
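+  // Both Hash() and Match() go through ToString(), so two literals hash and
+  // compare equal exactly when their string representations do.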
+  uint32_t Hash() { return ToString()->Hash(); }
+
+  static bool Match(void* literal1, void* literal2) {
+    Handle<String> s1 = static_cast<Literal*>(literal1)->ToString();
+    Handle<String> s2 = static_cast<Literal*>(literal2)->ToString();
+    return s1->Equals(*s2);
+  }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  Literal(Isolate* isolate, Handle<Object> handle)
+      : Expression(isolate),
+        handle_(handle) { }
 
  private:
+  Handle<String> ToString();
+
   Handle<Object> handle_;
 };
 
@@ -938,15 +1270,6 @@
 // Base class for literals that needs space in the corresponding JSFunction.
 class MaterializedLiteral: public Expression {
  public:
-  MaterializedLiteral(Isolate* isolate,
-                      int literal_index,
-                      bool is_simple,
-                      int depth)
-      : Expression(isolate),
-        literal_index_(literal_index),
-        is_simple_(is_simple),
-        depth_(depth) {}
-
   virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
 
   int literal_index() { return literal_index_; }
@@ -956,7 +1279,16 @@
   bool is_simple() const { return is_simple_; }
 
   int depth() const { return depth_; }
-  virtual bool IsInlineable() const;
+
+ protected:
+  MaterializedLiteral(Isolate* isolate,
+                      int literal_index,
+                      bool is_simple,
+                      int depth)
+      : Expression(isolate),
+        literal_index_(literal_index),
+        is_simple_(is_simple),
+        depth_(depth) {}
 
  private:
   int literal_index_;
@@ -982,39 +1314,36 @@
       PROTOTYPE              // Property is __proto__.
     };
 
-    Property(Literal* key, Expression* value);
-    Property(bool is_getter, FunctionLiteral* value);
+    Property(Literal* key, Expression* value, Isolate* isolate);
 
     Literal* key() { return key_; }
     Expression* value() { return value_; }
     Kind kind() { return kind_; }
 
+    // Type feedback information.
+    void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+    bool IsMonomorphic() { return !receiver_type_.is_null(); }
+    Handle<Map> GetReceiverType() { return receiver_type_; }
+
     bool IsCompileTimeValue();
 
     void set_emit_store(bool emit_store);
     bool emit_store();
 
+   protected:
+    template<class> friend class AstNodeFactory;
+
+    Property(bool is_getter, FunctionLiteral* value);
+    void set_key(Literal* key) { key_ = key; }
+
    private:
     Literal* key_;
     Expression* value_;
     Kind kind_;
     bool emit_store_;
+    Handle<Map> receiver_type_;
   };
 
-  ObjectLiteral(Isolate* isolate,
-                Handle<FixedArray> constant_properties,
-                ZoneList<Property*>* properties,
-                int literal_index,
-                bool is_simple,
-                bool fast_elements,
-                int depth,
-                bool has_function)
-      : MaterializedLiteral(isolate, literal_index, is_simple, depth),
-        constant_properties_(constant_properties),
-        properties_(properties),
-        fast_elements_(fast_elements),
-        has_function_(has_function) {}
-
   DECLARE_NODE_TYPE(ObjectLiteral)
 
   Handle<FixedArray> constant_properties() const {
@@ -1037,6 +1366,29 @@
     kHasFunction = 1 << 1
   };
 
+  struct Accessors: public ZoneObject {
+    Accessors() : getter(NULL), setter(NULL) { }
+    Expression* getter;
+    Expression* setter;
+  };
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  ObjectLiteral(Isolate* isolate,
+                Handle<FixedArray> constant_properties,
+                ZoneList<Property*>* properties,
+                int literal_index,
+                bool is_simple,
+                bool fast_elements,
+                int depth,
+                bool has_function)
+      : MaterializedLiteral(isolate, literal_index, is_simple, depth),
+        constant_properties_(constant_properties),
+        properties_(properties),
+        fast_elements_(fast_elements),
+        has_function_(has_function) {}
+
  private:
   Handle<FixedArray> constant_properties_;
   ZoneList<Property*>* properties_;
@@ -1048,6 +1400,14 @@
 // Node for capturing a regexp literal.
 class RegExpLiteral: public MaterializedLiteral {
  public:
+  DECLARE_NODE_TYPE(RegExpLiteral)
+
+  Handle<String> pattern() const { return pattern_; }
+  Handle<String> flags() const { return flags_; }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
   RegExpLiteral(Isolate* isolate,
                 Handle<String> pattern,
                 Handle<String> flags,
@@ -1056,11 +1416,6 @@
         pattern_(pattern),
         flags_(flags) {}
 
-  DECLARE_NODE_TYPE(RegExpLiteral)
-
-  Handle<String> pattern() const { return pattern_; }
-  Handle<String> flags() const { return flags_; }
-
  private:
   Handle<String> pattern_;
   Handle<String> flags_;
@@ -1070,6 +1425,17 @@
 // for minimizing the work when constructing it at runtime.
 class ArrayLiteral: public MaterializedLiteral {
  public:
+  DECLARE_NODE_TYPE(ArrayLiteral)
+
+  Handle<FixedArray> constant_elements() const { return constant_elements_; }
+  ZoneList<Expression*>* values() const { return values_; }
+
+  // Return an AST id for an element that is used in simulate instructions.
+  int GetIdForElement(int i) { return first_element_id_ + i; }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
   ArrayLiteral(Isolate* isolate,
                Handle<FixedArray> constant_elements,
                ZoneList<Expression*>* values,
@@ -1081,14 +1447,6 @@
         values_(values),
         first_element_id_(ReserveIdRange(isolate, values->length())) {}
 
-  DECLARE_NODE_TYPE(ArrayLiteral)
-
-  Handle<FixedArray> constant_elements() const { return constant_elements_; }
-  ZoneList<Expression*>* values() const { return values_; }
-
-  // Return an AST id for an element that is used in simulate instructions.
-  int GetIdForElement(int i) { return first_element_id_ + i; }
-
  private:
   Handle<FixedArray> constant_elements_;
   ZoneList<Expression*>* values_;
@@ -1098,77 +1456,63 @@
 
 class VariableProxy: public Expression {
  public:
-  VariableProxy(Isolate* isolate, Variable* var);
-
   DECLARE_NODE_TYPE(VariableProxy)
 
   virtual bool IsValidLeftHandSide() {
     return var_ == NULL ? true : var_->IsValidLeftHandSide();
   }
 
-  virtual bool IsTrivial() {
-    // Reading from a mutable variable is a side effect, but the
-    // variable for 'this' is immutable.
-    return is_this_ || is_trivial_;
-  }
-
-  virtual bool IsInlineable() const;
-
   bool IsVariable(Handle<String> n) {
     return !is_this() && name().is_identical_to(n);
   }
 
   bool IsArguments() { return var_ != NULL && var_->is_arguments(); }
 
+  bool IsLValue() {
+    return is_lvalue_;
+  }
+
   Handle<String> name() const { return name_; }
   Variable* var() const { return var_; }
   bool is_this() const { return is_this_; }
-  bool inside_with() const { return inside_with_; }
   int position() const { return position_; }
+  Interface* interface() const { return interface_; }
 
   void MarkAsTrivial() { is_trivial_ = true; }
+  void MarkAsLValue() { is_lvalue_ = true; }
 
   // Bind this proxy to the variable var.
   void BindTo(Variable* var);
 
  protected:
-  Handle<String> name_;
-  Variable* var_;  // resolved variable, or NULL
-  bool is_this_;
-  bool inside_with_;
-  bool is_trivial_;
-  int position_;
+  template<class> friend class AstNodeFactory;
+
+  VariableProxy(Isolate* isolate, Variable* var);
 
   VariableProxy(Isolate* isolate,
                 Handle<String> name,
                 bool is_this,
-                bool inside_with,
-                int position = RelocInfo::kNoPosition);
+                int position,
+                Interface* interface);
 
-  friend class Scope;
+  Handle<String> name_;
+  Variable* var_;  // resolved variable, or NULL
+  bool is_this_;
+  bool is_trivial_;
+  // True if this variable proxy is being used in an assignment
+  // or with an increment/decrement operator.
+  bool is_lvalue_;
+  int position_;
+  Interface* interface_;
 };
 
 
 class Property: public Expression {
  public:
-  Property(Isolate* isolate,
-           Expression* obj,
-           Expression* key,
-           int pos)
-      : Expression(isolate),
-        obj_(obj),
-        key_(key),
-        pos_(pos),
-        is_monomorphic_(false),
-        is_array_length_(false),
-        is_string_length_(false),
-        is_string_access_(false),
-        is_function_prototype_(false) { }
-
   DECLARE_NODE_TYPE(Property)
 
   virtual bool IsValidLeftHandSide() { return true; }
-  virtual bool IsInlineable() const;
 
   Expression* obj() const { return obj_; }
   Expression* key() const { return key_; }
@@ -1182,7 +1526,26 @@
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
-  virtual bool IsArrayLength() { return is_array_length_; }
+  bool IsArrayLength() { return is_array_length_; }
+  bool IsUninitialized() { return is_uninitialized_; }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  Property(Isolate* isolate,
+           Expression* obj,
+           Expression* key,
+           int pos)
+      : Expression(isolate),
+        obj_(obj),
+        key_(key),
+        pos_(pos),
+        is_monomorphic_(false),
+        is_uninitialized_(false),
+        is_array_length_(false),
+        is_string_length_(false),
+        is_string_access_(false),
+        is_function_prototype_(false) { }
 
  private:
   Expression* obj_;
@@ -1191,6 +1554,7 @@
 
   SmallMapList receiver_types_;
   bool is_monomorphic_ : 1;
+  bool is_uninitialized_ : 1;
   bool is_array_length_ : 1;
   bool is_string_length_ : 1;
   bool is_string_access_ : 1;
@@ -1200,23 +1564,8 @@
 
 class Call: public Expression {
  public:
-  Call(Isolate* isolate,
-       Expression* expression,
-       ZoneList<Expression*>* arguments,
-       int pos)
-      : Expression(isolate),
-        expression_(expression),
-        arguments_(arguments),
-        pos_(pos),
-        is_monomorphic_(false),
-        check_type_(RECEIVER_MAP_CHECK),
-        return_id_(GetNextId(isolate)) {
-  }
-
   DECLARE_NODE_TYPE(Call)
 
-  virtual bool IsInlineable() const;
-
   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
   virtual int position() const { return pos_; }
@@ -1241,6 +1590,21 @@
   bool return_is_recorded_;
 #endif
 
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  Call(Isolate* isolate,
+       Expression* expression,
+       ZoneList<Expression*>* arguments,
+       int pos)
+      : Expression(isolate),
+        expression_(expression),
+        arguments_(arguments),
+        pos_(pos),
+        is_monomorphic_(false),
+        check_type_(RECEIVER_MAP_CHECK),
+        return_id_(GetNextId(isolate)) { }
+
  private:
   Expression* expression_;
   ZoneList<Expression*>* arguments_;
@@ -1259,6 +1623,22 @@
 
 class CallNew: public Expression {
  public:
+  DECLARE_NODE_TYPE(CallNew)
+
+  Expression* expression() const { return expression_; }
+  ZoneList<Expression*>* arguments() const { return arguments_; }
+  virtual int position() const { return pos_; }
+
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  virtual bool IsMonomorphic() { return is_monomorphic_; }
+  Handle<JSFunction> target() { return target_; }
+
+  // Bailout support.
+  int ReturnId() const { return return_id_; }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
   CallNew(Isolate* isolate,
           Expression* expression,
           ZoneList<Expression*>* arguments,
@@ -1266,20 +1646,19 @@
       : Expression(isolate),
         expression_(expression),
         arguments_(arguments),
-        pos_(pos) { }
-
-  DECLARE_NODE_TYPE(CallNew)
-
-  virtual bool IsInlineable() const;
-
-  Expression* expression() const { return expression_; }
-  ZoneList<Expression*>* arguments() const { return arguments_; }
-  virtual int position() const { return pos_; }
+        pos_(pos),
+        is_monomorphic_(false),
+        return_id_(GetNextId(isolate)) { }
 
  private:
   Expression* expression_;
   ZoneList<Expression*>* arguments_;
   int pos_;
+
+  bool is_monomorphic_;
+  Handle<JSFunction> target_;
+
+  int return_id_;
 };
 
 
@@ -1289,6 +1668,16 @@
 // implemented in JavaScript (see "v8natives.js").
 class CallRuntime: public Expression {
  public:
+  DECLARE_NODE_TYPE(CallRuntime)
+
+  Handle<String> name() const { return name_; }
+  const Runtime::Function* function() const { return function_; }
+  ZoneList<Expression*>* arguments() const { return arguments_; }
+  bool is_jsruntime() const { return function_ == NULL; }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
   CallRuntime(Isolate* isolate,
               Handle<String> name,
               const Runtime::Function* function,
@@ -1298,15 +1687,6 @@
         function_(function),
         arguments_(arguments) { }
 
-  DECLARE_NODE_TYPE(CallRuntime)
-
-  virtual bool IsInlineable() const;
-
-  Handle<String> name() const { return name_; }
-  const Runtime::Function* function() const { return function_; }
-  ZoneList<Expression*>* arguments() const { return arguments_; }
-  bool is_jsruntime() const { return function_ == NULL; }
-
  private:
   Handle<String> name_;
   const Runtime::Function* function_;
@@ -1316,49 +1696,53 @@
 
 class UnaryOperation: public Expression {
  public:
-  UnaryOperation(Isolate* isolate,
-                 Token::Value op,
-                 Expression* expression,
-                 int pos)
-      : Expression(isolate), op_(op), expression_(expression), pos_(pos) {
-    ASSERT(Token::IsUnaryOp(op));
-  }
-
   DECLARE_NODE_TYPE(UnaryOperation)
 
-  virtual bool IsInlineable() const;
-
   virtual bool ResultOverwriteAllowed();
 
   Token::Value op() const { return op_; }
   Expression* expression() const { return expression_; }
   virtual int position() const { return pos_; }
 
+  int MaterializeTrueId() { return materialize_true_id_; }
+  int MaterializeFalseId() { return materialize_false_id_; }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  UnaryOperation(Isolate* isolate,
+                 Token::Value op,
+                 Expression* expression,
+                 int pos)
+      : Expression(isolate),
+        op_(op),
+        expression_(expression),
+        pos_(pos),
+        materialize_true_id_(AstNode::kNoNumber),
+        materialize_false_id_(AstNode::kNoNumber) {
+    ASSERT(Token::IsUnaryOp(op));
+    if (op == Token::NOT) {
+      materialize_true_id_ = GetNextId(isolate);
+      materialize_false_id_ = GetNextId(isolate);
+    }
+  }
+
  private:
   Token::Value op_;
   Expression* expression_;
   int pos_;
+
+  // For unary not (Token::NOT), the AST ids at which the true and false
+  // results, respectively, are materialized.
+  int materialize_true_id_;
+  int materialize_false_id_;
 };
 
 
 class BinaryOperation: public Expression {
  public:
-  BinaryOperation(Isolate* isolate,
-                  Token::Value op,
-                  Expression* left,
-                  Expression* right,
-                  int pos)
-      : Expression(isolate), op_(op), left_(left), right_(right), pos_(pos) {
-    ASSERT(Token::IsBinaryOp(op));
-    right_id_ = (op == Token::AND || op == Token::OR)
-        ? static_cast<int>(GetNextId(isolate))
-        : AstNode::kNoNumber;
-  }
-
   DECLARE_NODE_TYPE(BinaryOperation)
 
-  virtual bool IsInlineable() const;
-
   virtual bool ResultOverwriteAllowed();
 
   Token::Value op() const { return op_; }
@@ -1369,6 +1753,21 @@
   // Bailout support.
   int RightId() const { return right_id_; }
 
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  BinaryOperation(Isolate* isolate,
+                  Token::Value op,
+                  Expression* left,
+                  Expression* right,
+                  int pos)
+      : Expression(isolate), op_(op), left_(left), right_(right), pos_(pos) {
+    ASSERT(Token::IsBinaryOp(op));
+    right_id_ = (op == Token::AND || op == Token::OR)
+        ? GetNextId(isolate)
+        : AstNode::kNoNumber;
+  }
+
  private:
   Token::Value op_;
   Expression* left_;
@@ -1382,19 +1781,6 @@
 
 class CountOperation: public Expression {
  public:
-  CountOperation(Isolate* isolate,
-                 Token::Value op,
-                 bool is_prefix,
-                 Expression* expr,
-                 int pos)
-      : Expression(isolate),
-        op_(op),
-        is_prefix_(is_prefix),
-        expression_(expr),
-        pos_(pos),
-        assignment_id_(GetNextId(isolate)),
-        count_id_(GetNextId(isolate)) {}
-
   DECLARE_NODE_TYPE(CountOperation)
 
   bool is_prefix() const { return is_prefix_; }
@@ -1410,8 +1796,6 @@
 
   virtual void MarkAsStatement() { is_prefix_ = true; }
 
-  virtual bool IsInlineable() const;
-
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
@@ -1420,6 +1804,22 @@
   int AssignmentId() const { return assignment_id_; }
   int CountId() const { return count_id_; }
 
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  CountOperation(Isolate* isolate,
+                 Token::Value op,
+                 bool is_prefix,
+                 Expression* expr,
+                 int pos)
+      : Expression(isolate),
+        op_(op),
+        is_prefix_(is_prefix),
+        expression_(expr),
+        pos_(pos),
+        assignment_id_(GetNextId(isolate)),
+        count_id_(GetNextId(isolate)) {}
+
  private:
   Token::Value op_;
   bool is_prefix_;
@@ -1434,6 +1834,26 @@
 
 class CompareOperation: public Expression {
  public:
+  DECLARE_NODE_TYPE(CompareOperation)
+
+  Token::Value op() const { return op_; }
+  Expression* left() const { return left_; }
+  Expression* right() const { return right_; }
+  virtual int position() const { return pos_; }
+
+  // Type feedback information.
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
+  bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
+
+  // Match special cases.
+  bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
+  bool IsLiteralCompareUndefined(Expression** expr);
+  bool IsLiteralCompareNull(Expression** expr);
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
   CompareOperation(Isolate* isolate,
                    Token::Value op,
                    Expression* left,
@@ -1448,24 +1868,6 @@
     ASSERT(Token::IsCompareOp(op));
   }
 
-  DECLARE_NODE_TYPE(CompareOperation)
-
-  Token::Value op() const { return op_; }
-  Expression* left() const { return left_; }
-  Expression* right() const { return right_; }
-  virtual int position() const { return pos_; }
-
-  virtual bool IsInlineable() const;
-
-  // Type feedback information.
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-  bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
-  bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
-
-  // Match special cases.
-  bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
-  bool IsLiteralCompareUndefined(Expression** expr);
-
  private:
   Token::Value op_;
   Expression* left_;
@@ -1477,27 +1879,23 @@
 };
 
 
-class CompareToNull: public Expression {
- public:
-  CompareToNull(Isolate* isolate, bool is_strict, Expression* expression)
-      : Expression(isolate), is_strict_(is_strict), expression_(expression) { }
-
-  DECLARE_NODE_TYPE(CompareToNull)
-
-  virtual bool IsInlineable() const;
-
-  bool is_strict() const { return is_strict_; }
-  Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
-  Expression* expression() const { return expression_; }
-
- private:
-  bool is_strict_;
-  Expression* expression_;
-};
-
-
 class Conditional: public Expression {
  public:
+  DECLARE_NODE_TYPE(Conditional)
+
+  Expression* condition() const { return condition_; }
+  Expression* then_expression() const { return then_expression_; }
+  Expression* else_expression() const { return else_expression_; }
+
+  int then_expression_position() const { return then_expression_position_; }
+  int else_expression_position() const { return else_expression_position_; }
+
+  int ThenId() const { return then_id_; }
+  int ElseId() const { return else_id_; }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
   Conditional(Isolate* isolate,
               Expression* condition,
               Expression* then_expression,
@@ -1511,22 +1909,7 @@
         then_expression_position_(then_expression_position),
         else_expression_position_(else_expression_position),
         then_id_(GetNextId(isolate)),
-        else_id_(GetNextId(isolate)) {
-  }
-
-  DECLARE_NODE_TYPE(Conditional)
-
-  virtual bool IsInlineable() const;
-
-  Expression* condition() const { return condition_; }
-  Expression* then_expression() const { return then_expression_; }
-  Expression* else_expression() const { return else_expression_; }
-
-  int then_expression_position() const { return then_expression_position_; }
-  int else_expression_position() const { return else_expression_position_; }
-
-  int ThenId() const { return then_id_; }
-  int ElseId() const { return else_id_; }
+        else_id_(GetNextId(isolate)) { }
 
  private:
   Expression* condition_;
@@ -1541,16 +1924,8 @@
 
 class Assignment: public Expression {
  public:
-  Assignment(Isolate* isolate,
-             Token::Value op,
-             Expression* target,
-             Expression* value,
-             int pos);
-
   DECLARE_NODE_TYPE(Assignment)
 
-  virtual bool IsInlineable() const;
-
   Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
 
   Token::Value binary_op() const;
@@ -1582,6 +1957,25 @@
   int CompoundLoadId() const { return compound_load_id_; }
   int AssignmentId() const { return assignment_id_; }
 
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  Assignment(Isolate* isolate,
+             Token::Value op,
+             Expression* target,
+             Expression* value,
+             int pos);
+
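+  // Called by the factory right after construction: a compound assignment
+  // such as "a += b" additionally allocates the implied binary operation
+  // "a + b" and an AST id for its load.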
+  template<class Visitor>
+  void Init(Isolate* isolate, AstNodeFactory<Visitor>* factory) {
+    ASSERT(Token::IsAssignmentOp(op_));
+    if (is_compound()) {
+      binary_operation_ =
+          factory->NewBinaryOperation(binary_op(), target_, value_, pos_ + 1);
+      compound_load_id_ = GetNextId(isolate);
+    }
+  }
+
  private:
   Token::Value op_;
   Expression* target_;
@@ -1601,14 +1995,16 @@
 
 class Throw: public Expression {
  public:
-  Throw(Isolate* isolate, Expression* exception, int pos)
-      : Expression(isolate), exception_(exception), pos_(pos) {}
-
   DECLARE_NODE_TYPE(Throw)
 
   Expression* exception() const { return exception_; }
   virtual int position() const { return pos_; }
-  virtual bool IsInlineable() const;
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  Throw(Isolate* isolate, Expression* exception, int pos)
+      : Expression(isolate), exception_(exception), pos_(pos) {}
 
  private:
   Expression* exception_;
@@ -1624,38 +2020,15 @@
     DECLARATION
   };
 
-  FunctionLiteral(Isolate* isolate,
-                  Handle<String> name,
-                  Scope* scope,
-                  ZoneList<Statement*>* body,
-                  int materialized_literal_count,
-                  int expected_property_count,
-                  bool has_only_simple_this_property_assignments,
-                  Handle<FixedArray> this_property_assignments,
-                  int num_parameters,
-                  int start_position,
-                  int end_position,
-                  Type type,
-                  bool has_duplicate_parameters)
-      : Expression(isolate),
-        name_(name),
-        scope_(scope),
-        body_(body),
-        materialized_literal_count_(materialized_literal_count),
-        expected_property_count_(expected_property_count),
-        has_only_simple_this_property_assignments_(
-            has_only_simple_this_property_assignments),
-        this_property_assignments_(this_property_assignments),
-        num_parameters_(num_parameters),
-        start_position_(start_position),
-        end_position_(end_position),
-        function_token_position_(RelocInfo::kNoPosition),
-        inferred_name_(HEAP->empty_string()),
-        is_expression_(type != DECLARATION),
-        is_anonymous_(type == ANONYMOUS_EXPRESSION),
-        pretenure_(false),
-        has_duplicate_parameters_(has_duplicate_parameters) {
-  }
+  enum ParameterFlag {
+    kNoDuplicateParameters = 0,
+    kHasDuplicateParameters = 1
+  };
+
+  enum IsFunctionFlag {
+    kGlobalOrEval,
+    kIsFunction
+  };
 
   DECLARE_NODE_TYPE(FunctionLiteral)
 
@@ -1664,21 +2037,24 @@
   ZoneList<Statement*>* body() const { return body_; }
   void set_function_token_position(int pos) { function_token_position_ = pos; }
   int function_token_position() const { return function_token_position_; }
-  int start_position() const { return start_position_; }
-  int end_position() const { return end_position_; }
-  bool is_expression() const { return is_expression_; }
-  bool is_anonymous() const { return is_anonymous_; }
-  bool strict_mode() const;
+  int start_position() const;
+  int end_position() const;
+  int SourceSize() const { return end_position() - start_position(); }
+  bool is_expression() const { return IsExpression::decode(bitfield_); }
+  bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
+  bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
+  LanguageMode language_mode() const;
 
   int materialized_literal_count() { return materialized_literal_count_; }
   int expected_property_count() { return expected_property_count_; }
+  int handler_count() { return handler_count_; }
   bool has_only_simple_this_property_assignments() {
-      return has_only_simple_this_property_assignments_;
+    return HasOnlySimpleThisPropertyAssignments::decode(bitfield_);
   }
   Handle<FixedArray> this_property_assignments() {
       return this_property_assignments_;
   }
-  int num_parameters() { return num_parameters_; }
+  int parameter_count() { return parameter_count_; }
 
   bool AllowsLazyCompilation();
 
@@ -1692,45 +2068,98 @@
     inferred_name_ = inferred_name;
   }
 
-  bool pretenure() { return pretenure_; }
-  void set_pretenure(bool value) { pretenure_ = value; }
-  virtual bool IsInlineable() const;
+  bool pretenure() { return Pretenure::decode(bitfield_); }
+  void set_pretenure() { bitfield_ |= Pretenure::encode(true); }
 
-  bool has_duplicate_parameters() { return has_duplicate_parameters_; }
+  bool has_duplicate_parameters() {
+    return HasDuplicateParameters::decode(bitfield_);
+  }
+
+  bool is_function() { return IsFunction::decode(bitfield_) == kIsFunction; }
+
+  int ast_node_count() { return ast_properties_.node_count(); }
+  AstProperties::Flags* flags() { return ast_properties_.flags(); }
+  void set_ast_properties(AstProperties* ast_properties) {
+    ast_properties_ = *ast_properties;
+  }
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  FunctionLiteral(Isolate* isolate,
+                  Handle<String> name,
+                  Scope* scope,
+                  ZoneList<Statement*>* body,
+                  int materialized_literal_count,
+                  int expected_property_count,
+                  int handler_count,
+                  bool has_only_simple_this_property_assignments,
+                  Handle<FixedArray> this_property_assignments,
+                  int parameter_count,
+                  Type type,
+                  ParameterFlag has_duplicate_parameters,
+                  IsFunctionFlag is_function)
+      : Expression(isolate),
+        name_(name),
+        scope_(scope),
+        body_(body),
+        this_property_assignments_(this_property_assignments),
+        inferred_name_(isolate->factory()->empty_string()),
+        materialized_literal_count_(materialized_literal_count),
+        expected_property_count_(expected_property_count),
+        handler_count_(handler_count),
+        parameter_count_(parameter_count),
+        function_token_position_(RelocInfo::kNoPosition) {
+    bitfield_ =
+        HasOnlySimpleThisPropertyAssignments::encode(
+            has_only_simple_this_property_assignments) |
+        IsExpression::encode(type != DECLARATION) |
+        IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) |
+        Pretenure::encode(false) |
+        HasDuplicateParameters::encode(has_duplicate_parameters) |
+        IsFunction::encode(is_function);
+  }
 
  private:
   Handle<String> name_;
   Scope* scope_;
   ZoneList<Statement*>* body_;
+  Handle<FixedArray> this_property_assignments_;
+  Handle<String> inferred_name_;
+  AstProperties ast_properties_;
+
   int materialized_literal_count_;
   int expected_property_count_;
-  bool has_only_simple_this_property_assignments_;
-  Handle<FixedArray> this_property_assignments_;
-  int num_parameters_;
-  int start_position_;
-  int end_position_;
+  int handler_count_;
+  int parameter_count_;
   int function_token_position_;
-  Handle<String> inferred_name_;
-  bool is_expression_;
-  bool is_anonymous_;
-  bool pretenure_;
-  bool has_duplicate_parameters_;
+
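+  // The former individual bools are packed into a single word; each BitField
+  // class below records its bit position and width within bitfield_.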
+  unsigned bitfield_;
+  class HasOnlySimpleThisPropertyAssignments: public BitField<bool, 0, 1> {};
+  class IsExpression: public BitField<bool, 1, 1> {};
+  class IsAnonymous: public BitField<bool, 2, 1> {};
+  class Pretenure: public BitField<bool, 3, 1> {};
+  class HasDuplicateParameters: public BitField<ParameterFlag, 4, 1> {};
+  class IsFunction: public BitField<IsFunctionFlag, 5, 1> {};
 };
 
 
 class SharedFunctionInfoLiteral: public Expression {
  public:
-  SharedFunctionInfoLiteral(
-      Isolate* isolate,
-      Handle<SharedFunctionInfo> shared_function_info)
-      : Expression(isolate), shared_function_info_(shared_function_info) { }
-
   DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
 
   Handle<SharedFunctionInfo> shared_function_info() const {
     return shared_function_info_;
   }
-  virtual bool IsInlineable() const;
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  SharedFunctionInfoLiteral(
+      Isolate* isolate,
+      Handle<SharedFunctionInfo> shared_function_info)
+      : Expression(isolate),
+        shared_function_info_(shared_function_info) { }
 
  private:
   Handle<SharedFunctionInfo> shared_function_info_;
@@ -1739,11 +2168,16 @@
 
 class ThisFunction: public Expression {
  public:
-  explicit ThisFunction(Isolate* isolate) : Expression(isolate) {}
   DECLARE_NODE_TYPE(ThisFunction)
-  virtual bool IsInlineable() const;
+
+ protected:
+  template<class> friend class AstNodeFactory;
+
+  explicit ThisFunction(Isolate* isolate) : Expression(isolate) {}
 };
 
+#undef DECLARE_NODE_TYPE
+
 
 // ----------------------------------------------------------------------------
 // Regular expressions
@@ -2096,13 +2530,23 @@
   virtual bool IsEmpty();
   virtual int min_match() { return 0; }
   virtual int max_match() { return 0; }
-  static RegExpEmpty* GetInstance() { return &kInstance; }
- private:
-  static RegExpEmpty kInstance;
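+  // Lazily created on first use and never freed; ::new uses the global
+  // allocator instead of ZoneObject's zone-based operator new, so the
+  // singleton is not tied to any compilation zone.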
+  static RegExpEmpty* GetInstance() {
+    static RegExpEmpty* instance = ::new RegExpEmpty();
+    return instance;
+  }
 };
 
 
 // ----------------------------------------------------------------------------
+// Out-of-line inline constructors (to side-step cyclic dependencies).
+
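+// ModuleVariable's constructor needs proxy->interface(), but VariableProxy is
+// only declared further down in this header, so the body must live here.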
+inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
+    : Module(proxy->interface()),
+      proxy_(proxy) {
+}
+
+
+// ----------------------------------------------------------------------------
 // Basic visitor
 // - leaf node visitors are abstract.
 
@@ -2144,6 +2588,397 @@
 };
 
 
+// ----------------------------------------------------------------------------
+// Construction time visitor.
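+// Runs as AstNodeFactory creates each node: counts nodes and accumulates
+// per-function flags, which are later attached to the FunctionLiteral via
+// set_ast_properties().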
+
+class AstConstructionVisitor BASE_EMBEDDED {
+ public:
+  AstConstructionVisitor() { }
+
+  AstProperties* ast_properties() { return &properties_; }
+
+ private:
+  template<class> friend class AstNodeFactory;
+
+  // Node visitors.
+#define DEF_VISIT(type) \
+  void Visit##type(type* node);
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+  void increase_node_count() { properties_.add_node_count(1); }
+  void add_flag(AstPropertiesFlag flag) { properties_.flags()->Add(flag); }
+
+  AstProperties properties_;
+};
+
+
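+// A do-nothing counterpart for callers that build AST nodes outside of
+// parsing and have no statistics to collect.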
+class AstNullVisitor BASE_EMBEDDED {
+ public:
+  // Node visitors.
+#define DEF_VISIT(type) \
+  void Visit##type(type* node) {}
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+};
+
+
+// ----------------------------------------------------------------------------
+// AstNode factory
+
+template<class Visitor>
+class AstNodeFactory BASE_EMBEDDED {
+ public:
+  explicit AstNodeFactory(Isolate* isolate)
+      : isolate_(isolate),
+        zone_(isolate_->zone()) { }
+
+  Visitor* visitor() { return &visitor_; }
+
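+  // Every New* method below funnels its freshly allocated node through the
+  // visitor before returning it. Typical use (sketch): the parser owns an
+  // AstNodeFactory<AstConstructionVisitor> and builds nodes through it, e.g.
+  //   factory()->NewReturnStatement(factory()->NewLiteral(value));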
+#define VISIT_AND_RETURN(NodeType, node) \
+  visitor_.Visit##NodeType((node)); \
+  return node;
+
+  VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy,
+                                              VariableMode mode,
+                                              Scope* scope) {
+    VariableDeclaration* decl =
+        new(zone_) VariableDeclaration(proxy, mode, scope);
+    VISIT_AND_RETURN(VariableDeclaration, decl)
+  }
+
+  FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy,
+                                              VariableMode mode,
+                                              FunctionLiteral* fun,
+                                              Scope* scope) {
+    FunctionDeclaration* decl =
+        new(zone_) FunctionDeclaration(proxy, mode, fun, scope);
+    VISIT_AND_RETURN(FunctionDeclaration, decl)
+  }
+
+  ModuleDeclaration* NewModuleDeclaration(VariableProxy* proxy,
+                                          Module* module,
+                                          Scope* scope) {
+    ModuleDeclaration* decl =
+        new(zone_) ModuleDeclaration(proxy, module, scope);
+    VISIT_AND_RETURN(ModuleDeclaration, decl)
+  }
+
+  ImportDeclaration* NewImportDeclaration(VariableProxy* proxy,
+                                          Module* module,
+                                          Scope* scope) {
+    ImportDeclaration* decl =
+        new(zone_) ImportDeclaration(proxy, module, scope);
+    VISIT_AND_RETURN(ImportDeclaration, decl)
+  }
+
+  ExportDeclaration* NewExportDeclaration(VariableProxy* proxy,
+                                          Scope* scope) {
+    ExportDeclaration* decl =
+        new(zone_) ExportDeclaration(proxy, scope);
+    VISIT_AND_RETURN(ExportDeclaration, decl)
+  }
+
+  ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface) {
+    ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface);
+    VISIT_AND_RETURN(ModuleLiteral, module)
+  }
+
+  ModuleVariable* NewModuleVariable(VariableProxy* proxy) {
+    ModuleVariable* module = new(zone_) ModuleVariable(proxy);
+    VISIT_AND_RETURN(ModuleVariable, module)
+  }
+
+  ModulePath* NewModulePath(Module* origin, Handle<String> name) {
+    ModulePath* module = new(zone_) ModulePath(origin, name);
+    VISIT_AND_RETURN(ModulePath, module)
+  }
+
+  ModuleUrl* NewModuleUrl(Handle<String> url) {
+    ModuleUrl* module = new(zone_) ModuleUrl(url);
+    VISIT_AND_RETURN(ModuleUrl, module)
+  }
+
+  Block* NewBlock(ZoneStringList* labels,
+                  int capacity,
+                  bool is_initializer_block) {
+    Block* block = new(zone_) Block(
+        isolate_, labels, capacity, is_initializer_block);
+    VISIT_AND_RETURN(Block, block)
+  }
+
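+  // Expands to NewDoWhileStatement(labels), NewWhileStatement(labels), etc.,
+  // for the statement types whose constructors take only (isolate, labels).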
+#define STATEMENT_WITH_LABELS(NodeType) \
+  NodeType* New##NodeType(ZoneStringList* labels) { \
+    NodeType* stmt = new(zone_) NodeType(isolate_, labels); \
+    VISIT_AND_RETURN(NodeType, stmt); \
+  }
+  STATEMENT_WITH_LABELS(DoWhileStatement)
+  STATEMENT_WITH_LABELS(WhileStatement)
+  STATEMENT_WITH_LABELS(ForStatement)
+  STATEMENT_WITH_LABELS(ForInStatement)
+  STATEMENT_WITH_LABELS(SwitchStatement)
+#undef STATEMENT_WITH_LABELS
+
+  ExpressionStatement* NewExpressionStatement(Expression* expression) {
+    ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression);
+    VISIT_AND_RETURN(ExpressionStatement, stmt)
+  }
+
+  ContinueStatement* NewContinueStatement(IterationStatement* target) {
+    ContinueStatement* stmt = new(zone_) ContinueStatement(target);
+    VISIT_AND_RETURN(ContinueStatement, stmt)
+  }
+
+  BreakStatement* NewBreakStatement(BreakableStatement* target) {
+    BreakStatement* stmt = new(zone_) BreakStatement(target);
+    VISIT_AND_RETURN(BreakStatement, stmt)
+  }
+
+  ReturnStatement* NewReturnStatement(Expression* expression) {
+    ReturnStatement* stmt = new(zone_) ReturnStatement(expression);
+    VISIT_AND_RETURN(ReturnStatement, stmt)
+  }
+
+  WithStatement* NewWithStatement(Expression* expression,
+                                  Statement* statement) {
+    WithStatement* stmt = new(zone_) WithStatement(expression, statement);
+    VISIT_AND_RETURN(WithStatement, stmt)
+  }
+
+  IfStatement* NewIfStatement(Expression* condition,
+                              Statement* then_statement,
+                              Statement* else_statement) {
+    IfStatement* stmt = new(zone_) IfStatement(
+        isolate_, condition, then_statement, else_statement);
+    VISIT_AND_RETURN(IfStatement, stmt)
+  }
+
+  TryCatchStatement* NewTryCatchStatement(int index,
+                                          Block* try_block,
+                                          Scope* scope,
+                                          Variable* variable,
+                                          Block* catch_block) {
+    TryCatchStatement* stmt = new(zone_) TryCatchStatement(
+        index, try_block, scope, variable, catch_block);
+    VISIT_AND_RETURN(TryCatchStatement, stmt)
+  }
+
+  TryFinallyStatement* NewTryFinallyStatement(int index,
+                                              Block* try_block,
+                                              Block* finally_block) {
+    TryFinallyStatement* stmt =
+        new(zone_) TryFinallyStatement(index, try_block, finally_block);
+    VISIT_AND_RETURN(TryFinallyStatement, stmt)
+  }
+
+  DebuggerStatement* NewDebuggerStatement() {
+    DebuggerStatement* stmt = new(zone_) DebuggerStatement();
+    VISIT_AND_RETURN(DebuggerStatement, stmt)
+  }
+
+  EmptyStatement* NewEmptyStatement() {
+    return new(zone_) EmptyStatement();
+  }
+
+  Literal* NewLiteral(Handle<Object> handle) {
+    Literal* lit = new(zone_) Literal(isolate_, handle);
+    VISIT_AND_RETURN(Literal, lit)
+  }
+
+  Literal* NewNumberLiteral(double number) {
+    return NewLiteral(isolate_->factory()->NewNumber(number, TENURED));
+  }
+
+  ObjectLiteral* NewObjectLiteral(
+      Handle<FixedArray> constant_properties,
+      ZoneList<ObjectLiteral::Property*>* properties,
+      int literal_index,
+      bool is_simple,
+      bool fast_elements,
+      int depth,
+      bool has_function) {
+    ObjectLiteral* lit = new(zone_) ObjectLiteral(
+        isolate_, constant_properties, properties, literal_index,
+        is_simple, fast_elements, depth, has_function);
+    VISIT_AND_RETURN(ObjectLiteral, lit)
+  }
+
+  ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter,
+                                                    FunctionLiteral* value) {
+    ObjectLiteral::Property* prop =
+        new(zone_) ObjectLiteral::Property(is_getter, value);
+    prop->set_key(NewLiteral(value->name()));
+    return prop;  // Not an AST node, will not be visited.
+  }
+
+  RegExpLiteral* NewRegExpLiteral(Handle<String> pattern,
+                                  Handle<String> flags,
+                                  int literal_index) {
+    RegExpLiteral* lit =
+        new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index);
+    VISIT_AND_RETURN(RegExpLiteral, lit);
+  }
+
+  ArrayLiteral* NewArrayLiteral(Handle<FixedArray> constant_elements,
+                                ZoneList<Expression*>* values,
+                                int literal_index,
+                                bool is_simple,
+                                int depth) {
+    ArrayLiteral* lit = new(zone_) ArrayLiteral(
+        isolate_, constant_elements, values, literal_index, is_simple, depth);
+    VISIT_AND_RETURN(ArrayLiteral, lit)
+  }
+
+  VariableProxy* NewVariableProxy(Variable* var) {
+    VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var);
+    VISIT_AND_RETURN(VariableProxy, proxy)
+  }
+
+  VariableProxy* NewVariableProxy(Handle<String> name,
+                                  bool is_this,
+                                  int position = RelocInfo::kNoPosition,
+                                  Interface* interface =
+                                      Interface::NewValue()) {
+    VariableProxy* proxy =
+        new(zone_) VariableProxy(isolate_, name, is_this, position, interface);
+    VISIT_AND_RETURN(VariableProxy, proxy)
+  }
+
+  Property* NewProperty(Expression* obj, Expression* key, int pos) {
+    Property* prop = new(zone_) Property(isolate_, obj, key, pos);
+    VISIT_AND_RETURN(Property, prop)
+  }
+
+  Call* NewCall(Expression* expression,
+                ZoneList<Expression*>* arguments,
+                int pos) {
+    Call* call = new(zone_) Call(isolate_, expression, arguments, pos);
+    VISIT_AND_RETURN(Call, call)
+  }
+
+  CallNew* NewCallNew(Expression* expression,
+                      ZoneList<Expression*>* arguments,
+                      int pos) {
+    CallNew* call = new(zone_) CallNew(isolate_, expression, arguments, pos);
+    VISIT_AND_RETURN(CallNew, call)
+  }
+
+  CallRuntime* NewCallRuntime(Handle<String> name,
+                              const Runtime::Function* function,
+                              ZoneList<Expression*>* arguments) {
+    CallRuntime* call =
+        new(zone_) CallRuntime(isolate_, name, function, arguments);
+    VISIT_AND_RETURN(CallRuntime, call)
+  }
+
+  UnaryOperation* NewUnaryOperation(Token::Value op,
+                                    Expression* expression,
+                                    int pos) {
+    UnaryOperation* node =
+        new(zone_) UnaryOperation(isolate_, op, expression, pos);
+    VISIT_AND_RETURN(UnaryOperation, node)
+  }
+
+  BinaryOperation* NewBinaryOperation(Token::Value op,
+                                      Expression* left,
+                                      Expression* right,
+                                      int pos) {
+    BinaryOperation* node =
+        new(zone_) BinaryOperation(isolate_, op, left, right, pos);
+    VISIT_AND_RETURN(BinaryOperation, node)
+  }
+
+  CountOperation* NewCountOperation(Token::Value op,
+                                    bool is_prefix,
+                                    Expression* expr,
+                                    int pos) {
+    CountOperation* node =
+        new(zone_) CountOperation(isolate_, op, is_prefix, expr, pos);
+    VISIT_AND_RETURN(CountOperation, node)
+  }
+
+  CompareOperation* NewCompareOperation(Token::Value op,
+                                        Expression* left,
+                                        Expression* right,
+                                        int pos) {
+    CompareOperation* node =
+        new(zone_) CompareOperation(isolate_, op, left, right, pos);
+    VISIT_AND_RETURN(CompareOperation, node)
+  }
+
+  Conditional* NewConditional(Expression* condition,
+                              Expression* then_expression,
+                              Expression* else_expression,
+                              int then_expression_position,
+                              int else_expression_position) {
+    Conditional* cond = new(zone_) Conditional(
+        isolate_, condition, then_expression, else_expression,
+        then_expression_position, else_expression_position);
+    VISIT_AND_RETURN(Conditional, cond)
+  }
+
+  Assignment* NewAssignment(Token::Value op,
+                            Expression* target,
+                            Expression* value,
+                            int pos) {
+    Assignment* assign =
+        new(zone_) Assignment(isolate_, op, target, value, pos);
+    assign->Init(isolate_, this);
+    VISIT_AND_RETURN(Assignment, assign)
+  }
+
+  Throw* NewThrow(Expression* exception, int pos) {
+    Throw* t = new(zone_) Throw(isolate_, exception, pos);
+    VISIT_AND_RETURN(Throw, t)
+  }
+
+  FunctionLiteral* NewFunctionLiteral(
+      Handle<String> name,
+      Scope* scope,
+      ZoneList<Statement*>* body,
+      int materialized_literal_count,
+      int expected_property_count,
+      int handler_count,
+      bool has_only_simple_this_property_assignments,
+      Handle<FixedArray> this_property_assignments,
+      int parameter_count,
+      FunctionLiteral::ParameterFlag has_duplicate_parameters,
+      FunctionLiteral::Type type,
+      FunctionLiteral::IsFunctionFlag is_function) {
+    FunctionLiteral* lit = new(zone_) FunctionLiteral(
+        isolate_, name, scope, body,
+        materialized_literal_count, expected_property_count, handler_count,
+        has_only_simple_this_property_assignments, this_property_assignments,
+        parameter_count, type, has_duplicate_parameters, is_function);
+    // Top-level literal doesn't count for the AST's properties.
+    if (is_function == FunctionLiteral::kIsFunction) {
+      visitor_.VisitFunctionLiteral(lit);
+    }
+    return lit;
+  }
+
+  SharedFunctionInfoLiteral* NewSharedFunctionInfoLiteral(
+      Handle<SharedFunctionInfo> shared_function_info) {
+    SharedFunctionInfoLiteral* lit =
+        new(zone_) SharedFunctionInfoLiteral(isolate_, shared_function_info);
+    VISIT_AND_RETURN(SharedFunctionInfoLiteral, lit)
+  }
+
+  ThisFunction* NewThisFunction() {
+    ThisFunction* fun = new(zone_) ThisFunction(isolate_);
+    VISIT_AND_RETURN(ThisFunction, fun)
+  }
+
+#undef VISIT_AND_RETURN
+
+ private:
+  Isolate* isolate_;
+  Zone* zone_;
+  Visitor visitor_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_AST_H_
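
Every New* method in the ast.h hunk above follows one pattern: allocate the node in the zone, immediately hand it to a visitor via VISIT_AND_RETURN, then return it, so per-node bookkeeping can never be skipped at a call site (NewEmptyStatement and NewObjectLiteralProperty deliberately opt out). A minimal standalone sketch of that factory-with-visitor idiom; plain C++, not V8 code, all names illustrative:

    #include <cstdio>

    struct Node { virtual ~Node() {} };
    struct Literal : Node {
      double value;
      explicit Literal(double v) : value(v) {}
    };
    struct BinaryOp : Node {
      char op; Node* left; Node* right;
      BinaryOp(char o, Node* l, Node* r) : op(o), left(l), right(r) {}
    };

    // A visitor that counts nodes; a no-op visitor would compile to nothing.
    struct CountingVisitor {
      CountingVisitor() : nodes(0) {}
      void VisitLiteral(Literal*) { ++nodes; }
      void VisitBinaryOp(BinaryOp*) { ++nodes; }
      int nodes;
    };

    template <class Visitor>
    class NodeFactory {
     public:
    #define VISIT_AND_RETURN(Type, var) \
      visitor_.Visit##Type(var);        \
      return var;

      Literal* NewLiteral(double v) {
        Literal* lit = new Literal(v);  // V8 allocates in a zone instead;
        VISIT_AND_RETURN(Literal, lit)  // this sketch simply leaks.
      }
      BinaryOp* NewBinaryOp(char op, Node* l, Node* r) {
        BinaryOp* node = new BinaryOp(op, l, r);
        VISIT_AND_RETURN(BinaryOp, node)
      }
    #undef VISIT_AND_RETURN

      Visitor& visitor() { return visitor_; }
     private:
      Visitor visitor_;
    };

    int main() {
      NodeFactory<CountingVisitor> f;
      f.NewBinaryOp('+', f.NewLiteral(1), f.NewLiteral(2));
      std::printf("nodes created: %d\n", f.visitor().nodes);  // prints 3
      return 0;
    }

Making the visitor a template parameter is what lets an empty visitor be inlined away while a counting or flag-setting one runs on every node.
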
diff --git a/src/atomicops_internals_mips_gcc.h b/src/atomicops_internals_mips_gcc.h
index 5113de2..9498fd7 100644
--- a/src/atomicops_internals_mips_gcc.h
+++ b/src/atomicops_internals_mips_gcc.h
@@ -30,7 +30,7 @@
 #ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 #define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
 
 namespace v8 {
 namespace internal {
@@ -48,16 +48,19 @@
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
-  Atomic32 prev;
-  __asm__ __volatile__("1:\n"
-                       "ll %0, %1\n"  // prev = *ptr
+  Atomic32 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %5\n"  // prev = *ptr
                        "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
-                       "nop\n"  // delay slot nop
-                       "sc %2, %1\n"  // *ptr = new_value (with atomic check)
+                       "move %2, %4\n"  // tmp = new_value
+                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
                        "beqz %2, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
                        "2:\n"
-                       : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                        : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                        : "memory");
   return prev;
@@ -68,12 +71,15 @@
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
   Atomic32 temp, old;
-  __asm__ __volatile__("1:\n"
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
                        "ll %1, %2\n"  // old = *ptr
                        "move %0, %3\n"  // temp = new_value
                        "sc %0, %2\n"  // *ptr = temp (with atomic check)
                        "beqz %0, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
+                       ".set pop\n"
                        : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                        : "r" (new_value), "m" (*ptr)
                        : "memory");
@@ -87,13 +93,15 @@
                                           Atomic32 increment) {
   Atomic32 temp, temp2;
 
-  __asm__ __volatile__("1:\n"
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
                        "ll %0, %2\n"  // temp = *ptr
-                       "addu %0, %3\n"  // temp = temp + increment
-                       "move %1, %0\n"  // temp2 = temp
-                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
-                       "beqz %0, 1b\n"  // start again on atomic error
-                       "nop\n"  // delay slot nop
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
                        : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                        : "Ir" (increment), "m" (*ptr)
                        : "memory");
@@ -103,6 +111,7 @@
 
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
+  ATOMICOPS_COMPILER_BARRIER();
   Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
   ATOMICOPS_COMPILER_BARRIER();
   return res;
@@ -117,16 +126,19 @@
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
   ATOMICOPS_COMPILER_BARRIER();
-  return x;
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
 }
 
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   ATOMICOPS_COMPILER_BARRIER();
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
 }
 
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -134,7 +146,7 @@
 }
 
 inline void MemoryBarrier() {
-  ATOMICOPS_COMPILER_BARRIER();
+  __asm__ __volatile__("sync" : : : "memory");
 }
 
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
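
The MIPS hunks above make three behavioral fixes: the ll/sc loops are wrapped in .set push/noreorder/pop so the assembler cannot reorder the delay slots, the compiler-only barrier no longer emits a hardware sync (MemoryBarrier() now issues sync directly), and Acquire_/Release_CompareAndSwap place a barrier on both sides of the primitive rather than one. In C++11 terms the intended semantics look roughly like this; a sketch, not V8 code:

    #include <atomic>
    #include <cstdio>

    typedef int Atomic32;

    // NoBarrier CAS: returns the value observed at *ptr, like the assembly.
    Atomic32 NoBarrier_CompareAndSwap(std::atomic<Atomic32>* ptr,
                                      Atomic32 old_value,
                                      Atomic32 new_value) {
      Atomic32 prev = old_value;
      // compare_exchange_strong retries internally on spurious sc failure,
      // which is exactly what the "beqz %1, 1b" loop does on MIPS.
      ptr->compare_exchange_strong(prev, new_value, std::memory_order_relaxed);
      return prev;
    }

    // Acquire variant: ordering on both the success and failure paths,
    // matching the barriers now placed before and after the primitive.
    Atomic32 Acquire_CompareAndSwap(std::atomic<Atomic32>* ptr,
                                    Atomic32 old_value,
                                    Atomic32 new_value) {
      Atomic32 prev = old_value;
      ptr->compare_exchange_strong(prev, new_value,
                                   std::memory_order_acq_rel,
                                   std::memory_order_acquire);
      return prev;
    }

    int main() {
      std::atomic<Atomic32> x(5);
      std::printf("%d\n", Acquire_CompareAndSwap(&x, 5, 7));  // prints 5
      std::printf("%d\n", x.load());                          // prints 7
      return 0;
    }
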
diff --git a/src/atomicops_internals_x86_macosx.h b/src/atomicops_internals_x86_macosx.h
index 2bac006..bfb02b3 100644
--- a/src/atomicops_internals_x86_macosx.h
+++ b/src/atomicops_internals_x86_macosx.h
@@ -35,7 +35,7 @@
 namespace v8 {
 namespace internal {
 
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
   Atomic32 prev_value;
@@ -49,7 +49,7 @@
   return prev_value;
 }
 
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
   Atomic32 old_value;
   do {
@@ -59,12 +59,12 @@
   return old_value;
 }
 
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                           Atomic32 increment) {
   return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
 }
 
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                           Atomic32 increment) {
   return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
 }
@@ -73,7 +73,7 @@
   OSMemoryBarrier();
 }
 
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 prev_value;
@@ -87,7 +87,7 @@
   return prev_value;
 }
 
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   return Acquire_CompareAndSwap(ptr, old_value, new_value);
@@ -97,12 +97,12 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
   MemoryBarrier();
 }
 
-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   MemoryBarrier();
   *ptr = value;
 }
@@ -111,13 +111,13 @@
   return *ptr;
 }
 
-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value = *ptr;
   MemoryBarrier();
   return value;
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
   MemoryBarrier();
   return *ptr;
 }
@@ -126,7 +126,7 @@
 
 // 64-bit implementation on 64-bit platform
 
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                          Atomic64 old_value,
                                          Atomic64 new_value) {
   Atomic64 prev_value;
@@ -140,7 +140,7 @@
   return prev_value;
 }
 
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                          Atomic64 new_value) {
   Atomic64 old_value;
   do {
@@ -150,17 +150,17 @@
   return old_value;
 }
 
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                           Atomic64 increment) {
   return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
 }
 
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
   return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
 }
 
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 prev_value;
@@ -174,7 +174,7 @@
   return prev_value;
 }
 
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   // The lib kern interface does not distinguish between
@@ -186,12 +186,12 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
   MemoryBarrier();
 }
 
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   MemoryBarrier();
   *ptr = value;
 }
@@ -200,13 +200,13 @@
   return *ptr;
 }
 
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
   Atomic64 value = *ptr;
   MemoryBarrier();
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
   MemoryBarrier();
   return *ptr;
 }
@@ -264,7 +264,7 @@
       old_value, new_value);
 }
 
-inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
   NoBarrier_Store(
       reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
 }
@@ -279,7 +279,7 @@
       reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
 }
 
-inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
   return NoBarrier_Load(
       reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
 }
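
The Mac OS X changes above are purely stylistic (pointer spacing), so semantics are unchanged. For reference, the Acquire_/Release_ pairs in this file exist to support the usual publish/consume pattern, sketched here with C++11 atomics; an assumed example, not V8 code:

    #include <atomic>
    #include <cstdio>
    #include <thread>

    static int payload = 0;
    static std::atomic<int> ready(0);

    void writer() {
      payload = 42;                               // plain store of the data
      ready.store(1, std::memory_order_release);  // Release_Store(&ready, 1)
    }

    void reader() {
      // Acquire_Load(&ready): once the flag is seen, the data store is too.
      while (ready.load(std::memory_order_acquire) == 0) {
      }
      std::printf("%d\n", payload);  // guaranteed to print 42
    }

    int main() {
      std::thread t1(writer), t2(reader);
      t1.join();
      t2.join();
      return 0;
    }
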
diff --git a/src/bignum-dtoa.h b/src/bignum-dtoa.h
index ea1acbb..93ec1f7 100644
--- a/src/bignum-dtoa.h
+++ b/src/bignum-dtoa.h
@@ -44,7 +44,7 @@
   BIGNUM_DTOA_PRECISION
 };
 
-// Converts the given double 'v' to ascii.
+// Converts the given double 'v' to ASCII.
 // The result should be interpreted as buffer * 10^(point-length).
 // The buffer will be null-terminated.
 //
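
The contract documented above is worth a worked example: the digits and the decimal point come back separately, and the value is buffer * 10^(point - length). A small self-contained illustration (not from the source) of decoding that pair:

    #include <cmath>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Reconstructs a value from a digit string and a decimal-point position.
    double FromDigitsAndPoint(const char* buffer, int point) {
      int length = static_cast<int>(std::strlen(buffer));
      return std::atof(buffer) * std::pow(10.0, point - length);
    }

    int main() {
      // For v = 3.5 the shortest conversion is buffer = "35", point = 1:
      std::printf("%g\n", FromDigitsAndPoint("35", 1));   // 3.5
      std::printf("%g\n", FromDigitsAndPoint("35", 3));   // 350
      std::printf("%g\n", FromDigitsAndPoint("35", -1));  // 0.035
      return 0;
    }
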
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index f07e625..0e95b4b 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,9 +34,11 @@
 #include "debug.h"
 #include "execution.h"
 #include "global-handles.h"
+#include "isolate-inl.h"
 #include "macro-assembler.h"
 #include "natives.h"
 #include "objects-visiting.h"
+#include "platform.h"
 #include "snapshot.h"
 #include "extensions/externalize-string-extension.h"
 #include "extensions/gc-extension.h"
@@ -74,22 +76,15 @@
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
   if (heap->natives_source_cache()->get(index)->IsUndefined()) {
-    if (!Snapshot::IsEnabled() || FLAG_new_snapshot) {
-      // We can use external strings for the natives.
-      Vector<const char> source = Natives::GetRawScriptSource(index);
-      NativesExternalStringResource* resource =
-          new NativesExternalStringResource(this,
-                                            source.start(),
-                                            source.length());
-      Handle<String> source_code =
-          factory->NewExternalStringFromAscii(resource);
-      heap->natives_source_cache()->set(index, *source_code);
-    } else {
-      // Old snapshot code can't cope with external strings at all.
-      Handle<String> source_code =
-        factory->NewStringFromAscii(Natives::GetRawScriptSource(index));
-      heap->natives_source_cache()->set(index, *source_code);
-    }
+    // We can use external strings for the natives.
+    Vector<const char> source = Natives::GetRawScriptSource(index);
+    NativesExternalStringResource* resource =
+        new NativesExternalStringResource(this,
+                                          source.start(),
+                                          source.length());
+    Handle<String> source_code =
+        factory->NewExternalStringFromAscii(resource);
+    heap->natives_source_cache()->set(index, *source_code);
   }
   Handle<Object> cached_source(heap->natives_source_cache()->get(index));
   return Handle<String>::cast(cached_source);
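
The hunk above drops the old-snapshot fallback: the natives source is now always wrapped in an external string and cached, so the embedded bytes are never copied onto the JS heap. A simplified sketch of that lazy, copy-free caching scheme; plain C++ with assumed names, not the V8 API:

    #include <cstdio>
    #include <cstring>
    #include <vector>

    // Points at static data embedded in the binary; owns nothing.
    struct ExternalString {
      const char* data;
      size_t length;
    };

    static const char* kNativesSource[] = { "function a(){}", "function b(){}" };

    class NativesCache {
     public:
      NativesCache() : cache_(2) {}  // entries start zeroed (data == NULL)
      const ExternalString* Get(int index) {
        if (cache_[index].data == NULL) {  // first request: wrap, don't copy
          cache_[index].data = kNativesSource[index];
          cache_[index].length = std::strlen(kNativesSource[index]);
        }
        return &cache_[index];             // later requests hit the cache
      }
     private:
      std::vector<ExternalString> cache_;
    };

    int main() {
      NativesCache cache;
      std::printf("%s\n", cache.Get(0)->data);
      std::printf("%d\n", static_cast<int>(cache.Get(1)->length));
      return 0;
    }
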
@@ -177,6 +172,10 @@
   Handle<JSFunction> GetThrowTypeErrorFunction();
 
   void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
+
+  // Make the "arguments" and "caller" properties throw a TypeError on access.
+  void PoisonArgumentsAndCaller(Handle<Map> map);
+
   // Creates the global objects using the global and the template passed in
   // through the API.  We call this regardless of whether we are building a
   // context from scratch or using a deserialized one from the partial snapshot
@@ -197,7 +196,7 @@
   // detached from the other objects in the snapshot.
   void HookUpInnerGlobal(Handle<GlobalObject> inner_global);
   // New context initialization.  Used for creating a context from scratch.
-  void InitializeGlobal(Handle<GlobalObject> inner_global,
+  bool InitializeGlobal(Handle<GlobalObject> inner_global,
                         Handle<JSFunction> empty_function);
   void InitializeExperimentalGlobal();
   // Installs the contents of the native .js files on the global objects.
@@ -209,12 +208,30 @@
   void InstallBuiltinFunctionIds();
   void InstallJSFunctionResultCaches();
   void InitializeNormalizedMapCaches();
+
+  enum ExtensionTraversalState {
+    UNVISITED, VISITED, INSTALLED
+  };
+
+  class ExtensionStates {
+   public:
+    ExtensionStates();
+    ExtensionTraversalState get_state(RegisteredExtension* extension);
+    void set_state(RegisteredExtension* extension,
+                   ExtensionTraversalState state);
+   private:
+    HashMap map_;
+    DISALLOW_COPY_AND_ASSIGN(ExtensionStates);
+  };
+
   // Used both for deserialized and from-scratch contexts to add the extensions
   // provided.
   static bool InstallExtensions(Handle<Context> global_context,
                                 v8::ExtensionConfiguration* extensions);
-  static bool InstallExtension(const char* name);
-  static bool InstallExtension(v8::RegisteredExtension* current);
+  static bool InstallExtension(const char* name,
+                               ExtensionStates* extension_states);
+  static bool InstallExtension(v8::RegisteredExtension* current,
+                               ExtensionStates* extension_states);
   static void InstallSpecialObjects(Handle<Context> global_context);
   bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
   bool ConfigureApiObject(Handle<JSObject> object,
@@ -242,14 +259,10 @@
 
   Handle<Map> CreateStrictModeFunctionMap(
       PrototypePropertyMode prototype_mode,
-      Handle<JSFunction> empty_function,
-      Handle<FixedArray> arguments_callbacks,
-      Handle<FixedArray> caller_callbacks);
+      Handle<JSFunction> empty_function);
 
   Handle<DescriptorArray> ComputeStrictFunctionInstanceDescriptor(
-      PrototypePropertyMode propertyMode,
-      Handle<FixedArray> arguments,
-      Handle<FixedArray> caller);
+      PrototypePropertyMode propertyMode);
 
   static bool CompileBuiltin(Isolate* isolate, int index);
   static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
@@ -278,7 +291,7 @@
 
 void Bootstrapper::Iterate(ObjectVisitor* v) {
   extensions_cache_.Iterate(v);
-  v->Synchronize("Extensions");
+  v->Synchronize(VisitorSynchronization::kExtensions);
 }
 
 
@@ -357,55 +370,56 @@
   } else {
     attributes = DONT_ENUM;
   }
-  SetLocalPropertyNoThrow(target, symbol, function, attributes);
+  CHECK_NOT_EMPTY_HANDLE(isolate,
+                         JSObject::SetLocalPropertyIgnoreAttributes(
+                             target, symbol, function, attributes));
   if (is_ecma_native) {
     function->shared()->set_instance_class_name(*symbol);
   }
+  function->shared()->set_native(true);
   return function;
 }
 
 
 Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
     PrototypePropertyMode prototypeMode) {
-  Handle<DescriptorArray> descriptors =
-      factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
-                                    ? 4
-                                    : 5);
-  PropertyAttributes attributes =
-      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+  int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
+  Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
+  PropertyAttributes attribs = static_cast<PropertyAttributes>(
+      DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+  DescriptorArray::WhitenessWitness witness(*descriptors);
 
   {  // Add length.
-    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
-    CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
-    descriptors->Set(0, &d);
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength));
+    CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs);
+    descriptors->Set(0, &d, witness);
   }
   {  // Add name.
-    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
-    CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
-    descriptors->Set(1, &d);
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName));
+    CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs);
+    descriptors->Set(1, &d, witness);
   }
   {  // Add arguments.
-    Handle<Foreign> foreign =
-        factory()->NewForeign(&Accessors::FunctionArguments);
-    CallbacksDescriptor d(*factory()->arguments_symbol(), *foreign, attributes);
-    descriptors->Set(2, &d);
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionArguments));
+    CallbacksDescriptor d(*factory()->arguments_symbol(), *f, attribs);
+    descriptors->Set(2, &d, witness);
   }
   {  // Add caller.
-    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionCaller);
-    CallbacksDescriptor d(*factory()->caller_symbol(), *foreign, attributes);
-    descriptors->Set(3, &d);
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionCaller));
+    CallbacksDescriptor d(*factory()->caller_symbol(), *f, attribs);
+    descriptors->Set(3, &d, witness);
   }
   if (prototypeMode != DONT_ADD_PROTOTYPE) {
     // Add prototype.
     if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
-      attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
+      attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY);
     }
-    Handle<Foreign> foreign =
-        factory()->NewForeign(&Accessors::FunctionPrototype);
-    CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
-    descriptors->Set(4, &d);
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype));
+    CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs);
+    descriptors->Set(4, &d, witness);
   }
-  descriptors->Sort();
+  descriptors->Sort(witness);
   return descriptors;
 }
 
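Every descriptors->Set() and Sort() call above now takes a DescriptorArray::WhitenessWitness. The idiom: a witness is a token that can only be obtained by establishing a precondition (in V8, that the array is still "white" to the incremental marker, so writes need no store-buffer barrier), and every mutator demands one, so the check cannot be forgotten at any call site. A generic sketch of the pattern, assuming nothing V8-specific:

    #include <cassert>
    #include <vector>

    class Table;

    // Constructing the witness runs the invariant check exactly once.
    class SortedWitness {
     public:
      explicit SortedWitness(const Table& t);
    };

    class Table {
     public:
      std::vector<int> data;

      // Binary search is only valid on sorted data: callers must show proof.
      bool Contains(int key, const SortedWitness&) const {
        int lo = 0, hi = static_cast<int>(data.size());
        while (lo < hi) {
          int mid = (lo + hi) / 2;
          if (data[mid] < key) lo = mid + 1; else hi = mid;
        }
        return lo < static_cast<int>(data.size()) && data[lo] == key;
      }
    };

    SortedWitness::SortedWitness(const Table& t) {
      for (size_t i = 1; i < t.data.size(); ++i)
        assert(t.data[i - 1] <= t.data[i]);  // the one place the check lives
    }

    int main() {
      Table t;
      t.data.push_back(1); t.data.push_back(3); t.data.push_back(5);
      SortedWitness w(t);        // establish the precondition once
      assert(t.Contains(3, w));  // then reuse the proof at every call site
      assert(!t.Contains(4, w));
      return 0;
    }
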
@@ -478,7 +492,7 @@
   // 262 15.3.4.
   Handle<String> symbol = factory->LookupAsciiSymbol("Empty");
   Handle<JSFunction> empty_function =
-      factory->NewFunctionWithoutPrototype(symbol, kNonStrictMode);
+      factory->NewFunctionWithoutPrototype(symbol, CLASSIC_MODE);
 
   // --- E m p t y ---
   Handle<Code> code =
@@ -513,49 +527,46 @@
 
 
 Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor(
-    PrototypePropertyMode prototypeMode,
-    Handle<FixedArray> arguments,
-    Handle<FixedArray> caller) {
-  Handle<DescriptorArray> descriptors =
-      factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
-                                    ? 4
-                                    : 5);
-  PropertyAttributes attributes = static_cast<PropertyAttributes>(
-      DONT_ENUM | DONT_DELETE | READ_ONLY);
+    PrototypePropertyMode prototypeMode) {
+  int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
+  Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
+  PropertyAttributes attribs = static_cast<PropertyAttributes>(
+      DONT_ENUM | DONT_DELETE);
 
-  {  // length
-    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
-    CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
-    descriptors->Set(0, &d);
+  DescriptorArray::WhitenessWitness witness(*descriptors);
+
+  {  // Add length.
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength));
+    CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs);
+    descriptors->Set(0, &d, witness);
   }
-  {  // name
-    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
-    CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
-    descriptors->Set(1, &d);
+  {  // Add name.
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName));
+    CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs);
+    descriptors->Set(1, &d, witness);
   }
-  {  // arguments
-    CallbacksDescriptor d(*factory()->arguments_symbol(),
-                          *arguments,
-                          attributes);
-    descriptors->Set(2, &d);
+  {  // Add arguments.
+    Handle<AccessorPair> arguments(factory()->NewAccessorPair());
+    CallbacksDescriptor d(*factory()->arguments_symbol(), *arguments, attribs);
+    descriptors->Set(2, &d, witness);
   }
-  {  // caller
-    CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attributes);
-    descriptors->Set(3, &d);
+  {  // Add caller.
+    Handle<AccessorPair> caller(factory()->NewAccessorPair());
+    CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
+    descriptors->Set(3, &d, witness);
   }
 
-  // prototype
   if (prototypeMode != DONT_ADD_PROTOTYPE) {
-    if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
-      attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
+    // Add prototype.
+    if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
+      attribs = static_cast<PropertyAttributes>(attribs | READ_ONLY);
     }
-    Handle<Foreign> foreign =
-        factory()->NewForeign(&Accessors::FunctionPrototype);
-    CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
-    descriptors->Set(4, &d);
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype));
+    CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs);
+    descriptors->Set(4, &d, witness);
   }
 
-  descriptors->Sort();
+  descriptors->Sort(witness);
   return descriptors;
 }
 
@@ -565,7 +576,7 @@
   if (throw_type_error_function.is_null()) {
     Handle<String> name = factory()->LookupAsciiSymbol("ThrowTypeError");
     throw_type_error_function =
-      factory()->NewFunctionWithoutPrototype(name, kNonStrictMode);
+      factory()->NewFunctionWithoutPrototype(name, CLASSIC_MODE);
     Handle<Code> code(isolate()->builtins()->builtin(
         Builtins::kStrictModePoisonPill));
     throw_type_error_function->set_map(
@@ -574,7 +585,7 @@
     throw_type_error_function->shared()->set_code(*code);
     throw_type_error_function->shared()->DontAdaptArguments();
 
-    PreventExtensions(throw_type_error_function);
+    JSObject::PreventExtensions(throw_type_error_function);
   }
   return throw_type_error_function;
 }
@@ -582,14 +593,10 @@
 
 Handle<Map> Genesis::CreateStrictModeFunctionMap(
     PrototypePropertyMode prototype_mode,
-    Handle<JSFunction> empty_function,
-    Handle<FixedArray> arguments_callbacks,
-    Handle<FixedArray> caller_callbacks) {
+    Handle<JSFunction> empty_function) {
   Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   Handle<DescriptorArray> descriptors =
-      ComputeStrictFunctionInstanceDescriptor(prototype_mode,
-                                              arguments_callbacks,
-                                              caller_callbacks);
+      ComputeStrictFunctionInstanceDescriptor(prototype_mode);
   map->set_instance_descriptors(*descriptors);
   map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
   map->set_prototype(*empty_function);
@@ -598,23 +605,15 @@
 
 
 void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
-  // Create the callbacks arrays for ThrowTypeError functions.
-  // The get/set callacks are filled in after the maps are created below.
-  Factory* factory = empty->GetIsolate()->factory();
-  Handle<FixedArray> arguments = factory->NewFixedArray(2, TENURED);
-  Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
-
   // Allocate map for the strict mode function instances.
   Handle<Map> strict_mode_function_instance_map =
-      CreateStrictModeFunctionMap(
-          ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
+      CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
   global_context()->set_strict_mode_function_instance_map(
       *strict_mode_function_instance_map);
 
   // Allocate map for the prototype-less strict mode instances.
   Handle<Map> strict_mode_function_without_prototype_map =
-      CreateStrictModeFunctionMap(
-          DONT_ADD_PROTOTYPE, empty, arguments, caller);
+      CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty);
   global_context()->set_strict_mode_function_without_prototype_map(
       *strict_mode_function_without_prototype_map);
 
@@ -622,26 +621,38 @@
   // only for processing of builtins.
   // Later the map is replaced with writable prototype map, allocated below.
   Handle<Map> strict_mode_function_map =
-      CreateStrictModeFunctionMap(
-          ADD_READONLY_PROTOTYPE, empty, arguments, caller);
+      CreateStrictModeFunctionMap(ADD_READONLY_PROTOTYPE, empty);
   global_context()->set_strict_mode_function_map(
       *strict_mode_function_map);
 
   // The final map for the strict mode functions. Writeable prototype.
   // This map is installed in MakeFunctionInstancePrototypeWritable.
   strict_mode_function_instance_map_writable_prototype_ =
-      CreateStrictModeFunctionMap(
-          ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
+      CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
 
-  // Create the ThrowTypeError function instance.
-  Handle<JSFunction> throw_function =
-      GetThrowTypeErrorFunction();
+  // Complete the callbacks.
+  PoisonArgumentsAndCaller(strict_mode_function_instance_map);
+  PoisonArgumentsAndCaller(strict_mode_function_without_prototype_map);
+  PoisonArgumentsAndCaller(strict_mode_function_map);
+  PoisonArgumentsAndCaller(
+      strict_mode_function_instance_map_writable_prototype_);
+}
 
-  // Complete the callback fixed arrays.
-  arguments->set(0, *throw_function);
-  arguments->set(1, *throw_function);
-  caller->set(0, *throw_function);
-  caller->set(1, *throw_function);
+
+static void SetAccessors(Handle<Map> map,
+                         Handle<String> name,
+                         Handle<JSFunction> func) {
+  DescriptorArray* descs = map->instance_descriptors();
+  int number = descs->Search(*name);
+  AccessorPair* accessors = AccessorPair::cast(descs->GetValue(number));
+  accessors->set_getter(*func);
+  accessors->set_setter(*func);
+}
+
+
+void Genesis::PoisonArgumentsAndCaller(Handle<Map> map) {
+  SetAccessors(map, factory()->arguments_symbol(), GetThrowTypeErrorFunction());
+  SetAccessors(map, factory()->caller_symbol(), GetThrowTypeErrorFunction());
 }
 
 
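The new PoisonArgumentsAndCaller above installs the same ThrowTypeError function as both getter and setter of an AccessorPair, so any read or write of fn.arguments / fn.caller on a strict-mode function traps. A simplified sketch of that poisoning, with assumed types, not V8 internals:

    #include <cstdio>
    #include <map>
    #include <stdexcept>
    #include <string>

    // One shared "poison pill": the same thrower serves as getter and setter.
    struct AccessorPair {
      int (*getter)();
      void (*setter)(int);
    };

    static int ThrowTypeErrorGet() { throw std::runtime_error("TypeError"); }
    static void ThrowTypeErrorSet(int) { throw std::runtime_error("TypeError"); }

    struct StrictFunctionMap {
      std::map<std::string, AccessorPair> accessors;
    };

    static void PoisonArgumentsAndCaller(StrictFunctionMap* map) {
      AccessorPair poison = { &ThrowTypeErrorGet, &ThrowTypeErrorSet };
      map->accessors["arguments"] = poison;
      map->accessors["caller"] = poison;
    }

    int main() {
      StrictFunctionMap m;
      PoisonArgumentsAndCaller(&m);
      try {
        m.accessors["arguments"].getter();  // any read (or write) throws
      } catch (const std::runtime_error& e) {
        std::printf("caught: %s\n", e.what());
      }
      return 0;
    }
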
@@ -727,11 +738,10 @@
     Handle<JSObject> prototype =
         Handle<JSObject>(
             JSObject::cast(js_global_function->instance_prototype()));
-    SetLocalPropertyNoThrow(
-        prototype,
-        factory()->constructor_symbol(),
-        isolate()->object_function(),
-        NONE);
+    CHECK_NOT_EMPTY_HANDLE(isolate(),
+                           JSObject::SetLocalPropertyIgnoreAttributes(
+                               prototype, factory()->constructor_symbol(),
+                               isolate()->object_function(), NONE));
   } else {
     Handle<FunctionTemplateInfo> js_global_constructor(
         FunctionTemplateInfo::cast(js_global_template->constructor()));
@@ -808,7 +818,7 @@
                    factory()->LookupAsciiSymbol("global"),
                    inner_global,
                    attributes);
-  // Setup the reference from the global object to the builtins object.
+  // Set up the reference from the global object to the builtins object.
   JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global);
   TransferNamedProperties(inner_global_from_snapshot, inner_global);
   TransferIndexedProperties(inner_global_from_snapshot, inner_global);
@@ -817,7 +827,7 @@
 
 // This is only called if we are not using snapshots.  The equivalent
 // work in the snapshot case is done in HookUpInnerGlobal.
-void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
+bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
                                Handle<JSFunction> empty_function) {
   // --- G l o b a l   C o n t e x t ---
   // Use the empty function as closure (no scope info).
@@ -837,8 +847,10 @@
   Heap* heap = isolate->heap();
 
   Handle<String> object_name = Handle<String>(heap->Object_symbol());
-  SetLocalPropertyNoThrow(inner_global, object_name,
-                          isolate->object_function(), DONT_ENUM);
+  CHECK_NOT_EMPTY_HANDLE(isolate,
+                         JSObject::SetLocalPropertyIgnoreAttributes(
+                             inner_global, object_name,
+                             isolate->object_function(), DONT_ENUM));
 
   Handle<JSObject> global = Handle<JSObject>(global_context()->global());
 
@@ -865,15 +877,12 @@
             factory->NewForeign(&Accessors::ArrayLength),
             static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
 
-    // Cache the fast JavaScript array map
-    global_context()->set_js_array_map(array_function->initial_map());
-    global_context()->js_array_map()->set_instance_descriptors(
-        *array_descriptors);
     // array_function is used internally. JS code creating array object should
     // search for the 'Array' property on the global object and use that one
     // as the constructor. 'Array' property on a global object can be
     // overwritten by JS code.
     global_context()->set_array_function(*array_function);
+    array_function->initial_map()->set_instance_descriptors(*array_descriptors);
   }
 
   {  // --- N u m b e r ---
@@ -918,7 +927,7 @@
   {  // --- D a t e ---
     // Builtin functions for Date.prototype.
     Handle<JSFunction> date_fun =
-        InstallFunction(global, "Date", JS_VALUE_TYPE, JSValue::kSize,
+        InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
                         isolate->initial_object_prototype(),
                         Builtins::kIllegal, true);
 
@@ -940,6 +949,7 @@
     ASSERT_EQ(0, initial_map->inobject_properties());
 
     Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
+    DescriptorArray::WhitenessWitness witness(*descriptors);
     PropertyAttributes final =
         static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
     int enum_index = 0;
@@ -949,7 +959,7 @@
                             JSRegExp::kSourceFieldIndex,
                             final,
                             enum_index++);
-      descriptors->Set(0, &field);
+      descriptors->Set(0, &field, witness);
     }
     {
       // ECMA-262, section 15.10.7.2.
@@ -957,7 +967,7 @@
                             JSRegExp::kGlobalFieldIndex,
                             final,
                             enum_index++);
-      descriptors->Set(1, &field);
+      descriptors->Set(1, &field, witness);
     }
     {
       // ECMA-262, section 15.10.7.3.
@@ -965,7 +975,7 @@
                             JSRegExp::kIgnoreCaseFieldIndex,
                             final,
                             enum_index++);
-      descriptors->Set(2, &field);
+      descriptors->Set(2, &field, witness);
     }
     {
       // ECMA-262, section 15.10.7.4.
@@ -973,7 +983,7 @@
                             JSRegExp::kMultilineFieldIndex,
                             final,
                             enum_index++);
-      descriptors->Set(3, &field);
+      descriptors->Set(3, &field, witness);
     }
     {
       // ECMA-262, section 15.10.7.5.
@@ -983,10 +993,10 @@
                             JSRegExp::kLastIndexFieldIndex,
                             writable,
                             enum_index++);
-      descriptors->Set(4, &field);
+      descriptors->Set(4, &field, witness);
     }
     descriptors->SetNextEnumerationIndex(enum_index);
-    descriptors->Sort();
+    descriptors->Sort(witness);
 
     initial_map->set_inobject_properties(5);
     initial_map->set_pre_allocated_property_fields(5);
@@ -995,18 +1005,42 @@
         initial_map->instance_size() + 5 * kPointerSize);
     initial_map->set_instance_descriptors(*descriptors);
     initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
+
+    // RegExp prototype object is itself a RegExp.
+    Handle<Map> proto_map = factory->CopyMapDropTransitions(initial_map);
+    proto_map->set_prototype(global_context()->initial_object_prototype());
+    Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
+    proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
+                                 heap->empty_string());
+    proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
+                                 heap->false_value());
+    proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
+                                 heap->false_value());
+    proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex,
+                                 heap->false_value());
+    proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
+                                 Smi::FromInt(0),
+                                 SKIP_WRITE_BARRIER);  // It's a Smi.
+    initial_map->set_prototype(*proto);
+    factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
+                                   JSRegExp::IRREGEXP, factory->empty_string(),
+                                   JSRegExp::Flags(0), 0);
   }
 
   {  // -- J S O N
     Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
-    Handle<JSFunction> cons = factory->NewFunction(
-        name,
-        factory->the_hole_value());
-    cons->SetInstancePrototype(global_context()->initial_object_prototype());
+    Handle<JSFunction> cons = factory->NewFunction(name,
+                                                   factory->the_hole_value());
+    { MaybeObject* result = cons->SetInstancePrototype(
+        global_context()->initial_object_prototype());
+      if (result->IsFailure()) return false;
+    }
     cons->SetInstanceClassName(*name);
     Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
     ASSERT(json_object->IsJSObject());
-    SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM);
+    CHECK_NOT_EMPTY_HANDLE(isolate,
+                           JSObject::SetLocalPropertyIgnoreAttributes(
+                                 global, name, json_object, DONT_ENUM));
     global_context()->set_json_object(*json_object);
   }
 
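Note the error-handling change threaded through this file: InitializeGlobal now returns bool, and allocations that can fail (MaybeObject in V8) are checked and turned into an early false return instead of being assumed to succeed. A sketch of that propagation style with stand-in types, not V8 code:

    #include <cstdio>

    template <class T>
    struct Maybe {
      bool is_failure;
      T value;
    };

    template <class T>
    Maybe<T> Just(T v) { Maybe<T> m = { false, v }; return m; }

    template <class T>
    Maybe<T> Failure() { Maybe<T> m = { true, T() }; return m; }

    // Stand-in for an allocation that may fail under memory pressure.
    Maybe<int> AllocatePrototype(bool out_of_memory) {
      if (out_of_memory) return Failure<int>();
      return Just(42);
    }

    bool InitializeGlobal(bool simulate_oom) {
      Maybe<int> proto = AllocatePrototype(simulate_oom);
      if (proto.is_failure) return false;  // propagate, don't crash
      // ... continue wiring up the global using proto.value ...
      return true;
    }

    int main() {
      std::printf("%d %d\n", InitializeGlobal(false), InitializeGlobal(true));
      return 0;  // prints "1 0"
    }
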
@@ -1036,21 +1070,23 @@
     global_context()->set_arguments_boilerplate(*result);
     // Note: length must be added as the first property and
     //       callee must be added as the second property.
-    SetLocalPropertyNoThrow(result, factory->length_symbol(),
-                            factory->undefined_value(),
-                            DONT_ENUM);
-    SetLocalPropertyNoThrow(result, factory->callee_symbol(),
-                            factory->undefined_value(),
-                            DONT_ENUM);
+    CHECK_NOT_EMPTY_HANDLE(isolate,
+                           JSObject::SetLocalPropertyIgnoreAttributes(
+                               result, factory->length_symbol(),
+                               factory->undefined_value(), DONT_ENUM));
+    CHECK_NOT_EMPTY_HANDLE(isolate,
+                           JSObject::SetLocalPropertyIgnoreAttributes(
+                               result, factory->callee_symbol(),
+                               factory->undefined_value(), DONT_ENUM));
 
 #ifdef DEBUG
-    LookupResult lookup;
+    LookupResult lookup(isolate);
     result->LocalLookup(heap->callee_symbol(), &lookup);
-    ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
+    ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
     ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
 
     result->LocalLookup(heap->length_symbol(), &lookup);
-    ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
+    ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
     ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
 
     ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
@@ -1063,11 +1099,6 @@
   }
 
   {  // --- aliased_arguments_boilerplate_
-    Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
-    Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
-    new_map->set_pre_allocated_property_fields(2);
-    Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
-    new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
     // Set up a well-formed parameter map to make assertions happy.
     Handle<FixedArray> elements = factory->NewFixedArray(2);
     elements->set_map(heap->non_strict_arguments_elements_map());
@@ -1076,7 +1107,16 @@
     elements->set(0, *array);
     array = factory->NewFixedArray(0);
     elements->set(1, *array);
+
+    Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
+    Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
+    new_map->set_pre_allocated_property_fields(2);
+    Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
+    // Set elements kind after allocating the object because
+    // NewJSObjectFromMap assumes a fast elements map.
+    new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
     result->set_elements(*elements);
+    ASSERT(result->HasNonStrictArgumentsElements());
     global_context()->set_aliased_arguments_boilerplate(*result);
   }
 
@@ -1085,33 +1125,34 @@
       static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
 
     // Create the ThrowTypeError functions.
-    Handle<FixedArray> callee = factory->NewFixedArray(2, TENURED);
-    Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
+    Handle<AccessorPair> callee = factory->NewAccessorPair();
+    Handle<AccessorPair> caller = factory->NewAccessorPair();
 
     Handle<JSFunction> throw_function =
         GetThrowTypeErrorFunction();
 
     // Install the ThrowTypeError functions.
-    callee->set(0, *throw_function);
-    callee->set(1, *throw_function);
-    caller->set(0, *throw_function);
-    caller->set(1, *throw_function);
+    callee->set_getter(*throw_function);
+    callee->set_setter(*throw_function);
+    caller->set_getter(*throw_function);
+    caller->set_setter(*throw_function);
 
     // Create the descriptor array for the arguments object.
     Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
+    DescriptorArray::WhitenessWitness witness(*descriptors);
     {  // length
       FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
-      descriptors->Set(0, &d);
+      descriptors->Set(0, &d, witness);
     }
     {  // callee
       CallbacksDescriptor d(*factory->callee_symbol(), *callee, attributes);
-      descriptors->Set(1, &d);
+      descriptors->Set(1, &d, witness);
     }
     {  // caller
       CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
-      descriptors->Set(2, &d);
+      descriptors->Set(2, &d, witness);
     }
-    descriptors->Sort();
+    descriptors->Sort(witness);
 
     // Create the map. Allocate one in-object field for length.
     Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
@@ -1131,14 +1172,15 @@
     global_context()->set_strict_mode_arguments_boilerplate(*result);
 
     // Add length property only for strict mode boilerplate.
-    SetLocalPropertyNoThrow(result, factory->length_symbol(),
-                            factory->undefined_value(),
-                            DONT_ENUM);
+    CHECK_NOT_EMPTY_HANDLE(isolate,
+                           JSObject::SetLocalPropertyIgnoreAttributes(
+                               result, factory->length_symbol(),
+                               factory->undefined_value(), DONT_ENUM));
 
 #ifdef DEBUG
-    LookupResult lookup;
+    LookupResult lookup(isolate);
     result->LocalLookup(heap->length_symbol(), &lookup);
-    ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
+    ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
     ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
 
     ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@@ -1195,6 +1237,15 @@
 
   // Initialize the data slot.
   global_context()->set_data(heap->undefined_value());
+
+  {
+    // Initialize the random seed slot.
+    Handle<ByteArray> zeroed_byte_array(
+        factory->NewByteArray(kRandomStateSize));
+    global_context()->set_random_seed(*zeroed_byte_array);
+    memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
+  }
+  return true;
 }
 
 
@@ -1202,12 +1253,26 @@
   Handle<JSObject> global = Handle<JSObject>(global_context()->global());
 
   // TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
-  // longer need to live behind a flag, so WeakMap gets added to the snapshot.
-  if (FLAG_harmony_weakmaps) {  // -- W e a k M a p
-    Handle<JSObject> prototype =
-        factory()->NewJSObject(isolate()->object_function(), TENURED);
-    InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
-                    prototype, Builtins::kIllegal, true);
+  // longer need to live behind a flag, so functions get added to the snapshot.
+  if (FLAG_harmony_collections) {
+    {  // -- S e t
+      Handle<JSObject> prototype =
+          factory()->NewJSObject(isolate()->object_function(), TENURED);
+      InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
+                      prototype, Builtins::kIllegal, true);
+    }
+    {  // -- M a p
+      Handle<JSObject> prototype =
+          factory()->NewJSObject(isolate()->object_function(), TENURED);
+      InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
+                      prototype, Builtins::kIllegal, true);
+    }
+    {  // -- W e a k M a p
+      Handle<JSObject> prototype =
+          factory()->NewJSObject(isolate()->object_function(), TENURED);
+      InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
+                      prototype, Builtins::kIllegal, true);
+    }
   }
 }
 
@@ -1236,6 +1301,12 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate->debugger()->set_compiling_natives(true);
 #endif
+  // During genesis, the boilerplate for stack overflow won't work until the
+  // environment has been at least partially initialized. Add a stack check
+  // before entering JS code to catch overflow early.
+  StackLimitCheck check(Isolate::Current());
+  if (check.HasOverflowed()) return false;
+
   bool result = CompileScriptCached(name,
                                     source,
                                     NULL,
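
The stack check added above runs in C++ before any JS executes, because during genesis the JS-level overflow machinery is not yet usable. A hedged sketch of such a guard; the address-of-local trick is a common implementation, while V8's real limit comes from the isolate:

    #include <cstdint>
    #include <cstdio>

    class StackLimitCheck {
     public:
      explicit StackLimitCheck(uintptr_t limit) : limit_(limit) {}
      bool HasOverflowed() const {
        char dummy = 0;  // a local's address approximates the stack pointer
        // Assumes a downward-growing stack, as on common platforms.
        return reinterpret_cast<uintptr_t>(&dummy) < limit_;
      }
     private:
      uintptr_t limit_;
    };

    bool CompileNative(uintptr_t stack_limit) {
      StackLimitCheck check(stack_limit);
      if (check.HasOverflowed()) return false;  // bail before running JS
      // ... compile and run the native script ...
      return true;
    }

    int main() {
      std::printf("%d\n", CompileNative(0));  // limit 0: never overflows
      return 0;
    }
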
@@ -1279,7 +1350,7 @@
     if (cache != NULL) cache->Add(name, function_info);
   }
 
-  // Setup the function context. Conceptually, we should clone the
+  // Set up the function context. Conceptually, we should clone the
   // function before overwriting the context but since we're in a
   // single-threaded environment it is not strictly necessary.
   ASSERT(top_context->IsGlobalContext());
@@ -1327,6 +1398,8 @@
                  configure_instance_fun);
   INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
   INSTALL_NATIVE(JSObject, "functionCache", function_cache);
+  INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
+                 to_complete_property_descriptor);
 }
 
 void Genesis::InstallExperimentalNativeFunctions() {
@@ -1334,6 +1407,7 @@
     INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
     INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
     INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
+    INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
   }
 }
 
@@ -1363,7 +1437,7 @@
   builtins->set_global_context(*global_context());
   builtins->set_global_receiver(*builtins);
 
-  // Setup the 'global' properties of the builtins object. The
+  // Set up the 'global' properties of the builtins object. The
   // 'global' property that refers to the global object is the only
   // way to get from code running in the builtins context to the
   // global object.
@@ -1371,9 +1445,11 @@
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
   Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
   Handle<Object> global_obj(global_context()->global());
-  SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
+  CHECK_NOT_EMPTY_HANDLE(isolate(),
+                         JSObject::SetLocalPropertyIgnoreAttributes(
+                             builtins, global_symbol, global_obj, attributes));
 
-  // Setup the reference from the global object to the builtins object.
+  // Set up the reference from the global object to the builtins object.
   JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
 
   // Create a bridge function that has context in the global context.
@@ -1536,16 +1612,13 @@
     // doesn't inherit from Object.prototype.
     // To be used only for internal work by builtins. Instances
     // must not be leaked to user code.
-    // Only works correctly when called as a constructor. The normal
-    // Array code uses Array.prototype as prototype when called as
-    // a function.
     Handle<JSFunction> array_function =
         InstallFunction(builtins,
                         "InternalArray",
                         JS_ARRAY_TYPE,
                         JSArray::kSize,
                         isolate()->initial_object_prototype(),
-                        Builtins::kArrayCode,
+                        Builtins::kInternalArrayCode,
                         true);
     Handle<JSObject> prototype =
         factory()->NewJSObject(isolate()->object_function(), TENURED);
@@ -1555,6 +1628,18 @@
         isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
     array_function->shared()->DontAdaptArguments();
 
+    // InternalArrays should not use Smi-Only array optimizations. There are too
+    // many places in the C++ runtime code (e.g. RegEx) that assume that
+    // elements in InternalArrays can be set to non-Smi values without going
+    // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
+    // transition easy to trap. Moreover, they are rarely smi-only.
+    MaybeObject* maybe_map =
+        array_function->initial_map()->CopyDropTransitions();
+    Map* new_map;
+    if (!maybe_map->To<Map>(&new_map)) return false;
+    new_map->set_elements_kind(FAST_ELEMENTS);
+    array_function->set_initial_map(new_map);
+
     // Make "length" magic on instances.
     Handle<DescriptorArray> array_descriptors =
         factory()->CopyAppendForeignDescriptor(
@@ -1565,6 +1650,8 @@
 
     array_function->initial_map()->set_instance_descriptors(
         *array_descriptors);
+
+    global_context()->set_internal_array_function(*array_function);
   }
 
   if (FLAG_disable_native_files) {
@@ -1586,7 +1673,7 @@
   InstallNativeFunctions();
 
   // Store the map for the string prototype after the natives have been compiled
-  // and the String function has been setup.
+  // and the String function has been set up.
   Handle<JSFunction> string_function(global_context()->string_function());
   ASSERT(JSObject::cast(
       string_function->initial_map()->prototype())->HasFastProperties());
@@ -1648,15 +1735,17 @@
     initial_map->set_prototype(*array_prototype);
 
     // Update map with length accessor from Array and add "index" and "input".
-    Handle<Map> array_map(global_context()->js_array_map());
-    Handle<DescriptorArray> array_descriptors(
-        array_map->instance_descriptors());
-    ASSERT_EQ(1, array_descriptors->number_of_descriptors());
-
     Handle<DescriptorArray> reresult_descriptors =
         factory()->NewDescriptorArray(3);
+    DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
 
-    reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
+    JSFunction* array_function = global_context()->array_function();
+    Handle<DescriptorArray> array_descriptors(
+        array_function->initial_map()->instance_descriptors());
+    int index = array_descriptors->SearchWithCache(heap()->length_symbol());
+    MaybeObject* copy_result =
+        reresult_descriptors->CopyFrom(0, *array_descriptors, index, witness);
+    if (copy_result->IsFailure()) return false;
 
     int enum_index = 0;
     {
@@ -1664,7 +1753,7 @@
                                   JSRegExpResult::kIndexIndex,
                                   NONE,
                                   enum_index++);
-      reresult_descriptors->Set(1, &index_field);
+      reresult_descriptors->Set(1, &index_field, witness);
     }
 
     {
@@ -1672,9 +1761,9 @@
                                   JSRegExpResult::kInputIndex,
                                   NONE,
                                   enum_index++);
-      reresult_descriptors->Set(2, &input_field);
+      reresult_descriptors->Set(2, &input_field, witness);
     }
-    reresult_descriptors->Sort();
+    reresult_descriptors->Sort(witness);
 
     initial_map->set_inobject_properties(2);
     initial_map->set_pre_allocated_property_fields(2);
@@ -1701,9 +1790,9 @@
                "native proxy.js") == 0) {
       if (!CompileExperimentalBuiltin(isolate(), i)) return false;
     }
-    if (FLAG_harmony_weakmaps &&
+    if (FLAG_harmony_collections &&
         strcmp(ExperimentalNatives::GetScriptName(i).start(),
-               "native weakmap.js") == 0) {
+               "native collection.js") == 0) {
       if (!CompileExperimentalBuiltin(isolate(), i)) return false;
     }
   }
@@ -1821,25 +1910,28 @@
 
 
 void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
-  Factory* factory = global_context->GetIsolate()->factory();
+  Isolate* isolate = global_context->GetIsolate();
+  Factory* factory = isolate->factory();
   HandleScope scope;
-  Handle<JSGlobalObject> js_global(
-      JSGlobalObject::cast(global_context->global()));
+  Handle<JSGlobalObject> global(JSGlobalObject::cast(global_context->global()));
   // Expose the natives in global if a name for it is specified.
   if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
-    Handle<String> natives_string =
-        factory->LookupAsciiSymbol(FLAG_expose_natives_as);
-    SetLocalPropertyNoThrow(js_global, natives_string,
-                            Handle<JSObject>(js_global->builtins()), DONT_ENUM);
+    Handle<String> natives = factory->LookupAsciiSymbol(FLAG_expose_natives_as);
+    CHECK_NOT_EMPTY_HANDLE(isolate,
+                           JSObject::SetLocalPropertyIgnoreAttributes(
+                               global, natives,
+                               Handle<JSObject>(global->builtins()),
+                               DONT_ENUM));
   }
 
-  Handle<Object> Error = GetProperty(js_global, "Error");
+  Handle<Object> Error = GetProperty(global, "Error");
   if (Error->IsJSObject()) {
     Handle<String> name = factory->LookupAsciiSymbol("stackTraceLimit");
-    SetLocalPropertyNoThrow(Handle<JSObject>::cast(Error),
-                            name,
-                            Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
-                            NONE);
+    Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit));
+    CHECK_NOT_EMPTY_HANDLE(isolate,
+                           JSObject::SetLocalPropertyIgnoreAttributes(
+                               Handle<JSObject>::cast(Error), name,
+                               stack_trace_limit, NONE));
   }
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1858,11 +1950,39 @@
     Handle<String> debug_string =
         factory->LookupAsciiSymbol(FLAG_expose_debug_as);
     Handle<Object> global_proxy(debug->debug_context()->global_proxy());
-    SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM);
+    CHECK_NOT_EMPTY_HANDLE(isolate,
+                           JSObject::SetLocalPropertyIgnoreAttributes(
+                               global, debug_string, global_proxy, DONT_ENUM));
   }
 #endif
 }
 
+static uint32_t Hash(RegisteredExtension* extension) {
+  return v8::internal::ComputePointerHash(extension);
+}
+
+static bool MatchRegisteredExtensions(void* key1, void* key2) {
+  return key1 == key2;
+}
+
+Genesis::ExtensionStates::ExtensionStates()
+  : map_(MatchRegisteredExtensions, 8) { }
+
+Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
+    RegisteredExtension* extension) {
+  i::HashMap::Entry* entry = map_.Lookup(extension, Hash(extension), false);
+  if (entry == NULL) {
+    return UNVISITED;
+  }
+  return static_cast<ExtensionTraversalState>(
+      reinterpret_cast<intptr_t>(entry->value));
+}
+
+void Genesis::ExtensionStates::set_state(RegisteredExtension* extension,
+                                         ExtensionTraversalState state) {
+  map_.Lookup(extension, Hash(extension), true)->value =
+      reinterpret_cast<void*>(static_cast<intptr_t>(state));
+}
 
 bool Genesis::InstallExtensions(Handle<Context> global_context,
                                 v8::ExtensionConfiguration* extensions) {
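
The ExtensionStates helper above keys a hash map directly on extension pointers, with absent entries defaulting to UNVISITED. A minimal sketch of the same pattern using the standard library instead of v8::internal::HashMap (all names here are illustrative, not V8 API):

    #include <unordered_map>

    enum ExtensionTraversalState { UNVISITED, VISITED, INSTALLED };

    struct Extension;  // Opaque; stands in for v8::RegisteredExtension.

    class ExtensionStates {
     public:
      // Unknown keys default to UNVISITED, mirroring the NULL-entry check.
      ExtensionTraversalState get_state(Extension* extension) const {
        auto it = map_.find(extension);
        return it == map_.end() ? UNVISITED : it->second;
      }
      void set_state(Extension* extension, ExtensionTraversalState state) {
        map_[extension] = state;
      }
     private:
      std::unordered_map<Extension*, ExtensionTraversalState> map_;
    };
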
@@ -1870,29 +1990,27 @@
   //                 effort. (The external API reads 'ignore'-- does that mean
   //                 we can break the interface?)
 
-  // Clear coloring of extension list
+
+  ExtensionStates extension_states;  // All extensions have state UNVISITED.
+  // Install auto extensions.
   v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
   while (current != NULL) {
-    current->set_state(v8::UNVISITED);
-    current = current->next();
-  }
-  // Install auto extensions.
-  current = v8::RegisteredExtension::first_extension();
-  while (current != NULL) {
     if (current->extension()->auto_enable())
-      InstallExtension(current);
+      InstallExtension(current, &extension_states);
     current = current->next();
   }
 
-  if (FLAG_expose_gc) InstallExtension("v8/gc");
-  if (FLAG_expose_externalize_string) InstallExtension("v8/externalize");
+  if (FLAG_expose_gc) InstallExtension("v8/gc", &extension_states);
+  if (FLAG_expose_externalize_string) {
+    InstallExtension("v8/externalize", &extension_states);
+  }
 
   if (extensions == NULL) return true;
   // Install required extensions
   int count = v8::ImplementationUtilities::GetNameCount(extensions);
   const char** names = v8::ImplementationUtilities::GetNames(extensions);
   for (int i = 0; i < count; i++) {
-    if (!InstallExtension(names[i]))
+    if (!InstallExtension(names[i], &extension_states))
       return false;
   }
 
@@ -1902,7 +2020,8 @@
 
 // Installs a named extension.  This method is unoptimized and does
 // not scale well if we want to support a large number of extensions.
-bool Genesis::InstallExtension(const char* name) {
+bool Genesis::InstallExtension(const char* name,
+                               ExtensionStates* extension_states) {
   v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
   // Loop until we find the relevant extension
   while (current != NULL) {
@@ -1915,42 +2034,52 @@
         "v8::Context::New()", "Cannot find required extension");
     return false;
   }
-  return InstallExtension(current);
+  return InstallExtension(current, extension_states);
 }
 
 
-bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
+bool Genesis::InstallExtension(v8::RegisteredExtension* current,
+                               ExtensionStates* extension_states) {
   HandleScope scope;
 
-  if (current->state() == v8::INSTALLED) return true;
+  if (extension_states->get_state(current) == INSTALLED) return true;
   // The current node has already been visited so there must be a
   // cycle in the dependency graph; fail.
-  if (current->state() == v8::VISITED) {
+  if (extension_states->get_state(current) == VISITED) {
     v8::Utils::ReportApiFailure(
         "v8::Context::New()", "Circular extension dependency");
     return false;
   }
-  ASSERT(current->state() == v8::UNVISITED);
-  current->set_state(v8::VISITED);
+  ASSERT(extension_states->get_state(current) == UNVISITED);
+  extension_states->set_state(current, VISITED);
   v8::Extension* extension = current->extension();
   // Install the extension's dependencies
   for (int i = 0; i < extension->dependency_count(); i++) {
-    if (!InstallExtension(extension->dependencies()[i])) return false;
+    if (!InstallExtension(extension->dependencies()[i], extension_states))
+      return false;
   }
   Isolate* isolate = Isolate::Current();
-  Vector<const char> source = CStrVector(extension->source());
-  Handle<String> source_code = isolate->factory()->NewStringFromAscii(source);
-  bool result = CompileScriptCached(CStrVector(extension->name()),
-                                    source_code,
-                                    isolate->bootstrapper()->extensions_cache(),
-                                    extension,
-                                    Handle<Context>(isolate->context()),
-                                    false);
+  Handle<String> source_code =
+      isolate->factory()->NewExternalStringFromAscii(extension->source());
+  bool result = CompileScriptCached(
+      CStrVector(extension->name()),
+      source_code,
+      isolate->bootstrapper()->extensions_cache(),
+      extension,
+      Handle<Context>(isolate->context()),
+      false);
   ASSERT(isolate->has_pending_exception() != result);
   if (!result) {
+    // Print the name of the extension that failed to install. When an
+    // error is thrown during bootstrapping, the isolate's error-reporting
+    // code automatically prints the line number at which it happened
+    // to the console.
+    OS::PrintError("Error installing extension '%s'.\n",
+                   current->extension()->name());
     isolate->clear_pending_exception();
   }
-  current->set_state(v8::INSTALLED);
+  extension_states->set_state(current, INSTALLED);
+  isolate->NotifyExtensionInstalled();
   return result;
 }
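
The UNVISITED/VISITED/INSTALLED protocol above is the classic three-color depth-first traversal: finding a dependency already in state VISITED on the way down means a back edge, i.e. a circular dependency. A standalone sketch of the same traversal (illustrative types, not V8 code):

    #include <vector>

    enum State { UNVISITED, VISITED, INSTALLED };

    struct Node {
      std::vector<Node*> deps;
      State state = UNVISITED;
    };

    // Returns false if the dependency graph reachable from n has a cycle.
    bool Install(Node* n) {
      if (n->state == INSTALLED) return true;   // Already done; skip.
      if (n->state == VISITED) return false;    // Back edge: cycle detected.
      n->state = VISITED;                       // Mark "on the current path".
      for (Node* dep : n->deps) {
        if (!Install(dep)) return false;
      }
      n->state = INSTALLED;
      return true;
    }
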
 
@@ -1967,7 +2096,9 @@
     builtins->set_javascript_builtin(id, *function);
     Handle<SharedFunctionInfo> shared
         = Handle<SharedFunctionInfo>(function->shared());
-    if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
+    if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+      return false;
+    }
     // Set the code object on the function object.
     function->ReplaceCode(function->shared()->code());
     builtins->set_javascript_builtin_code(id, shared->code());
@@ -2035,7 +2166,9 @@
           Handle<String> key = Handle<String>(descs->GetKey(i));
           int index = descs->GetFieldIndex(i);
           Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
-          SetLocalPropertyNoThrow(to, key, value, details.attributes());
+          CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
+                                 JSObject::SetLocalPropertyIgnoreAttributes(
+                                     to, key, value, details.attributes()));
           break;
         }
         case CONSTANT_FUNCTION: {
@@ -2043,11 +2176,13 @@
           Handle<String> key = Handle<String>(descs->GetKey(i));
           Handle<JSFunction> fun =
               Handle<JSFunction>(descs->GetConstantFunction(i));
-          SetLocalPropertyNoThrow(to, key, fun, details.attributes());
+          CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
+                                 JSObject::SetLocalPropertyIgnoreAttributes(
+                                     to, key, fun, details.attributes()));
           break;
         }
         case CALLBACKS: {
-          LookupResult result;
+          LookupResult result(isolate());
           to->LocalLookup(descs->GetKey(i), &result);
           // If the property is already there we skip it
           if (result.IsProperty()) continue;
@@ -2058,7 +2193,7 @@
           Handle<Object> callbacks(descs->GetCallbacksObject(i));
           PropertyDetails d =
               PropertyDetails(details.attributes(), CALLBACKS, details.index());
-          SetNormalizedProperty(to, key, callbacks, d);
+          JSObject::SetNormalizedProperty(to, key, callbacks, d);
           break;
         }
         case MAP_TRANSITION:
@@ -2085,7 +2220,7 @@
       if (properties->IsKey(raw_key)) {
         ASSERT(raw_key->IsString());
         // If the property is already there we skip it.
-        LookupResult result;
+        LookupResult result(isolate());
         to->LocalLookup(String::cast(raw_key), &result);
         if (result.IsProperty()) continue;
         // Set the property.
@@ -2095,7 +2230,9 @@
           value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
         }
         PropertyDetails details = properties->DetailsAt(i);
-        SetLocalPropertyNoThrow(to, key, value, details.attributes());
+        CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
+                               JSObject::SetLocalPropertyIgnoreAttributes(
+                                   to, key, value, details.attributes()));
       }
     }
   }
@@ -2158,6 +2295,12 @@
   HandleScope scope;
   SaveContext saved_context(isolate);
 
+  // During genesis, the boilerplate for stack overflow won't work until the
+  // environment has been at least partially initialized. Add a stack check
+  // before entering JS code to catch overflow early.
+  StackLimitCheck check(Isolate::Current());
+  if (check.HasOverflowed()) return;
+
   Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
   if (!new_context.is_null()) {
     global_context_ =
@@ -2184,7 +2327,7 @@
     Handle<JSGlobalProxy> global_proxy =
         CreateNewGlobals(global_template, global_object, &inner_global);
     HookUpGlobalProxy(inner_global, global_proxy);
-    InitializeGlobal(inner_global, empty_function);
+    if (!InitializeGlobal(inner_global, empty_function)) return;
     InstallJSFunctionResultCaches();
     InitializeNormalizedMapCaches();
     if (!InstallNatives()) return;
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index abf61b9..101c2e1 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -88,7 +88,7 @@
 // context.
 class Bootstrapper {
  public:
-  // Requires: Heap::Setup has been called.
+  // Requires: Heap::SetUp has been called.
   void Initialize(bool create_heap_objects);
   void TearDown();
 
diff --git a/src/builtins.cc b/src/builtins.cc
index e6a0699..0f493e6 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,6 +33,8 @@
 #include "builtins.h"
 #include "gdb-jit.h"
 #include "ic-inl.h"
+#include "heap-profiler.h"
+#include "mark-compact.h"
 #include "vm-state-inl.h"
 
 namespace v8 {
@@ -183,39 +185,46 @@
 }
 
 
-BUILTIN(ArrayCodeGeneric) {
+static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
+                                           Isolate* isolate,
+                                           JSFunction* constructor) {
   Heap* heap = isolate->heap();
   isolate->counters()->array_function_runtime()->Increment();
 
   JSArray* array;
   if (CalledAsConstructor(isolate)) {
-    array = JSArray::cast(*args.receiver());
+    array = JSArray::cast((*args)[0]);
+    // Initialize elements and length in case later allocations fail, so that
+    // the array object is left in a valid state.
+    array->set_length(Smi::FromInt(0));
+    array->set_elements(heap->empty_fixed_array());
+    if (!FLAG_smi_only_arrays) {
+      Context* global_context = isolate->context()->global_context();
+      if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
+          !global_context->object_js_array_map()->IsUndefined()) {
+        array->set_map(Map::cast(global_context->object_js_array_map()));
+      }
+    }
   } else {
     // Allocate the JS Array
-    JSFunction* constructor =
-        isolate->context()->global_context()->array_function();
-    Object* obj;
-    { MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
-      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    }
-    array = JSArray::cast(obj);
+    MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
+    if (!maybe_obj->To(&array)) return maybe_obj;
   }
 
-  // 'array' now contains the JSArray we should initialize.
-  ASSERT(array->HasFastElements());
-
   // Optimize the case where there is one argument and the argument is a
   // small smi.
-  if (args.length() == 2) {
-    Object* obj = args[1];
+  if (args->length() == 2) {
+    Object* obj = (*args)[1];
     if (obj->IsSmi()) {
       int len = Smi::cast(obj)->value();
       if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
-        Object* obj;
+        Object* fixed_array;
         { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
-          if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+          if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
         }
-        array->SetContent(FixedArray::cast(obj));
+        // Bypass SetContent to skip its unnecessary elements type check.
+        array->set_elements(FixedArray::cast(fixed_array));
+        array->set_length(Smi::cast(obj));
         return array;
       }
     }
@@ -223,78 +232,82 @@
     { MaybeObject* maybe_obj = array->Initialize(0);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
-    return array->SetElementsLength(args[1]);
+    return array->SetElementsLength((*args)[1]);
   }
 
   // Optimize the case where there are no parameters passed.
-  if (args.length() == 1) {
+  if (args->length() == 1) {
     return array->Initialize(JSArray::kPreallocatedArrayElements);
   }
 
-  // Take the arguments as elements.
-  int number_of_elements = args.length() - 1;
-  Smi* len = Smi::FromInt(number_of_elements);
-  Object* obj;
-  { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len->value());
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-
-  AssertNoAllocation no_gc;
-  FixedArray* elms = FixedArray::cast(obj);
-  WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
-  // Fill in the content
-  for (int index = 0; index < number_of_elements; index++) {
-    elms->set(index, args[index+1], mode);
-  }
-
   // Set length and elements on the array.
-  array->set_elements(FixedArray::cast(obj));
-  array->set_length(len);
+  int number_of_elements = args->length() - 1;
+  MaybeObject* maybe_object =
+      array->EnsureCanContainElements(args, 1, number_of_elements,
+                                      ALLOW_CONVERTED_DOUBLE_ELEMENTS);
+  if (maybe_object->IsFailure()) return maybe_object;
 
+  // Allocate an appropriately typed elements array.
+  MaybeObject* maybe_elms;
+  ElementsKind elements_kind = array->GetElementsKind();
+  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+    maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
+        number_of_elements);
+  } else {
+    maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
+  }
+  FixedArrayBase* elms;
+  if (!maybe_elms->To<FixedArrayBase>(&elms)) return maybe_elms;
+
+  // Fill in the content
+  switch (array->GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS: {
+      FixedArray* smi_elms = FixedArray::cast(elms);
+      for (int index = 0; index < number_of_elements; index++) {
+        smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
+      }
+      break;
+    }
+    case FAST_ELEMENTS: {
+      AssertNoAllocation no_gc;
+      WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+      FixedArray* object_elms = FixedArray::cast(elms);
+      for (int index = 0; index < number_of_elements; index++) {
+        object_elms->set(index, (*args)[index+1], mode);
+      }
+      break;
+    }
+    case FAST_DOUBLE_ELEMENTS: {
+      FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
+      for (int index = 0; index < number_of_elements; index++) {
+        double_elms->set(index, (*args)[index+1]->Number());
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  array->set_elements(elms);
+  array->set_length(Smi::FromInt(number_of_elements));
   return array;
 }
 
 
-MUST_USE_RESULT static MaybeObject* AllocateJSArray(Heap* heap) {
-  JSFunction* array_function =
-      heap->isolate()->context()->global_context()->array_function();
-  Object* result;
-  { MaybeObject* maybe_result = heap->AllocateJSObject(array_function);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  return result;
+BUILTIN(InternalArrayCodeGeneric) {
+  return ArrayCodeGenericCommon(
+      &args,
+      isolate,
+      isolate->context()->global_context()->internal_array_function());
 }
 
 
-MUST_USE_RESULT static MaybeObject* AllocateEmptyJSArray(Heap* heap) {
-  Object* result;
-  { MaybeObject* maybe_result = AllocateJSArray(heap);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  JSArray* result_array = JSArray::cast(result);
-  result_array->set_length(Smi::FromInt(0));
-  result_array->set_elements(heap->empty_fixed_array());
-  return result_array;
-}
-
-
-static void CopyElements(Heap* heap,
-                         AssertNoAllocation* no_gc,
-                         FixedArray* dst,
-                         int dst_index,
-                         FixedArray* src,
-                         int src_index,
-                         int len) {
-  ASSERT(dst != src);  // Use MoveElements instead.
-  ASSERT(dst->map() != HEAP->fixed_cow_array_map());
-  ASSERT(len > 0);
-  CopyWords(dst->data_start() + dst_index,
-            src->data_start() + src_index,
-            len);
-  WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
-  if (mode == UPDATE_WRITE_BARRIER) {
-    heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
-  }
+BUILTIN(ArrayCodeGeneric) {
+  return ArrayCodeGenericCommon(
+      &args,
+      isolate,
+      isolate->context()->global_context()->array_function());
 }
 
 
@@ -305,6 +318,7 @@
                          FixedArray* src,
                          int src_index,
                          int len) {
+  if (len == 0) return;
   ASSERT(dst->map() != HEAP->fixed_cow_array_map());
   memmove(dst->data_start() + dst_index,
           src->data_start() + src_index,
@@ -313,6 +327,7 @@
   if (mode == UPDATE_WRITE_BARRIER) {
     heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
   }
+  heap->incremental_marking()->RecordWrites(dst);
 }
 
 
@@ -358,6 +373,16 @@
   former_start[to_trim] = heap->fixed_array_map();
   former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
 
+  // Maintain marking consistency for HeapObjectIterator and
+  // IncrementalMarking.
+  int size_delta = to_trim * kPointerSize;
+  if (heap->marking()->TransferMark(elms->address(),
+                                    elms->address() + size_delta)) {
+    MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
+  }
+
+  HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
+                                     elms->address() + size_delta));
   return FixedArray::cast(HeapObject::FromAddress(
       elms->address() + to_trim * kPointerSize));
 }
@@ -369,9 +394,6 @@
   // This method depends on the non-writability of Object and Array prototype
   // fields.
   if (array_proto->elements() != heap->empty_fixed_array()) return false;
-  // Hidden prototype
-  array_proto = JSObject::cast(array_proto->GetPrototype());
-  ASSERT(array_proto->elements() == heap->empty_fixed_array());
   // Object.prototype
   Object* proto = array_proto->GetPrototype();
   if (proto == heap->null_value()) return false;
@@ -384,20 +406,43 @@
 
 MUST_USE_RESULT
 static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
-    Heap* heap, Object* receiver) {
+    Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
   if (!receiver->IsJSArray()) return NULL;
   JSArray* array = JSArray::cast(receiver);
   HeapObject* elms = array->elements();
-  if (elms->map() == heap->fixed_array_map()) return elms;
-  if (elms->map() == heap->fixed_cow_array_map()) {
-    return array->EnsureWritableFastElements();
+  Map* map = elms->map();
+  if (map == heap->fixed_array_map()) {
+    if (args == NULL || !array->HasFastSmiOnlyElements()) {
+      return elms;
+    }
+  } else if (map == heap->fixed_cow_array_map()) {
+    MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
+    if (args == NULL || !array->HasFastSmiOnlyElements() ||
+        maybe_writable_result->IsFailure()) {
+      return maybe_writable_result;
+    }
+  } else {
+    return NULL;
   }
-  return NULL;
+
+  // Need to ensure that the arguments passed in args can be contained in
+  // the array.
+  int args_length = args->length();
+  if (first_added_arg >= args_length) return array->elements();
+
+  MaybeObject* maybe_array = array->EnsureCanContainElements(
+      args,
+      first_added_arg,
+      args_length - first_added_arg,
+      DONT_ALLOW_DOUBLE_ELEMENTS);
+  if (maybe_array->IsFailure()) return maybe_array;
+  return array->elements();
 }
 
 
 static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
                                                      JSArray* receiver) {
+  if (!FLAG_clever_optimizations) return false;
   Context* global_context = heap->isolate()->context()->global_context();
   JSObject* array_proto =
       JSObject::cast(global_context->array_function()->prototype());
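
The fixed_cow_array_map branch in EnsureJSArrayWithWritableFastElements above deals with copy-on-write backing stores: arrays created from literals can share one backing array until a write forces a private copy. The idea in miniature, using the standard library rather than V8's heap (illustrative only):

    #include <cstddef>
    #include <memory>
    #include <vector>

    class CowArray {
     public:
      explicit CowArray(std::vector<int> data)
          : data_(std::make_shared<std::vector<int> >(std::move(data))) {}

      int Get(std::size_t i) const { return (*data_)[i]; }

      void Set(std::size_t i, int v) {
        // Still shared with another owner: copy before the first write.
        if (data_.use_count() > 1) {
          data_ = std::make_shared<std::vector<int> >(*data_);
        }
        (*data_)[i] = v;
      }

     private:
      std::shared_ptr<std::vector<int> > data_;
    };
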
@@ -413,20 +458,18 @@
   HandleScope handleScope(isolate);
 
   Handle<Object> js_builtin =
-      GetProperty(Handle<JSObject>(
-          isolate->global_context()->builtins()),
-          name);
-  ASSERT(js_builtin->IsJSFunction());
-  Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
-  ScopedVector<Object**> argv(args.length() - 1);
-  int n_args = args.length() - 1;
-  for (int i = 0; i < n_args; i++) {
-    argv[i] = args.at<Object>(i + 1).location();
+      GetProperty(Handle<JSObject>(isolate->global_context()->builtins()),
+                  name);
+  Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
+  int argc = args.length() - 1;
+  ScopedVector<Handle<Object> > argv(argc);
+  for (int i = 0; i < argc; ++i) {
+    argv[i] = args.at<Object>(i + 1);
   }
-  bool pending_exception = false;
+  bool pending_exception;
   Handle<Object> result = Execution::Call(function,
                                           args.receiver(),
-                                          n_args,
+                                          argc,
                                           argv.start(),
                                           &pending_exception);
   if (pending_exception) return Failure::Exception();
@@ -439,7 +482,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver);
+        EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
     if (maybe_elms_obj == NULL) {
       return CallJsBuiltin(isolate, "ArrayPush", args);
     }
@@ -468,14 +511,11 @@
     }
     FixedArray* new_elms = FixedArray::cast(obj);
 
-    AssertNoAllocation no_gc;
-    if (len > 0) {
-      CopyElements(heap, &no_gc, new_elms, 0, elms, 0, len);
-    }
+    CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
+                               new_elms, FAST_ELEMENTS, 0, len);
     FillWithHoles(heap, new_elms, new_length, capacity);
 
     elms = new_elms;
-    array->set_elements(elms);
   }
 
   // Add the provided values.
@@ -485,6 +525,10 @@
     elms->set(index + len, args[index + 1], mode);
   }
 
+  if (elms != array->elements()) {
+    array->set_elements(elms);
+  }
+
   // Set the length.
   array->set_length(Smi::FromInt(new_length));
   return Smi::FromInt(new_length);
@@ -496,7 +540,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver);
+        EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
     if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
   }
@@ -529,7 +573,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver);
+        EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
     if (maybe_elms_obj == NULL)
         return CallJsBuiltin(isolate, "ArrayShift", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -539,7 +583,7 @@
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
   JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastElements());
+  ASSERT(array->HasFastTypeElements());
 
   int len = Smi::cast(array->length())->value();
   if (len == 0) return heap->undefined_value();
@@ -551,9 +595,7 @@
   }
 
   if (!heap->lo_space()->Contains(elms)) {
-    // As elms still in the same space they used to be,
-    // there is no need to update region dirty mark.
-    array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER);
+    array->set_elements(LeftTrimFixedArray(heap, elms, 1));
   } else {
     // Shift the elements.
     AssertNoAllocation no_gc;
@@ -573,7 +615,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver);
+        EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
     if (maybe_elms_obj == NULL)
         return CallJsBuiltin(isolate, "ArrayUnshift", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -583,7 +625,7 @@
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
   JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastElements());
+  ASSERT(array->HasFastTypeElements());
 
   int len = Smi::cast(array->length())->value();
   int to_add = args.length() - 1;
@@ -592,6 +634,11 @@
   // we should never hit this case.
   ASSERT(to_add <= (Smi::kMaxValue - len));
 
+  MaybeObject* maybe_object =
+      array->EnsureCanContainElements(&args, 1, to_add,
+                                      DONT_ALLOW_DOUBLE_ELEMENTS);
+  if (maybe_object->IsFailure()) return maybe_object;
+
   if (new_length > elms->length()) {
     // New backing storage is needed.
     int capacity = new_length + (new_length >> 1) + 16;
@@ -600,13 +647,9 @@
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     FixedArray* new_elms = FixedArray::cast(obj);
-
-    AssertNoAllocation no_gc;
-    if (len > 0) {
-      CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
-    }
+    CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
+                               new_elms, FAST_ELEMENTS, to_add, len);
     FillWithHoles(heap, new_elms, new_length, capacity);
-
     elms = new_elms;
     array->set_elements(elms);
   } else {
@@ -634,7 +677,7 @@
   int len = -1;
   if (receiver->IsJSArray()) {
     JSArray* array = JSArray::cast(receiver);
-    if (!array->HasFastElements() ||
+    if (!array->HasFastTypeElements() ||
         !IsJSArrayFastElementMovingAllowed(heap, array)) {
       return CallJsBuiltin(isolate, "ArraySlice", args);
     }
@@ -650,7 +693,7 @@
     bool is_arguments_object_with_fast_elements =
         receiver->IsJSObject()
         && JSObject::cast(receiver)->map() == arguments_map
-        && JSObject::cast(receiver)->HasFastElements();
+        && JSObject::cast(receiver)->HasFastTypeElements();
     if (!is_arguments_object_with_fast_elements) {
       return CallJsBuiltin(isolate, "ArraySlice", args);
     }
@@ -703,32 +746,22 @@
   int final = (relative_end < 0) ? Max(len + relative_end, 0)
                                  : Min(relative_end, len);
 
+  ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
+
   // Calculate the length of result array.
-  int result_len = final - k;
-  if (result_len <= 0) {
-    return AllocateEmptyJSArray(heap);
-  }
+  int result_len = Max(final - k, 0);
 
-  Object* result;
-  { MaybeObject* maybe_result = AllocateJSArray(heap);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  JSArray* result_array = JSArray::cast(result);
+  MaybeObject* maybe_array =
+      heap->AllocateJSArrayAndStorage(elements_kind,
+                                      result_len,
+                                      result_len);
+  JSArray* result_array;
+  if (!maybe_array->To(&result_array)) return maybe_array;
 
-  { MaybeObject* maybe_result =
-        heap->AllocateUninitializedFixedArray(result_len);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  FixedArray* result_elms = FixedArray::cast(result);
+  CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
+                             FixedArray::cast(result_array->elements()),
+                             FAST_ELEMENTS, 0, result_len);
 
-  AssertNoAllocation no_gc;
-  CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
-
-  // Set elements.
-  result_array->set_elements(result_elms);
-
-  // Set the length.
-  result_array->set_length(Smi::FromInt(result_len));
   return result_array;
 }
 
@@ -738,7 +771,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver);
+        EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
     if (maybe_elms_obj == NULL)
         return CallJsBuiltin(isolate, "ArraySplice", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -748,7 +781,7 @@
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
   JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastElements());
+  ASSERT(array->HasFastTypeElements());
 
   int len = Smi::cast(array->length())->value();
 
@@ -789,45 +822,25 @@
   }
 
   JSArray* result_array = NULL;
-  if (actual_delete_count == 0) {
-    Object* result;
-    { MaybeObject* maybe_result = AllocateEmptyJSArray(heap);
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-    result_array = JSArray::cast(result);
-  } else {
-    // Allocate result array.
-    Object* result;
-    { MaybeObject* maybe_result = AllocateJSArray(heap);
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-    result_array = JSArray::cast(result);
+  ElementsKind elements_kind =
+      JSObject::cast(receiver)->GetElementsKind();
+  MaybeObject* maybe_array =
+      heap->AllocateJSArrayAndStorage(elements_kind,
+                                      actual_delete_count,
+                                      actual_delete_count);
+  if (!maybe_array->To(&result_array)) return maybe_array;
 
-    { MaybeObject* maybe_result =
-          heap->AllocateUninitializedFixedArray(actual_delete_count);
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-    FixedArray* result_elms = FixedArray::cast(result);
-
-    AssertNoAllocation no_gc;
+  {
     // Fill newly created array.
-    CopyElements(heap,
-                 &no_gc,
-                 result_elms, 0,
-                 elms, actual_start,
-                 actual_delete_count);
-
-    // Set elements.
-    result_array->set_elements(result_elms);
-
-    // Set the length.
-    result_array->set_length(Smi::FromInt(actual_delete_count));
+    CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
+                               FixedArray::cast(result_array->elements()),
+                               FAST_ELEMENTS, 0, actual_delete_count);
   }
 
   int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
-
   int new_length = len - actual_delete_count + item_count;
 
+  bool elms_changed = false;
   if (item_count < actual_delete_count) {
     // Shrink the array.
     const bool trim_array = !heap->lo_space()->Contains(elms) &&
@@ -836,13 +849,14 @@
     if (trim_array) {
       const int delta = actual_delete_count - item_count;
 
-      if (actual_start > 0) {
+      {
         AssertNoAllocation no_gc;
         MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start);
       }
 
       elms = LeftTrimFixedArray(heap, elms, delta);
-      array->set_elements(elms, SKIP_WRITE_BARRIER);
+
+      elms_changed = true;
     } else {
       AssertNoAllocation no_gc;
       MoveElements(heap, &no_gc,
@@ -867,22 +881,21 @@
       }
       FixedArray* new_elms = FixedArray::cast(obj);
 
-      AssertNoAllocation no_gc;
-      // Copy the part before actual_start as is.
-      if (actual_start > 0) {
-        CopyElements(heap, &no_gc, new_elms, 0, elms, 0, actual_start);
+      {
+        // Copy the part before actual_start as is.
+        CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
+                                   new_elms, FAST_ELEMENTS, 0, actual_start);
+        const int to_copy = len - actual_delete_count - actual_start;
+        CopyObjectToObjectElements(elms, FAST_ELEMENTS,
+                                   actual_start + actual_delete_count,
+                                   new_elms, FAST_ELEMENTS,
+                                   actual_start + item_count, to_copy);
       }
-      const int to_copy = len - actual_delete_count - actual_start;
-      if (to_copy > 0) {
-        CopyElements(heap, &no_gc,
-                     new_elms, actual_start + item_count,
-                     elms, actual_start + actual_delete_count,
-                     to_copy);
-      }
+
       FillWithHoles(heap, new_elms, new_length, capacity);
 
       elms = new_elms;
-      array->set_elements(elms);
+      elms_changed = true;
     } else {
       AssertNoAllocation no_gc;
       MoveElements(heap, &no_gc,
@@ -898,6 +911,10 @@
     elms->set(k, args[3 + k - actual_start], mode);
   }
 
+  if (elms_changed) {
+    array->set_elements(elms);
+  }
+
   // Set the length.
   array->set_length(Smi::FromInt(new_length));
 
@@ -918,9 +935,10 @@
   // and calculating total length.
   int n_arguments = args.length();
   int result_len = 0;
+  ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
   for (int i = 0; i < n_arguments; i++) {
     Object* arg = args[i];
-    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
+    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
         || JSArray::cast(arg)->GetPrototype() != array_proto) {
       return CallJsBuiltin(isolate, "ArrayConcat", args);
     }
@@ -937,43 +955,35 @@
     if (result_len > FixedArray::kMaxLength) {
       return CallJsBuiltin(isolate, "ArrayConcat", args);
     }
-  }
 
-  if (result_len == 0) {
-    return AllocateEmptyJSArray(heap);
+    if (!JSArray::cast(arg)->HasFastSmiOnlyElements()) {
+      elements_kind = FAST_ELEMENTS;
+    }
   }
 
   // Allocate result.
-  Object* result;
-  { MaybeObject* maybe_result = AllocateJSArray(heap);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  JSArray* result_array = JSArray::cast(result);
-
-  { MaybeObject* maybe_result =
-        heap->AllocateUninitializedFixedArray(result_len);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  FixedArray* result_elms = FixedArray::cast(result);
+  JSArray* result_array;
+  MaybeObject* maybe_array =
+      heap->AllocateJSArrayAndStorage(elements_kind,
+                                      result_len,
+                                      result_len);
+  if (!maybe_array->To(&result_array)) return maybe_array;
+  if (result_len == 0) return result_array;
 
   // Copy data.
-  AssertNoAllocation no_gc;
   int start_pos = 0;
+  FixedArray* result_elms(FixedArray::cast(result_array->elements()));
   for (int i = 0; i < n_arguments; i++) {
     JSArray* array = JSArray::cast(args[i]);
     int len = Smi::cast(array->length())->value();
-    if (len > 0) {
-      FixedArray* elms = FixedArray::cast(array->elements());
-      CopyElements(heap, &no_gc, result_elms, start_pos, elms, 0, len);
-      start_pos += len;
-    }
+    FixedArray* elms = FixedArray::cast(array->elements());
+    CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
+                               result_elms, FAST_ELEMENTS,
+                               start_pos, len);
+    start_pos += len;
   }
   ASSERT(start_pos == result_len);
 
-  // Set the length and elements.
-  result_array->set_length(Smi::FromInt(result_len));
-  result_array->set_elements(result_elms);
-
   return result_array;
 }
 
@@ -1448,6 +1458,14 @@
   KeyedStoreIC::GenerateNonStrictArguments(masm);
 }
 
+static void Generate_TransitionElementsSmiToDouble(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateTransitionElementsSmiToDouble(masm);
+}
+
+static void Generate_TransitionElementsDoubleToObject(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateTransitionElementsDoubleToObject(masm);
+}
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
   Debug::GenerateLoadICDebugBreak(masm);
@@ -1469,18 +1487,30 @@
 }
 
 
-static void Generate_ConstructCall_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateConstructCallDebugBreak(masm);
-}
-
-
 static void Generate_Return_DebugBreak(MacroAssembler* masm) {
   Debug::GenerateReturnDebugBreak(masm);
 }
 
 
-static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateStubNoRegistersDebugBreak(masm);
+static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
+  Debug::GenerateCallFunctionStubDebugBreak(masm);
+}
+
+
+static void Generate_CallFunctionStub_Recording_DebugBreak(
+    MacroAssembler* masm) {
+  Debug::GenerateCallFunctionStubRecordDebugBreak(masm);
+}
+
+
+static void Generate_CallConstructStub_DebugBreak(MacroAssembler* masm) {
+  Debug::GenerateCallConstructStubDebugBreak(masm);
+}
+
+
+static void Generate_CallConstructStub_Recording_DebugBreak(
+    MacroAssembler* masm) {
+  Debug::GenerateCallConstructStubRecordDebugBreak(masm);
 }
 
 
@@ -1537,30 +1567,30 @@
   BuiltinExtraArguments extra_args;
 };
 
+#define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }
+
 class BuiltinFunctionTable {
  public:
-  BuiltinFunctionTable() {
-    Builtins::InitBuiltinFunctionTable();
+  BuiltinDesc* functions() {
+    CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
+    return functions_;
   }
 
-  static const BuiltinDesc* functions() { return functions_; }
-
- private:
-  static BuiltinDesc functions_[Builtins::builtin_count + 1];
+  OnceType once_;
+  BuiltinDesc functions_[Builtins::builtin_count + 1];
 
   friend class Builtins;
 };
 
-BuiltinDesc BuiltinFunctionTable::functions_[Builtins::builtin_count + 1];
-
-static const BuiltinFunctionTable builtin_function_table_init;
+static BuiltinFunctionTable builtin_function_table =
+    BUILTIN_FUNCTION_TABLE_INIT;
 
 // Define array of pointers to generators and C builtin functions.
 // We do this in a sort of roundabout way so that we can do the initialization
 // within the lexical scope of Builtins:: and within a context where
 // Code::Flags names a non-abstract type.
 void Builtins::InitBuiltinFunctionTable() {
-  BuiltinDesc* functions = BuiltinFunctionTable::functions_;
+  BuiltinDesc* functions = builtin_function_table.functions_;
   functions[builtin_count].generator = NULL;
   functions[builtin_count].c_code = NULL;
   functions[builtin_count].s_name = NULL;
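
The BuiltinFunctionTable change above replaces a static C++ constructor, which runs at an unpredictable point during process startup, with thread-safe lazy initialization via CallOnce. The same pattern expressed with the standard library (V8's CallOnce/V8_ONCE_INIT play the role of std::call_once/std::once_flag; the table contents below are made up):

    #include <mutex>

    struct Table { int entries[16]; };

    static std::once_flag table_once;
    static Table table;

    static void InitTable() {
      for (int i = 0; i < 16; ++i) table.entries[i] = i * i;
    }

    Table* GetTable() {
      // First caller runs InitTable; every later caller just waits/returns.
      std::call_once(table_once, InitTable);
      return &table;
    }
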
@@ -1596,7 +1626,7 @@
 #undef DEF_FUNCTION_PTR_A
 }
 
-void Builtins::Setup(bool create_heap_objects) {
+void Builtins::SetUp(bool create_heap_objects) {
   ASSERT(!initialized_);
   Isolate* isolate = Isolate::Current();
   Heap* heap = isolate->heap();
@@ -1604,23 +1634,25 @@
   // Create a scope for the handles in the builtins.
   HandleScope scope(isolate);
 
-  const BuiltinDesc* functions = BuiltinFunctionTable::functions();
+  const BuiltinDesc* functions = builtin_function_table.functions();
 
   // For now we generate builtin adaptor code into a stack-allocated
-  // buffer, before copying it into individual code objects.
-  byte buffer[4*KB];
+  // buffer, before copying it into individual code objects. Be careful
+  // with alignment; some platforms don't like unaligned code.
+  union { int force_alignment; byte buffer[4*KB]; } u;
 
   // Traverse the list of builtins and generate an adaptor in a
   // separate code object for each one.
   for (int i = 0; i < builtin_count; i++) {
     if (create_heap_objects) {
-      MacroAssembler masm(isolate, buffer, sizeof buffer);
+      MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
       // Generate the code/adaptor.
       typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
       Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
       // We pass all arguments to the generator, but it may not use all of
       // them.  This works because the first arguments are on top of the
       // stack.
+      ASSERT(!masm.has_frame());
       g(&masm, functions[i].name, functions[i].extra_args);
       // Move the code into the object heap.
       CodeDesc desc;
diff --git a/src/builtins.h b/src/builtins.h
index 31090d3..f079139 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -44,6 +44,7 @@
                                                                     \
   V(EmptyFunction, NO_EXTRA_ARGUMENTS)                              \
                                                                     \
+  V(InternalArrayCodeGeneric, NO_EXTRA_ARGUMENTS)                   \
   V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS)                           \
                                                                     \
   V(ArrayPush, NO_EXTRA_ARGUMENTS)                                  \
@@ -66,8 +67,6 @@
 #define BUILTIN_LIST_A(V)                                               \
   V(ArgumentsAdaptorTrampoline,     BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
-  V(JSConstructCall,                BUILTIN, UNINITIALIZED,             \
-                                    Code::kNoExtraICState)              \
   V(JSConstructStubCountdown,       BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
   V(JSConstructStubGeneric,         BUILTIN, UNINITIALIZED,             \
@@ -167,6 +166,10 @@
                                     kStrictMode)                        \
   V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC,       \
                                      Code::kNoExtraICState)             \
+  V(TransitionElementsSmiToDouble,  BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(TransitionElementsDoubleToObject, BUILTIN, UNINITIALIZED,           \
+                                      Code::kNoExtraICState)            \
                                                                         \
   /* Uses KeyedLoadIC_Initialize; must be after in list. */             \
   V(FunctionCall,                   BUILTIN, UNINITIALIZED,             \
@@ -174,6 +177,8 @@
   V(FunctionApply,                  BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
                                                                         \
+  V(InternalArrayCode,              BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
   V(ArrayCode,                      BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
   V(ArrayConstructCode,             BUILTIN, UNINITIALIZED,             \
@@ -188,27 +193,31 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 // Define list of builtins used by the debugger implemented in assembly.
-#define BUILTIN_LIST_DEBUG_A(V)                                \
-  V(Return_DebugBreak,          BUILTIN, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)         \
-  V(ConstructCall_DebugBreak,   BUILTIN, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)         \
-  V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)         \
-  V(LoadIC_DebugBreak,          LOAD_IC, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)         \
-  V(KeyedLoadIC_DebugBreak,     KEYED_LOAD_IC, DEBUG_BREAK,    \
-                                Code::kNoExtraICState)         \
-  V(StoreIC_DebugBreak,         STORE_IC, DEBUG_BREAK,         \
-                                Code::kNoExtraICState)         \
-  V(KeyedStoreIC_DebugBreak,    KEYED_STORE_IC, DEBUG_BREAK,   \
-                                Code::kNoExtraICState)         \
-  V(Slot_DebugBreak,            BUILTIN, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)         \
-  V(PlainReturn_LiveEdit,       BUILTIN, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)         \
-  V(FrameDropper_LiveEdit,      BUILTIN, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)
+#define BUILTIN_LIST_DEBUG_A(V)                                 \
+  V(Return_DebugBreak,                         BUILTIN, DEBUG_BREAK,          \
+                                               Code::kNoExtraICState)         \
+  V(CallFunctionStub_DebugBreak,               BUILTIN, DEBUG_BREAK,          \
+                                               Code::kNoExtraICState)         \
+  V(CallFunctionStub_Recording_DebugBreak,     BUILTIN, DEBUG_BREAK,          \
+                                               Code::kNoExtraICState)         \
+  V(CallConstructStub_DebugBreak,              BUILTIN, DEBUG_BREAK,          \
+                                               Code::kNoExtraICState)         \
+  V(CallConstructStub_Recording_DebugBreak,    BUILTIN, DEBUG_BREAK,          \
+                                               Code::kNoExtraICState)         \
+  V(LoadIC_DebugBreak,                         LOAD_IC, DEBUG_BREAK,          \
+                                               Code::kNoExtraICState)         \
+  V(KeyedLoadIC_DebugBreak,                    KEYED_LOAD_IC, DEBUG_BREAK,    \
+                                               Code::kNoExtraICState)         \
+  V(StoreIC_DebugBreak,                        STORE_IC, DEBUG_BREAK,         \
+                                               Code::kNoExtraICState)         \
+  V(KeyedStoreIC_DebugBreak,                   KEYED_STORE_IC, DEBUG_BREAK,   \
+                                               Code::kNoExtraICState)         \
+  V(Slot_DebugBreak,                           BUILTIN, DEBUG_BREAK,          \
+                                               Code::kNoExtraICState)         \
+  V(PlainReturn_LiveEdit,                      BUILTIN, DEBUG_BREAK,          \
+                                               Code::kNoExtraICState)         \
+  V(FrameDropper_LiveEdit,                     BUILTIN, DEBUG_BREAK,          \
+                                               Code::kNoExtraICState)
 #else
 #define BUILTIN_LIST_DEBUG_A(V)
 #endif
@@ -234,7 +243,6 @@
   V(DELETE, 2)                           \
   V(IN, 1)                               \
   V(INSTANCE_OF, 1)                      \
-  V(GET_KEYS, 0)                         \
   V(FILTER_KEY, 1)                       \
   V(CALL_NON_FUNCTION, 0)                \
   V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
@@ -259,7 +267,7 @@
 
   // Generate all builtin code objects. Should be called once during
   // isolate initialization.
-  void Setup(bool create_heap_objects);
+  void SetUp(bool create_heap_objects);
   void TearDown();
 
   // Garbage collection support.
@@ -340,7 +348,6 @@
   static void Generate_Adaptor(MacroAssembler* masm,
                                CFunctionId id,
                                BuiltinExtraArguments extra_args);
-  static void Generate_JSConstructCall(MacroAssembler* masm);
   static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
   static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
   static void Generate_JSConstructStubApi(MacroAssembler* masm);
@@ -356,6 +363,7 @@
   static void Generate_FunctionCall(MacroAssembler* masm);
   static void Generate_FunctionApply(MacroAssembler* masm);
 
+  static void Generate_InternalArrayCode(MacroAssembler* masm);
   static void Generate_ArrayCode(MacroAssembler* masm);
   static void Generate_ArrayConstructCode(MacroAssembler* masm);
 
diff --git a/src/bytecodes-irregexp.h b/src/bytecodes-irregexp.h
index 93218ea..b13efb3 100644
--- a/src/bytecodes-irregexp.h
+++ b/src/bytecodes-irregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,12 +33,12 @@
 namespace internal {
 
 
-static const int BYTECODE_MASK = 0xff;
+const int BYTECODE_MASK = 0xff;
 // The first argument is packed in with the byte code in one word, so it
 // has 24 bits, but since it can be positive or negative only 23 bits are
 // used for positive values.
-static const unsigned int MAX_FIRST_ARG = 0x7fffffu;
-static const int BYTECODE_SHIFT = 8;
+const unsigned int MAX_FIRST_ARG = 0x7fffffu;
+const int BYTECODE_SHIFT = 8;
 
 #define BYTECODE_ITERATOR(V)                                                   \
 V(BREAK,              0, 4)   /* bc8                                        */ \
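
Given BYTECODE_MASK, MAX_FIRST_ARG and BYTECODE_SHIFT above, a bytecode and its first argument share one 32-bit word: eight opcode bits in the low byte and a signed 24-bit argument above them, of which only 23 bits are usable for positive values. A sketch of the encoding, assuming the constants above are in scope and 32-bit words (not actual V8 code):

    #include <cassert>
    #include <cstdint>

    uint32_t Pack(int bytecode, int32_t arg) {
      assert((bytecode & ~BYTECODE_MASK) == 0);            // Fits in 8 bits.
      assert(arg <= static_cast<int32_t>(MAX_FIRST_ARG));  // 23-bit positive.
      return (static_cast<uint32_t>(arg) << BYTECODE_SHIFT) |
             static_cast<uint32_t>(bytecode);
    }

    int UnpackBytecode(uint32_t word) { return word & BYTECODE_MASK; }

    int32_t UnpackArg(uint32_t word) {
      // Arithmetic right shift sign-extends the 24-bit argument (on the
      // two's-complement platforms V8 targets).
      return static_cast<int32_t>(word) >> BYTECODE_SHIFT;
    }
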
diff --git a/src/cached-powers.cc b/src/cached-powers.cc
index 30a67a6..9241d26 100644
--- a/src/cached-powers.cc
+++ b/src/cached-powers.cc
@@ -134,14 +134,12 @@
 };
 
 static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
-static const int kCachedPowersOffset = -kCachedPowers[0].decimal_exponent;
+static const int kCachedPowersOffset = 348;  // -1 * the first decimal_exponent.
 static const double kD_1_LOG2_10 = 0.30102999566398114;  //  1 / lg(10)
-const int PowersOfTenCache::kDecimalExponentDistance =
-    kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent;
-const int PowersOfTenCache::kMinDecimalExponent =
-    kCachedPowers[0].decimal_exponent;
-const int PowersOfTenCache::kMaxDecimalExponent =
-    kCachedPowers[kCachedPowersLength - 1].decimal_exponent;
+// Difference between the decimal exponents in the table above.
+const int PowersOfTenCache::kDecimalExponentDistance = 8;
+const int PowersOfTenCache::kMinDecimalExponent = -348;
+const int PowersOfTenCache::kMaxDecimalExponent = 340;
 
 void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
     int min_exponent,
diff --git a/src/char-predicates-inl.h b/src/char-predicates-inl.h
index 0dfc80d..1a89ef3 100644
--- a/src/char-predicates-inl.h
+++ b/src/char-predicates-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,7 +52,7 @@
 }
 
 
-static inline bool IsInRange(int value, int lower_limit, int higher_limit) {
+inline bool IsInRange(int value, int lower_limit, int higher_limit) {
   ASSERT(lower_limit <= higher_limit);
   return static_cast<unsigned int>(value - lower_limit) <=
       static_cast<unsigned int>(higher_limit - lower_limit);
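
The diff merely drops `static` from this header-defined helper, but the body is worth a note: it relies on a classic unsigned-comparison trick. Subtracting `lower_limit` and casting to unsigned maps any value below the range onto a huge number, so a single comparison tests both bounds. A small self-contained demonstration:

```cpp
#include <cassert>

// Same trick as IsInRange above: one unsigned comparison covers both bounds,
// because (value - lower_limit) wraps to a huge unsigned number whenever
// value < lower_limit.
inline bool InRange(int value, int lower_limit, int higher_limit) {
  return static_cast<unsigned int>(value - lower_limit) <=
         static_cast<unsigned int>(higher_limit - lower_limit);
}

int main() {
  assert(InRange('m', 'a', 'z'));
  assert(!InRange('a' - 1, 'a', 'z'));  // wraps: compares as a huge value
  assert(!InRange('z' + 1, 'a', 'z'));
  return 0;
}
```
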
diff --git a/src/char-predicates.h b/src/char-predicates.h
index 5a901a2..b97191f 100644
--- a/src/char-predicates.h
+++ b/src/char-predicates.h
@@ -57,6 +57,8 @@
   static inline bool Is(uc32 c) {
     return IdentifierStart::Is(c)
         || unibrow::Number::Is(c)
+        || c == 0x200C  // U+200C is Zero-Width Non-Joiner.
+        || c == 0x200D  // U+200D is Zero-Width Joiner.
         || unibrow::CombiningMark::Is(c)
         || unibrow::ConnectorPunctuation::Is(c);
   }
diff --git a/src/checks.h b/src/checks.h
index 2f359f6..608aa14 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -51,26 +51,20 @@
 #endif
 
 
-// Used by the CHECK macro -- should not be called directly.
-static inline void CheckHelper(const char* file,
-                               int line,
-                               const char* source,
-                               bool condition) {
-  if (!condition)
-    V8_Fatal(file, line, "CHECK(%s) failed", source);
-}
-
-
 // The CHECK macro checks that the given condition is true; if not, it
 // prints a message to stderr and aborts.
-#define CHECK(condition) CheckHelper(__FILE__, __LINE__, #condition, condition)
+#define CHECK(condition) do {                                       \
+    if (!(condition)) {                                             \
+      V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \
+    }                                                               \
+  } while (0)
 
 
 // Helper function used by the CHECK_EQ function when given int
 // arguments.  Should not be called directly.
-static inline void CheckEqualsHelper(const char* file, int line,
-                                     const char* expected_source, int expected,
-                                     const char* value_source, int value) {
+inline void CheckEqualsHelper(const char* file, int line,
+                              const char* expected_source, int expected,
+                              const char* value_source, int value) {
   if (expected != value) {
     V8_Fatal(file, line,
              "CHECK_EQ(%s, %s) failed\n#   Expected: %i\n#   Found: %i",
@@ -81,11 +75,11 @@
 
 // Helper function used by the CHECK_EQ function when given int64_t
 // arguments.  Should not be called directly.
-static inline void CheckEqualsHelper(const char* file, int line,
-                                     const char* expected_source,
-                                     int64_t expected,
-                                     const char* value_source,
-                                     int64_t value) {
+inline void CheckEqualsHelper(const char* file, int line,
+                              const char* expected_source,
+                              int64_t expected,
+                              const char* value_source,
+                              int64_t value) {
   if (expected != value) {
     // Print int64_t values in hex, as two int32s,
     // to avoid platform-dependencies.
@@ -103,12 +97,12 @@
 
 // Helper function used by the CHECK_NE function when given int
 // arguments.  Should not be called directly.
-static inline void CheckNonEqualsHelper(const char* file,
-                                        int line,
-                                        const char* unexpected_source,
-                                        int unexpected,
-                                        const char* value_source,
-                                        int value) {
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* unexpected_source,
+                                 int unexpected,
+                                 const char* value_source,
+                                 int value) {
   if (unexpected == value) {
     V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %i",
              unexpected_source, value_source, value);
@@ -118,12 +112,12 @@
 
 // Helper function used by the CHECK function when given string
 // arguments.  Should not be called directly.
-static inline void CheckEqualsHelper(const char* file,
-                                     int line,
-                                     const char* expected_source,
-                                     const char* expected,
-                                     const char* value_source,
-                                     const char* value) {
+inline void CheckEqualsHelper(const char* file,
+                              int line,
+                              const char* expected_source,
+                              const char* expected,
+                              const char* value_source,
+                              const char* value) {
   if ((expected == NULL && value != NULL) ||
       (expected != NULL && value == NULL) ||
       (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
@@ -134,12 +128,12 @@
 }
 
 
-static inline void CheckNonEqualsHelper(const char* file,
-                                        int line,
-                                        const char* expected_source,
-                                        const char* expected,
-                                        const char* value_source,
-                                        const char* value) {
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* expected_source,
+                                 const char* expected,
+                                 const char* value_source,
+                                 const char* value) {
   if (expected == value ||
       (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
     V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %s",
@@ -150,12 +144,12 @@
 
 // Helper function used by the CHECK function when given pointer
 // arguments.  Should not be called directly.
-static inline void CheckEqualsHelper(const char* file,
-                                     int line,
-                                     const char* expected_source,
-                                     const void* expected,
-                                     const char* value_source,
-                                     const void* value) {
+inline void CheckEqualsHelper(const char* file,
+                              int line,
+                              const char* expected_source,
+                              const void* expected,
+                              const char* value_source,
+                              const void* value) {
   if (expected != value) {
     V8_Fatal(file, line,
              "CHECK_EQ(%s, %s) failed\n#   Expected: %p\n#   Found: %p",
@@ -165,12 +159,12 @@
 }
 
 
-static inline void CheckNonEqualsHelper(const char* file,
-                                        int line,
-                                        const char* expected_source,
-                                        const void* expected,
-                                        const char* value_source,
-                                        const void* value) {
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* expected_source,
+                                 const void* expected,
+                                 const char* value_source,
+                                 const void* value) {
   if (expected == value) {
     V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %p",
              expected_source, value_source, value);
@@ -180,12 +174,12 @@
 
 // Helper function used by the CHECK function when given floating
 // point arguments.  Should not be called directly.
-static inline void CheckEqualsHelper(const char* file,
-                                     int line,
-                                     const char* expected_source,
-                                     double expected,
-                                     const char* value_source,
-                                     double value) {
+inline void CheckEqualsHelper(const char* file,
+                              int line,
+                              const char* expected_source,
+                              double expected,
+                              const char* value_source,
+                              double value) {
   // Force values to 64 bit memory to truncate 80 bit precision on IA32.
   volatile double* exp = new double[1];
   *exp = expected;
@@ -201,12 +195,12 @@
 }
 
 
-static inline void CheckNonEqualsHelper(const char* file,
-                                     int line,
-                                     const char* expected_source,
-                                     double expected,
-                                     const char* value_source,
-                                     double value) {
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* expected_source,
+                                 double expected,
+                                 const char* value_source,
+                                 double value) {
   // Force values to 64 bit memory to truncate 80 bit precision on IA32.
   volatile double* exp = new double[1];
   *exp = expected;
@@ -257,25 +251,22 @@
     SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
 
 
-namespace v8 { namespace internal {
+extern bool FLAG_enable_slow_asserts;
 
-bool EnableSlowAsserts();
-
-} }  // namespace v8::internal
 
 // The ASSERT macro is equivalent to CHECK except that it only
 // generates code in debug builds.
 #ifdef DEBUG
-#define ASSERT_RESULT(expr)  CHECK(expr)
-#define ASSERT(condition)    CHECK(condition)
-#define ASSERT_EQ(v1, v2)    CHECK_EQ(v1, v2)
-#define ASSERT_NE(v1, v2)    CHECK_NE(v1, v2)
-#define ASSERT_GE(v1, v2)    CHECK_GE(v1, v2)
-#define ASSERT_LT(v1, v2)    CHECK_LT(v1, v2)
-#define ASSERT_LE(v1, v2)    CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) if (EnableSlowAsserts()) CHECK(condition)
+#define ASSERT_RESULT(expr)    CHECK(expr)
+#define ASSERT(condition)      CHECK(condition)
+#define ASSERT_EQ(v1, v2)      CHECK_EQ(v1, v2)
+#define ASSERT_NE(v1, v2)      CHECK_NE(v1, v2)
+#define ASSERT_GE(v1, v2)      CHECK_GE(v1, v2)
+#define ASSERT_LT(v1, v2)      CHECK_LT(v1, v2)
+#define ASSERT_LE(v1, v2)      CHECK_LE(v1, v2)
+#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
 #else
-#define ASSERT_RESULT(expr)     (expr)
+#define ASSERT_RESULT(expr)    (expr)
 #define ASSERT(condition)      ((void) 0)
 #define ASSERT_EQ(v1, v2)      ((void) 0)
 #define ASSERT_NE(v1, v2)      ((void) 0)
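
The rewritten CHECK expands to a do { ... } while (0) block instead of a call to a helper function, so the condition is evaluated inline and the macro still behaves as a single statement. A hedged sketch of why the wrapper is needed (ReportFailure stands in for V8_Fatal; none of these names are from the patch):

```cpp
void ReportFailure(const char* source);  // stand-in for V8_Fatal

// Without the wrapper, a macro expanding to a bare if-statement misparses
// when used as the body of an if/else: the invocation's trailing ';' plus
// the inner 'if' either fails to compile or steals the 'else'.
#define GOOD_CHECK(condition) do {               \
    if (!(condition)) ReportFailure(#condition); \
  } while (0)

void Example(bool flag, int x) {
  if (flag)
    GOOD_CHECK(x > 0);        // expands to exactly one statement...
  else
    ReportFailure("flag");    // ...so this else binds to the outer if
}
```
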
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 724445e..11016c8 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,11 +52,12 @@
   // Update the static counter each time a new code stub is generated.
   masm->isolate()->counters()->code_stubs()->Increment();
 
-  // Nested stubs are not allowed for leafs.
-  AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
+  // Nested stubs are not allowed for leaves.
+  AllowStubCallsScope allow_scope(masm, false);
 
   // Generate the code for the stub.
   masm->set_generating_stub(true);
+  NoCurrentFrameScope scope(masm);
   Generate(masm);
 }
 
@@ -100,7 +101,14 @@
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
   Code* code;
-  if (!FindCodeInCache(&code)) {
+  if (UseSpecialCache()
+      ? FindCodeInSpecialCache(&code)
+      : FindCodeInCache(&code)) {
+    ASSERT(IsPregenerated() == code->is_pregenerated());
+    return Handle<Code>(code);
+  }
+
+  {
     HandleScope scope(isolate);
 
     // Generate the new code.
@@ -118,61 +126,28 @@
     Handle<Code> new_object = factory->NewCode(
         desc, flags, masm.CodeObject(), NeedsImmovableCode());
     RecordCodeGeneration(*new_object, &masm);
-    FinishCode(*new_object);
+    FinishCode(new_object);
 
-    // Update the dictionary and the root in Heap.
-    Handle<UnseededNumberDictionary> dict =
-        factory->DictionaryAtNumberPut(
-            Handle<UnseededNumberDictionary>(heap->code_stubs()),
-            GetKey(),
-            new_object);
-    heap->public_set_code_stubs(*dict);
-
+    if (UseSpecialCache()) {
+      AddToSpecialCache(new_object);
+    } else {
+      // Update the dictionary and the root in Heap.
+      Handle<UnseededNumberDictionary> dict =
+          factory->DictionaryAtNumberPut(
+              Handle<UnseededNumberDictionary>(heap->code_stubs()),
+              GetKey(),
+              new_object);
+      heap->public_set_code_stubs(*dict);
+    }
     code = *new_object;
   }
 
+  Activate(code);
   ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
   return Handle<Code>(code, isolate);
 }
 
 
-MaybeObject* CodeStub::TryGetCode() {
-  Code* code;
-  if (!FindCodeInCache(&code)) {
-    // Generate the new code.
-    MacroAssembler masm(Isolate::Current(), NULL, 256);
-    GenerateCode(&masm);
-    Heap* heap = masm.isolate()->heap();
-
-    // Create the code object.
-    CodeDesc desc;
-    masm.GetCode(&desc);
-
-    // Try to copy the generated code into a heap object.
-    Code::Flags flags = Code::ComputeFlags(
-        static_cast<Code::Kind>(GetCodeKind()),
-        GetICState());
-    Object* new_object;
-    { MaybeObject* maybe_new_object =
-          heap->CreateCode(desc, flags, masm.CodeObject());
-      if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
-    }
-    code = Code::cast(new_object);
-    RecordCodeGeneration(code, &masm);
-    FinishCode(code);
-
-    // Try to update the code cache but do not fail if unable.
-    MaybeObject* maybe_new_object =
-        heap->code_stubs()->AtNumberPut(GetKey(), code);
-    if (maybe_new_object->ToObject(&new_object)) {
-      heap->public_set_code_stubs(UnseededNumberDictionary::cast(new_object));
-    }
-  }
-
-  return code;
-}
-
-
 const char* CodeStub::MajorName(CodeStub::Major major_key,
                                 bool allow_unknown_keys) {
   switch (major_key) {
@@ -188,6 +163,37 @@
 }
 
 
+void CodeStub::PrintName(StringStream* stream) {
+  stream->Add("%s", MajorName(MajorKey(), false));
+}
+
+
+void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
+  ASSERT(*known_map_ != NULL);
+  Isolate* isolate = new_object->GetIsolate();
+  Factory* factory = isolate->factory();
+  return Map::UpdateCodeCache(known_map_,
+                              factory->compare_ic_symbol(),
+                              new_object);
+}
+
+
+bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
+  Isolate* isolate = known_map_->GetIsolate();
+  Factory* factory = isolate->factory();
+  Code::Flags flags = Code::ComputeFlags(
+      static_cast<Code::Kind>(GetCodeKind()),
+      UNINITIALIZED);
+  Handle<Object> probe(
+      known_map_->FindInCodeCache(*factory->compare_ic_symbol(), flags));
+  if (probe->IsCode()) {
+    *code_out = Code::cast(*probe);
+    return true;
+  }
+  return false;
+}
+
+
 int ICCompareStub::MinorKey() {
   return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
 }
@@ -213,6 +219,10 @@
     case CompareIC::OBJECTS:
       GenerateObjects(masm);
       break;
+    case CompareIC::KNOWN_OBJECTS:
+      ASSERT(*known_map_ != NULL);
+      GenerateKnownObjects(masm);
+      break;
     default:
       UNREACHABLE();
   }
@@ -242,9 +252,18 @@
 }
 
 
+void JSEntryStub::FinishCode(Handle<Code> code) {
+  Handle<FixedArray> handler_table =
+      code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
+  handler_table->set(0, Smi::FromInt(handler_offset_));
+  code->set_handler_table(*handler_table);
+}
+
+
 void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
   switch (elements_kind_) {
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
       KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
       break;
     case FAST_DOUBLE_ELEMENTS:
@@ -274,11 +293,17 @@
 void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
   switch (elements_kind_) {
     case FAST_ELEMENTS:
-      KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
+    case FAST_SMI_ONLY_ELEMENTS: {
+      KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
+                                                       is_js_array_,
+                                                       elements_kind_,
+                                                       grow_mode_);
+    }
       break;
     case FAST_DOUBLE_ELEMENTS:
       KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
-                                                             is_js_array_);
+                                                             is_js_array_,
+                                                             grow_mode_);
       break;
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@@ -302,24 +327,26 @@
 
 
 void ArgumentsAccessStub::PrintName(StringStream* stream) {
-  const char* type_name = NULL;  // Make g++ happy.
+  stream->Add("ArgumentsAccessStub_");
   switch (type_) {
-    case READ_ELEMENT: type_name = "ReadElement"; break;
-    case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break;
-    case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break;
-    case NEW_STRICT: type_name = "NewStrict"; break;
+    case READ_ELEMENT: stream->Add("ReadElement"); break;
+    case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break;
+    case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break;
+    case NEW_STRICT: stream->Add("NewStrict"); break;
   }
-  stream->Add("ArgumentsAccessStub_%s", type_name);
 }
 
 
 void CallFunctionStub::PrintName(StringStream* stream) {
-  const char* flags_name = NULL;  // Make g++ happy.
-  switch (flags_) {
-    case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break;
-    case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break;
-  }
-  stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name);
+  stream->Add("CallFunctionStub_Args%d", argc_);
+  if (ReceiverMightBeImplicit()) stream->Add("_Implicit");
+  if (RecordCallTarget()) stream->Add("_Recording");
+}
+
+
+void CallConstructStub::PrintName(StringStream* stream) {
+  stream->Add("CallConstructStub");
+  if (RecordCallTarget()) stream->Add("_Recording");
 }
 
 
@@ -402,4 +429,32 @@
 }
 
 
+void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
+  Label fail;
+  if (!FLAG_trace_elements_transitions) {
+    if (to_ == FAST_ELEMENTS) {
+      if (from_ == FAST_SMI_ONLY_ELEMENTS) {
+        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+      } else if (from_ == FAST_DOUBLE_ELEMENTS) {
+        ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+      } else {
+        UNREACHABLE();
+      }
+      KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
+                                                       is_jsarray_,
+                                                       FAST_ELEMENTS,
+                                                       grow_mode_);
+    } else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
+      ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+      KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
+                                                             is_jsarray_,
+                                                             grow_mode_);
+    } else {
+      UNREACHABLE();
+    }
+  }
+  masm->bind(&fail);
+  KeyedStoreIC::GenerateRuntimeSetProperty(masm, strict_mode_);
+}
+
 } }  // namespace v8::internal
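
GetCode() now has two cache tiers: stubs that opt in via UseSpecialCache() bypass the shared UnseededNumberDictionary entirely, and every freshly generated stub passes through the new Activate() hook. A minimal sketch of that control flow in isolation (all types and names below are illustrative, not V8's):

```cpp
#include <string>
#include <unordered_map>

struct Code { std::string body; };

class Stub {
 public:
  virtual ~Stub() {}

  Code* GetCode() {
    Code* code = nullptr;
    if (UseSpecialCache() ? FindCodeInSpecialCache(&code)
                          : FindCodeInCache(&code)) {
      return code;  // cache hit: nothing to generate
    }
    code = new Code();
    code->body = Generate();
    if (UseSpecialCache()) {
      AddToSpecialCache(code);            // stub-specific cache
    } else {
      shared_cache()[MinorKey()] = code;  // shared dictionary cache
    }
    Activate(code);  // post-registration hook, as in the patch
    return code;
  }

 protected:
  virtual std::string Generate() = 0;
  virtual int MinorKey() = 0;
  virtual bool UseSpecialCache() { return false; }
  virtual bool FindCodeInSpecialCache(Code**) { return false; }
  virtual void AddToSpecialCache(Code*) {}
  virtual void Activate(Code*) {}

  bool FindCodeInCache(Code** code_out) {
    auto it = shared_cache().find(MinorKey());
    if (it == shared_cache().end()) return false;
    *code_out = it->second;
    return true;
  }

 private:
  static std::unordered_map<int, Code*>& shared_cache() {
    static std::unordered_map<int, Code*> cache;
    return cache;
  }
};
```
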
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 64c89b9..b67e961 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 
 #include "allocation.h"
 #include "globals.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -37,6 +38,7 @@
 // List of code stubs used on all platforms.
 #define CODE_STUB_LIST_ALL_PLATFORMS(V)  \
   V(CallFunction)                        \
+  V(CallConstruct)                       \
   V(UnaryOp)                             \
   V(BinaryOp)                            \
   V(StringAdd)                           \
@@ -45,27 +47,23 @@
   V(Compare)                             \
   V(CompareIC)                           \
   V(MathPow)                             \
+  V(RecordWrite)                         \
+  V(StoreBufferOverflow)                 \
+  V(RegExpExec)                          \
   V(TranscendentalCache)                 \
   V(Instanceof)                          \
-  /* All stubs above this line only exist in a few versions, which are  */  \
-  /* generated ahead of time.  Therefore compiling a call to one of     */  \
-  /* them can't cause a new stub to be compiled, so compiling a call to */  \
-  /* them is GC safe.  The ones below this line exist in many variants  */  \
-  /* so code compiling a call to one can cause a GC.  This means they   */  \
-  /* can't be called from other stubs, since stub generation code is    */  \
-  /* not GC safe.                                                       */  \
   V(ConvertToDouble)                     \
   V(WriteInt32ToHeapNumber)              \
   V(StackCheck)                          \
+  V(Interrupt)                           \
   V(FastNewClosure)                      \
   V(FastNewContext)                      \
+  V(FastNewBlockContext)                 \
   V(FastCloneShallowArray)               \
-  V(RevertToNumber)                      \
+  V(FastCloneShallowObject)              \
   V(ToBoolean)                           \
   V(ToNumber)                            \
-  V(CounterOp)                           \
   V(ArgumentsAccess)                     \
-  V(RegExpExec)                          \
   V(RegExpConstructResult)               \
   V(NumberToString)                      \
   V(CEntry)                              \
@@ -73,7 +71,9 @@
   V(KeyedLoadElement)                    \
   V(KeyedStoreElement)                   \
   V(DebuggerStatement)                   \
-  V(StringDictionaryNegativeLookup)
+  V(StringDictionaryLookup)              \
+  V(ElementsTransitionAndStore)          \
+  V(StoreArrayLiteralElement)
 
 // List of code stubs only used on ARM platforms.
 #ifdef V8_TARGET_ARCH_ARM
@@ -121,11 +121,6 @@
   // Retrieve the code for the stub. Generate the code if needed.
   Handle<Code> GetCode();
 
-  // Retrieve the code for the stub if already generated.  Do not
-  // generate the code if not already generated and instead return a
-  // retry after GC Failure object.
-  MUST_USE_RESULT MaybeObject* TryGetCode();
-
   static Major MajorKeyFromKey(uint32_t key) {
     return static_cast<Major>(MajorKeyBits::decode(key));
   }
@@ -142,14 +137,35 @@
 
   virtual ~CodeStub() {}
 
+  bool CompilingCallsToThisStubIsGCSafe() {
+    bool is_pregenerated = IsPregenerated();
+    Code* code = NULL;
+    CHECK(!is_pregenerated || FindCodeInCache(&code));
+    return is_pregenerated;
+  }
+
+  // See comment above, where Instanceof is defined.
+  virtual bool IsPregenerated() { return false; }
+
+  static void GenerateStubsAheadOfTime();
+  static void GenerateFPStubs();
+
+  // Some stubs put untagged junk on the stack that cannot be scanned by the
+  // GC.  This means that we must be statically sure that no GC can occur while
+  // they are running.  If that is the case they should override this to return
+  // false, which will cause an assertion if we try to call something that can
+  // GC or if we try to put a stack frame on top of the junk, which would not
+  // result in a traversable stack.
+  virtual bool SometimesSetsUpAFrame() { return true; }
+
+  // Lookup the code in the (possibly custom) cache.
+  bool FindCodeInCache(Code** code_out);
+
  protected:
   static const int kMajorBits = 6;
   static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
 
  private:
-  // Lookup the code in the (possibly custom) cache.
-  bool FindCodeInCache(Code** code_out);
-
   // Nonvirtual wrapper around the stub-specific Generate function.  Call
   // this function to set up the macro assembler and generate the code.
   void GenerateCode(MacroAssembler* masm);
@@ -162,7 +178,11 @@
   void RecordCodeGeneration(Code* code, MacroAssembler* masm);
 
   // Finish the code object after it has been generated.
-  virtual void FinishCode(Code* code) { }
+  virtual void FinishCode(Handle<Code> code) { }
+
+  // Activate newly generated stub. Is called after
+  // registering stub in the stub cache.
+  virtual void Activate(Code* code) { }
 
   // Returns information for computing the number key.
   virtual Major MajorKey() = 0;
@@ -176,11 +196,20 @@
     return UNINITIALIZED;
   }
 
+  // Add the code to a specialized cache, specific to an individual
+  // stub type. Please note, this method must add the code object to a
+  // roots object, otherwise we will remove the code during GC.
+  virtual void AddToSpecialCache(Handle<Code> new_object) { }
+
+  // Find code in a specialized cache, work is delegated to the specific stub.
+  virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }
+
+  // If a stub uses a special cache override this.
+  virtual bool UseSpecialCache() { return false; }
+
   // Returns a name for logging/debugging purposes.
   SmartArrayPointer<const char> GetName();
-  virtual void PrintName(StringStream* stream) {
-    stream->Add("%s", MajorName(MajorKey(), false));
-  }
+  virtual void PrintName(StringStream* stream);
 
   // Returns whether the code generated for this stub needs to be allocated as
   // a fixed (non-moveable) code object.
@@ -193,9 +222,6 @@
            MajorKeyBits::encode(MajorKey());
   }
 
-  // See comment above, where Instanceof is defined.
-  bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
-
   class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
   class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
 
@@ -272,6 +298,18 @@
 };
 
 
+class InterruptStub : public CodeStub {
+ public:
+  InterruptStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return Interrupt; }
+  int MinorKey() { return 0; }
+};
+
+
 class ToNumberStub: public CodeStub {
  public:
   ToNumberStub() { }
@@ -286,16 +324,17 @@
 
 class FastNewClosureStub : public CodeStub {
  public:
-  explicit FastNewClosureStub(StrictModeFlag strict_mode)
-    : strict_mode_(strict_mode) { }
+  explicit FastNewClosureStub(LanguageMode language_mode)
+    : language_mode_(language_mode) { }
 
   void Generate(MacroAssembler* masm);
 
  private:
   Major MajorKey() { return FastNewClosure; }
-  int MinorKey() { return strict_mode_; }
+  int MinorKey() { return language_mode_ == CLASSIC_MODE
+        ? kNonStrictMode : kStrictMode; }
 
-  StrictModeFlag strict_mode_;
+  LanguageMode language_mode_;
 };
 
 
@@ -304,7 +343,7 @@
   static const int kMaximumSlots = 64;
 
   explicit FastNewContextStub(int slots) : slots_(slots) {
-    ASSERT(slots_ > 0 && slots <= kMaximumSlots);
+    ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
   }
 
   void Generate(MacroAssembler* masm);
@@ -317,6 +356,24 @@
 };
 
 
+class FastNewBlockContextStub : public CodeStub {
+ public:
+  static const int kMaximumSlots = 64;
+
+  explicit FastNewBlockContextStub(int slots) : slots_(slots) {
+    ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  int slots_;
+
+  Major MajorKey() { return FastNewBlockContext; }
+  int MinorKey() { return slots_; }
+};
+
+
 class FastCloneShallowArrayStub : public CodeStub {
  public:
   // Maximum length of copied elements array.
@@ -324,14 +381,16 @@
 
   enum Mode {
     CLONE_ELEMENTS,
-    COPY_ON_WRITE_ELEMENTS
+    CLONE_DOUBLE_ELEMENTS,
+    COPY_ON_WRITE_ELEMENTS,
+    CLONE_ANY_ELEMENTS
   };
 
   FastCloneShallowArrayStub(Mode mode, int length)
       : mode_(mode),
         length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
-    ASSERT(length_ >= 0);
-    ASSERT(length_ <= kMaximumClonedLength);
+    ASSERT_GE(length_, 0);
+    ASSERT_LE(length_, kMaximumClonedLength);
   }
 
   void Generate(MacroAssembler* masm);
@@ -342,12 +401,32 @@
 
   Major MajorKey() { return FastCloneShallowArray; }
   int MinorKey() {
-    ASSERT(mode_ == 0 || mode_ == 1);
-    return (length_ << 1) | mode_;
+    ASSERT(mode_ == 0 || mode_ == 1 || mode_ == 2 || mode_ == 3);
+    return length_ * 4 + mode_;
   }
 };
 
 
+class FastCloneShallowObjectStub : public CodeStub {
+ public:
+  // Maximum number of properties in copied object.
+  static const int kMaximumClonedProperties = 6;
+
+  explicit FastCloneShallowObjectStub(int length) : length_(length) {
+    ASSERT_GE(length_, 0);
+    ASSERT_LE(length_, kMaximumClonedProperties);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  int length_;
+
+  Major MajorKey() { return FastCloneShallowObject; }
+  int MinorKey() { return length_; }
+};
+
+
 class InstanceofStub: public CodeStub {
  public:
   enum Flags {
@@ -388,12 +467,17 @@
 
 class MathPowStub: public CodeStub {
  public:
-  MathPowStub() {}
+  enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK };
+
+  explicit MathPowStub(ExponentType exponent_type)
+      : exponent_type_(exponent_type) { }
   virtual void Generate(MacroAssembler* masm);
 
  private:
   virtual CodeStub::Major MajorKey() { return MathPow; }
-  virtual int MinorKey() { return 0; }
+  virtual int MinorKey() { return exponent_type_; }
+
+  ExponentType exponent_type_;
 };
 
 
@@ -406,11 +490,15 @@
 
   virtual void Generate(MacroAssembler* masm);
 
+  void set_known_map(Handle<Map> map) { known_map_ = map; }
+
  private:
   class OpField: public BitField<int, 0, 3> { };
   class StateField: public BitField<int, 3, 5> { };
 
-  virtual void FinishCode(Code* code) { code->set_compare_state(state_); }
+  virtual void FinishCode(Handle<Code> code) {
+    code->set_compare_state(state_);
+  }
 
   virtual CodeStub::Major MajorKey() { return CompareIC; }
   virtual int MinorKey();
@@ -423,12 +511,18 @@
   void GenerateStrings(MacroAssembler* masm);
   void GenerateObjects(MacroAssembler* masm);
   void GenerateMiss(MacroAssembler* masm);
+  void GenerateKnownObjects(MacroAssembler* masm);
 
   bool strict() const { return op_ == Token::EQ_STRICT; }
   Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
 
+  virtual void AddToSpecialCache(Handle<Code> new_object);
+  virtual bool FindCodeInSpecialCache(Code** code_out);
+  virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }
+
   Token::Value op_;
   CompareIC::State state_;
+  Handle<Map> known_map_;
 };
 
 
@@ -513,7 +607,7 @@
   int MinorKey();
 
   virtual int GetCodeKind() { return Code::COMPARE_IC; }
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_compare_state(CompareIC::GENERIC);
   }
 
@@ -531,11 +625,18 @@
 
 class CEntryStub : public CodeStub {
  public:
-  explicit CEntryStub(int result_size)
-      : result_size_(result_size), save_doubles_(false) { }
+  explicit CEntryStub(int result_size,
+                      SaveFPRegsMode save_doubles = kDontSaveFPRegs)
+      : result_size_(result_size), save_doubles_(save_doubles) { }
 
   void Generate(MacroAssembler* masm);
-  void SaveDoubles() { save_doubles_ = true; }
+
+  // The version of this stub that doesn't save doubles is generated ahead of
+  // time, so it's OK to call it from other stubs that can't cope with GC during
+  // their code generation.  On machines that always have fp registers (x64) we
+  // can generate both variants ahead of time.
+  virtual bool IsPregenerated();
+  static void GenerateAheadOfTime();
 
  private:
   void GenerateCore(MacroAssembler* masm,
@@ -544,13 +645,10 @@
                     Label* throw_out_of_memory_exception,
                     bool do_gc,
                     bool always_allocate_scope);
-  void GenerateThrowTOS(MacroAssembler* masm);
-  void GenerateThrowUncatchable(MacroAssembler* masm,
-                                UncatchableExceptionType type);
 
   // Number of pointers/values returned.
   const int result_size_;
-  bool save_doubles_;
+  SaveFPRegsMode save_doubles_;
 
   Major MajorKey() { return CEntry; }
   int MinorKey();
@@ -571,6 +669,10 @@
  private:
   Major MajorKey() { return JSEntry; }
   int MinorKey() { return 0; }
+
+  virtual void FinishCode(Handle<Code> code);
+
+  int handler_offset_;
 };
 
 
@@ -647,6 +749,10 @@
 
   void Generate(MacroAssembler* masm);
 
+  virtual void FinishCode(Handle<Code> code) {
+    code->set_has_function_cache(RecordCallTarget());
+  }
+
   static int ExtractArgcFromMinorKey(int minor_key) {
     return ArgcBits::decode(minor_key);
   }
@@ -658,8 +764,8 @@
   virtual void PrintName(StringStream* stream);
 
   // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
-  class FlagBits: public BitField<CallFunctionFlags, 0, 1> {};
-  class ArgcBits: public BitField<unsigned, 1, 32 - 1> {};
+  class FlagBits: public BitField<CallFunctionFlags, 0, 2> {};
+  class ArgcBits: public BitField<unsigned, 2, 32 - 2> {};
 
   Major MajorKey() { return CallFunction; }
   int MinorKey() {
@@ -670,6 +776,34 @@
   bool ReceiverMightBeImplicit() {
     return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
   }
+
+  bool RecordCallTarget() {
+    return (flags_ & RECORD_CALL_TARGET) != 0;
+  }
+};
+
+
+class CallConstructStub: public CodeStub {
+ public:
+  explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
+
+  void Generate(MacroAssembler* masm);
+
+  virtual void FinishCode(Handle<Code> code) {
+    code->set_has_function_cache(RecordCallTarget());
+  }
+
+ private:
+  CallFunctionFlags flags_;
+
+  virtual void PrintName(StringStream* stream);
+
+  Major MajorKey() { return CallConstruct; }
+  int MinorKey() { return flags_; }
+
+  bool RecordCallTarget() {
+    return (flags_ & RECORD_CALL_TARGET) != 0;
+  }
 };
 
 
@@ -698,7 +832,6 @@
  public:
   StringCharCodeAtGenerator(Register object,
                             Register index,
-                            Register scratch,
                             Register result,
                             Label* receiver_not_string,
                             Label* index_not_number,
@@ -706,15 +839,11 @@
                             StringIndexFlags index_flags)
       : object_(object),
         index_(index),
-        scratch_(scratch),
         result_(result),
         receiver_not_string_(receiver_not_string),
         index_not_number_(index_not_number),
         index_out_of_range_(index_out_of_range),
         index_flags_(index_flags) {
-    ASSERT(!scratch_.is(object_));
-    ASSERT(!scratch_.is(index_));
-    ASSERT(!scratch_.is(result_));
     ASSERT(!result_.is(object_));
     ASSERT(!result_.is(index_));
   }
@@ -732,7 +861,6 @@
  private:
   Register object_;
   Register index_;
-  Register scratch_;
   Register result_;
 
   Label* receiver_not_string_;
@@ -795,8 +923,7 @@
  public:
   StringCharAtGenerator(Register object,
                         Register index,
-                        Register scratch1,
-                        Register scratch2,
+                        Register scratch,
                         Register result,
                         Label* receiver_not_string,
                         Label* index_not_number,
@@ -804,13 +931,12 @@
                         StringIndexFlags index_flags)
       : char_code_at_generator_(object,
                                 index,
-                                scratch1,
-                                scratch2,
+                                scratch,
                                 receiver_not_string,
                                 index_not_number,
                                 index_out_of_range,
                                 index_flags),
-        char_from_code_generator_(scratch2, result) {}
+        char_from_code_generator_(scratch, result) {}
 
   // Generates the fast case code. On the fallthrough path |result|
   // register contains the result.
@@ -869,20 +995,29 @@
 class KeyedStoreElementStub : public CodeStub {
  public:
   KeyedStoreElementStub(bool is_js_array,
-                        ElementsKind elements_kind)
-    : is_js_array_(is_js_array),
-    elements_kind_(elements_kind) { }
+                        ElementsKind elements_kind,
+                        KeyedAccessGrowMode grow_mode)
+      : is_js_array_(is_js_array),
+        elements_kind_(elements_kind),
+        grow_mode_(grow_mode) { }
 
   Major MajorKey() { return KeyedStoreElement; }
   int MinorKey() {
-    return (is_js_array_ ? 0 : kElementsKindCount) + elements_kind_;
+    return ElementsKindBits::encode(elements_kind_) |
+        IsJSArrayBits::encode(is_js_array_) |
+        GrowModeBits::encode(grow_mode_);
   }
 
   void Generate(MacroAssembler* masm);
 
  private:
+  class ElementsKindBits: public BitField<ElementsKind,    0, 8> {};
+  class GrowModeBits: public BitField<KeyedAccessGrowMode, 8, 1> {};
+  class IsJSArrayBits: public BitField<bool,               9, 1> {};
+
   bool is_js_array_;
   ElementsKind elements_kind_;
+  KeyedAccessGrowMode grow_mode_;
 
   DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
 };
@@ -934,11 +1069,13 @@
   virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
   virtual void PrintName(StringStream* stream);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   Major MajorKey() { return ToBoolean; }
   int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_to_boolean_state(types_.ToByte());
   }
 
@@ -952,6 +1089,61 @@
   Types types_;
 };
 
+
+class ElementsTransitionAndStoreStub : public CodeStub {
+ public:
+  ElementsTransitionAndStoreStub(ElementsKind from,
+                                 ElementsKind to,
+                                 bool is_jsarray,
+                                 StrictModeFlag strict_mode,
+                                 KeyedAccessGrowMode grow_mode)
+      : from_(from),
+        to_(to),
+        is_jsarray_(is_jsarray),
+        strict_mode_(strict_mode),
+        grow_mode_(grow_mode) {}
+
+ private:
+  class FromBits:       public BitField<ElementsKind,      0, 8> {};
+  class ToBits:         public BitField<ElementsKind,      8, 8> {};
+  class IsJSArrayBits:  public BitField<bool,              16, 1> {};
+  class StrictModeBits: public BitField<StrictModeFlag,    17, 1> {};
+  class GrowModeBits: public BitField<KeyedAccessGrowMode, 18, 1> {};
+
+  Major MajorKey() { return ElementsTransitionAndStore; }
+  int MinorKey() {
+    return FromBits::encode(from_) |
+        ToBits::encode(to_) |
+        IsJSArrayBits::encode(is_jsarray_) |
+        StrictModeBits::encode(strict_mode_) |
+        GrowModeBits::encode(grow_mode_);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  ElementsKind from_;
+  ElementsKind to_;
+  bool is_jsarray_;
+  StrictModeFlag strict_mode_;
+  KeyedAccessGrowMode grow_mode_;
+
+  DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
+};
+
+
+class StoreArrayLiteralElementStub : public CodeStub {
+ public:
+  explicit StoreArrayLiteralElementStub() {}
+
+ private:
+  Major MajorKey() { return StoreArrayLiteralElement; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_CODE_STUBS_H_
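
Several of the new MinorKey() implementations above pack multiple fields into one integer with BitField<Type, shift, size> helpers. V8 defines BitField elsewhere; the sketch below reimplements just enough of it to show the encode/decode round trip (the two enums are placeholders, not V8's real declarations):

```cpp
#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
class BitField {
 public:
  static uint32_t encode(T value) {
    // The real helper also asserts that 'value' fits in 'size' bits.
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed >> shift) & ((1u << size) - 1));
  }
};

enum ElementsKind { FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };  // placeholders
enum KeyedAccessGrowMode { DO_NOT_GROW, ALLOW_GROWTH };     // placeholders

class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
class GrowModeBits : public BitField<KeyedAccessGrowMode, 8, 1> {};
class IsJSArrayBits : public BitField<bool, 9, 1> {};

int main() {
  uint32_t key = ElementsKindBits::encode(FAST_DOUBLE_ELEMENTS) |
                 GrowModeBits::encode(ALLOW_GROWTH) |
                 IsJSArrayBits::encode(true);
  assert(ElementsKindBits::decode(key) == FAST_DOUBLE_ELEMENTS);
  assert(GrowModeBits::decode(key) == ALLOW_GROWTH);
  assert(IsJSArrayBits::decode(key));
  return 0;
}
```
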
diff --git a/src/codegen.cc b/src/codegen.cc
index cdc9ba1..0163580 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -62,28 +62,15 @@
 #ifdef DEBUG
   bool print_source = false;
   bool print_ast = false;
-  bool print_json_ast = false;
   const char* ftype;
 
   if (Isolate::Current()->bootstrapper()->IsActive()) {
     print_source = FLAG_print_builtin_source;
     print_ast = FLAG_print_builtin_ast;
-    print_json_ast = FLAG_print_builtin_json_ast;
     ftype = "builtin";
   } else {
     print_source = FLAG_print_source;
     print_ast = FLAG_print_ast;
-    print_json_ast = FLAG_print_json_ast;
-    Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
-    if (print_source && !filter.is_empty()) {
-      print_source = info->function()->name()->IsEqualTo(filter);
-    }
-    if (print_ast && !filter.is_empty()) {
-      print_ast = info->function()->name()->IsEqualTo(filter);
-    }
-    if (print_json_ast && !filter.is_empty()) {
-      print_json_ast = info->function()->name()->IsEqualTo(filter);
-    }
     ftype = "user-defined";
   }
 
@@ -102,11 +89,6 @@
     PrintF("--- AST ---\n%s\n",
            AstPrinter().PrintProgram(info->function()));
   }
-
-  if (print_json_ast) {
-    JsonAstBuilder builder;
-    PrintF("%s", builder.BuildProgram(info->function()));
-  }
 #endif  // DEBUG
 }
 
@@ -135,11 +117,9 @@
   bool print_code = Isolate::Current()->bootstrapper()->IsActive()
       ? FLAG_print_builtin_code
       : (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
-  Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
-  FunctionLiteral* function = info->function();
-  bool match = filter.is_empty() || function->debug_name()->IsEqualTo(filter);
-  if (print_code && match) {
+  if (print_code) {
     // Print the source code if available.
+    FunctionLiteral* function = info->function();
     Handle<Script> script = info->script();
     if (!script->IsUndefined() && !script->source()->IsUndefined()) {
       PrintF("--- Raw source ---\n");
@@ -218,8 +198,8 @@
 
 
 int CEntryStub::MinorKey() {
+  int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0;
   ASSERT(result_size_ == 1 || result_size_ == 2);
-  int result = save_doubles_ ? 1 : 0;
 #ifdef _WIN64
   return result | ((result_size_ == 1) ? 0 : 2);
 #else
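
Since save_doubles_ is now a SaveFPRegsMode enum rather than a bool, the minor key derives its low bit from an explicit comparison. The resulting layout: bit 0 records whether FP registers are saved, and on Win64 bit 1 records whether the result is one or two pointers wide. A standalone restatement of that encoding:

```cpp
enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

// Bit 0: whether FP registers are saved; bit 1 (Win64 only): result size.
int CEntryMinorKey(SaveFPRegsMode save_doubles, int result_size) {
  int result = (save_doubles == kSaveFPRegs) ? 1 : 0;
#ifdef _WIN64
  return result | ((result_size == 1) ? 0 : 2);
#else
  (void)result_size;  // result size is not encoded on other platforms
  return result;
#endif
}
```
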
diff --git a/src/codegen.h b/src/codegen.h
index e551abf..50d70f2 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -81,4 +81,28 @@
 #error Unsupported target architecture.
 #endif
 
+namespace v8 {
+namespace internal {
+
+// Results of the library implementation of transcendental functions may differ
+// from the ones produced by our generated code.  Therefore we use the same
+// generated code both in the runtime and in compiled code.
+typedef double (*UnaryMathFunction)(double x);
+
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
+UnaryMathFunction CreateSqrtFunction();
+
+
+class ElementsTransitionGenerator : public AllStatic {
+ public:
+  static void GenerateSmiOnlyToObject(MacroAssembler* masm);
+  static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
+  static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
+};
+
+} }  // namespace v8::internal
+
 #endif  // V8_CODEGEN_H_
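
Because library math routines can produce subtly different results from V8's generated code, the UnaryMathFunction typedef lets both the runtime and compiled code call the exact same routine through a plain function pointer. A hedged usage sketch, with a library fallback standing in for the generated code:

```cpp
#include <cmath>

typedef double (*UnaryMathFunction)(double x);

static double SqrtFallback(double x) { return std::sqrt(x); }

// Stand-in for the real CreateSqrtFunction(), which would return freshly
// generated machine code when the platform supports it.
UnaryMathFunction CreateSqrtFunctionSketch() { return &SqrtFallback; }

double HypotSketch(double a, double b) {
  static UnaryMathFunction fast_sqrt = CreateSqrtFunctionSketch();
  return fast_sqrt(a * a + b * b);  // same routine the compiled code calls
}
```
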
diff --git a/src/collection.js b/src/collection.js
new file mode 100644
index 0000000..75fe3d5
--- /dev/null
+++ b/src/collection.js
@@ -0,0 +1,250 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+var $Set = global.Set;
+var $Map = global.Map;
+var $WeakMap = global.WeakMap;
+
+//-------------------------------------------------------------------
+
+// Global sentinel to be used instead of undefined keys, which are not
+// supported internally but required for Harmony sets and maps.
+var undefined_sentinel = {};
+
+
+function SetConstructor() {
+  if (%_IsConstructCall()) {
+    %SetInitialize(this);
+  } else {
+    return new $Set();
+  }
+}
+
+
+function SetAdd(key) {
+  if (!IS_SET(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Set.prototype.add', this]);
+  }
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  return %SetAdd(this, key);
+}
+
+
+function SetHas(key) {
+  if (!IS_SET(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Set.prototype.has', this]);
+  }
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  return %SetHas(this, key);
+}
+
+
+function SetDelete(key) {
+  if (!IS_SET(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Set.prototype.delete', this]);
+  }
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  return %SetDelete(this, key);
+}
+
+
+function MapConstructor() {
+  if (%_IsConstructCall()) {
+    %MapInitialize(this);
+  } else {
+    return new $Map();
+  }
+}
+
+
+function MapGet(key) {
+  if (!IS_MAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Map.prototype.get', this]);
+  }
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  return %MapGet(this, key);
+}
+
+
+function MapSet(key, value) {
+  if (!IS_MAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Map.prototype.set', this]);
+  }
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  return %MapSet(this, key, value);
+}
+
+
+function MapHas(key) {
+  if (!IS_MAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Map.prototype.has', this]);
+  }
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  return !IS_UNDEFINED(%MapGet(this, key));
+}
+
+
+function MapDelete(key) {
+  if (!IS_MAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Map.prototype.delete', this]);
+  }
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  if (!IS_UNDEFINED(%MapGet(this, key))) {
+    %MapSet(this, key, void 0);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+
+function WeakMapConstructor() {
+  if (%_IsConstructCall()) {
+    %WeakMapInitialize(this);
+  } else {
+    return new $WeakMap();
+  }
+}
+
+
+function WeakMapGet(key) {
+  if (!IS_WEAKMAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakMap.prototype.get', this]);
+  }
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return %WeakMapGet(this, key);
+}
+
+
+function WeakMapSet(key, value) {
+  if (!IS_WEAKMAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakMap.prototype.set', this]);
+  }
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return %WeakMapSet(this, key, value);
+}
+
+
+function WeakMapHas(key) {
+  if (!IS_WEAKMAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakMap.prototype.has', this]);
+  }
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return !IS_UNDEFINED(%WeakMapGet(this, key));
+}
+
+
+function WeakMapDelete(key) {
+  if (!IS_WEAKMAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakMap.prototype.delete', this]);
+  }
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  if (!IS_UNDEFINED(%WeakMapGet(this, key))) {
+    %WeakMapSet(this, key, void 0);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// -------------------------------------------------------------------
+
+(function () {
+  %CheckIsBootstrapping();
+
+  // Set up the Set and Map constructor function.
+  %SetCode($Set, SetConstructor);
+  %SetCode($Map, MapConstructor);
+
+  // Set up the constructor property on the Set and Map prototype object.
+  %SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
+  %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
+
+  // Set up the non-enumerable functions on the Set prototype object.
+  InstallFunctions($Set.prototype, DONT_ENUM, $Array(
+    "add", SetAdd,
+    "has", SetHas,
+    "delete", SetDelete
+  ));
+
+  // Set up the non-enumerable functions on the Map prototype object.
+  InstallFunctions($Map.prototype, DONT_ENUM, $Array(
+    "get", MapGet,
+    "set", MapSet,
+    "has", MapHas,
+    "delete", MapDelete
+  ));
+
+  // Set up the WeakMap constructor function.
+  %SetCode($WeakMap, WeakMapConstructor);
+
+  // Set up the constructor property on the WeakMap prototype object.
+  %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
+
+  // Set up the non-enumerable functions on the WeakMap prototype object.
+  InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
+    "get", WeakMapGet,
+    "set", WeakMapSet,
+    "has", WeakMapHas,
+    "delete", WeakMapDelete
+  ));
+})();
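
collection.js swaps every undefined key for a private sentinel object because the backing hash tables cannot store undefined directly; since the sentinel never escapes, the substitution is invisible to callers. The same pattern expressed in C++ terms, purely as an illustration (here the empty string plays the role of the unsupported key):

```cpp
#include <cassert>
#include <string>
#include <unordered_map>

// Every public entry point swaps the unsupported key for a reserved
// sentinel first, so add/has/delete all stay coherent with each other.
class SetLikeStore {
 public:
  void Add(const std::string& key) { table_[Normalize(key)] = true; }
  bool Has(const std::string& key) const {
    return table_.count(Normalize(key)) != 0;
  }
  bool Delete(const std::string& key) {
    return table_.erase(Normalize(key)) != 0;
  }

 private:
  static std::string Normalize(const std::string& key) {
    static const std::string kUndefinedSentinel = "\x01<undefined>";
    return key.empty() ? kUndefinedSentinel : key;
  }
  std::unordered_map<std::string, bool> table_;
};

int main() {
  SetLikeStore s;
  s.Add("");          // stored under the sentinel, like %SetAdd
  assert(s.Has(""));  // the swap happens on every entry point
  assert(s.Delete(""));
  return 0;
}
```
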
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 28e833a..82cc223 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,6 +27,7 @@
 
 #include "v8.h"
 
+#include "assembler.h"
 #include "compilation-cache.h"
 #include "serialize.h"
 
@@ -250,7 +251,8 @@
 Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
     Handle<String> source,
     Handle<Context> context,
-    StrictModeFlag strict_mode) {
+    LanguageMode language_mode,
+    int scope_position) {
   // Make sure not to leak the table into the surrounding handle
   // scope. Otherwise, we risk keeping old tables around even after
   // having cleared the cache.
@@ -259,7 +261,8 @@
   { HandleScope scope(isolate());
     for (generation = 0; generation < generations(); generation++) {
       Handle<CompilationCacheTable> table = GetTable(generation);
-      result = table->LookupEval(*source, *context, strict_mode);
+      result = table->LookupEval(
+          *source, *context, language_mode, scope_position);
       if (result->IsSharedFunctionInfo()) {
         break;
       }
@@ -269,7 +272,7 @@
     Handle<SharedFunctionInfo>
         function_info(SharedFunctionInfo::cast(result), isolate());
     if (generation != 0) {
-      Put(source, context, function_info);
+      Put(source, context, function_info, scope_position);
     }
     isolate()->counters()->compilation_cache_hits()->Increment();
     return function_info;
@@ -283,27 +286,31 @@
 MaybeObject* CompilationCacheEval::TryTablePut(
     Handle<String> source,
     Handle<Context> context,
-    Handle<SharedFunctionInfo> function_info) {
+    Handle<SharedFunctionInfo> function_info,
+    int scope_position) {
   Handle<CompilationCacheTable> table = GetFirstTable();
-  return table->PutEval(*source, *context, *function_info);
+  return table->PutEval(*source, *context, *function_info, scope_position);
 }
 
 
 Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
     Handle<String> source,
     Handle<Context> context,
-    Handle<SharedFunctionInfo> function_info) {
+    Handle<SharedFunctionInfo> function_info,
+    int scope_position) {
   CALL_HEAP_FUNCTION(isolate(),
-                     TryTablePut(source, context, function_info),
+                     TryTablePut(
+                         source, context, function_info, scope_position),
                      CompilationCacheTable);
 }
 
 
 void CompilationCacheEval::Put(Handle<String> source,
                                Handle<Context> context,
-                               Handle<SharedFunctionInfo> function_info) {
+                               Handle<SharedFunctionInfo> function_info,
+                               int scope_position) {
   HandleScope scope(isolate());
-  SetFirstTable(TablePut(source, context, function_info));
+  SetFirstTable(TablePut(source, context, function_info, scope_position));
 }
 
 
@@ -389,16 +396,20 @@
     Handle<String> source,
     Handle<Context> context,
     bool is_global,
-    StrictModeFlag strict_mode) {
+    LanguageMode language_mode,
+    int scope_position) {
   if (!IsEnabled()) {
     return Handle<SharedFunctionInfo>::null();
   }
 
   Handle<SharedFunctionInfo> result;
   if (is_global) {
-    result = eval_global_.Lookup(source, context, strict_mode);
+    result = eval_global_.Lookup(
+        source, context, language_mode, scope_position);
   } else {
-    result = eval_contextual_.Lookup(source, context, strict_mode);
+    ASSERT(scope_position != RelocInfo::kNoPosition);
+    result = eval_contextual_.Lookup(
+        source, context, language_mode, scope_position);
   }
   return result;
 }
@@ -427,16 +438,18 @@
 void CompilationCache::PutEval(Handle<String> source,
                                Handle<Context> context,
                                bool is_global,
-                               Handle<SharedFunctionInfo> function_info) {
+                               Handle<SharedFunctionInfo> function_info,
+                               int scope_position) {
   if (!IsEnabled()) {
     return;
   }
 
   HandleScope scope(isolate());
   if (is_global) {
-    eval_global_.Put(source, context, function_info);
+    eval_global_.Put(source, context, function_info, scope_position);
   } else {
-    eval_contextual_.Put(source, context, function_info);
+    ASSERT(scope_position != RelocInfo::kNoPosition);
+    eval_contextual_.Put(source, context, function_info, scope_position);
   }
 }
 
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 4339d22..2f2fbad 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,8 +31,6 @@
 namespace v8 {
 namespace internal {
 
-class HashMap;
-
 // The compilation cache consists of several generational sub-caches which use
 // this class as a base class. A sub-cache contains a compilation cache table
 // for each generation of the sub-cache. Since the same source code string has
@@ -123,7 +121,19 @@
 };
 
 
-// Sub-cache for eval scripts.
+// Sub-cache for eval scripts. Two caches are used for eval: one for eval
+// calls in global contexts and one for eval calls in other contexts. The
+// cache considers the following pieces of information when checking for
+// matching entries:
+// 1. The source string.
+// 2. The shared function info of the calling function.
+// 3. Whether the source should be compiled as strict code or as non-strict
+//    code.
+//    Note: Currently there are clients of CompileEval that always compile
+//    non-strict code even if the calling function is a strict mode function.
+//    More specifically these are the CompileString, DebugEvaluate and
+//    DebugEvaluateGlobal runtime functions.
+// 4. The start position of the calling scope.
 class CompilationCacheEval: public CompilationSubCache {
  public:
   CompilationCacheEval(Isolate* isolate, int generations)
@@ -131,23 +141,27 @@
 
   Handle<SharedFunctionInfo> Lookup(Handle<String> source,
                                     Handle<Context> context,
-                                    StrictModeFlag strict_mode);
+                                    LanguageMode language_mode,
+                                    int scope_position);
 
   void Put(Handle<String> source,
            Handle<Context> context,
-           Handle<SharedFunctionInfo> function_info);
+           Handle<SharedFunctionInfo> function_info,
+           int scope_position);
 
  private:
   MUST_USE_RESULT MaybeObject* TryTablePut(
       Handle<String> source,
       Handle<Context> context,
-      Handle<SharedFunctionInfo> function_info);
+      Handle<SharedFunctionInfo> function_info,
+      int scope_position);
 
   // Note: Returns a new hash table if operation results in expansion.
   Handle<CompilationCacheTable> TablePut(
       Handle<String> source,
       Handle<Context> context,
-      Handle<SharedFunctionInfo> function_info);
+      Handle<SharedFunctionInfo> function_info,
+      int scope_position);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
 };
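A minimal sketch of the four-part cache key described in the comment above, assuming a simple composite hash; the struct and its fields are illustrative only, not V8's actual CompilationCacheTable machinery:

  #include <cstdint>

  // Hypothetical key mirroring the four pieces the eval cache matches on.
  struct EvalCacheKey {
    uint32_t source_hash;      // 1. hash of the eval'd source string
    uintptr_t caller_shared;   // 2. identity of the caller's SharedFunctionInfo
    uint32_t language_mode;    // 3. CLASSIC_MODE, STRICT_MODE or EXTENDED_MODE
    int32_t scope_position;    // 4. start position of the calling scope

    uint32_t Hash() const {
      uint32_t hash = source_hash;
      hash = hash * 31 + static_cast<uint32_t>(caller_shared);
      hash = hash * 31 + language_mode;
      hash = hash * 31 + static_cast<uint32_t>(scope_position);
      return hash;
    }
  };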
@@ -198,7 +212,8 @@
   Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
                                         Handle<Context> context,
                                         bool is_global,
-                                        StrictModeFlag strict_mode);
+                                        LanguageMode language_mode,
+                                        int scope_position);
 
   // Returns the regexp data associated with the given regexp if it
   // is in cache, otherwise an empty handle.
@@ -215,7 +230,8 @@
   void PutEval(Handle<String> source,
                Handle<Context> context,
                bool is_global,
-               Handle<SharedFunctionInfo> function_info);
+               Handle<SharedFunctionInfo> function_info,
+               int scope_position);
 
   // Associate the (source, flags) pair to the given regexp data.
   // This may overwrite an existing mapping.
diff --git a/src/compiler-intrinsics.h b/src/compiler-intrinsics.h
new file mode 100644
index 0000000..3b9c59e
--- /dev/null
+++ b/src/compiler-intrinsics.h
@@ -0,0 +1,77 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_COMPILER_INTRINSICS_H_
+#define V8_COMPILER_INTRINSICS_H_
+
+namespace v8 {
+namespace internal {
+
+class CompilerIntrinsics {
+ public:
+  // Returns number of zero bits preceding least significant 1 bit.
+  // Undefined for zero value.
+  INLINE(static int CountTrailingZeros(uint32_t value));
+
+  // Returns number of zero bits following most significant 1 bit.
+  // Undefined for zero value.
+  INLINE(static int CountLeadingZeros(uint32_t value));
+};
+
+#ifdef __GNUC__
+int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
+  return __builtin_ctz(value);
+}
+
+int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
+  return __builtin_clz(value);
+}
+
+#elif defined(_MSC_VER)
+
+#pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanReverse)
+
+int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
+  unsigned long result;  //NOLINT
+  _BitScanForward(&result, static_cast<long>(value));  //NOLINT
+  return static_cast<int>(result);
+}
+
+int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
+  unsigned long result;  //NOLINT
+  _BitScanReverse(&result, static_cast<long>(value));  //NOLINT
+  return 31 - static_cast<int>(result);
+}
+
+#else
+#error Unsupported compiler
+#endif
+
+} }  // namespace v8::internal
+
+#endif  // V8_COMPILER_INTRINSICS_H_
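A quick standalone check of the contract documented above (hypothetical test code, assuming compiler-intrinsics.h and its INLINE macro are available); recall that both functions are undefined for a zero argument:

  #include <cassert>

  int main() {
    // 0x8 == 0b1000: three trailing zeros, 28 leading zeros in a 32-bit word.
    assert(v8::internal::CompilerIntrinsics::CountTrailingZeros(0x8u) == 3);
    assert(v8::internal::CompilerIntrinsics::CountLeadingZeros(0x8u) == 28);
    return 0;
  }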
diff --git a/src/compiler.cc b/src/compiler.cc
index 5e1c4a9..2272337 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,6 +36,7 @@
 #include "full-codegen.h"
 #include "gdb-jit.h"
 #include "hydrogen.h"
+#include "isolate-inl.h"
 #include "lithium.h"
 #include "liveedit.h"
 #include "parser.h"
@@ -52,28 +53,29 @@
 
 CompilationInfo::CompilationInfo(Handle<Script> script)
     : isolate_(script->GetIsolate()),
-      flags_(0),
+      flags_(LanguageModeField::encode(CLASSIC_MODE)),
       function_(NULL),
       scope_(NULL),
+      global_scope_(NULL),
       script_(script),
       extension_(NULL),
       pre_parse_data_(NULL),
-      supports_deoptimization_(false),
       osr_ast_id_(AstNode::kNoNumber) {
-  Initialize(NONOPT);
+  Initialize(BASE);
 }
 
 
 CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
     : isolate_(shared_info->GetIsolate()),
-      flags_(IsLazy::encode(true)),
+      flags_(LanguageModeField::encode(CLASSIC_MODE) |
+             IsLazy::encode(true)),
       function_(NULL),
       scope_(NULL),
+      global_scope_(NULL),
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
       extension_(NULL),
       pre_parse_data_(NULL),
-      supports_deoptimization_(false),
       osr_ast_id_(AstNode::kNoNumber) {
   Initialize(BASE);
 }
@@ -81,15 +83,16 @@
 
 CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
     : isolate_(closure->GetIsolate()),
-      flags_(IsLazy::encode(true)),
+      flags_(LanguageModeField::encode(CLASSIC_MODE) |
+             IsLazy::encode(true)),
       function_(NULL),
       scope_(NULL),
+      global_scope_(NULL),
       closure_(closure),
       shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
       extension_(NULL),
       pre_parse_data_(NULL),
-      supports_deoptimization_(false),
       osr_ast_id_(AstNode::kNoNumber) {
   Initialize(BASE);
 }
@@ -107,6 +110,19 @@
 }
 
 
+// Primitive functions are unlikely to be picked up by the stack-walking
+// profiler, so they trigger their own optimization when they're called
+// for the Compiler::kCallsUntilPrimitiveOpt-th time.
+bool CompilationInfo::ShouldSelfOptimize() {
+  return FLAG_self_optimization &&
+      FLAG_crankshaft &&
+      !function()->flags()->Contains(kDontSelfOptimize) &&
+      !function()->flags()->Contains(kDontOptimize) &&
+      function()->scope()->allows_lazy_recompilation() &&
+      (shared_info().is_null() || !shared_info()->optimization_disabled());
+}
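A sketch of the counter mechanism this predicate enables, with hypothetical names; the actual threshold constant, Compiler::kCallsUntilPrimitiveOpt, is declared in compiler.h further down:

  // Illustrative only: a self-optimizing function decrements a per-function
  // counter on each call and requests optimization once it reaches zero.
  struct SelfOptCounter {
    int calls_left;
    explicit SelfOptCounter(int threshold) : calls_left(threshold) {}
    bool OnCall() { return --calls_left <= 0; }  // true => optimize now
  };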
+
+
 void CompilationInfo::AbortOptimization() {
   Handle<Code> code(shared_info()->code());
   SetCode(code);
@@ -167,7 +183,9 @@
 static bool MakeCrankshaftCode(CompilationInfo* info) {
   // Test if we can optimize this function when asked to. We can only
   // do this after the scopes are computed.
-  if (!info->AllowOptimize()) info->DisableOptimization();
+  if (!V8::UseCrankshaft()) {
+    info->DisableOptimization();
+  }
 
   // In case we are not optimizing simply return the code from
   // the full code generator.
@@ -187,7 +205,7 @@
   // Fall back to using the full code generator if it's not possible
   // to use the Hydrogen-based optimizing compiler. We already have
   // generated code for this from the shared function object.
-  if (AlwaysFullCompiler() || !FLAG_use_hydrogen) {
+  if (AlwaysFullCompiler()) {
     info->SetCode(code);
     return true;
   }
@@ -198,8 +216,7 @@
       FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
   if (info->shared_info()->opt_count() > kMaxOptCount) {
     info->AbortOptimization();
-    Handle<JSFunction> closure = info->closure();
-    info->shared_info()->DisableOptimization(*closure);
+    info->shared_info()->DisableOptimization();
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
     return true;
@@ -219,20 +236,22 @@
       (info->osr_ast_id() != AstNode::kNoNumber &&
        scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit)) {
     info->AbortOptimization();
-    Handle<JSFunction> closure = info->closure();
-    info->shared_info()->DisableOptimization(*closure);
+    info->shared_info()->DisableOptimization();
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
     return true;
   }
 
   // Take --hydrogen-filter into account.
-  Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
   Handle<String> name = info->function()->debug_name();
-  bool match = filter.is_empty() || name->IsEqualTo(filter);
-  if (!match) {
-    info->SetCode(code);
-    return true;
+  if (*FLAG_hydrogen_filter != '\0') {
+    Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
+    if ((filter[0] == '-'
+         && name->IsEqualTo(filter.SubVector(1, filter.length())))
+        || (filter[0] != '-' && !name->IsEqualTo(filter))) {
+      info->SetCode(code);
+      return true;
+    }
   }
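The rewritten filter accepts a leading '-' to exclude a single function instead of selecting one. A standalone restatement of the predicate, using plain C strings in place of Handle<String> and Vector<const char>:

  #include <cstring>

  // Returns true when --hydrogen-filter=filter says to skip optimizing the
  // function named 'name'; an empty filter skips nothing.
  static bool SkippedByHydrogenFilter(const char* filter, const char* name) {
    if (filter[0] == '\0') return false;
    if (filter[0] == '-') return strcmp(name, filter + 1) == 0;  // exclusion
    return strcmp(name, filter) != 0;                            // inclusion
  }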
 
   // Recompile the unoptimized version of the code if the current version
@@ -275,7 +294,7 @@
   }
 
   Handle<Context> global_context(info->closure()->context()->global_context());
-  TypeFeedbackOracle oracle(code, global_context);
+  TypeFeedbackOracle oracle(code, global_context, info->isolate());
   HGraphBuilder builder(info, &oracle);
   HPhase phase(HPhase::kTotal);
   HGraph* graph = builder.CreateGraph();
@@ -284,7 +303,7 @@
     return false;
   }
 
-  if (graph != NULL && FLAG_build_lithium) {
+  if (graph != NULL) {
     Handle<Code> optimized_code = graph->Compile(info);
     if (!optimized_code.is_null()) {
       info->SetCode(optimized_code);
@@ -298,8 +317,7 @@
   if (!builder.inline_bailout()) {
     // Mark the shared code as unoptimizable unless it was an inlined
     // function that bailed out.
-    Handle<JSFunction> closure = info->closure();
-    info->shared_info()->DisableOptimization(*closure);
+    info->shared_info()->DisableOptimization();
   }
   // True indicates the compilation pipeline is still going, not necessarily
   // that we optimized the code.
@@ -308,9 +326,9 @@
 
 
 static bool GenerateCode(CompilationInfo* info) {
-  return V8::UseCrankshaft() ?
-    MakeCrankshaftCode(info) :
-    FullCodeGenerator::MakeCode(info);
+  return info->IsCompilingForDebugging() || !V8::UseCrankshaft() ?
+      FullCodeGenerator::MakeCode(info) :
+      MakeCrankshaftCode(info);
 }
 
 
@@ -328,8 +346,7 @@
   // the compilation info is set if compilation succeeded.
   bool succeeded = MakeCode(info);
   if (!info->shared_info().is_null()) {
-    Handle<SerializedScopeInfo> scope_info =
-        SerializedScopeInfo::Create(info->scope());
+    Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
     info->shared_info()->set_scope_info(*scope_info);
   }
   return succeeded;
@@ -371,8 +388,14 @@
 
   // Only allow non-global compiles for eval.
   ASSERT(info->is_eval() || info->is_global());
-
-  if (!ParserApi::Parse(info)) return Handle<SharedFunctionInfo>::null();
+  ParsingFlags flags = kNoParsingFlags;
+  if (info->pre_parse_data() != NULL ||
+      String::cast(script->source())->length() > FLAG_min_preparse_length) {
+    flags = kAllowLazy;
+  }
+  if (!ParserApi::Parse(info, flags)) {
+    return Handle<SharedFunctionInfo>::null();
+  }
 
   // Measure how long it takes to do the compilation; only take the
   // rest of the function into account to avoid overlap with the
@@ -386,7 +409,7 @@
   FunctionLiteral* lit = info->function();
   LiveEditFunctionTracker live_edit_tracker(isolate, lit);
   if (!MakeCode(info)) {
-    isolate->StackOverflow();
+    if (!isolate->has_pending_exception()) isolate->StackOverflow();
     return Handle<SharedFunctionInfo>::null();
   }
 
@@ -397,7 +420,7 @@
           lit->name(),
           lit->materialized_literal_count(),
           info->code(),
-          SerializedScopeInfo::Create(info->scope()));
+          ScopeInfo::Create(info->scope()));
 
   ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
   Compiler::SetFunctionInfo(result, lit, true, script);
@@ -430,6 +453,9 @@
   // the instances of the function.
   SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
 
+  script->set_compilation_state(
+      Smi::FromInt(Script::COMPILATION_STATE_COMPILED));
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Notify debugger
   isolate->debugger()->OnAfterCompile(
@@ -447,7 +473,7 @@
                                              int line_offset,
                                              int column_offset,
                                              v8::Extension* extension,
-                                             ScriptDataImpl* input_pre_data,
+                                             ScriptDataImpl* pre_data,
                                              Handle<Object> script_data,
                                              NativesFlag natives) {
   Isolate* isolate = source->GetIsolate();
@@ -478,24 +504,6 @@
     // for small sources, odds are that there aren't many functions
     // that would be compiled lazily anyway, so we skip the preparse step
     // in that case too.
-    ScriptDataImpl* pre_data = input_pre_data;
-    bool harmony_block_scoping = natives != NATIVES_CODE &&
-                                 FLAG_harmony_block_scoping;
-    if (pre_data == NULL
-        && source_length >= FLAG_min_preparse_length) {
-      if (source->IsExternalTwoByteString()) {
-        ExternalTwoByteStringUC16CharacterStream stream(
-            Handle<ExternalTwoByteString>::cast(source), 0, source->length());
-        pre_data = ParserApi::PartialPreParse(&stream,
-                                              extension,
-                                              harmony_block_scoping);
-      } else {
-        GenericStringUC16CharacterStream stream(source, 0, source->length());
-        pre_data = ParserApi::PartialPreParse(&stream,
-                                              extension,
-                                              harmony_block_scoping);
-      }
-    }
 
     // Create a script object describing the script to be compiled.
     Handle<Script> script = FACTORY->NewScript(source);
@@ -516,15 +524,13 @@
     info.MarkAsGlobal();
     info.SetExtension(extension);
     info.SetPreParseData(pre_data);
+    if (FLAG_use_strict) {
+      info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
+    }
     result = MakeFunctionInfo(&info);
     if (extension == NULL && !result.is_null()) {
       compilation_cache->PutScript(source, result);
     }
-
-    // Get rid of the pre-parsing data (if necessary).
-    if (input_pre_data == NULL && pre_data != NULL) {
-      delete pre_data;
-    }
   }
 
   if (result.is_null()) isolate->ReportPendingMessages();
@@ -535,7 +541,8 @@
 Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
                                                  Handle<Context> context,
                                                  bool is_global,
-                                                 StrictModeFlag strict_mode) {
+                                                 LanguageMode language_mode,
+                                                 int scope_position) {
   Isolate* isolate = source->GetIsolate();
   int source_length = source->length();
   isolate->counters()->total_eval_size()->Increment(source_length);
@@ -551,7 +558,8 @@
   result = compilation_cache->LookupEval(source,
                                          context,
                                          is_global,
-                                         strict_mode);
+                                         language_mode,
+                                         scope_position);
 
   if (result.is_null()) {
     // Create a script object describing the script to be compiled.
@@ -559,16 +567,24 @@
     CompilationInfo info(script);
     info.MarkAsEval();
     if (is_global) info.MarkAsGlobal();
-    if (strict_mode == kStrictMode) info.MarkAsStrictMode();
+    info.SetLanguageMode(language_mode);
     info.SetCallingContext(context);
     result = MakeFunctionInfo(&info);
     if (!result.is_null()) {
-      CompilationCache* compilation_cache = isolate->compilation_cache();
-      // If caller is strict mode, the result must be strict as well,
-      // but not the other way around. Consider:
+      // Explicitly disable optimization for eval code. We're not yet prepared
+      // to handle eval-code in the optimizing compiler.
+      result->DisableOptimization();
+
+      // If caller is strict mode, the result must be in strict mode or
+      // extended mode as well, but not the other way around. Consider:
       // eval("'use strict'; ...");
-      ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
-      compilation_cache->PutEval(source, context, is_global, result);
+      ASSERT(language_mode != STRICT_MODE || !result->is_classic_mode());
+      // If caller is in extended mode, the result must also be in
+      // extended mode.
+      ASSERT(language_mode != EXTENDED_MODE ||
+             result->is_extended_mode());
+      compilation_cache->PutEval(
+          source, context, is_global, result, scope_position);
     }
   }
 
@@ -591,17 +607,16 @@
   isolate->counters()->total_compile_size()->Increment(compiled_size);
 
   // Generate the AST for the lazily compiled function.
-  if (ParserApi::Parse(info)) {
+  if (ParserApi::Parse(info, kNoParsingFlags)) {
     // Measure how long it takes to do the lazy compilation; only take the
     // rest of the function into account to avoid overlap with the lazy
     // parsing statistics.
     HistogramTimerScope timer(isolate->counters()->compile_lazy());
 
-    // After parsing we know function's strict mode. Remember it.
-    if (info->function()->strict_mode()) {
-      shared->set_strict_mode(true);
-      info->MarkAsStrictMode();
-    }
+    // After parsing we know the function's language mode. Remember it.
+    LanguageMode language_mode = info->function()->language_mode();
+    info->SetLanguageMode(language_mode);
+    shared->set_language_mode(language_mode);
 
     // Compile the code.
     if (!MakeCode(info)) {
@@ -620,16 +635,15 @@
       RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
 
       if (info->IsOptimizing()) {
-        ASSERT(shared->scope_info() != SerializedScopeInfo::Empty());
+        ASSERT(shared->scope_info() != ScopeInfo::Empty());
         function->ReplaceCode(*code);
       } else {
         // Update the shared function info with the compiled code and the
         // scope info.  Please note, that the order of the shared function
         // info initialization is important since set_scope_info might
         // trigger a GC, causing the ASSERT below to be invalid if the code
-        // was flushed. By settting the code object last we avoid this.
-        Handle<SerializedScopeInfo> scope_info =
-            SerializedScopeInfo::Create(info->scope());
+        // was flushed. By setting the code object last we avoid this.
+        Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
         shared->set_scope_info(*scope_info);
         shared->set_code(*code);
         if (!function.is_null()) {
@@ -652,8 +666,13 @@
         // Check the function has compiled code.
         ASSERT(shared->is_compiled());
         shared->set_code_age(0);
+        shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+        shared->set_dont_inline(lit->flags()->Contains(kDontInline));
+        shared->set_ast_node_count(lit->ast_node_count());
 
-        if (info->AllowOptimize() && !shared->optimization_disabled()) {
+        if (V8::UseCrankshaft() &&
+            !function.is_null() &&
+            !shared->optimization_disabled()) {
           // If we're asked to always optimize, we compile the optimized
           // version of the function right away - unless the debugger is
           // active as it makes no sense to compile optimized code then.
@@ -681,7 +700,7 @@
   CompilationInfo info(script);
   info.SetFunction(literal);
   info.SetScope(literal->scope());
-  if (literal->scope()->is_strict_mode()) info.MarkAsStrictMode();
+  info.SetLanguageMode(literal->scope()->language_mode());
 
   LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
   // Determine if the function can be lazily compiled. This is necessary to
@@ -692,7 +711,7 @@
   bool allow_lazy = literal->AllowsLazyCompilation() &&
       !LiveEditFunctionTracker::IsActive(info.isolate());
 
-  Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
+  Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
 
   // Generate code
   if (FLAG_lazy && allow_lazy) {
@@ -701,7 +720,7 @@
   } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
              (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
     ASSERT(!info.code().is_null());
-    scope_info = SerializedScopeInfo::Create(info.scope());
+    scope_info = ScopeInfo::Create(info.scope());
   } else {
     return Handle<SharedFunctionInfo>::null();
   }
@@ -733,8 +752,8 @@
                                FunctionLiteral* lit,
                                bool is_toplevel,
                                Handle<Script> script) {
-  function_info->set_length(lit->num_parameters());
-  function_info->set_formal_parameter_count(lit->num_parameters());
+  function_info->set_length(lit->parameter_count());
+  function_info->set_formal_parameter_count(lit->parameter_count());
   function_info->set_script(*script);
   function_info->set_function_token_position(lit->function_token_position());
   function_info->set_start_position(lit->start_position());
@@ -747,9 +766,13 @@
       lit->has_only_simple_this_property_assignments(),
       *lit->this_property_assignments());
   function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
-  function_info->set_strict_mode(lit->strict_mode());
+  function_info->set_language_mode(lit->language_mode());
   function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
   function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
+  function_info->set_ast_node_count(lit->ast_node_count());
+  function_info->set_is_function(lit->is_function());
+  function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+  function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
 }
 
 
diff --git a/src/compiler.h b/src/compiler.h
index 69ab27d..44df9e0 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,10 +52,15 @@
   bool is_lazy() const { return IsLazy::decode(flags_); }
   bool is_eval() const { return IsEval::decode(flags_); }
   bool is_global() const { return IsGlobal::decode(flags_); }
-  bool is_strict_mode() const { return IsStrictMode::decode(flags_); }
+  bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
+  bool is_extended_mode() const { return language_mode() == EXTENDED_MODE; }
+  LanguageMode language_mode() const {
+    return LanguageModeField::decode(flags_);
+  }
   bool is_in_loop() const { return IsInLoop::decode(flags_); }
   FunctionLiteral* function() const { return function_; }
   Scope* scope() const { return scope_; }
+  Scope* global_scope() const { return global_scope_; }
   Handle<Code> code() const { return code_; }
   Handle<JSFunction> closure() const { return closure_; }
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
@@ -73,11 +78,11 @@
     ASSERT(!is_lazy());
     flags_ |= IsGlobal::encode(true);
   }
-  void MarkAsStrictMode() {
-    flags_ |= IsStrictMode::encode(true);
-  }
-  StrictModeFlag StrictMode() {
-    return is_strict_mode() ? kStrictMode : kNonStrictMode;
+  void SetLanguageMode(LanguageMode language_mode) {
+    ASSERT(this->language_mode() == CLASSIC_MODE ||
+           this->language_mode() == language_mode ||
+           language_mode == EXTENDED_MODE);
+    flags_ = LanguageModeField::update(flags_, language_mode);
   }
   void MarkAsInLoop() {
     ASSERT(is_lazy());
@@ -97,6 +102,10 @@
     ASSERT(scope_ == NULL);
     scope_ = scope;
   }
+  void SetGlobalScope(Scope* global_scope) {
+    ASSERT(global_scope_ == NULL);
+    global_scope_ = global_scope;
+  }
   void SetCode(Handle<Code> code) { code_ = code; }
   void SetExtension(v8::Extension* extension) {
     ASSERT(!is_lazy());
@@ -114,6 +123,19 @@
     ASSERT(IsOptimizing());
     osr_ast_id_ = osr_ast_id;
   }
+  void MarkCompilingForDebugging(Handle<Code> current_code) {
+    ASSERT(mode_ != OPTIMIZE);
+    ASSERT(current_code->kind() == Code::FUNCTION);
+    flags_ |= IsCompilingForDebugging::encode(true);
+    if (current_code->is_compiled_optimizable()) {
+      EnableDeoptimizationSupport();
+    } else {
+      mode_ = CompilationInfo::NONOPT;
+    }
+  }
+  bool IsCompilingForDebugging() {
+    return IsCompilingForDebugging::decode(flags_);
+  }
 
   bool has_global_object() const {
     return !closure().is_null() && (closure()->context()->global() != NULL);
@@ -133,16 +155,16 @@
   void DisableOptimization();
 
   // Deoptimization support.
-  bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
+  bool HasDeoptimizationSupport() const {
+    return SupportsDeoptimization::decode(flags_);
+  }
   void EnableDeoptimizationSupport() {
     ASSERT(IsOptimizable());
-    supports_deoptimization_ = true;
+    flags_ |= SupportsDeoptimization::encode(true);
   }
 
-  // Determine whether or not we can adaptively optimize.
-  bool AllowOptimize() {
-    return V8::UseCrankshaft() && !closure_.is_null();
-  }
+  // Determines whether or not to insert a self-optimization header.
+  bool ShouldSelfOptimize();
 
   // Disable all optimization attempts of this info for the rest of the
   // current compilation pipeline.
@@ -154,9 +176,8 @@
   // Compilation mode.
   // BASE is generated by the full codegen, optionally prepared for bailouts.
   // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
-  // NONOPT is generated by the full codegen or the classic backend
-  //   and is not prepared for recompilation/bailouts. These functions
-  //   are never recompiled.
+  // NONOPT is generated by the full codegen and is not prepared for
+  //   recompilation/bailouts.  These functions are never recompiled.
   enum Mode {
     BASE,
     OPTIMIZE,
@@ -171,8 +192,9 @@
     if (script_->type()->value() == Script::TYPE_NATIVE) {
       MarkAsNative();
     }
-    if (!shared_info_.is_null() && shared_info_->strict_mode()) {
-      MarkAsStrictMode();
+    if (!shared_info_.is_null()) {
+      ASSERT(language_mode() == CLASSIC_MODE);
+      SetLanguageMode(shared_info_->language_mode());
     }
   }
 
@@ -192,9 +214,14 @@
   // Flags that can be set for lazy compilation.
   class IsInLoop: public BitField<bool, 3, 1> {};
   // Language mode - used in eager compilation.
-  class IsStrictMode: public BitField<bool, 4, 1> {};
+  class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
   // Is this a function from our natives.
   class IsNative: public BitField<bool, 6, 1> {};
+  // Is this code being compiled with support for deoptimization?
+  class SupportsDeoptimization: public BitField<bool, 7, 1> {};
+  // If compiling for debugging, produce just full code matching the
+  // initial mode setting.
+  class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
 
 
   unsigned flags_;
@@ -205,6 +232,8 @@
   // The scope of the function literal as a convenience.  Set to indicate
   // that scopes have been analyzed.
   Scope* scope_;
+  // The global scope provided as a convenience.
+  Scope* global_scope_;
   // The compiled code.
   Handle<Code> code_;
 
@@ -223,7 +252,6 @@
 
   // Compilation mode flag and whether deoptimization is allowed.
   Mode mode_;
-  bool supports_deoptimization_;
   int osr_ast_id_;
 
   DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
@@ -249,6 +277,9 @@
 
   static const int kMaxInliningLevels = 3;
 
+  // Call count before primitive functions trigger their own optimization.
+  static const int kCallsUntilPrimitiveOpt = 200;
+
   // All routines return a SharedFunctionInfo.
   // If an error occurs an exception is raised and the return handle
   // contains NULL.
@@ -267,7 +298,8 @@
   static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
                                                 Handle<Context> context,
                                                 bool is_global,
-                                                StrictModeFlag strict_mode);
+                                                LanguageMode language_mode,
+                                                int scope_position);
 
   // Compile from function info (used for lazy compilation). Returns true on
   // success and false if the compilation resulted in a stack overflow.
diff --git a/src/contexts.cc b/src/contexts.cc
index 4f93abd..76784bd 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -86,14 +86,14 @@
 
 Handle<Object> Context::Lookup(Handle<String> name,
                                ContextLookupFlags flags,
-                               int* index_,
+                               int* index,
                                PropertyAttributes* attributes,
                                BindingFlags* binding_flags) {
   Isolate* isolate = GetIsolate();
   Handle<Context> context(this, isolate);
 
   bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
-  *index_ = -1;
+  *index = -1;
   *attributes = ABSENT;
   *binding_flags = MISSING_BINDING;
 
@@ -110,70 +110,51 @@
       PrintF("\n");
     }
 
-    // Check extension/with/global object.
-    if (!context->IsBlockContext() && context->has_extension()) {
-      if (context->IsCatchContext()) {
-        // Catch contexts have the variable name in the extension slot.
-        if (name->Equals(String::cast(context->extension()))) {
-          if (FLAG_trace_contexts) {
-            PrintF("=> found in catch context\n");
-          }
-          *index_ = Context::THROWN_OBJECT_INDEX;
-          *attributes = NONE;
-          *binding_flags = MUTABLE_IS_INITIALIZED;
-          return context;
-        }
+    // 1. Check global objects, subjects of with, and extension objects.
+    if (context->IsGlobalContext() ||
+        context->IsWithContext() ||
+        (context->IsFunctionContext() && context->has_extension())) {
+      Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
+      // Context extension objects need to behave as if they have no
+      // prototype.  So even if we want to follow prototype chains, we need
+      // to only do a local lookup for context extension objects.
+      if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
+          object->IsJSContextExtensionObject()) {
+        *attributes = object->GetLocalPropertyAttribute(*name);
       } else {
-        ASSERT(context->IsGlobalContext() ||
-               context->IsFunctionContext() ||
-               context->IsWithContext());
-        // Global, function, and with contexts may have an object in the
-        // extension slot.
-        Handle<JSObject> extension(JSObject::cast(context->extension()),
-                                   isolate);
-        // Context extension objects needs to behave as if they have no
-        // prototype.  So even if we want to follow prototype chains, we
-        // need to only do a local lookup for context extension objects.
-        if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
-            extension->IsJSContextExtensionObject()) {
-          *attributes = extension->GetLocalPropertyAttribute(*name);
-        } else {
-          *attributes = extension->GetPropertyAttribute(*name);
+        *attributes = object->GetPropertyAttribute(*name);
+      }
+      if (*attributes != ABSENT) {
+        if (FLAG_trace_contexts) {
+          PrintF("=> found property in context object %p\n",
+                 reinterpret_cast<void*>(*object));
         }
-        if (*attributes != ABSENT) {
-          // property found
-          if (FLAG_trace_contexts) {
-            PrintF("=> found property in context object %p\n",
-                   reinterpret_cast<void*>(*extension));
-          }
-          return extension;
-        }
+        return object;
       }
     }
 
-    // Check serialized scope information of functions and blocks. Only
-    // functions can have parameters, and a function name.
+    // 2. Check the context proper if it has slots.
     if (context->IsFunctionContext() || context->IsBlockContext()) {
-      // We may have context-local slots.  Check locals in the context.
-      Handle<SerializedScopeInfo> scope_info;
+      // Use serialized scope information of functions and blocks to search
+      // for the context index.
+      Handle<ScopeInfo> scope_info;
       if (context->IsFunctionContext()) {
-        scope_info = Handle<SerializedScopeInfo>(
+        scope_info = Handle<ScopeInfo>(
             context->closure()->shared()->scope_info(), isolate);
       } else {
-        ASSERT(context->IsBlockContext());
-        scope_info = Handle<SerializedScopeInfo>(
-            SerializedScopeInfo::cast(context->extension()), isolate);
+        scope_info = Handle<ScopeInfo>(
+            ScopeInfo::cast(context->extension()), isolate);
       }
-
-      Variable::Mode mode;
-      int index = scope_info->ContextSlotIndex(*name, &mode);
-      ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
-      if (index >= 0) {
+      VariableMode mode;
+      InitializationFlag init_flag;
+      int slot_index = scope_info->ContextSlotIndex(*name, &mode, &init_flag);
+      ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
+      if (slot_index >= 0) {
         if (FLAG_trace_contexts) {
           PrintF("=> found local in context slot %d (mode = %d)\n",
-                 index, mode);
+                 slot_index, mode);
         }
-        *index_ = index;
+        *index = slot_index;
         // Note: Fixed context slots are statically allocated by the compiler.
         // Statically allocated variables always have a statically known mode,
         // which is the mode with which they were declared when added to the
@@ -181,23 +162,31 @@
         // declared variables that were introduced through declaration nodes)
         // must not appear here.
         switch (mode) {
-          case Variable::INTERNAL:  // Fall through.
-          case Variable::VAR:
+          case INTERNAL:  // Fall through.
+          case VAR:
             *attributes = NONE;
             *binding_flags = MUTABLE_IS_INITIALIZED;
             break;
-          case Variable::LET:
+          case LET:
             *attributes = NONE;
-            *binding_flags = MUTABLE_CHECK_INITIALIZED;
+            *binding_flags = (init_flag == kNeedsInitialization)
+                ? MUTABLE_CHECK_INITIALIZED : MUTABLE_IS_INITIALIZED;
             break;
-          case Variable::CONST:
+          case CONST:
             *attributes = READ_ONLY;
-            *binding_flags = IMMUTABLE_CHECK_INITIALIZED;
+            *binding_flags = (init_flag == kNeedsInitialization)
+                ? IMMUTABLE_CHECK_INITIALIZED : IMMUTABLE_IS_INITIALIZED;
             break;
-          case Variable::DYNAMIC:
-          case Variable::DYNAMIC_GLOBAL:
-          case Variable::DYNAMIC_LOCAL:
-          case Variable::TEMPORARY:
+          case CONST_HARMONY:
+            *attributes = READ_ONLY;
+            *binding_flags = (init_flag == kNeedsInitialization)
+                ? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
+                IMMUTABLE_IS_INITIALIZED_HARMONY;
+            break;
+          case DYNAMIC:
+          case DYNAMIC_GLOBAL:
+          case DYNAMIC_LOCAL:
+          case TEMPORARY:
             UNREACHABLE();
             break;
         }
@@ -206,22 +195,37 @@
 
       // Check the slot corresponding to the intermediate context holding
       // only the function name variable.
-      if (follow_context_chain) {
-        int index = scope_info->FunctionContextSlotIndex(*name);
-        if (index >= 0) {
+      if (follow_context_chain && context->IsFunctionContext()) {
+        VariableMode mode;
+        int function_index = scope_info->FunctionContextSlotIndex(*name, &mode);
+        if (function_index >= 0) {
           if (FLAG_trace_contexts) {
             PrintF("=> found intermediate function in context slot %d\n",
-                   index);
+                   function_index);
           }
-          *index_ = index;
+          *index = function_index;
           *attributes = READ_ONLY;
-          *binding_flags = IMMUTABLE_IS_INITIALIZED;
+          ASSERT(mode == CONST || mode == CONST_HARMONY);
+          *binding_flags = (mode == CONST)
+              ? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY;
           return context;
         }
       }
+
+    } else if (context->IsCatchContext()) {
+      // Catch contexts have the variable name in the extension slot.
+      if (name->Equals(String::cast(context->extension()))) {
+        if (FLAG_trace_contexts) {
+          PrintF("=> found in catch context\n");
+        }
+        *index = Context::THROWN_OBJECT_INDEX;
+        *attributes = NONE;
+        *binding_flags = MUTABLE_IS_INITIALIZED;
+        return context;
+      }
     }
 
-    // Proceed with the previous context.
+    // 3. Prepare to continue with the previous (next outermost) context.
     if (context->IsGlobalContext()) {
       follow_context_chain = false;
     } else {
@@ -236,68 +240,6 @@
 }
 
 
-bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
-  Context* context = this;
-
-  // Check that there is no local with the given name in contexts
-  // before the global context and check that there are no context
-  // extension objects (conservative check for with statements).
-  while (!context->IsGlobalContext()) {
-    // Check if the context is a catch or with context, or has introduced
-    // bindings by calling non-strict eval.
-    if (context->has_extension()) return false;
-
-    // Not a with context so it must be a function context.
-    ASSERT(context->IsFunctionContext());
-
-    // Check non-parameter locals.
-    Handle<SerializedScopeInfo> scope_info(
-        context->closure()->shared()->scope_info());
-    Variable::Mode mode;
-    int index = scope_info->ContextSlotIndex(*name, &mode);
-    ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
-    if (index >= 0) return false;
-
-    // Check parameter locals.
-    int param_index = scope_info->ParameterIndex(*name);
-    if (param_index >= 0) return false;
-
-    // Check context only holding the function name variable.
-    index = scope_info->FunctionContextSlotIndex(*name);
-    if (index >= 0) return false;
-    context = context->previous();
-  }
-
-  // No local or potential with statement found so the variable is
-  // global unless it is shadowed by an eval-introduced variable.
-  return true;
-}
-
-
-void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
-                                   bool* outer_scope_calls_non_strict_eval) {
-  // Skip up the context chain checking all the function contexts to see
-  // whether they call eval.
-  Context* context = this;
-  while (!context->IsGlobalContext()) {
-    if (context->IsFunctionContext()) {
-      Handle<SerializedScopeInfo> scope_info(
-          context->closure()->shared()->scope_info());
-      if (scope_info->CallsEval()) {
-        *outer_scope_calls_eval = true;
-        if (!scope_info->IsStrictMode()) {
-          // No need to go further since the answers will not change from
-          // here.
-          *outer_scope_calls_non_strict_eval = true;
-          return;
-        }
-      }
-    }
-    context = context->previous();
-  }
-}
-
-
 void Context::AddOptimizedFunction(JSFunction* function) {
   ASSERT(IsGlobalContext());
 #ifdef DEBUG
diff --git a/src/contexts.h b/src/contexts.h
index 505f86c..af5cb03 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -46,24 +46,43 @@
 
 // ES5 10.2 defines lexical environments with mutable and immutable bindings.
 // Immutable bindings have two states, initialized and uninitialized, and
-// their state is changed by the InitializeImmutableBinding method.
+// their state is changed by the InitializeImmutableBinding method. The
+// BindingFlags enum indicates whether a binding has definitely been
+// initialized. A mutable binding does not need to be checked and thus has
+// the BindingFlag MUTABLE_IS_INITIALIZED.
+//
+// There are two possibilities for immutable bindings:
+//  * 'const' declared variables. They are initialized when evaluating the
+//    corresponding declaration statement. They need to be checked for being
+//    initialized and thus get the flag IMMUTABLE_CHECK_INITIALIZED.
+//  * The function name of a named function literal. The binding is immediately
+//    initialized when entering the function and thus does not need to be
+//    checked. It gets the BindingFlag IMMUTABLE_IS_INITIALIZED.
+// Accessing an uninitialized binding produces the undefined value.
 //
 // The harmony proposal for block scoped bindings also introduces the
-// uninitialized state for mutable bindings. A 'let' declared variable
-// is a mutable binding that is created uninitalized upon activation of its
-// lexical environment and it is initialized when evaluating its declaration
-// statement. Var declared variables are mutable bindings that are
-// immediately initialized upon creation. The BindingFlags enum represents
-// information if a binding has definitely been initialized. 'const' declared
-// variables are created as uninitialized immutable bindings.
-
-// In harmony mode accessing an uninitialized binding produces a reference
-// error.
+// uninitialized state for mutable bindings.
+//  * A 'let' declared variable. It is initialized when evaluating the
+//    corresponding declaration statement. It needs to be checked for being
+//    initialized and thus gets the flag MUTABLE_CHECK_INITIALIZED.
+//  * A 'var' declared variable. It is initialized immediately upon creation
+//    and thus doesn't need to be checked. It gets the flag
+//    MUTABLE_IS_INITIALIZED.
+//  * Catch bound variables, function parameters and variables introduced by
+//    function declarations are initialized immediately and do not need to be
+//    checked. Thus they get the flag MUTABLE_IS_INITIALIZED.
+// Immutable bindings in harmony mode get the _HARMONY flag variants. Accessing
+// an uninitialized binding produces a reference error.
+//
+// In V8 uninitialized bindings are set to the hole value upon creation and set
+// to a different value upon initialization.
 enum BindingFlags {
   MUTABLE_IS_INITIALIZED,
   MUTABLE_CHECK_INITIALIZED,
   IMMUTABLE_IS_INITIALIZED,
   IMMUTABLE_CHECK_INITIALIZED,
+  IMMUTABLE_IS_INITIALIZED_HARMONY,
+  IMMUTABLE_CHECK_INITIALIZED_HARMONY,
   MISSING_BINDING
 };
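In summary, the switch added to Context::Lookup in contexts.cc above maps variable modes onto these flags roughly as follows (each CHECK variant falls back to the corresponding IS_INITIALIZED variant when the slot does not need an initialization check):

  VAR, INTERNAL                          -> MUTABLE_IS_INITIALIZED
  LET            (kNeedsInitialization)  -> MUTABLE_CHECK_INITIALIZED
  CONST          (kNeedsInitialization)  -> IMMUTABLE_CHECK_INITIALIZED
  CONST_HARMONY  (kNeedsInitialization)  -> IMMUTABLE_CHECK_INITIALIZED_HARMONY
  function name binding (CONST)          -> IMMUTABLE_IS_INITIALIZED
  function name binding (CONST_HARMONY)  -> IMMUTABLE_IS_INITIALIZED_HARMONY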
 
@@ -85,7 +104,11 @@
   V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
   V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
   V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
+  V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
   V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
+  V(SMI_JS_ARRAY_MAP_INDEX, Object, smi_js_array_map) \
+  V(DOUBLE_JS_ARRAY_MAP_INDEX, Object, double_js_array_map) \
+  V(OBJECT_JS_ARRAY_MAP_INDEX, Object, object_js_array_map) \
   V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
   V(JSON_OBJECT_INDEX, JSObject, json_object) \
   V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@@ -109,7 +132,6 @@
   V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
   V(STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX, Map, \
     strict_mode_function_instance_map) \
-  V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
   V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
   V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
   V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
@@ -134,9 +156,13 @@
   V(MAP_CACHE_INDEX, Object, map_cache) \
   V(CONTEXT_DATA_INDEX, Object, data) \
   V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
+  V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
+    to_complete_property_descriptor) \
   V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
   V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
-  V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
+  V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
+  V(PROXY_ENUMERATE, JSFunction, proxy_enumerate) \
+  V(RANDOM_SEED_INDEX, ByteArray, random_seed)
 
 // JSFunctions are pairs (context, function code), sometimes also called
 // closures. A Context object is used to represent function contexts and
@@ -192,7 +218,8 @@
     PREVIOUS_INDEX,
     // The extension slot is used for either the global object (in global
     // contexts), eval extension object (function contexts), subject of with
-    // (with contexts), or the variable name (catch contexts).
+    // (with contexts), the variable name (catch contexts), or the
+    // serialized scope info (block contexts).
     EXTENSION_INDEX,
     GLOBAL_INDEX,
     MIN_CONTEXT_SLOTS,
@@ -206,7 +233,6 @@
     ARGUMENTS_BOILERPLATE_INDEX,
     ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
     STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
-    JS_ARRAY_MAP_INDEX,
     REGEXP_RESULT_MAP_INDEX,
     FUNCTION_MAP_INDEX,
     STRICT_MODE_FUNCTION_MAP_INDEX,
@@ -220,7 +246,11 @@
     STRING_FUNCTION_INDEX,
     STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
     OBJECT_FUNCTION_INDEX,
+    INTERNAL_ARRAY_FUNCTION_INDEX,
     ARRAY_FUNCTION_INDEX,
+    SMI_JS_ARRAY_MAP_INDEX,
+    DOUBLE_JS_ARRAY_MAP_INDEX,
+    OBJECT_JS_ARRAY_MAP_INDEX,
     DATE_FUNCTION_INDEX,
     JSON_OBJECT_INDEX,
     REGEXP_FUNCTION_INDEX,
@@ -252,9 +282,12 @@
     OUT_OF_MEMORY_INDEX,
     CONTEXT_DATA_INDEX,
     ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
+    TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
     DERIVED_HAS_TRAP_INDEX,
     DERIVED_GET_TRAP_INDEX,
     DERIVED_SET_TRAP_INDEX,
+    PROXY_ENUMERATE,
+    RANDOM_SEED_INDEX,
 
     // Properties from here are treated as weak references by the full GC.
     // Scavenge treats them as strong references.
@@ -323,6 +356,10 @@
     Map* map = this->map();
     return map == map->GetHeap()->block_context_map();
   }
+  bool IsModuleContext() {
+    Map* map = this->map();
+    return map == map->GetHeap()->module_context_map();
+  }
 
   // Tells whether the global context is marked with out of memory.
   inline bool has_out_of_memory();
@@ -330,18 +367,24 @@
   // Mark the global context with out of memory.
   inline void mark_out_of_memory();
 
-  // The exception holder is the object used as a with object in
-  // the implementation of a catch block.
-  bool is_exception_holder(Object* object) {
-    return IsCatchContext() && extension() == object;
-  }
-
   // A global context holds a list of all functions which have been optimized.
   void AddOptimizedFunction(JSFunction* function);
   void RemoveOptimizedFunction(JSFunction* function);
   Object* OptimizedFunctionsListHead();
   void ClearOptimizedFunctions();
 
+  static int GetContextMapIndexFromElementsKind(
+      ElementsKind elements_kind) {
+    if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+      return Context::DOUBLE_JS_ARRAY_MAP_INDEX;
+    } else if (elements_kind == FAST_ELEMENTS) {
+      return Context::OBJECT_JS_ARRAY_MAP_INDEX;
+    } else {
+      ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
+      return Context::SMI_JS_ARRAY_MAP_INDEX;
+    }
+  }
+
 #define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
   void  set_##name(type* value) {                         \
     ASSERT(IsGlobalContext());                            \
@@ -355,46 +398,28 @@
 #undef GLOBAL_CONTEXT_FIELD_ACCESSORS
 
   // Look up the slot called name, starting with the current context.
-  // There are 4 possible outcomes:
+  // There are three possibilities:
   //
-  // 1) index_ >= 0 && result->IsContext():
-  //    most common case, the result is a Context, and index is the
-  //    context slot index, and the slot exists.
-  //    attributes == READ_ONLY for the function name variable, NONE otherwise.
+  // 1) result->IsContext():
+  //    The binding was found in a context.  *index is always the
+  //    non-negative slot index.  *attributes is NONE for var and let
+  //    declarations, READ_ONLY for const declarations (never ABSENT).
   //
-  // 2) index_ >= 0 && result->IsJSObject():
-  //    the result is the JSObject arguments object, the index is the parameter
-  //    index, i.e., key into the arguments object, and the property exists.
-  //    attributes != ABSENT.
+  // 2) result->IsJSObject():
+  //    The binding was found as a named property in a context extension
+  //    object (i.e., was introduced via eval), as a property on the subject
+  //    of with, or as a property of the global object.  *index is -1 and
+  //    *attributes is not ABSENT.
   //
-  // 3) index_ < 0 && result->IsJSObject():
-  //    the result is the JSObject extension context or the global object,
-  //    and the name is the property name, and the property exists.
-  //    attributes != ABSENT.
-  //
-  // 4) index_ < 0 && result.is_null():
-  //    there was no context found with the corresponding property.
-  //    attributes == ABSENT.
+  // 3) result.is_null():
+  //    There was no binding found, *index is always -1 and *attributes is
+  //    always ABSENT.
   Handle<Object> Lookup(Handle<String> name,
                         ContextLookupFlags flags,
-                        int* index_,
+                        int* index,
                         PropertyAttributes* attributes,
                         BindingFlags* binding_flags);
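From the caller's side the three outcomes look like this (a sketch built from the declarations above, not code from the tree):

  int index;
  PropertyAttributes attributes;
  BindingFlags binding_flags;
  Handle<Object> holder = context->Lookup(
      name, FOLLOW_CONTEXT_CHAIN, &index, &attributes, &binding_flags);
  if (holder.is_null()) {
    // 3) Unresolved: attributes == ABSENT, index == -1.
  } else if (holder->IsContext()) {
    // 1) Found in a context slot: access the value through 'index'.
  } else {
    // 2) Found as a JSObject property (context extension, subject of with,
    //    or the global object): access it by 'name' on the object.
  }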
 
-  // Determine if a local variable with the given name exists in a
-  // context.  Do not consider context extension objects.  This is
-  // used for compiling code using eval.  If the context surrounding
-  // the eval call does not have a local variable with this name and
-  // does not contain a with statement the property is global unless
-  // it is shadowed by a property in an extension object introduced by
-  // eval.
-  bool GlobalIfNotShadowedByEval(Handle<String> name);
-
-  // Determine if any function scope in the context call eval and if
-  // any of those calls are in non-strict mode.
-  void ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
-                            bool* outer_scope_calls_non_strict_eval);
-
   // Code generation support.
   static int SlotOffset(int index) {
     return kHeaderSize + index * kPointerSize - kHeapObjectTag;
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 41cf0d5..b098a1c 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -46,15 +46,15 @@
 namespace v8 {
 namespace internal {
 
-static inline double JunkStringValue() {
-  return std::numeric_limits<double>::quiet_NaN();
+inline double JunkStringValue() {
+  return BitCast<double, uint64_t>(kQuietNaNMask);
 }
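The replacement builds the NaN from an explicit bit pattern instead of relying on <limits>. A standalone equivalent, assuming kQuietNaNMask carries the canonical IEEE 754 quiet-NaN bits, and noting that BitCast amounts to a memcpy:

  #include <cstdint>
  #include <cstring>

  static double QuietNaNFromBits() {
    const uint64_t kQuietNaNBits = UINT64_C(0x7FF8000000000000);
    double result;
    memcpy(&result, &kQuietNaNBits, sizeof(result));  // safe bit cast
    return result;
  }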
 
 
 // The fast double-to-unsigned-int conversion routine does not guarantee
 // rounding towards zero, or any reasonable value if the argument is larger
 // than what fits in an unsigned 32-bit integer.
-static inline unsigned int FastD2UI(double x) {
+inline unsigned int FastD2UI(double x) {
   // There is no unsigned version of lrint, so there is no fast path
   // in this function as there is in FastD2I. Using lrint doesn't work
   // for values of 2^31 and above.
@@ -80,7 +80,7 @@
 }
 
 
-static inline double DoubleToInteger(double x) {
+inline double DoubleToInteger(double x) {
   if (isnan(x)) return 0;
   if (!isfinite(x) || x == 0) return x;
   return (x >= 0) ? floor(x) : ceil(x);
@@ -103,9 +103,9 @@
 
 
 template <class Iterator, class EndMark>
-static bool SubStringEquals(Iterator* current,
-                            EndMark end,
-                            const char* substring) {
+bool SubStringEquals(Iterator* current,
+                     EndMark end,
+                     const char* substring) {
   ASSERT(**current == *substring);
   for (substring++; *substring != '\0'; substring++) {
     ++*current;
@@ -119,9 +119,9 @@
 // Returns true if a nonspace character has been found and false if the
 // end was reached before finding a nonspace character.
 template <class Iterator, class EndMark>
-static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
-                                     Iterator* current,
-                                     EndMark end) {
+inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
+                              Iterator* current,
+                              EndMark end) {
   while (*current != end) {
     if (!unicode_cache->IsWhiteSpace(**current)) return true;
     ++*current;
@@ -132,11 +132,11 @@
 
 // Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
 template <int radix_log_2, class Iterator, class EndMark>
-static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
-                                        Iterator current,
-                                        EndMark end,
-                                        bool negative,
-                                        bool allow_trailing_junk) {
+double InternalStringToIntDouble(UnicodeCache* unicode_cache,
+                                 Iterator current,
+                                 EndMark end,
+                                 bool negative,
+                                 bool allow_trailing_junk) {
   ASSERT(current != end);
 
   // Skip leading 0s.
@@ -235,10 +235,10 @@
 
 
 template <class Iterator, class EndMark>
-static double InternalStringToInt(UnicodeCache* unicode_cache,
-                                  Iterator current,
-                                  EndMark end,
-                                  int radix) {
+double InternalStringToInt(UnicodeCache* unicode_cache,
+                           Iterator current,
+                           EndMark end,
+                           int radix) {
   const bool allow_trailing_junk = true;
   const double empty_string_val = JunkStringValue();
 
@@ -430,11 +430,11 @@
 // 2. *current - gets the current character in the sequence.
 // 3. ++current (advances the position).
 template <class Iterator, class EndMark>
-static double InternalStringToDouble(UnicodeCache* unicode_cache,
-                                     Iterator current,
-                                     EndMark end,
-                                     int flags,
-                                     double empty_string_val) {
+double InternalStringToDouble(UnicodeCache* unicode_cache,
+                              Iterator current,
+                              EndMark end,
+                              int flags,
+                              double empty_string_val) {
   // To make sure that iterator dereferencing is valid the following
   // convention is used:
   // 1. Each '++current' statement is followed by a check for equality to 'end'.
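
The conversions-inl.h hunks above drop 'static' from functions and function templates defined in the header. A rough sketch of the distinction, using hypothetical helpers rather than V8 code: 'static inline' gives every translation unit its own internal-linkage copy, while plain 'inline' has external linkage, so the one-definition rule lets the linker fold all inclusions into a single entity.

    // twice.h -- hypothetical header contrasting the two linkage choices.
    #ifndef TWICE_H_
    #define TWICE_H_

    // Internal linkage: each .cc that includes this header compiles its
    // own private copy (function-local statics are duplicated per TU).
    static inline int TwiceInternal(int x) { return 2 * x; }

    // External linkage: all translation units denote one entity under
    // the one-definition rule; &TwiceShared compares equal everywhere.
    inline int TwiceShared(int x) { return 2 * x; }

    #endif  // TWICE_H_
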
diff --git a/src/conversions.h b/src/conversions.h
index e51ad65..70559c9 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -28,8 +28,6 @@
 #ifndef V8_CONVERSIONS_H_
 #define V8_CONVERSIONS_H_
 
-#include <limits>
-
 #include "utils.h"
 
 namespace v8 {
@@ -47,14 +45,14 @@
 const int kMaxSignificantDigits = 772;
 
 
-static inline bool isDigit(int x, int radix) {
+inline bool isDigit(int x, int radix) {
   return (x >= '0' && x <= '9' && x < '0' + radix)
       || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
       || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
 }
 
 
-static inline double SignedZero(bool negative) {
+inline double SignedZero(bool negative) {
   return negative ? -0.0 : 0.0;
 }
 
@@ -63,16 +61,16 @@
 // rounding towards zero.
 // The result is unspecified if x is infinite or NaN, or if the rounded
 // integer value is outside the range of type int.
-static inline int FastD2I(double x) {
+inline int FastD2I(double x) {
   // The static_cast conversion from double to int used to be slow, but
   // as new benchmarks show, now it is much faster than lrint().
   return static_cast<int>(x);
 }
 
-static inline unsigned int FastD2UI(double x);
+inline unsigned int FastD2UI(double x);
 
 
-static inline double FastI2D(int x) {
+inline double FastI2D(int x) {
   // There is no rounding involved in converting an integer to a
   // double, so this code should compile to a few instructions without
   // any FPU pipeline stalls.
@@ -80,7 +78,7 @@
 }
 
 
-static inline double FastUI2D(unsigned x) {
+inline double FastUI2D(unsigned x) {
   // There is no rounding involved in converting an unsigned integer to a
   // double, so this code should compile to a few instructions without
   // any FPU pipeline stalls.
@@ -89,15 +87,15 @@
 
 
 // This function should match the exact semantics of ECMA-262 9.4.
-static inline double DoubleToInteger(double x);
+inline double DoubleToInteger(double x);
 
 
 // This function should match the exact semantics of ECMA-262 9.5.
-static inline int32_t DoubleToInt32(double x);
+inline int32_t DoubleToInt32(double x);
 
 
 // This function should match the exact semantics of ECMA-262 9.6.
-static inline uint32_t DoubleToUint32(double x) {
+inline uint32_t DoubleToUint32(double x) {
   return static_cast<uint32_t>(DoubleToInt32(x));
 }
 
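
The comments in conversions.h pin DoubleToInt32 and DoubleToUint32 to ECMA-262 9.5 and 9.6: the double's integer part is wrapped modulo 2^32 and, for ToInt32, mapped into the signed range, which is why DoubleToUint32 above is just a static_cast of DoubleToInt32. A slow reference sketch of those semantics (not the optimized path the header implements):

    #include <cinttypes>
    #include <cmath>
    #include <cstdio>

    // Reference ECMA-262 9.5 semantics: truncate, wrap modulo 2^32,
    // then reinterpret into [-2^31, 2^31). ToUint32 (9.6) stops at 'u'.
    int32_t SlowToInt32(double x) {
      if (std::isnan(x) || std::isinf(x) || x == 0) return 0;
      double m = std::fmod(std::trunc(x), 4294967296.0);  // 2^32
      if (m < 0) m += 4294967296.0;
      uint32_t u = static_cast<uint32_t>(m);
      return static_cast<int32_t>(u);  // two's-complement wrap
    }

    int main() {
      printf("%" PRId32 "\n", SlowToInt32(4294967301.0));  // 2^32 + 5 -> 5
      printf("%" PRId32 "\n", SlowToInt32(2147483648.0));  // -2147483648
      printf("%" PRId32 "\n", SlowToInt32(-1.5));          // -1
    }
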
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index d74c034..3cbac77 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -39,13 +39,14 @@
 namespace v8 {
 namespace internal {
 
-static const int kEventsBufferSize = 256*KB;
-static const int kTickSamplesBufferChunkSize = 64*KB;
+static const int kEventsBufferSize = 256 * KB;
+static const int kTickSamplesBufferChunkSize = 64 * KB;
 static const int kTickSamplesBufferChunksCount = 16;
+static const int kProfilerStackSize = 64 * KB;
 
 
 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
-    : Thread("v8:ProfEvntProc"),
+    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
       generator_(generator),
       running_(true),
       ticks_buffer_(sizeof(TickSampleEventRecord),
@@ -493,7 +494,7 @@
     NoBarrier_Store(&is_profiling_, true);
     processor_->Start();
     // Enumerate stuff we already have in the heap.
-    if (isolate->heap()->HasBeenSetup()) {
+    if (isolate->heap()->HasBeenSetUp()) {
       if (!FLAG_prof_browser_mode) {
         bool saved_log_code_flag = FLAG_log_code;
         FLAG_log_code = true;
@@ -562,7 +563,7 @@
 }
 
 
-void CpuProfiler::Setup() {
+void CpuProfiler::SetUp() {
   Isolate* isolate = Isolate::Current();
   if (isolate->cpu_profiler() == NULL) {
     isolate->set_cpu_profiler(new CpuProfiler());
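
The profiler events thread now gets an explicit 64 KB stack through the new Thread::Options(name, stack_size) constructor instead of inheriting the platform default. Thread::Options is V8-internal; a POSIX sketch of the same idea with pthreads (the thread body here is illustrative):

    #include <pthread.h>
    #include <cstdio>

    static void* ProfilerLoop(void*) {
      // ... drain tick-sample buffers until asked to stop ...
      return NULL;
    }

    int main() {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      // Pin the stack to 64 KB, mirroring kProfilerStackSize above; the
      // platform's PTHREAD_STACK_MIN is the hard lower bound.
      pthread_attr_setstacksize(&attr, 64 * 1024);
      pthread_t thread;
      if (pthread_create(&thread, &attr, ProfilerLoop, NULL) != 0) {
        perror("pthread_create");
        return 1;
      }
      pthread_join(thread, NULL);
      pthread_attr_destroy(&attr);
      return 0;
    }
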
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index a71c0e0..6e2e771 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,7 +41,6 @@
 class CodeMap;
 class CpuProfile;
 class CpuProfilesCollection;
-class HashMap;
 class ProfileGenerator;
 class TokenEnumerator;
 
@@ -204,7 +203,7 @@
 // TODO(isolates): isolatify this class.
 class CpuProfiler {
  public:
-  static void Setup();
+  static void SetUp();
   static void TearDown();
 
   static void StartProfiling(const char* title);
@@ -230,11 +229,11 @@
                               Code* code, String* name);
   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code,
-                              SharedFunctionInfo *shared,
+                              SharedFunctionInfo* shared,
                               String* name);
   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code,
-                              SharedFunctionInfo *shared,
+                              SharedFunctionInfo* shared,
                               String* source, int line);
   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, int args_count);
diff --git a/src/cpu.h b/src/cpu.h
index 2525484..247af71 100644
--- a/src/cpu.h
+++ b/src/cpu.h
@@ -53,7 +53,7 @@
 class CPU : public AllStatic {
  public:
   // Initializes the cpu architecture support. Called once at VM startup.
-  static void Setup();
+  static void SetUp();
 
   static bool SupportsCrankshaft();
 
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index adefba7..de0faa8 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,6 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
 
 #include "d8.h"
 #include "d8-debug.h"
@@ -168,7 +169,7 @@
   bool ok;
 
   // Make sure that socket support is initialized.
-  ok = i::Socket::Setup();
+  ok = i::Socket::SetUp();
   if (!ok) {
     printf("Unable to initialize socket support %d\n", i::Socket::LastError());
     return;
@@ -309,9 +310,7 @@
   Handle<Value> request =
       Shell::DebugCommandToJSONRequest(String::New(command));
   if (try_catch.HasCaught()) {
-    v8::String::Utf8Value exception(try_catch.Exception());
-    const char* exception_string = Shell::ToCString(exception);
-    printf("%s\n", exception_string);
+    Shell::ReportException(&try_catch);
     PrintPrompt();
     return;
   }
@@ -367,3 +366,5 @@
 
 
 }  // namespace v8
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
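
Bracketing the whole translation unit in #ifdef ENABLE_DEBUGGER_SUPPORT means d8-debug.cc can stay on every build's source list and simply compile to an empty object file when the feature is off. The pattern in miniature, with a hypothetical feature name:

    // feature.cc -- compiles to nothing unless the flag is defined.
    #ifdef ENABLE_FOO_SUPPORT

    #include <cstdio>

    void RunFoo() { printf("foo enabled\n"); }

    #endif  // ENABLE_FOO_SUPPORT
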
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index 289c3b0..8a278e4 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -366,7 +366,8 @@
 // We're disabling usage of waitid in Mac OS X because it doesn't work for us:
 // a parent process hangs on waiting while a child process is already a zombie.
 // See http://code.google.com/p/v8/issues/detail?id=401.
-#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__)
+#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__) \
+    && !defined(__NetBSD__)
 #if !defined(__FreeBSD__)
 #define HAS_WAITID 1
 #endif
diff --git a/src/d8-readline.cc b/src/d8-readline.cc
index 71be933..ed7721c 100644
--- a/src/d8-readline.cc
+++ b/src/d8-readline.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,10 +49,14 @@
 class ReadLineEditor: public LineEditor {
  public:
   ReadLineEditor() : LineEditor(LineEditor::READLINE, "readline") { }
-  virtual i::SmartArrayPointer<char> Prompt(const char* prompt);
+  virtual Handle<String> Prompt(const char* prompt);
   virtual bool Open();
   virtual bool Close();
   virtual void AddHistory(const char* str);
+
+  static const char* kHistoryFileName;
+  static const int kMaxHistoryEntries;
+
  private:
   static char** AttemptedCompletion(const char* text, int start, int end);
   static char* CompletionGenerator(const char* text, int state);
@@ -66,25 +70,38 @@
     '\0'};
 
 
+const char* ReadLineEditor::kHistoryFileName = ".d8_history";
+const int ReadLineEditor::kMaxHistoryEntries = 1000;
+
+
 bool ReadLineEditor::Open() {
   rl_initialize();
   rl_attempted_completion_function = AttemptedCompletion;
   rl_completer_word_break_characters = kWordBreakCharacters;
   rl_bind_key('\t', rl_complete);
   using_history();
-  stifle_history(Shell::kMaxHistoryEntries);
-  return read_history(Shell::kHistoryFileName) == 0;
+  stifle_history(kMaxHistoryEntries);
+  return read_history(kHistoryFileName) == 0;
 }
 
 
 bool ReadLineEditor::Close() {
-  return write_history(Shell::kHistoryFileName) == 0;
+  return write_history(kHistoryFileName) == 0;
 }
 
 
-i::SmartArrayPointer<char> ReadLineEditor::Prompt(const char* prompt) {
-  char* result = readline(prompt);
-  return i::SmartArrayPointer<char>(result);
+Handle<String> ReadLineEditor::Prompt(const char* prompt) {
+  char* result = NULL;
+  {  // Release lock for blocking input.
+    Unlocker unlock(Isolate::GetCurrent());
+    result = readline(prompt);
+  }
+  if (result != NULL) {
+    AddHistory(result);
+  } else {
+    return Handle<String>();
+  }
+  return String::New(result);
 }
 
 
@@ -118,10 +135,10 @@
   static unsigned current_index;
   static Persistent<Array> current_completions;
   if (state == 0) {
-    i::SmartArrayPointer<char> full_text(i::StrNDup(rl_line_buffer, rl_point));
     HandleScope scope;
+    Local<String> full_text = String::New(rl_line_buffer, rl_point);
     Handle<Array> completions =
-      Shell::GetCompletions(String::New(text), String::New(*full_text));
+      Shell::GetCompletions(String::New(text), full_text);
     current_completions = Persistent<Array>::New(completions);
     current_index = 0;
   }
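
Both Prompt implementations now release the isolate lock around the blocking read, so other threads can run JavaScript while the shell waits on the console. A minimal sketch of the scoped pattern against the v8 3.x Locker API (the surrounding setup is assumed):

    #include <v8.h>
    #include <cstdio>

    v8::Handle<v8::String> PromptUser(const char* prompt) {
      char buffer[256];
      char* line = NULL;
      {
        // Drop the lock only for the blocking call; ~Unlocker reacquires
        // it before any V8 handle below is touched.
        v8::Unlocker unlock(v8::Isolate::GetCurrent());
        printf("%s", prompt);
        line = fgets(buffer, sizeof(buffer), stdin);
      }
      if (line == NULL) return v8::Handle<v8::String>();  // EOF
      return v8::String::New(line);
    }
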
diff --git a/src/d8.cc b/src/d8.cc
index 63a7d15..45781cf 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -66,11 +66,7 @@
 
 namespace v8 {
 
-
-#ifndef V8_SHARED
 LineEditor *LineEditor::first_ = NULL;
-const char* Shell::kHistoryFileName = ".d8_history";
-const int Shell::kMaxHistoryEntries = 1000;
 
 
 LineEditor::LineEditor(Type type, const char* name)
@@ -96,36 +92,37 @@
 class DumbLineEditor: public LineEditor {
  public:
   DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { }
-  virtual i::SmartArrayPointer<char> Prompt(const char* prompt);
+  virtual Handle<String> Prompt(const char* prompt);
 };
 
 
 static DumbLineEditor dumb_line_editor;
 
 
-i::SmartArrayPointer<char> DumbLineEditor::Prompt(const char* prompt) {
-  static const int kBufferSize = 256;
-  char buffer[kBufferSize];
+Handle<String> DumbLineEditor::Prompt(const char* prompt) {
   printf("%s", prompt);
-  char* str = fgets(buffer, kBufferSize, stdin);
-  return i::SmartArrayPointer<char>(str ? i::StrDup(str) : str);
+  return Shell::ReadFromStdin();
 }
 
 
+#ifndef V8_SHARED
 CounterMap* Shell::counter_map_;
 i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
 CounterCollection Shell::local_counters_;
 CounterCollection* Shell::counters_ = &local_counters_;
 i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
 Persistent<Context> Shell::utility_context_;
-LineEditor* Shell::console = NULL;
 #endif  // V8_SHARED
 
+LineEditor* Shell::console = NULL;
 Persistent<Context> Shell::evaluation_context_;
 ShellOptions Shell::options;
 const char* Shell::kPrompt = "d8> ";
 
 
+const int MB = 1024 * 1024;
+
+
 #ifndef V8_SHARED
 bool CounterMap::Match(void* key1, void* key2) {
   const char* name1 = reinterpret_cast<const char*>(key1);
@@ -146,11 +143,11 @@
                           Handle<Value> name,
                           bool print_result,
                           bool report_exceptions) {
-#ifndef V8_SHARED
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
   bool FLAG_debugger = i::FLAG_debugger;
 #else
   bool FLAG_debugger = false;
-#endif  // V8_SHARED
+#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
   HandleScope handle_scope;
   TryCatch try_catch;
   options.script_executed = true;
@@ -178,7 +175,8 @@
         // If all went well and the result wasn't undefined then print
         // the returned value.
         v8::String::Utf8Value str(result);
-        fwrite(*str, sizeof(**str), str.length(), stdout);
+        size_t count = fwrite(*str, sizeof(**str), str.length(), stdout);
+        (void) count;  // Silence GCC-4.5.x "unused result" warning.
         printf("\n");
       }
       return true;
@@ -237,7 +235,7 @@
 }
 
 
-Handle<Value> Shell::ReadLine(const Arguments& args) {
+Handle<String> Shell::ReadFromStdin() {
   static const int kBufferSize = 256;
   char buffer[kBufferSize];
   Handle<String> accumulator = String::New("");
@@ -246,7 +244,12 @@
     // Continue reading if the line ends with an escape '\\' or the line has
     // not been fully read into the buffer yet (does not end with '\n').
     // If fgets gets an error, just give up.
-    if (fgets(buffer, kBufferSize, stdin) == NULL) return Null();
+    char* input = NULL;
+    {  // Release lock for blocking input.
+      Unlocker unlock(Isolate::GetCurrent());
+      input = fgets(buffer, kBufferSize, stdin);
+    }
+    if (input == NULL) return Handle<String>();
     length = static_cast<int>(strlen(buffer));
     if (length == 0) {
       return accumulator;
@@ -280,51 +283,161 @@
   return Undefined();
 }
 
-
-Handle<Value> Shell::CreateExternalArray(const Arguments& args,
-                                         ExternalArrayType type,
-                                         size_t element_size) {
-  ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
-         element_size == 8);
-  if (args.Length() != 1) {
-    return ThrowException(
-        String::New("Array constructor needs one parameter."));
+static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
+  if (value_in->IsUint32()) {
+    return value_in->Uint32Value();
   }
+
+  Local<Value> number = value_in->ToNumber();
+  if (try_catch->HasCaught()) return 0;
+
+  ASSERT(number->IsNumber());
+  Local<Int32> int32 = number->ToInt32();
+  if (try_catch->HasCaught() || int32.IsEmpty()) return 0;
+
+  int32_t raw_value = int32->Int32Value();
+  if (try_catch->HasCaught()) return 0;
+
+  if (raw_value < 0) {
+    ThrowException(String::New("Array length must not be negative."));
+    return 0;
+  }
+
   static const int kMaxLength = 0x3fffffff;
 #ifndef V8_SHARED
   ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
 #endif  // V8_SHARED
-  size_t length = 0;
-  if (args[0]->IsUint32()) {
-    length = args[0]->Uint32Value();
-  } else {
-    Local<Number> number = args[0]->ToNumber();
-    if (number.IsEmpty() || !number->IsNumber()) {
-      return ThrowException(String::New("Array length must be a number."));
-    }
-    int32_t raw_length = number->ToInt32()->Int32Value();
-    if (raw_length < 0) {
-      return ThrowException(String::New("Array length must not be negative."));
-    }
-    if (raw_length > static_cast<int32_t>(kMaxLength)) {
-      return ThrowException(
-          String::New("Array length exceeds maximum length."));
-    }
-    length = static_cast<size_t>(raw_length);
+  if (raw_value > static_cast<int32_t>(kMaxLength)) {
+    ThrowException(
+        String::New("Array length exceeds maximum length."));
   }
-  if (length > static_cast<size_t>(kMaxLength)) {
-    return ThrowException(String::New("Array length exceeds maximum length."));
+  return static_cast<size_t>(raw_value);
+}
+
+
+const char kArrayBufferReferencePropName[] = "_is_array_buffer_";
+const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_";
+
+
+Handle<Value> Shell::CreateExternalArray(const Arguments& args,
+                                         ExternalArrayType type,
+                                         size_t element_size) {
+  TryCatch try_catch;
+  bool is_array_buffer_construct = element_size == 0;
+  if (is_array_buffer_construct) {
+    type = v8::kExternalByteArray;
+    element_size = 1;
   }
-  void* data = calloc(length, element_size);
-  if (data == NULL) {
-    return ThrowException(String::New("Memory allocation failed."));
+  ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
+         element_size == 8);
+  if (args.Length() == 0) {
+    return ThrowException(
+        String::New("Array constructor must have at least one "
+                    "parameter."));
   }
+  bool first_arg_is_array_buffer =
+      args[0]->IsObject() &&
+      args[0]->ToObject()->Get(
+          String::New(kArrayBufferMarkerPropName))->IsTrue();
+  // Currently, only the following constructors are supported:
+  //   TypedArray(unsigned long length)
+  //   TypedArray(ArrayBuffer buffer,
+  //              optional unsigned long byteOffset,
+  //              optional unsigned long length)
+  if (args.Length() > 3) {
+    return ThrowException(
+        String::New("Array constructor from ArrayBuffer must "
+                    "have 1-3 parameters."));
+  }
+
+  Local<Value> length_value = (args.Length() < 3)
+      ? (first_arg_is_array_buffer
+         ? args[0]->ToObject()->Get(String::New("length"))
+         : args[0])
+      : args[2];
+  size_t length = convertToUint(length_value, &try_catch);
+  if (try_catch.HasCaught()) return try_catch.Exception();
+
+  void* data = NULL;
+  size_t offset = 0;
+
   Handle<Object> array = Object::New();
+  if (first_arg_is_array_buffer) {
+    Handle<Object> derived_from = args[0]->ToObject();
+    data = derived_from->GetIndexedPropertiesExternalArrayData();
+
+    size_t array_buffer_length = convertToUint(
+        derived_from->Get(String::New("length")),
+        &try_catch);
+    if (try_catch.HasCaught()) return try_catch.Exception();
+
+    if (data == NULL && array_buffer_length != 0) {
+      return ThrowException(
+          String::New("ArrayBuffer doesn't have data"));
+    }
+
+    if (args.Length() > 1) {
+      offset = convertToUint(args[1], &try_catch);
+      if (try_catch.HasCaught()) return try_catch.Exception();
+
+      // The given byteOffset must be a multiple of the element size of the
+      // specific type, otherwise an exception is raised.
+      if (offset % element_size != 0) {
+        return ThrowException(
+            String::New("offset must be multiple of element_size"));
+      }
+    }
+
+    if (offset > array_buffer_length) {
+      return ThrowException(
+          String::New("byteOffset must be less than ArrayBuffer length."));
+    }
+
+    if (args.Length() == 2) {
+      // If length is not explicitly specified, the length of the ArrayBuffer
+      // minus the byteOffset must be a multiple of the element size of the
+      // specific type, or an exception is raised.
+      length = array_buffer_length - offset;
+    }
+
+    if (args.Length() != 3) {
+      if (length % element_size != 0) {
+        return ThrowException(
+            String::New("ArrayBuffer length minus the byteOffset must be a "
+                        "multiple of the element size"));
+      }
+      length /= element_size;
+    }
+
+    // If the given byteOffset and length reference an area beyond the end
+    // of the ArrayBuffer, an exception is raised.
+    if (offset + (length * element_size) > array_buffer_length) {
+      return ThrowException(
+          String::New("length references an area beyond the end of the "
+                      "ArrayBuffer"));
+    }
+
+    // Hold a reference to the ArrayBuffer so its buffer doesn't get collected.
+    array->Set(String::New(kArrayBufferReferencePropName), args[0], ReadOnly);
+  }
+
+  if (is_array_buffer_construct) {
+    array->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly);
+  }
+
   Persistent<Object> persistent_array = Persistent<Object>::New(array);
   persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
   persistent_array.MarkIndependent();
-  array->SetIndexedPropertiesToExternalArrayData(data, type,
-                                                 static_cast<int>(length));
+  if (data == NULL && length != 0) {
+    data = calloc(length, element_size);
+    if (data == NULL) {
+      return ThrowException(String::New("Memory allocation failed."));
+    }
+  }
+
+  array->SetIndexedPropertiesToExternalArrayData(
+      reinterpret_cast<uint8_t*>(data) + offset, type,
+      static_cast<int>(length));
   array->Set(String::New("length"),
              Int32::New(static_cast<int32_t>(length)), ReadOnly);
   array->Set(String::New("BYTES_PER_ELEMENT"),
@@ -334,11 +447,22 @@
 
 
 void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
-  free(data);
+  HandleScope scope;
+  Handle<String> prop_name = String::New(kArrayBufferReferencePropName);
+  Handle<Object> converted_object = object->ToObject();
+  Local<Value> prop_value = converted_object->Get(prop_name);
+  if (data != NULL && !prop_value->IsObject()) {
+    free(data);
+  }
   object.Dispose();
 }
 
 
+Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
+  return CreateExternalArray(args, v8::kExternalByteArray, 0);
+}
+
+
 Handle<Value> Shell::Int8Array(const Arguments& args) {
   return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
 }
@@ -410,6 +534,10 @@
 
 void Shell::ReportException(v8::TryCatch* try_catch) {
   HandleScope handle_scope;
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+  bool enter_context = !Context::InContext();
+  if (enter_context) utility_context_->Enter();
+#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
   v8::String::Utf8Value exception(try_catch->Exception());
   const char* exception_string = ToCString(exception);
   Handle<Message> message = try_catch->Message();
@@ -444,6 +572,9 @@
     }
   }
   printf("\n");
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+  if (enter_context) utility_context_->Exit();
+#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
 }
 
 
@@ -481,6 +612,12 @@
   Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
   return val;
 }
+
+
+void Shell::DispatchDebugMessages() {
+  v8::Context::Scope scope(Shell::evaluation_context_);
+  v8::Debug::ProcessDebugMessages();
+}
 #endif  // ENABLE_DEBUGGER_SUPPORT
 #endif  // V8_SHARED
 
@@ -594,6 +731,7 @@
   Context::Scope utility_scope(utility_context_);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
+  if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
   // Install the debugger object in the utility scope
   i::Debug* debug = i::Isolate::Current()->debug();
   debug->Load();
@@ -668,6 +806,8 @@
   global_template->Set(String::New("print"), FunctionTemplate::New(Print));
   global_template->Set(String::New("write"), FunctionTemplate::New(Write));
   global_template->Set(String::New("read"), FunctionTemplate::New(Read));
+  global_template->Set(String::New("readbinary"),
+                       FunctionTemplate::New(ReadBinary));
   global_template->Set(String::New("readline"),
                        FunctionTemplate::New(ReadLine));
   global_template->Set(String::New("load"), FunctionTemplate::New(Load));
@@ -679,6 +819,8 @@
                        FunctionTemplate::New(DisableProfiler));
 
   // Bind the handlers for external arrays.
+  global_template->Set(String::New("ArrayBuffer"),
+                       FunctionTemplate::New(ArrayBuffer));
   global_template->Set(String::New("Int8Array"),
                        FunctionTemplate::New(Int8Array));
   global_template->Set(String::New("Uint8Array"),
@@ -747,6 +889,7 @@
   // Start the debugger agent if requested.
   if (i::FLAG_debugger_agent) {
     v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
+    v8::Debug::SetDebugMessageDispatchHandler(DispatchDebugMessages, true);
   }
 #endif  // ENABLE_DEBUGGER_SUPPORT
 #endif  // V8_SHARED
@@ -760,13 +903,8 @@
 #endif  // V8_SHARED
   // Initialize the global objects
   Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
-
-  v8::TryCatch try_catch;
   Persistent<Context> context = Context::New(NULL, global_template);
-  if (context.IsEmpty()) {
-    v8::Local<v8::Value> st = try_catch.StackTrace();
-    ASSERT(!context.IsEmpty());
-  }
+  ASSERT(!context.IsEmpty());
   Context::Scope scope(context);
 
 #ifndef V8_SHARED
@@ -797,22 +935,47 @@
 
 
 #ifndef V8_SHARED
+struct CounterAndKey {
+  Counter* counter;
+  const char* key;
+};
+
+
+int CompareKeys(const void* a, const void* b) {
+  return strcmp(static_cast<const CounterAndKey*>(a)->key,
+                static_cast<const CounterAndKey*>(b)->key);
+}
+
+
 void Shell::OnExit() {
   if (console != NULL) console->Close();
   if (i::FLAG_dump_counters) {
-    printf("+----------------------------------------+-------------+\n");
-    printf("| Name                                   | Value       |\n");
-    printf("+----------------------------------------+-------------+\n");
+    int number_of_counters = 0;
     for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
-      Counter* counter = i.CurrentValue();
+      number_of_counters++;
+    }
+    CounterAndKey* counters = new CounterAndKey[number_of_counters];
+    int j = 0;
+    for (CounterMap::Iterator i(counter_map_); i.More(); i.Next(), j++) {
+      counters[j].counter = i.CurrentValue();
+      counters[j].key = i.CurrentKey();
+    }
+    qsort(counters, number_of_counters, sizeof(counters[0]), CompareKeys);
+    printf("+--------------------------------------------+-------------+\n");
+    printf("| Name                                       | Value       |\n");
+    printf("+--------------------------------------------+-------------+\n");
+    for (j = 0; j < number_of_counters; j++) {
+      Counter* counter = counters[j].counter;
+      const char* key = counters[j].key;
       if (counter->is_histogram()) {
-        printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count());
-        printf("| t:%-36s | %11i |\n", i.CurrentKey(), counter->sample_total());
+        printf("| c:%-40s | %11i |\n", key, counter->count());
+        printf("| t:%-40s | %11i |\n", key, counter->sample_total());
       } else {
-        printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count());
+        printf("| %-42s | %11i |\n", key, counter->count());
       }
     }
-    printf("+----------------------------------------+-------------+\n");
+    printf("+--------------------------------------------+-------------+\n");
+    delete [] counters;
   }
   if (counters_file_ != NULL)
     delete counters_file_;
@@ -821,7 +984,7 @@
 
 
 static FILE* FOpen(const char* path, const char* mode) {
-#if (defined(_WIN32) || defined(_WIN64))
+#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
   FILE* result;
   if (fopen_s(&result, path, mode) == 0) {
     return result;
@@ -863,6 +1026,23 @@
 }
 
 
+Handle<Value> Shell::ReadBinary(const Arguments& args) {
+  String::Utf8Value filename(args[0]);
+  int size;
+  if (*filename == NULL) {
+    return ThrowException(String::New("Error loading file"));
+  }
+  char* chars = ReadChars(*filename, &size);
+  if (chars == NULL) {
+    return ThrowException(String::New("Error reading file"));
+  }
+  // We skip checking the string for UTF8 characters and use it raw as
+  // backing store for the external string with 8-bit characters.
+  BinaryResource* resource = new BinaryResource(chars, size);
+  return String::NewExternal(resource);
+}
+
+
 #ifndef V8_SHARED
 static char* ReadToken(char* data, char token) {
   char* next = i::OS::StrChr(data, token);
@@ -902,31 +1082,15 @@
   Context::Scope context_scope(evaluation_context_);
   HandleScope outer_scope;
   Handle<String> name = String::New("(d8)");
-#ifndef V8_SHARED
   console = LineEditor::Get();
   printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
-  if (i::FLAG_debugger) {
-    printf("JavaScript debugger enabled\n");
-  }
   console->Open();
   while (true) {
-    i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt);
-    if (input.is_empty()) break;
-    console->AddHistory(*input);
     HandleScope inner_scope;
-    ExecuteString(String::New(*input), name, true, true);
+    Handle<String> input = console->Prompt(Shell::kPrompt);
+    if (input.IsEmpty()) break;
+    ExecuteString(input, name, true, true);
   }
-#else
-  printf("V8 version %s [D8 light using shared library]\n", V8::GetVersion());
-  static const int kBufferSize = 256;
-  while (true) {
-    char buffer[kBufferSize];
-    printf("%s", Shell::kPrompt);
-    if (fgets(buffer, kBufferSize, stdin) == NULL) break;
-    HandleScope inner_scope;
-    ExecuteString(String::New(buffer), name, true, true);
-  }
-#endif  // V8_SHARED
   printf("\n");
 }
 
@@ -1049,14 +1213,11 @@
 
 #ifndef V8_SHARED
 i::Thread::Options SourceGroup::GetThreadOptions() {
-  i::Thread::Options options;
-  options.name = "IsolateThread";
   // On some systems (OSX 10.6) the stack size default is 0.5Mb or less
   // which is not enough to parse the big literal expressions used in tests.
   // The stack size should be at least StackGuard::kLimitSize + some
-  // OS-specific padding for thread startup code.
-  options.stack_size = 2 << 20;  // 2 Mb seems to be enough
-  return options;
+  // OS-specific padding for thread startup code.  2Mbytes seems to be enough.
+  return i::Thread::Options("IsolateThread", 2 * MB);
 }
 
 
@@ -1127,7 +1288,7 @@
       options.use_preemption = true;
       argv[i] = NULL;
 #endif  // V8_SHARED
-    } else if (strcmp(argv[i], "--no-preemption") == 0) {
+    } else if (strcmp(argv[i], "--nopreemption") == 0) {
 #ifdef V8_SHARED
       printf("D8 with shared library does not support multi-threading\n");
       return false;
@@ -1258,15 +1419,30 @@
     Locker lock;
     HandleScope scope;
     Persistent<Context> context = CreateEvaluationContext();
+    if (options.last_run) {
+      // Keep using the same context in the interactive shell.
+      evaluation_context_ = context;
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+      // If the interactive debugger is enabled make sure to activate
+      // it before running the files passed on the command line.
+      if (i::FLAG_debugger) {
+        InstallUtilityScript();
+      }
+#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+    }
     {
       Context::Scope cscope(context);
       options.isolate_sources[0].Execute();
     }
-    if (options.last_run) {
-      // Keep using the same context in the interactive shell
-      evaluation_context_ = context;
-    } else {
+    if (!options.last_run) {
       context.Dispose();
+#if !defined(V8_SHARED)
+      if (i::FLAG_send_idle_notification) {
+        const int kLongIdlePauseInMs = 1000;
+        V8::ContextDisposedNotification();
+        V8::IdleNotification(kLongIdlePauseInMs);
+      }
+#endif  // !V8_SHARED
     }
 
 #ifndef V8_SHARED
@@ -1316,6 +1492,15 @@
     }
     printf("======== Full Deoptimization =======\n");
     Testing::DeoptimizeAll();
+#if !defined(V8_SHARED)
+  } else if (i::FLAG_stress_runs > 0) {
+    int stress_runs = i::FLAG_stress_runs;
+    for (int i = 0; i < stress_runs && result == 0; i++) {
+      printf("============ Run %d/%d ============\n", i + 1, stress_runs);
+      options.last_run = (i == stress_runs - 1);
+      result = RunMain(argc, argv);
+    }
+#endif
   } else {
     result = RunMain(argc, argv);
   }
@@ -1336,9 +1521,11 @@
   if (( options.interactive_shell
       || !options.script_executed )
       && !options.test_shell ) {
-#ifndef V8_SHARED
-    InstallUtilityScript();
-#endif  // V8_SHARED
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+    if (!i::FLAG_debugger) {
+      InstallUtilityScript();
+    }
+#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
     RunShell();
   }
 
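
The ArrayBuffer branch of CreateExternalArray enforces the TypedArray(buffer, byteOffset, length) rules: the offset must be element-aligned and the view must lie entirely inside the buffer. The validation arithmetic in isolation (a standalone sketch, not the d8 code):

    #include <cstddef>
    #include <cstdio>

    // Returns true and writes the element count if (offset, length)
    // describes a valid view into a buffer of buffer_bytes bytes.
    bool ValidateView(size_t buffer_bytes, size_t element_size,
                      size_t offset, bool has_length, size_t length,
                      size_t* elements) {
      if (offset % element_size != 0) return false;  // misaligned view
      if (offset > buffer_bytes) return false;       // starts past the end
      if (!has_length) {
        size_t remaining = buffer_bytes - offset;
        if (remaining % element_size != 0) return false;
        length = remaining / element_size;
      }
      if (offset + length * element_size > buffer_bytes) return false;
      *elements = length;
      return true;
    }

    int main() {
      size_t n = 0;
      // A 16-byte buffer viewed as 4-byte elements from offset 4: 3 elements.
      bool ok = ValidateView(16, 4, 4, false, 0, &n);
      printf("%d %zu\n", ok, n);
      // A misaligned byteOffset is rejected, as in the diff above.
      printf("%d\n", ValidateView(16, 4, 2, false, 0, &n));
      return 0;
    }
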
diff --git a/src/d8.gyp b/src/d8.gyp
index 70186cf..a8361e6 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -1,4 +1,4 @@
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -41,9 +41,6 @@
       'include_dirs+': [
         '../src',
       ],
-      'defines': [
-        'ENABLE_DEBUGGER_SUPPORT',
-      ],
       'sources': [
         'd8.cc',
       ],
@@ -64,8 +61,8 @@
               'libraries': [ '-lreadline', ],
               'sources': [ 'd8-readline.cc' ],
             }],
-            [ '(OS=="linux" or OS=="mac" or OS=="freebsd" \
-              or OS=="openbsd" or OS=="solaris")', {
+            ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
+               or OS=="openbsd" or OS=="solaris" or OS=="android")', {
               'sources': [ 'd8-posix.cc', ]
             }],
             [ 'OS=="win"', {
diff --git a/src/d8.h b/src/d8.h
index 15d8d5d..c872f90 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -116,14 +116,13 @@
 #endif  // V8_SHARED
 
 
-#ifndef V8_SHARED
 class LineEditor {
  public:
   enum Type { DUMB = 0, READLINE = 1 };
   LineEditor(Type type, const char* name);
   virtual ~LineEditor() { }
 
-  virtual i::SmartArrayPointer<char> Prompt(const char* prompt) = 0;
+  virtual Handle<String> Prompt(const char* prompt) = 0;
   virtual bool Open() { return true; }
   virtual bool Close() { return true; }
   virtual void AddHistory(const char* str) { }
@@ -136,7 +135,6 @@
   LineEditor* next_;
   static LineEditor* first_;
 };
-#endif  // V8_SHARED
 
 
 class SourceGroup {
@@ -197,6 +195,27 @@
 };
 
 
+class BinaryResource : public v8::String::ExternalAsciiStringResource {
+ public:
+  BinaryResource(const char* string, int length)
+      : data_(string),
+        length_(length) { }
+
+  ~BinaryResource() {
+    delete[] data_;
+    data_ = NULL;
+    length_ = 0;
+  }
+
+  virtual const char* data() const { return data_; }
+  virtual size_t length() const { return length_; }
+
+ private:
+  const char* data_;
+  size_t length_;
+};
+
+
 class ShellOptions {
  public:
   ShellOptions() :
@@ -268,12 +287,13 @@
                                size_t buckets);
   static void AddHistogramSample(void* histogram, int sample);
   static void MapCounters(const char* name);
-#endif  // V8_SHARED
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   static Handle<Object> DebugMessageDetails(Handle<String> message);
   static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
-#endif
+  static void DispatchDebugMessages();
+#endif  // ENABLE_DEBUGGER_SUPPORT
+#endif  // V8_SHARED
 
 #ifdef WIN32
 #undef Yield
@@ -287,8 +307,13 @@
   static Handle<Value> EnableProfiler(const Arguments& args);
   static Handle<Value> DisableProfiler(const Arguments& args);
   static Handle<Value> Read(const Arguments& args);
-  static Handle<Value> ReadLine(const Arguments& args);
+  static Handle<Value> ReadBinary(const Arguments& args);
+  static Handle<String> ReadFromStdin();
+  static Handle<Value> ReadLine(const Arguments& args) {
+    return ReadFromStdin();
+  }
   static Handle<Value> Load(const Arguments& args);
+  static Handle<Value> ArrayBuffer(const Arguments& args);
   static Handle<Value> Int8Array(const Arguments& args);
   static Handle<Value> Uint8Array(const Arguments& args);
   static Handle<Value> Int16Array(const Arguments& args);
@@ -334,11 +359,8 @@
   static Handle<Value> RemoveDirectory(const Arguments& args);
 
   static void AddOSMethods(Handle<ObjectTemplate> os_template);
-#ifndef V8_SHARED
-  static const char* kHistoryFileName;
-  static const int kMaxHistoryEntries;
+
   static LineEditor* console;
-#endif  // V8_SHARED
   static const char* kPrompt;
   static ShellOptions options;
 
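
ReadBinary pairs with the new BinaryResource class above: String::NewExternal hands the resource to V8, which disposes of it once the string is garbage collected, and the resource's destructor, like ~BinaryResource, then frees the bytes exactly once. A sketch of that ownership hand-off with a hypothetical resource type:

    #include <v8.h>
    #include <cstddef>

    class OwnedAscii : public v8::String::ExternalAsciiStringResource {
     public:
      OwnedAscii(const char* data, size_t length)
          : data_(data), length_(length) { }
      virtual ~OwnedAscii() { delete[] data_; }  // runs when the string dies
      virtual const char* data() const { return data_; }
      virtual size_t length() const { return length_; }
     private:
      const char* data_;
      size_t length_;
    };

    v8::Handle<v8::String> WrapBuffer(const char* bytes, size_t n) {
      // No copy is made: the JS string aliases 'bytes' until collected.
      return v8::String::NewExternal(new OwnedAscii(bytes, n));
    }
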
diff --git a/src/d8.js b/src/d8.js
index 3009037..bf26923 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -25,11 +25,14 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+"use strict";
+
 String.prototype.startsWith = function (str) {
-  if (str.length > this.length)
+  if (str.length > this.length) {
     return false;
+  }
   return this.substr(0, str.length) == str;
-}
+};
 
 function log10(num) {
   return Math.log(num)/Math.log(10);
@@ -52,8 +55,9 @@
   for (var i = 0; i < parts.length; i++) {
     var part = parts[i];
     var next = current[part];
-    if (!next)
+    if (!next) {
       return [];
+    }
     current = next;
   }
   var result = [];
@@ -63,8 +67,9 @@
     var properties = mirror.properties();
     for (var i = 0; i < properties.length; i++) {
       var name = properties[i].name();
-      if (typeof name === 'string' && name.startsWith(last))
+      if (typeof name === 'string' && name.startsWith(last)) {
         result.push(name);
+      }
     }
     current = ToInspectableObject(current.__proto__);
   }
@@ -73,7 +78,7 @@
 
 
 // Global object holding debugger related constants and state.
-const Debug = {};
+var Debug = {};
 
 
 // Debug events which can occur in the V8 JavaScript engine. These originate
@@ -108,22 +113,24 @@
 
 
 // Current debug state.
-const kNoFrame = -1;
+var kNoFrame = -1;
 Debug.State = {
   currentFrame: kNoFrame,
   displaySourceStartLine: -1,
   displaySourceEndLine: -1,
   currentSourceLine: -1
-}
+};
 var trace_compile = false;  // Tracing all compile events?
 var trace_debug_json = false; // Tracing all debug json packets?
-var last_cmd_line = '';
+var last_cmd = '';
 //var lol_is_enabled;  // Set to true in d8.cc if LIVE_OBJECT_LIST is defined.
 var lol_next_dump_index = 0;
-const kDefaultLolLinesToPrintAtATime = 10;
-const kMaxLolLinesToPrintAtATime = 1000;
+var kDefaultLolLinesToPrintAtATime = 10;
+var kMaxLolLinesToPrintAtATime = 1000;
 var repeat_cmd_line = '';
 var is_running = true;
+// Global variable used to store whether a handle was requested.
+var lookup_handle = null;
 
 // Copied from debug-delay.js.  This is needed below:
 function ScriptTypeFlag(type) {
@@ -150,7 +157,7 @@
 }
 
 function DebugEventDetails(response) {
-  details = {text:'', running:false}
+  var details = {text:'', running:false};
 
   // Get the running state.
   details.running = response.running();
@@ -217,7 +224,7 @@
 
     case 'afterCompile':
       if (trace_compile) {
-        result = 'Source ' + body.script.name + ' compiled:\n'
+        result = 'Source ' + body.script.name + ' compiled:\n';
         var source = body.script.source;
         if (!(source[source.length - 1] == '\n')) {
           result += source;
@@ -237,7 +244,7 @@
   }
 
   return details;
-};
+}
 
 
 function SourceInfo(body) {
@@ -279,7 +286,7 @@
 
   // Return the source line text with the underline beneath.
   return source_text + '\n' + underline;
-};
+}
 
 
 // Converts a text command to a JSON request.
@@ -289,7 +296,7 @@
     print("sending: '" + result + "'");
   }
   return result;
-};
+}
 
 
 function DebugRequest(cmd_line) {
@@ -514,7 +521,7 @@
 
 DebugRequest.prototype.JSONRequest = function() {
   return this.request_;
-}
+};
 
 
 function RequestPacket(command) {
@@ -536,14 +543,14 @@
     json += ',"arguments":';
     // Encode the arguments part.
     if (this.arguments.toJSONProtocol) {
-      json += this.arguments.toJSONProtocol()
+      json += this.arguments.toJSONProtocol();
     } else {
       json += SimpleObjectToJSON_(this.arguments);
     }
   }
   json += '}';
   return json;
-}
+};
 
 
 DebugRequest.prototype.createRequest = function(command) {
@@ -583,7 +590,6 @@
 
 // Create a JSON request for the evaluation command.
 DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
-  // Global varaible used to store whether a handle was requested.
   lookup_handle = null;
 
   if (lol_is_enabled) {
@@ -1310,7 +1316,7 @@
   }
 
   return request;
-}
+};
 
 
 function extractObjId(args) {
@@ -1499,7 +1505,7 @@
   } else {
     throw new Error('Invalid trace arguments.');
   }
-}
+};
 
 // Handle the help command.
 DebugRequest.prototype.helpCommand_ = function(args) {
@@ -1608,7 +1614,7 @@
   print('');
   print('disconnect|exit|quit       - disconnects and quits the debugger');
   print('help                       - prints this help information');
-}
+};
 
 
 function formatHandleReference_(value) {
@@ -1623,7 +1629,7 @@
 function formatObject_(value, include_properties) {
   var result = '';
   result += formatHandleReference_(value);
-  result += ', type: object'
+  result += ', type: object';
   result += ', constructor ';
   var ctor = value.constructorFunctionValue();
   result += formatHandleReference_(ctor);
@@ -1943,7 +1949,7 @@
 
 // Convert a JSON response to text for display in a text based debugger.
 function DebugResponseDetails(response) {
-  details = {text:'', running:false}
+  var details = { text: '', running: false };
 
   try {
     if (!response.success()) {
@@ -2308,7 +2314,7 @@
   }
 
   return details;
-};
+}
 
 
 /**
@@ -2334,7 +2340,7 @@
  */
 ProtocolPackage.prototype.type = function() {
   return this.packet_.type;
-}
+};
 
 
 /**
@@ -2343,7 +2349,7 @@
  */
 ProtocolPackage.prototype.event = function() {
   return this.packet_.event;
-}
+};
 
 
 /**
@@ -2352,7 +2358,7 @@
  */
 ProtocolPackage.prototype.requestSeq = function() {
   return this.packet_.request_seq;
-}
+};
 
 
 /**
@@ -2361,27 +2367,27 @@
  */
 ProtocolPackage.prototype.running = function() {
   return this.packet_.running ? true : false;
-}
+};
 
 
 ProtocolPackage.prototype.success = function() {
   return this.packet_.success ? true : false;
-}
+};
 
 
 ProtocolPackage.prototype.message = function() {
   return this.packet_.message;
-}
+};
 
 
 ProtocolPackage.prototype.command = function() {
   return this.packet_.command;
-}
+};
 
 
 ProtocolPackage.prototype.body = function() {
   return this.packet_.body;
-}
+};
 
 
 ProtocolPackage.prototype.bodyValue = function(index) {
@@ -2390,12 +2396,12 @@
   } else {
     return new ProtocolValue(this.packet_.body, this);
   }
-}
+};
 
 
 ProtocolPackage.prototype.body = function() {
   return this.packet_.body;
-}
+};
 
 
 ProtocolPackage.prototype.lookup = function(handle) {
@@ -2405,12 +2411,12 @@
   } else {
     return new ProtocolReference(handle);
   }
-}
+};
 
 
 ProtocolPackage.prototype.raw_json = function() {
   return this.raw_json_;
-}
+};
 
 
 function ProtocolValue(value, packet) {
@@ -2425,7 +2431,7 @@
  */
 ProtocolValue.prototype.type = function() {
   return this.value_.type;
-}
+};
 
 
 /**
@@ -2434,7 +2440,7 @@
  */
 ProtocolValue.prototype.field = function(name) {
   return this.value_[name];
-}
+};
 
 
 /**
@@ -2444,7 +2450,7 @@
 ProtocolValue.prototype.isPrimitive = function() {
   return this.isUndefined() || this.isNull() || this.isBoolean() ||
          this.isNumber() || this.isString();
-}
+};
 
 
 /**
@@ -2453,7 +2459,7 @@
  */
 ProtocolValue.prototype.handle = function() {
   return this.value_.handle;
-}
+};
 
 
 /**
@@ -2462,7 +2468,7 @@
  */
 ProtocolValue.prototype.isUndefined = function() {
   return this.value_.type == 'undefined';
-}
+};
 
 
 /**
@@ -2471,7 +2477,7 @@
  */
 ProtocolValue.prototype.isNull = function() {
   return this.value_.type == 'null';
-}
+};
 
 
 /**
@@ -2480,7 +2486,7 @@
  */
 ProtocolValue.prototype.isBoolean = function() {
   return this.value_.type == 'boolean';
-}
+};
 
 
 /**
@@ -2489,7 +2495,7 @@
  */
 ProtocolValue.prototype.isNumber = function() {
   return this.value_.type == 'number';
-}
+};
 
 
 /**
@@ -2498,7 +2504,7 @@
  */
 ProtocolValue.prototype.isString = function() {
   return this.value_.type == 'string';
-}
+};
 
 
 /**
@@ -2508,7 +2514,7 @@
 ProtocolValue.prototype.isObject = function() {
   return this.value_.type == 'object' || this.value_.type == 'function' ||
          this.value_.type == 'error' || this.value_.type == 'regexp';
-}
+};
 
 
 /**
@@ -2518,7 +2524,7 @@
 ProtocolValue.prototype.constructorFunctionValue = function() {
   var ctor = this.value_.constructorFunction;
   return this.packet_.lookup(ctor.ref);
-}
+};
 
 
 /**
@@ -2528,7 +2534,7 @@
 ProtocolValue.prototype.protoObjectValue = function() {
   var proto = this.value_.protoObject;
   return this.packet_.lookup(proto.ref);
-}
+};
 
 
 /**
@@ -2537,7 +2543,7 @@
  */
 ProtocolValue.prototype.propertyCount = function() {
   return this.value_.properties ? this.value_.properties.length : 0;
-}
+};
 
 
 /**
@@ -2547,7 +2553,7 @@
 ProtocolValue.prototype.propertyName = function(index) {
   var property = this.value_.properties[index];
   return property.name;
-}
+};
 
 
 /**
@@ -2562,7 +2568,7 @@
     }
   }
   return null;
-}
+};
 
 
 /**
@@ -2572,7 +2578,7 @@
 ProtocolValue.prototype.propertyValue = function(index) {
   var property = this.value_.properties[index];
   return this.packet_.lookup(property.ref);
-}
+};
 
 
 /**
@@ -2581,12 +2587,12 @@
  */
 ProtocolValue.prototype.value = function() {
   return this.value_.value;
-}
+};
 
 
 ProtocolValue.prototype.valueString = function() {
   return this.value_.text;
-}
+};
 
 
 function ProtocolReference(handle) {
@@ -2596,7 +2602,7 @@
 
 ProtocolReference.prototype.handle = function() {
   return this.handle_;
-}
+};
 
 
 function MakeJSONPair_(name, value) {
@@ -2626,7 +2632,7 @@
 
 // Mapping of some control characters to avoid the \uXXXX syntax for most
 // commonly used control characters.
-const ctrlCharMap_ = {
+var ctrlCharMap_ = {
   '\b': '\\b',
   '\t': '\\t',
   '\n': '\\n',
@@ -2638,12 +2644,12 @@
 
 
 // Regular expression testing for ", \ and control characters (0x00 - 0x1F).
-const ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]');
+var ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]');
 
 
 // Regular expression matching ", \ and control characters (0x00 - 0x1F)
 // globally.
-const ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g');
+var ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g');
 
 
 /**
@@ -2667,7 +2673,7 @@
         // Convert control character to unicode escape sequence.
         return '\\u00' +
           '0' + // TODO %NumberToRadixString(Math.floor(mapped / 16), 16) +
-          '0' // TODO %NumberToRadixString(mapped % 16, 16);
+          '0'; // TODO %NumberToRadixString(mapped % 16, 16)
       })
     + '"';
   }
@@ -2685,12 +2691,12 @@
  * @return {string} JSON formatted Date value
  */
 function DateToISO8601_(value) {
-  function f(n) {
+  var f = function(n) {
     return n < 10 ? '0' + n : n;
-  }
-  function g(n) {
+  };
+  var g = function(n) {
     return n < 10 ? '00' + n : n < 100 ? '0' + n : n;
-  }
+  };
   return builtins.GetUTCFullYearFrom(value)         + '-' +
           f(builtins.GetUTCMonthFrom(value) + 1)    + '-' +
           f(builtins.GetUTCDateFrom(value))         + 'T' +
@@ -2738,7 +2744,7 @@
           if (property_value === null) {
             property_value_json = 'null';
           } else if (typeof property_value.toJSONProtocol == 'function') {
-            property_value_json = property_value.toJSONProtocol(true)
+            property_value_json = property_value.toJSONProtocol(true);
           } else if (property_value.constructor.name == 'Array'){
             property_value_json = SimpleArrayToJSON_(property_value);
           } else {
@@ -2789,7 +2795,7 @@
     }
     var elem = array[i];
     if (elem.toJSONProtocol) {
-      json += elem.toJSONProtocol(true)
+      json += elem.toJSONProtocol(true);
     } else if (typeof(elem) === 'object')  {
       json += SimpleObjectToJSON_(elem);
     } else if (typeof(elem) === 'boolean')  {
diff --git a/src/data-flow.h b/src/data-flow.h
index d69d6c7..71f56e7 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -85,18 +85,18 @@
     friend class BitVector;
   };
 
-  explicit BitVector(int length)
+  BitVector(int length, Zone* zone)
       : length_(length),
         data_length_(SizeFor(length)),
-        data_(ZONE->NewArray<uint32_t>(data_length_)) {
+        data_(zone->NewArray<uint32_t>(data_length_)) {
     ASSERT(length > 0);
     Clear();
   }
 
-  BitVector(const BitVector& other)
+  BitVector(const BitVector& other, Zone* zone)
       : length_(other.length()),
         data_length_(SizeFor(length_)),
-        data_(ZONE->NewArray<uint32_t>(data_length_)) {
+        data_(zone->NewArray<uint32_t>(data_length_)) {
     CopyFrom(other);
   }
 
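
BitVector now takes the Zone explicitly instead of reaching for the isolate-wide ZONE macro, so each call site states which arena backs the bits. A toy illustration of the shape (the arena here is a stand-in, not V8's Zone):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Stand-in arena: hands out blocks and releases everything at once,
    // loosely like v8::internal::Zone.
    class Zone {
     public:
      template <typename T> T* NewArray(int n) {
        blocks_.push_back(std::vector<uint8_t>(sizeof(T) * n));
        return reinterpret_cast<T*>(blocks_.back().data());
      }
     private:
      std::vector<std::vector<uint8_t> > blocks_;
    };

    // Mirrors the new constructor shape: the caller picks the arena.
    class BitVector {
     public:
      BitVector(int length, Zone* zone)
          : data_length_((length + 31) / 32),
            data_(zone->NewArray<uint32_t>(data_length_)) {
        memset(data_, 0, data_length_ * sizeof(uint32_t));
      }
      void Add(int i) { data_[i >> 5] |= 1u << (i & 31); }
      bool Contains(int i) const { return (data_[i >> 5] >> (i & 31)) & 1; }
     private:
      int data_length_;
      uint32_t* data_;
    };

    int main() {
      Zone zone;
      BitVector live(100, &zone);  // allocation site chooses the zone
      live.Add(42);
      return live.Contains(42) ? 0 : 1;
    }
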
diff --git a/src/date.cc b/src/date.cc
new file mode 100644
index 0000000..a377451
--- /dev/null
+++ b/src/date.cc
@@ -0,0 +1,384 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "date.h"
+
+#include "v8.h"
+
+#include "objects.h"
+#include "objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+static const int kDays4Years[] = {0, 365, 2 * 365, 3 * 365 + 1};
+static const int kDaysIn4Years = 4 * 365 + 1;
+static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
+static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
+static const int kDays1970to2000 = 30 * 365 + 7;
+static const int kDaysOffset = 1000 * kDaysIn400Years + 5 * kDaysIn400Years -
+                               kDays1970to2000;
+static const int kYearsOffset = 400000;
+static const char kDaysInMonths[] =
+    {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+
+
+void DateCache::ResetDateCache() {
+  static const int kMaxStamp = Smi::kMaxValue;
+  stamp_ = Smi::FromInt(stamp_->value() + 1);
+  if (stamp_->value() > kMaxStamp) {
+    stamp_ = Smi::FromInt(0);
+  }
+  ASSERT(stamp_ != Smi::FromInt(kInvalidStamp));
+  for (int i = 0; i < kDSTSize; ++i) {
+    ClearSegment(&dst_[i]);
+  }
+  dst_usage_counter_ = 0;
+  before_ = &dst_[0];
+  after_ = &dst_[1];
+  local_offset_ms_ = kInvalidLocalOffsetInMs;
+  ymd_valid_ = false;
+}
+
+
+void DateCache::ClearSegment(DST* segment) {
+  segment->start_sec = kMaxEpochTimeInSec;
+  segment->end_sec = -kMaxEpochTimeInSec;
+  segment->offset_ms = 0;
+  segment->last_used = 0;
+}
+
+
+void DateCache::YearMonthDayFromDays(
+    int days, int* year, int* month, int* day) {
+  if (ymd_valid_) {
+    // Check conservatively if the given 'days' has
+    // the same year and month as the cached 'days'.
+    int new_day = ymd_day_ + (days - ymd_days_);
+    if (new_day >= 1 && new_day <= 28) {
+      ymd_day_ = new_day;
+      ymd_days_ = days;
+      *year = ymd_year_;
+      *month = ymd_month_;
+      *day = new_day;
+      return;
+    }
+  }
+  int save_days = days;
+
+  days += kDaysOffset;
+  *year = 400 * (days / kDaysIn400Years) - kYearsOffset;
+  days %= kDaysIn400Years;
+
+  ASSERT(DaysFromYearMonth(*year, 0) + days == save_days);
+
+  days--;
+  int yd1 = days / kDaysIn100Years;
+  days %= kDaysIn100Years;
+  *year += 100 * yd1;
+
+  days++;
+  int yd2 = days / kDaysIn4Years;
+  days %= kDaysIn4Years;
+  *year += 4 * yd2;
+
+  days--;
+  int yd3 = days / 365;
+  days %= 365;
+  *year += yd3;
+
+
+  bool is_leap = (!yd1 || yd2) && !yd3;
+
+  ASSERT(days >= -1);
+  ASSERT(is_leap || (days >= 0));
+  ASSERT((days < 365) || (is_leap && (days < 366)));
+  ASSERT(is_leap == ((*year % 4 == 0) && (*year % 100 || (*year % 400 == 0))));
+  ASSERT(is_leap || ((DaysFromYearMonth(*year, 0) + days) == save_days));
+  ASSERT(!is_leap || ((DaysFromYearMonth(*year, 0) + days + 1) == save_days));
+
+  days += is_leap;
+
+  // Check if the date is after February.
+  if (days >= 31 + 28 + is_leap) {
+    days -= 31 + 28 + is_leap;
+    // Find the date starting from March.
+    for (int i = 2; i < 12; i++) {
+      if (days < kDaysInMonths[i]) {
+        *month = i;
+        *day = days + 1;
+        break;
+      }
+      days -= kDaysInMonths[i];
+    }
+  } else {
+    // Check January and February.
+    if (days < 31) {
+      *month = 0;
+      *day = days + 1;
+    } else {
+      *month = 1;
+      *day = days - 31 + 1;
+    }
+  }
+  ASSERT(DaysFromYearMonth(*year, *month) + *day - 1 == save_days);
+  ymd_valid_ = true;
+  ymd_year_ = *year;
+  ymd_month_ = *month;
+  ymd_day_ = *day;
+  ymd_days_ = save_days;
+}
+
+
+int DateCache::DaysFromYearMonth(int year, int month) {
+  static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
+                                       181, 212, 243, 273, 304, 334};
+  static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
+                                            182, 213, 244, 274, 305, 335};
+
+  year += month / 12;
+  month %= 12;
+  if (month < 0) {
+    year--;
+    month += 12;
+  }
+
+  ASSERT(month >= 0);
+  ASSERT(month < 12);
+
+  // year_delta is an arbitrary number such that:
+  // a) year_delta = -1 (mod 400)
+  // b) year + year_delta > 0 for years in the range defined by
+  //    ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
+  //    Jan 1 1970. This is required so that we don't run into integer
+  //    division of negative numbers.
+  // c) there shouldn't be an overflow for 32-bit integers in the following
+  //    operations.
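+  // For (a), note that 399999 % 400 == 399, i.e. year_delta = -1
+  // (mod 400), so adding year_delta to a year does not change its
+  // position in the 400-year leap cycle.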
+  static const int year_delta = 399999;
+  static const int base_day = 365 * (1970 + year_delta) +
+                              (1970 + year_delta) / 4 -
+                              (1970 + year_delta) / 100 +
+                              (1970 + year_delta) / 400;
+
+  int year1 = year + year_delta;
+  int day_from_year = 365 * year1 +
+                      year1 / 4 -
+                      year1 / 100 +
+                      year1 / 400 -
+                      base_day;
+
+  if ((year % 4 != 0) || (year % 100 == 0 && year % 400 != 0)) {
+    return day_from_year + day_from_month[month];
+  }
+  return day_from_year + day_from_month_leap[month];
+}
+
+
+void DateCache::ExtendTheAfterSegment(int time_sec, int offset_ms) {
+  if (after_->offset_ms == offset_ms &&
+      after_->start_sec <= time_sec + kDefaultDSTDeltaInSec &&
+      time_sec <= after_->end_sec) {
+    // Extend the after_ segment.
+    after_->start_sec = time_sec;
+  } else {
+    // The after_ segment is either invalid or starts too late.
+    if (after_->start_sec <= after_->end_sec) {
+      // If the after_ segment is valid, replace it with a new segment.
+      after_ = LeastRecentlyUsedDST(before_);
+    }
+    after_->start_sec = time_sec;
+    after_->end_sec = time_sec;
+    after_->offset_ms = offset_ms;
+    after_->last_used = ++dst_usage_counter_;
+  }
+}
+
+
+int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
+  int time_sec = (time_ms >= 0 && time_ms <= kMaxEpochTimeInMs)
+      ? static_cast<int>(time_ms / 1000)
+      : static_cast<int>(EquivalentTime(time_ms) / 1000);
+
+  // Invalidate cache if the usage counter is close to overflow.
+  // Note that dst_usage_counter_ is incremented less than ten times
+  // in this function.
+  if (dst_usage_counter_ >= kMaxInt - 10) {
+    dst_usage_counter_ = 0;
+    for (int i = 0; i < kDSTSize; ++i) {
+      ClearSegment(&dst_[i]);
+    }
+  }
+
+  // Optimistic fast check.
+  if (before_->start_sec <= time_sec &&
+      time_sec <= before_->end_sec) {
+    // Cache hit.
+    before_->last_used = ++dst_usage_counter_;
+    return before_->offset_ms;
+  }
+
+  ProbeDST(time_sec);
+
+  ASSERT(InvalidSegment(before_) || before_->start_sec <= time_sec);
+  ASSERT(InvalidSegment(after_) || time_sec < after_->start_sec);
+
+  if (InvalidSegment(before_)) {
+    // Cache miss.
+    before_->start_sec = time_sec;
+    before_->end_sec = time_sec;
+    before_->offset_ms = GetDaylightSavingsOffsetFromOS(time_sec);
+    before_->last_used = ++dst_usage_counter_;
+    return before_->offset_ms;
+  }
+
+  if (time_sec <= before_->end_sec) {
+    // Cache hit.
+    before_->last_used = ++dst_usage_counter_;
+    return before_->offset_ms;
+  }
+
+  if (time_sec > before_->end_sec + kDefaultDSTDeltaInSec) {
+    // If the before_ segment ends too early, then just
+    // query for the offset of time_sec.
+    int offset_ms = GetDaylightSavingsOffsetFromOS(time_sec);
+    ExtendTheAfterSegment(time_sec, offset_ms);
+    // This swap helps the optimistic fast check in subsequent invocations.
+    DST* temp = before_;
+    before_ = after_;
+    after_ = temp;
+    return offset_ms;
+  }
+
+  // time_sec is now between
+  // before_->end_sec and before_->end_sec + the default DST delta.
+  // Update the usage counter of before_ since it is going to be used.
+  before_->last_used = ++dst_usage_counter_;
+
+  // Check if after_ segment is invalid or starts too late.
+  // Note that start_sec of invalid segments is kMaxEpochTimeInSec.
+  if (before_->end_sec + kDefaultDSTDeltaInSec <= after_->start_sec) {
+    int new_after_start_sec = before_->end_sec + kDefaultDSTDeltaInSec;
+    int new_offset_ms = GetDaylightSavingsOffsetFromOS(new_after_start_sec);
+    ExtendTheAfterSegment(new_after_start_sec, new_offset_ms);
+  } else {
+    ASSERT(!InvalidSegment(after_));
+    // Update the usage counter of after_ since it is going to be used.
+    after_->last_used = ++dst_usage_counter_;
+  }
+
+  // time_sec is now between before_->end_sec and after_->start_sec.
+  // Only one daylight savings offset change can occur in this interval.
+
+  if (before_->offset_ms == after_->offset_ms) {
+    // Merge two segments if they have the same offset.
+    before_->end_sec = after_->end_sec;
+    ClearSegment(after_);
+    return before_->offset_ms;
+  }
+
+  // Binary search for the daylight savings offset change point.
+  // After four bisection steps we give up and probe time_sec directly;
+  // the result then necessarily falls into one of the two segments.
+  for (int i = 4; i >= 0; --i) {
+    int delta = after_->start_sec - before_->end_sec;
+    int middle_sec = (i == 0) ? time_sec : before_->end_sec + delta / 2;
+    int offset_ms = GetDaylightSavingsOffsetFromOS(middle_sec);
+    if (before_->offset_ms == offset_ms) {
+      before_->end_sec = middle_sec;
+      if (time_sec <= before_->end_sec) {
+        return offset_ms;
+      }
+    } else {
+      ASSERT(after_->offset_ms == offset_ms);
+      after_->start_sec = middle_sec;
+      if (time_sec >= after_->start_sec) {
+        // This swap helps the optimistic fast check in subsequent invocations.
+        DST* temp = before_;
+        before_ = after_;
+        after_ = temp;
+        return offset_ms;
+      }
+    }
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
+void DateCache::ProbeDST(int time_sec) {
+  DST* before = NULL;
+  DST* after = NULL;
+  ASSERT(before_ != after_);
+
+  for (int i = 0; i < kDSTSize; ++i) {
+    if (dst_[i].start_sec <= time_sec) {
+      if (before == NULL || before->start_sec < dst_[i].start_sec) {
+        before = &dst_[i];
+      }
+    } else if (time_sec < dst_[i].end_sec) {
+      if (after == NULL || after->end_sec > dst_[i].end_sec) {
+        after = &dst_[i];
+      }
+    }
+  }
+
+  // If before or after segments were not found,
+  // then set them to any invalid segment.
+  if (before == NULL) {
+    before = InvalidSegment(before_) ? before_ : LeastRecentlyUsedDST(after);
+  }
+  if (after == NULL) {
+    after = InvalidSegment(after_) && before != after_
+            ? after_ : LeastRecentlyUsedDST(before);
+  }
+
+  ASSERT(before != NULL);
+  ASSERT(after != NULL);
+  ASSERT(before != after);
+  ASSERT(InvalidSegment(before) || before->start_sec <= time_sec);
+  ASSERT(InvalidSegment(after) || time_sec < after->start_sec);
+  ASSERT(InvalidSegment(before) || InvalidSegment(after) ||
+         before->end_sec < after->start_sec);
+
+  before_ = before;
+  after_ = after;
+}
+
+
+DateCache::DST* DateCache::LeastRecentlyUsedDST(DST* skip) {
+  DST* result = NULL;
+  for (int i = 0; i < kDSTSize; ++i) {
+    if (&dst_[i] == skip) continue;
+    if (result == NULL || result->last_used > dst_[i].last_used) {
+      result = &dst_[i];
+    }
+  }
+  ClearSegment(result);
+  return result;
+}
+
+} }  // namespace v8::internal
diff --git a/src/date.h b/src/date.h
new file mode 100644
index 0000000..fcd61db
--- /dev/null
+++ b/src/date.h
@@ -0,0 +1,260 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DATE_H_
+#define V8_DATE_H_
+
+#include "allocation.h"
+#include "globals.h"
+#include "platform.h"
+
+
+namespace v8 {
+namespace internal {
+
+class DateCache {
+ public:
+  static const int kMsPerMin = 60 * 1000;
+  static const int kSecPerDay = 24 * 60 * 60;
+  static const int64_t kMsPerDay = kSecPerDay * 1000;
+
+  // The largest time that can be passed to OS date-time library functions.
+  static const int kMaxEpochTimeInSec = kMaxInt;
+  static const int64_t kMaxEpochTimeInMs =
+      static_cast<int64_t>(kMaxInt) * 1000;
+
+  // The largest time that can be stored in JSDate.
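+  // This is 8.64e15 ms, i.e. exactly 100,000,000 days on either side
+  // of the epoch (ECMA 262 - 15.9.1.1).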
+  static const int64_t kMaxTimeInMs =
+      static_cast<int64_t>(864000000) * 10000000;
+
+  // Conservative upper bound on time that can be stored in JSDate
+  // before UTC conversion.
+  static const int64_t kMaxTimeBeforeUTCInMs =
+      kMaxTimeInMs + 10 * kMsPerDay;
+
+  // Sentinel that denotes an invalid local offset.
+  static const int kInvalidLocalOffsetInMs = kMaxInt;
+  // Sentinel that denotes an invalid cache stamp.
+  // It is an invariant of DateCache that the cache stamp is non-negative.
+  static const int kInvalidStamp = -1;
+
+  DateCache() : stamp_(0) {
+    ResetDateCache();
+  }
+
+  virtual ~DateCache() {}
+
+
+  // Clears cached timezone information and increments the cache stamp.
+  void ResetDateCache();
+
+
+  // Computes floor(time_ms / kMsPerDay).
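+  // The adjustment below makes the division a true floor for negative
+  // times as well; e.g. DaysFromTime(-1) is -1, whereas plain integer
+  // division would truncate -1 / kMsPerDay to 0.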
+  static int DaysFromTime(int64_t time_ms) {
+    if (time_ms < 0) time_ms -= (kMsPerDay - 1);
+    return static_cast<int>(time_ms / kMsPerDay);
+  }
+
+
+  // Computes modulo(time_ms, kMsPerDay) given that
+  // days = floor(time_ms / kMsPerDay).
+  static int TimeInDay(int64_t time_ms, int days) {
+    return static_cast<int>(time_ms - days * kMsPerDay);
+  }
+
+
+  // Given the number of days since the epoch, computes the weekday.
+  // ECMA 262 - 15.9.1.6.
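+  // Day 0 (January 1, 1970) was a Thursday, hence the +4 bias:
+  // Weekday(0) is 4, with 0 denoting Sunday.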
+  int Weekday(int days) {
+    int result = (days + 4) % 7;
+    return result >= 0 ? result : result + 7;
+  }
+
+
+  bool IsLeap(int year) {
+    return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
+  }
+
+
+  // ECMA 262 - 15.9.1.7.
+  int LocalOffsetInMs() {
+    if (local_offset_ms_ == kInvalidLocalOffsetInMs) {
+      local_offset_ms_ = GetLocalOffsetFromOS();
+    }
+    return local_offset_ms_;
+  }
+
+
+  const char* LocalTimezone(int64_t time_ms) {
+    if (time_ms < 0 || time_ms > kMaxEpochTimeInMs) {
+      time_ms = EquivalentTime(time_ms);
+    }
+    return OS::LocalTimezone(static_cast<double>(time_ms));
+  }
+
+  // ECMA 262 - 15.9.5.26
+  int TimezoneOffset(int64_t time_ms) {
+    int64_t local_ms = ToLocal(time_ms);
+    return static_cast<int>((time_ms - local_ms) / kMsPerMin);
+  }
+
+  // ECMA 262 - 15.9.1.9
+  int64_t ToLocal(int64_t time_ms) {
+    return time_ms + LocalOffsetInMs() + DaylightSavingsOffsetInMs(time_ms);
+  }
+
+  // ECMA 262 - 15.9.1.9
+  int64_t ToUTC(int64_t time_ms) {
+    time_ms -= LocalOffsetInMs();
+    return time_ms - DaylightSavingsOffsetInMs(time_ms);
+  }
+
+
+  // Computes a time equivalent to the given time according
+  // to ECMA 262 - 15.9.1.9.
+  // The issue here is that some library calls don't work right for dates
+  // that cannot be represented using a non-negative signed 32-bit integer
+  // (measured in whole seconds based on the 1970 epoch).
+  // We solve this by mapping the time to a year with the same
+  // leap-year-ness and the same starting day for the year. The ECMAScript
+  // specification says we must do this, but for compatibility with other
+  // browsers, we use the actual year if it is in the range 1970..2037.
+  int64_t EquivalentTime(int64_t time_ms) {
+    int days = DaysFromTime(time_ms);
+    int time_within_day_ms = static_cast<int>(time_ms - days * kMsPerDay);
+    int year, month, day;
+    YearMonthDayFromDays(days, &year, &month, &day);
+    int new_days = DaysFromYearMonth(EquivalentYear(year), month) + day - 1;
+    return static_cast<int64_t>(new_days) * kMsPerDay + time_within_day_ms;
+  }
+
+  // Returns an equivalent year in the range [2008-2035] matching
+  // - leap year,
+  // - week day of first day.
+  // ECMA 262 - 15.9.1.9.
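+  // For example, 2100 is a non-leap year starting on a Friday, and so
+  // is 2027, so EquivalentYear(2100) is 2027.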
+  int EquivalentYear(int year) {
+    int week_day = Weekday(DaysFromYearMonth(year, 0));
+    int recent_year = (IsLeap(year) ? 1956 : 1967) + (week_day * 12) % 28;
+    // Find the year in the range 2008..2037 that is equivalent mod 28.
+    // Add 3*28 to give a positive argument to the modulus operator.
+    return 2008 + (recent_year + 3 * 28 - 2008) % 28;
+  }
+
+  // Given the number of days since the epoch, computes
+  // the corresponding year, month, and day.
+  void YearMonthDayFromDays(int days, int* year, int* month, int* day);
+
+  // Computes the number of days since the epoch for
+  // the first day of the given month in the given year.
+  int DaysFromYearMonth(int year, int month);
+
+  // The cache stamp is used for invalidating caches in JSDate.
+  // We increment the stamp each time the timezone information changes.
+  // JSDate objects perform a stamp check and invalidate their caches if
+  // their saved stamp is not equal to the current stamp.
+  Smi* stamp() { return stamp_; }
+  void* stamp_address() { return &stamp_; }
+
+  // These functions are virtual so that we can override them when testing.
+  virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
+    double time_ms = static_cast<double>(time_sec * 1000);
+    return static_cast<int>(OS::DaylightSavingsOffset(time_ms));
+  }
+
+  virtual int GetLocalOffsetFromOS() {
+    double offset = OS::LocalTimeOffset();
+    ASSERT(offset < kInvalidLocalOffsetInMs);
+    return static_cast<int>(offset);
+  }
+
+ private:
+  // The implementation relies on the fact that no time zones have
+  // more than one daylight savings offset change per 19 days.
+  // In 2010, Egypt decided to suspend DST during Ramadan. This led to
+  // a short interval where DST was in effect only from September 10 to
+  // September 30.
+  static const int kDefaultDSTDeltaInSec = 19 * kSecPerDay;
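+  // (19 * kSecPerDay = 1,641,600 seconds; within this distance of a
+  // cached segment, at most one DST change needs to be located.)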
+
+  // Size of the Daylight Savings Time cache.
+  static const int kDSTSize = 32;
+
+  // A Daylight Savings Time segment stores an interval of time for
+  // which the daylight savings offset does not change.
+  struct DST {
+    int start_sec;
+    int end_sec;
+    int offset_ms;
+    int last_used;
+  };
+
+  // Computes the daylight savings offset for the given time.
+  // ECMA 262 - 15.9.1.8
+  int DaylightSavingsOffsetInMs(int64_t time_ms);
+
+  // Sets the before_ and the after_ segments from the DST cache such that
+  // the before_ segment starts at or before the given time and
+  // the after_ segment starts after the given time.
+  // Both segments might be invalid.
+  // The last_used counters of before_ and after_ are updated.
+  void ProbeDST(int time_sec);
+
+  // Finds the least recently used segment from the DST cache that is not
+  // equal to the given 'skip' segment.
+  DST* LeastRecentlyUsedDST(DST* skip);
+
+  // Extends the after_ segment backwards to the given time point, or
+  // resets it if its offset differs or if it starts later than the
+  // given time + kDefaultDSTDeltaInSec.
+  inline void ExtendTheAfterSegment(int time_sec, int offset_ms);
+
+  // Makes the given segment invalid.
+  inline void ClearSegment(DST* segment);
+
+  bool InvalidSegment(DST* segment) {
+    return segment->start_sec > segment->end_sec;
+  }
+
+  Smi* stamp_;
+
+  // Daylight Savings Time cache.
+  DST dst_[kDSTSize];
+  int dst_usage_counter_;
+  DST* before_;
+  DST* after_;
+
+  int local_offset_ms_;
+
+  // Year/Month/Day cache.
+  bool ymd_valid_;
+  int ymd_days_;
+  int ymd_year_;
+  int ymd_month_;
+  int ymd_day_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_DATE_H_
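
Not part of the patch: because the two OS query functions above are
virtual, a test can pin DateCache to a fixed, deterministic zone. A
minimal sketch (the FixedZoneDateCache name is hypothetical):

  // Stubs the OS time queries so DateCache results are deterministic.
  class FixedZoneDateCache : public v8::internal::DateCache {
   public:
    // Pretend DST is never in effect.
    virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
      return 0;
    }
    // Report a fixed UTC-8 local offset, in milliseconds.
    virtual int GetLocalOffsetFromOS() {
      return -8 * 60 * 60 * 1000;
    }
  };

With these stubs, ToLocal(t) is t minus eight hours for every t,
independent of the host machine's timezone settings.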
diff --git a/src/date.js b/src/date.js
index ccefce5..75edf6d 100644
--- a/src/date.js
+++ b/src/date.js
@@ -28,190 +28,22 @@
 
 // This file relies on the fact that the following declarations have been made
 // in v8natives.js:
-// const $isFinite = GlobalIsFinite;
+// var $isFinite = GlobalIsFinite;
 
 // -------------------------------------------------------------------
 
 // This file contains date support implemented in JavaScript.
 
-
 // Keep reference to original values of some global properties.  This
 // has the added benefit that the code in this file is isolated from
 // changes to these properties.
-const $Date = global.Date;
+var $Date = global.Date;
 
 // Helper function to throw error.
 function ThrowDateTypeError() {
   throw new $TypeError('this is not a Date object.');
 }
 
-// ECMA 262 - 5.2
-function Modulo(value, remainder) {
-  var mod = value % remainder;
-  // Guard against returning -0.
-  if (mod == 0) return 0;
-  return mod >= 0 ? mod : mod + remainder;
-}
-
-
-function TimeWithinDay(time) {
-  return Modulo(time, msPerDay);
-}
-
-
-// ECMA 262 - 15.9.1.3
-function DaysInYear(year) {
-  if (year % 4 != 0) return 365;
-  if ((year % 100 == 0) && (year % 400 != 0)) return 365;
-  return 366;
-}
-
-
-function DayFromYear(year) {
-  return 365 * (year-1970)
-      + FLOOR((year-1969)/4)
-      - FLOOR((year-1901)/100)
-      + FLOOR((year-1601)/400);
-}
-
-
-function TimeFromYear(year) {
-  return msPerDay * DayFromYear(year);
-}
-
-
-function InLeapYear(time) {
-  return DaysInYear(YearFromTime(time)) - 365;  // Returns 1 or 0.
-}
-
-
-// ECMA 262 - 15.9.1.9
-function EquivalentYear(year) {
-  // Returns an equivalent year in the range [2008-2035] matching
-  // - leap year.
-  // - week day of first day.
-  var time = TimeFromYear(year);
-  var recent_year = (InLeapYear(time) == 0 ? 1967 : 1956) +
-      (WeekDay(time) * 12) % 28;
-  // Find the year in the range 2008..2037 that is equivalent mod 28.
-  // Add 3*28 to give a positive argument to the modulus operator.
-  return 2008 + (recent_year + 3*28 - 2008) % 28;
-}
-
-
-function EquivalentTime(t) {
-  // The issue here is that some library calls don't work right for dates
-  // that cannot be represented using a non-negative signed 32 bit integer
-  // (measured in whole seconds based on the 1970 epoch).
-  // We solve this by mapping the time to a year with same leap-year-ness
-  // and same starting day for the year.  The ECMAscript specification says
-  // we must do this, but for compatibility with other browsers, we use
-  // the actual year if it is in the range 1970..2037
-  if (t >= 0 && t <= 2.1e12) return t;
-
-  var day = MakeDay(EquivalentYear(YearFromTime(t)),
-                    MonthFromTime(t),
-                    DateFromTime(t));
-  return MakeDate(day, TimeWithinDay(t));
-}
-
-
-// local_time_offset is initialized when the DST_offset_cache is missed.
-// It must not be used until after a call to DaylightSavingsOffset().
-// In this way, only one check, for a DST cache miss, is needed.
-var local_time_offset;
-
-
-// Because computing the DST offset is an expensive operation,
-// we keep a cache of the last computed DST offset along with a time interval
-// where we know the cache is valid.
-// When the cache is valid, local_time_offset is also valid.
-var DST_offset_cache = {
-  // Cached DST offset.
-  offset: 0,
-  // Time interval where the cached offset is valid.
-  start: 0, end: -1,
-  // Size of next interval expansion.
-  increment: 0,
-  initial_increment: 19 * msPerDay
-};
-
-
-// NOTE: The implementation relies on the fact that no time zones have
-// more than one daylight savings offset change per 19 days.
-//
-// In Egypt in 2010 they decided to suspend DST during Ramadan. This
-// led to a short interval where DST is in effect from September 10 to
-// September 30.
-//
-// If this function is called with NaN it returns NaN.
-function DaylightSavingsOffset(t) {
-  // Load the cache object from the builtins object.
-  var cache = DST_offset_cache;
-
-  // Cache the start and the end in local variables for fast access.
-  var start = cache.start;
-  var end = cache.end;
-
-  if (start <= t) {
-    // If the time fits in the cached interval, return the cached offset.
-    if (t <= end) return cache.offset;
-
-    // If the cache misses, the local_time_offset may not be initialized.
-    if (IS_UNDEFINED(local_time_offset)) {
-      local_time_offset = %DateLocalTimeOffset();
-    }
-
-    // Compute a possible new interval end.
-    var new_end = end + cache.increment;
-
-    if (t <= new_end) {
-      var end_offset = %DateDaylightSavingsOffset(EquivalentTime(new_end));
-      if (cache.offset == end_offset) {
-        // If the offset at the end of the new interval still matches
-        // the offset in the cache, we grow the cached time interval
-        // and return the offset.
-        cache.end = new_end;
-        cache.increment = cache.initial_increment;
-        return end_offset;
-      } else {
-        var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
-        if (offset == end_offset) {
-          // The offset at the given time is equal to the offset at the
-          // new end of the interval, so that means that we've just skipped
-          // the point in time where the DST offset change occurred. Updated
-          // the interval to reflect this and reset the increment.
-          cache.start = t;
-          cache.end = new_end;
-          cache.increment = cache.initial_increment;
-        } else {
-          // The interval contains a DST offset change and the given time is
-          // before it. Adjust the increment to avoid a linear search for
-          // the offset change point and change the end of the interval.
-          cache.increment /= 3;
-          cache.end = t;
-        }
-        // Update the offset in the cache and return it.
-        cache.offset = offset;
-        return offset;
-      }
-    }
-  }
-
-  // If the cache misses, the local_time_offset may not be initialized.
-  if (IS_UNDEFINED(local_time_offset)) {
-    local_time_offset = %DateLocalTimeOffset();
-  }
-  // Compute the DST offset for the time and shrink the cache interval
-  // to only contain the time. This allows fast repeated DST offset
-  // computations for the same time.
-  var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
-  cache.offset = offset;
-  cache.start = cache.end = t;
-  cache.increment = cache.initial_increment;
-  return offset;
-}
-
 
 var timezone_cache_time = $NaN;
 var timezone_cache_timezone;
@@ -221,57 +53,18 @@
   if (t == timezone_cache_time) {
     return timezone_cache_timezone;
   }
-  var timezone = %DateLocalTimezone(EquivalentTime(t));
+  var timezone = %DateLocalTimezone(t);
   timezone_cache_time = t;
   timezone_cache_timezone = timezone;
   return timezone;
 }
 
 
-function WeekDay(time) {
-  return Modulo(DAY(time) + 4, 7);
-}
-
-
-function LocalTime(time) {
-  if (NUMBER_IS_NAN(time)) return time;
-  // DaylightSavingsOffset called before local_time_offset used.
-  return time + DaylightSavingsOffset(time) + local_time_offset;
-}
-
-
-var ltcache = {
-  key: null,
-  val: null
-};
-
-function LocalTimeNoCheck(time) {
-  var ltc = ltcache;
-  if (%_ObjectEquals(time, ltc.key)) return ltc.val;
-
-  // Inline the DST offset cache checks for speed.
-  // The cache is hit, or DaylightSavingsOffset is called,
-  // before local_time_offset is used.
-  var cache = DST_offset_cache;
-  if (cache.start <= time && time <= cache.end) {
-    var dst_offset = cache.offset;
-  } else {
-    var dst_offset = DaylightSavingsOffset(time);
-  }
-  ltc.key = time;
-  return (ltc.val = time + local_time_offset + dst_offset);
-}
-
-
 function UTC(time) {
   if (NUMBER_IS_NAN(time)) return time;
   // local_time_offset is needed before the call to DaylightSavingsOffset,
   // so it may be uninitialized.
-  if (IS_UNDEFINED(local_time_offset)) {
-    local_time_offset = %DateLocalTimeOffset();
-  }
-  var tmp = time - local_time_offset;
-  return tmp - DaylightSavingsOffset(tmp);
+  return %DateToUTC(time);
 }
 
 
@@ -294,48 +87,6 @@
 }
 
 
-var ymd_from_time_cache = [$NaN, $NaN, $NaN];
-var ymd_from_time_cached_time = $NaN;
-
-function YearFromTime(t) {
-  if (t !== ymd_from_time_cached_time) {
-    if (!$isFinite(t)) {
-      return $NaN;
-    }
-
-    %DateYMDFromTime(t, ymd_from_time_cache);
-    ymd_from_time_cached_time = t
-  }
-
-  return ymd_from_time_cache[0];
-}
-
-function MonthFromTime(t) {
-  if (t !== ymd_from_time_cached_time) {
-    if (!$isFinite(t)) {
-      return $NaN;
-    }
-    %DateYMDFromTime(t, ymd_from_time_cache);
-    ymd_from_time_cached_time = t
-  }
-
-  return ymd_from_time_cache[1];
-}
-
-function DateFromTime(t) {
-  if (t !== ymd_from_time_cached_time) {
-    if (!$isFinite(t)) {
-      return $NaN;
-    }
-
-    %DateYMDFromTime(t, ymd_from_time_cache);
-    ymd_from_time_cached_time = t
-  }
-
-  return ymd_from_time_cache[2];
-}
-
-
 // Compute number of days given a year, month, date.
 // Note that month and date can lie outside the normal range.
 //   For example:
@@ -351,13 +102,12 @@
   date = TO_INTEGER_MAP_MINUS_ZERO(date);
 
   if (year < kMinYear || year > kMaxYear ||
-      month < kMinMonth || month > kMaxMonth ||
-      date < kMinDate || date > kMaxDate) {
+      month < kMinMonth || month > kMaxMonth) {
     return $NaN;
   }
 
-  // Now we rely on year, month and date being SMIs.
-  return %DateMakeDay(year, month, date);
+  // Now we rely on year and month being SMIs.
+  return %DateMakeDay(year, month) + date - 1;
 }
 
 
@@ -387,9 +137,6 @@
 var Date_cache = {
   // Cached time value.
   time: $NaN,
-  // Cached year when interpreting the time as a local time. Only
-  // valid when the time matches cached time.
-  year: $NaN,
   // String input for which the cached time is valid.
   string: null
 };
@@ -406,11 +153,10 @@
   var value;
   if (argc == 0) {
     value = %DateCurrentTime();
-
+    SET_UTC_DATE_VALUE(this, value);
   } else if (argc == 1) {
     if (IS_NUMBER(year)) {
-      value = TimeClip(year);
-
+      value = year;
     } else if (IS_STRING(year)) {
       // Probe the Date cache. If we already have a time value for the
       // given time, we re-use that instead of parsing the string again.
@@ -421,7 +167,6 @@
         value = DateParse(year);
         if (!NUMBER_IS_NAN(value)) {
           cache.time = value;
-          cache.year = YearFromTime(LocalTimeNoCheck(value));
           cache.string = year;
         }
       }
@@ -435,9 +180,9 @@
       // which is the default for everything else than Date objects.
       // This makes us behave like KJS and SpiderMonkey.
       var time = ToPrimitive(year, NUMBER_HINT);
-      value = IS_STRING(time) ? DateParse(time) : TimeClip(ToNumber(time));
+      value = IS_STRING(time) ? DateParse(time) : ToNumber(time);
     }
-
+    SET_UTC_DATE_VALUE(this, value);
   } else {
     year = ToNumber(year);
     month = ToNumber(month);
@@ -446,13 +191,14 @@
     minutes = argc > 4 ? ToNumber(minutes) : 0;
     seconds = argc > 5 ? ToNumber(seconds) : 0;
     ms = argc > 6 ? ToNumber(ms) : 0;
-    year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
-        ? 1900 + TO_INTEGER(year) : year;
+    year = (!NUMBER_IS_NAN(year) &&
+            0 <= TO_INTEGER(year) &&
+            TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
     var day = MakeDay(year, month, date);
     var time = MakeTime(hours, minutes, seconds, ms);
-    value = TimeClip(UTC(MakeDate(day, time)));
+    value = MakeDate(day, time);
+    SET_LOCAL_DATE_VALUE(this, value);
   }
-  %_SetValueOf(this, value);
 });
 
 
@@ -460,7 +206,8 @@
 
 
 var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
-var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
+var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
 
 
 function TwoDigitString(value) {
@@ -468,50 +215,46 @@
 }
 
 
-function DateString(time) {
-  return WeekDays[WeekDay(time)] + ' '
-      + Months[MonthFromTime(time)] + ' '
-      + TwoDigitString(DateFromTime(time)) + ' '
-      + YearFromTime(time);
+function DateString(date) {
+  return WeekDays[LOCAL_WEEKDAY(date)] + ' '
+      + Months[LOCAL_MONTH(date)] + ' '
+      + TwoDigitString(LOCAL_DAY(date)) + ' '
+      + LOCAL_YEAR(date);
 }
 
 
-var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'];
-var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'];
+var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
+    'Thursday', 'Friday', 'Saturday'];
+var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June',
+    'July', 'August', 'September', 'October', 'November', 'December'];
 
 
-function LongDateString(time) {
-  return LongWeekDays[WeekDay(time)] + ', '
-      + LongMonths[MonthFromTime(time)] + ' '
-      + TwoDigitString(DateFromTime(time)) + ', '
-      + YearFromTime(time);
+function LongDateString(date) {
+  return LongWeekDays[LOCAL_WEEKDAY(date)] + ', '
+      + LongMonths[LOCAL_MONTH(date)] + ' '
+      + TwoDigitString(LOCAL_DAY(date)) + ', '
+      + LOCAL_YEAR(date);
 }
 
 
-function TimeString(time) {
-  return TwoDigitString(HOUR_FROM_TIME(time)) + ':'
-      + TwoDigitString(MIN_FROM_TIME(time)) + ':'
-      + TwoDigitString(SEC_FROM_TIME(time));
+function TimeString(date) {
+  return TwoDigitString(LOCAL_HOUR(date)) + ':'
+      + TwoDigitString(LOCAL_MIN(date)) + ':'
+      + TwoDigitString(LOCAL_SEC(date));
 }
 
 
-function LocalTimezoneString(time) {
-  var old_timezone = timezone_cache_timezone;
-  var timezone = LocalTimezone(time);
-  if (old_timezone && timezone != old_timezone) {
-    // If the timezone string has changed from the one that we cached,
-    // the local time offset may now be wrong. So we need to update it
-    // and try again.
-    local_time_offset = %DateLocalTimeOffset();
-    // We also need to invalidate the DST cache as the new timezone may have
-    // different DST times.
-    var dst_cache = DST_offset_cache;
-    dst_cache.start = 0;
-    dst_cache.end = -1;
-  }
+function TimeStringUTC(date) {
+  return TwoDigitString(UTC_HOUR(date)) + ':'
+      + TwoDigitString(UTC_MIN(date)) + ':'
+      + TwoDigitString(UTC_SEC(date));
+}
 
-  var timezoneOffset =
-      (DaylightSavingsOffset(time) + local_time_offset) / msPerMinute;
+
+function LocalTimezoneString(date) {
+  var timezone = LocalTimezone(UTC_DATE_VALUE(date));
+
+  var timezoneOffset = -TIMEZONE_OFFSET(date);
   var sign = (timezoneOffset >= 0) ? 1 : -1;
   var hours = FLOOR((sign * timezoneOffset)/60);
   var min   = FLOOR((sign * timezoneOffset)%60);
@@ -521,8 +264,8 @@
 }
 
 
-function DatePrintString(time) {
-  return DateString(time) + ' ' + TimeString(time);
+function DatePrintString(date) {
+  return DateString(date) + ' ' + TimeString(date);
 }
 
 // -------------------------------------------------------------------
@@ -557,11 +300,12 @@
   minutes = argc > 4 ? ToNumber(minutes) : 0;
   seconds = argc > 5 ? ToNumber(seconds) : 0;
   ms = argc > 6 ? ToNumber(ms) : 0;
-  year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
-      ? 1900 + TO_INTEGER(year) : year;
+  year = (!NUMBER_IS_NAN(year) &&
+          0 <= TO_INTEGER(year) &&
+          TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
   var day = MakeDay(year, month, date);
   var time = MakeTime(hours, minutes, seconds, ms);
-  return %_SetValueOf(this, TimeClip(MakeDate(day, time)));
+  return TimeClip(MakeDate(day, time));
 }
 
 
@@ -574,27 +318,30 @@
 
 // ECMA 262 - 15.9.5.2
 function DateToString() {
-  var t = DATE_VALUE(this);
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
-  var time_zone_string = LocalTimezoneString(t);  // May update local offset.
-  return DatePrintString(LocalTimeNoCheck(t)) + time_zone_string;
+  var time_zone_string = LocalTimezoneString(this);
+  return DatePrintString(this) + time_zone_string;
 }
 
 
 // ECMA 262 - 15.9.5.3
 function DateToDateString() {
-  var t = DATE_VALUE(this);
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
-  return DateString(LocalTimeNoCheck(t));
+  return DateString(this);
 }
 
 
 // ECMA 262 - 15.9.5.4
 function DateToTimeString() {
-  var t = DATE_VALUE(this);
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
-  var time_zone_string = LocalTimezoneString(t);  // May update local offset.
-  return TimeString(LocalTimeNoCheck(t)) + time_zone_string;
+  var time_zone_string = LocalTimezoneString(this);
+  return TimeString(this) + time_zone_string;
 }
 
 
@@ -606,357 +353,388 @@
 
 // ECMA 262 - 15.9.5.6
 function DateToLocaleDateString() {
-  var t = DATE_VALUE(this);
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
-  return LongDateString(LocalTimeNoCheck(t));
+  return LongDateString(this);
 }
 
 
 // ECMA 262 - 15.9.5.7
 function DateToLocaleTimeString() {
-  var t = DATE_VALUE(this);
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
-  var lt = LocalTimeNoCheck(t);
-  return TimeString(lt);
+  return TimeString(this);
 }
 
 
 // ECMA 262 - 15.9.5.8
 function DateValueOf() {
-  return DATE_VALUE(this);
+  CHECK_DATE(this);
+  return UTC_DATE_VALUE(this);
 }
 
 
 // ECMA 262 - 15.9.5.9
 function DateGetTime() {
-  return DATE_VALUE(this);
+  CHECK_DATE(this);
+  return UTC_DATE_VALUE(this);
 }
 
 
 // ECMA 262 - 15.9.5.10
 function DateGetFullYear() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  var cache = Date_cache;
-  if (cache.time === t) return cache.year;
-  return YearFromTime(LocalTimeNoCheck(t));
+  CHECK_DATE(this);
+  return LOCAL_YEAR(this);
 }
 
 
 // ECMA 262 - 15.9.5.11
 function DateGetUTCFullYear() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  return YearFromTime(t);
+  CHECK_DATE(this);
+  return UTC_YEAR(this);
 }
 
 
 // ECMA 262 - 15.9.5.12
 function DateGetMonth() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  return MonthFromTime(LocalTimeNoCheck(t));
+  CHECK_DATE(this);
+  return LOCAL_MONTH(this);
 }
 
 
 // ECMA 262 - 15.9.5.13
 function DateGetUTCMonth() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  return MonthFromTime(t);
+  CHECK_DATE(this);
+  return UTC_MONTH(this);
 }
 
 
 // ECMA 262 - 15.9.5.14
 function DateGetDate() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  return DateFromTime(LocalTimeNoCheck(t));
+  CHECK_DATE(this);
+  return LOCAL_DAY(this);
 }
 
 
 // ECMA 262 - 15.9.5.15
 function DateGetUTCDate() {
-  var t = DATE_VALUE(this);
-  return NAN_OR_DATE_FROM_TIME(t);
+  CHECK_DATE(this);
+  return UTC_DAY(this);
 }
 
 
 // ECMA 262 - 15.9.5.16
 function DateGetDay() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  return WeekDay(LocalTimeNoCheck(t));
+  CHECK_DATE(this);
+  return LOCAL_WEEKDAY(this);
 }
 
 
 // ECMA 262 - 15.9.5.17
 function DateGetUTCDay() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  return WeekDay(t);
+  CHECK_DATE(this);
+  return UTC_WEEKDAY(this);
 }
 
 
 // ECMA 262 - 15.9.5.18
 function DateGetHours() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  return HOUR_FROM_TIME(LocalTimeNoCheck(t));
+  CHECK_DATE(this);
+  return LOCAL_HOUR(this);
 }
 
 
 // ECMA 262 - 15.9.5.19
 function DateGetUTCHours() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  return HOUR_FROM_TIME(t);
+  CHECK_DATE(this);
+  return UTC_HOUR(this);
 }
 
 
 // ECMA 262 - 15.9.5.20
 function DateGetMinutes() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  return MIN_FROM_TIME(LocalTimeNoCheck(t));
+  CHECK_DATE(this);
+  return LOCAL_MIN(this);
 }
 
 
 // ECMA 262 - 15.9.5.21
 function DateGetUTCMinutes() {
-  var t = DATE_VALUE(this);
-  return NAN_OR_MIN_FROM_TIME(t);
+  CHECK_DATE(this);
+  return UTC_MIN(this);
 }
 
 
 // ECMA 262 - 15.9.5.22
 function DateGetSeconds() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  return SEC_FROM_TIME(LocalTimeNoCheck(t));
+  CHECK_DATE(this);
+  return LOCAL_SEC(this);
 }
 
 
 // ECMA 262 - 15.9.5.23
 function DateGetUTCSeconds() {
-  var t = DATE_VALUE(this);
-  return NAN_OR_SEC_FROM_TIME(t);
+  CHECK_DATE(this);
+  return UTC_SEC(this);
 }
 
 
 // ECMA 262 - 15.9.5.24
 function DateGetMilliseconds() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  return MS_FROM_TIME(LocalTimeNoCheck(t));
+  CHECK_DATE(this);
+  return LOCAL_MS(this);
 }
 
 
 // ECMA 262 - 15.9.5.25
 function DateGetUTCMilliseconds() {
-  var t = DATE_VALUE(this);
-  return NAN_OR_MS_FROM_TIME(t);
+  CHECK_DATE(this);
+  return UTC_MS(this);
 }
 
 
 // ECMA 262 - 15.9.5.26
 function DateGetTimezoneOffset() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return t;
-  return (t - LocalTimeNoCheck(t)) / msPerMinute;
+  CHECK_DATE(this);
+  return TIMEZONE_OFFSET(this);
 }
 
 
 // ECMA 262 - 15.9.5.27
 function DateSetTime(ms) {
-  if (!IS_DATE(this)) ThrowDateTypeError();
-  return %_SetValueOf(this, TimeClip(ToNumber(ms)));
+  CHECK_DATE(this);
+  SET_UTC_DATE_VALUE(this, ToNumber(ms));
+  return UTC_DATE_VALUE(this);
 }
 
 
 // ECMA 262 - 15.9.5.28
 function DateSetMilliseconds(ms) {
-  var t = LocalTime(DATE_VALUE(this));
+  CHECK_DATE(this);
+  var t = LOCAL_DATE_VALUE(this);
   ms = ToNumber(ms);
-  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
-  return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
+  var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), LOCAL_SEC(this), ms);
+  SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
+  return this;
 }
 
 
 // ECMA 262 - 15.9.5.29
 function DateSetUTCMilliseconds(ms) {
-  var t = DATE_VALUE(this);
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   ms = ToNumber(ms);
-  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
-  return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
+  var time = MakeTime(UTC_HOUR(this),
+                      UTC_MIN(this),
+                      UTC_SEC(this),
+                      ms);
+  return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
 }
 
 
 // ECMA 262 - 15.9.5.30
 function DateSetSeconds(sec, ms) {
-  var t = LocalTime(DATE_VALUE(this));
+  CHECK_DATE(this);
+  var t = LOCAL_DATE_VALUE(this);
   sec = ToNumber(sec);
-  ms = %_ArgumentsLength() < 2 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
-  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
-  return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
+  ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : ToNumber(ms);
+  var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), sec, ms);
+  return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
 }
 
 
 // ECMA 262 - 15.9.5.31
 function DateSetUTCSeconds(sec, ms) {
-  var t = DATE_VALUE(this);
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   sec = ToNumber(sec);
-  ms = %_ArgumentsLength() < 2 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
-  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
-  return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
+  ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : ToNumber(ms);
+  var time = MakeTime(UTC_HOUR(this), UTC_MIN(this), sec, ms);
+  return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
 }
 
 
 // ECMA 262 - 15.9.5.33
 function DateSetMinutes(min, sec, ms) {
-  var t = LocalTime(DATE_VALUE(this));
+  CHECK_DATE(this);
+  var t = LOCAL_DATE_VALUE(this);
   min = ToNumber(min);
   var argc = %_ArgumentsLength();
-  sec = argc < 2 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
-  ms = argc < 3 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
-  var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
-  return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
+  sec = argc < 2 ? LOCAL_SEC(this) : ToNumber(sec);
+  ms = argc < 3 ? LOCAL_MS(this) : ToNumber(ms);
+  var time = MakeTime(LOCAL_HOUR(this), min, sec, ms);
+  return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
 }
 
 
 // ECMA 262 - 15.9.5.34
 function DateSetUTCMinutes(min, sec, ms) {
-  var t = DATE_VALUE(this);
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   min = ToNumber(min);
   var argc = %_ArgumentsLength();
-  sec = argc < 2 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
-  ms = argc < 3 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
-  var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
-  return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
+  sec = argc < 2 ? UTC_SEC(this) : ToNumber(sec);
+  ms = argc < 3 ? UTC_MS(this) : ToNumber(ms);
+  var time = MakeTime(UTC_HOUR(this), min, sec, ms);
+  return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
 }
 
 
 // ECMA 262 - 15.9.5.35
 function DateSetHours(hour, min, sec, ms) {
-  var t = LocalTime(DATE_VALUE(this));
+  CHECK_DATE(this);
+  var t = LOCAL_DATE_VALUE(this);
   hour = ToNumber(hour);
   var argc = %_ArgumentsLength();
-  min = argc < 2 ? NAN_OR_MIN_FROM_TIME(t) : ToNumber(min);
-  sec = argc < 3 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
-  ms = argc < 4 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
+  min = argc < 2 ? LOCAL_MIN(this) : ToNumber(min);
+  sec = argc < 3 ? LOCAL_SEC(this) : ToNumber(sec);
+  ms = argc < 4 ? LOCAL_MS(this) : ToNumber(ms);
   var time = MakeTime(hour, min, sec, ms);
-  return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
+  return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
 }
 
 
 // ECMA 262 - 15.9.5.34
 function DateSetUTCHours(hour, min, sec, ms) {
-  var t = DATE_VALUE(this);
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   hour = ToNumber(hour);
   var argc = %_ArgumentsLength();
-  min = argc < 2 ? NAN_OR_MIN_FROM_TIME(t) : ToNumber(min);
-  sec = argc < 3 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
-  ms = argc < 4 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
+  min = argc < 2 ? UTC_MIN(this) : ToNumber(min);
+  sec = argc < 3 ? UTC_SEC(this) : ToNumber(sec);
+  ms = argc < 4 ? UTC_MS(this) : ToNumber(ms);
   var time = MakeTime(hour, min, sec, ms);
-  return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
+  return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
 }
 
 
 // ECMA 262 - 15.9.5.36
 function DateSetDate(date) {
-  var t = LocalTime(DATE_VALUE(this));
+  CHECK_DATE(this);
+  var t = LOCAL_DATE_VALUE(this);
   date = ToNumber(date);
-  var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
-  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+  var day = MakeDay(LOCAL_YEAR(this), LOCAL_MONTH(this), date);
+  return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
 }
 
 
 // ECMA 262 - 15.9.5.37
 function DateSetUTCDate(date) {
-  var t = DATE_VALUE(this);
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   date = ToNumber(date);
-  var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
-  return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
+  var day = MakeDay(UTC_YEAR(this), UTC_MONTH(this), date);
+  return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
 }
 
 
 // ECMA 262 - 15.9.5.38
 function DateSetMonth(month, date) {
-  var t = LocalTime(DATE_VALUE(this));
+  CHECK_DATE(this);
+  var t = LOCAL_DATE_VALUE(this);
   month = ToNumber(month);
-  date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
-  var day = MakeDay(YearFromTime(t), month, date);
-  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+  date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : ToNumber(date);
+  var day = MakeDay(LOCAL_YEAR(this), month, date);
+  return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
 }
 
 
 // ECMA 262 - 15.9.5.39
 function DateSetUTCMonth(month, date) {
-  var t = DATE_VALUE(this);
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   month = ToNumber(month);
-  date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
-  var day = MakeDay(YearFromTime(t), month, date);
-  return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
+  date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : ToNumber(date);
+  var day = MakeDay(UTC_YEAR(this), month, date);
+  return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
 }
 
 
 // ECMA 262 - 15.9.5.40
 function DateSetFullYear(year, month, date) {
-  var t = DATE_VALUE(this);
-  t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
+  CHECK_DATE(this);
+  var t = LOCAL_DATE_VALUE(this);
   year = ToNumber(year);
   var argc = %_ArgumentsLength();
-  month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
-  date = argc < 3 ? DateFromTime(t) : ToNumber(date);
+  var time;
+  if (NUMBER_IS_NAN(t)) {
+    month = argc < 2 ? 0 : ToNumber(month);
+    date = argc < 3 ? 1 : ToNumber(date);
+    time = 0;
+  } else {
+    month = argc < 2 ? LOCAL_MONTH(this) : ToNumber(month);
+    date = argc < 3 ? LOCAL_DAY(this) : ToNumber(date);
+    time = LOCAL_TIME_IN_DAY(this);
+  }
   var day = MakeDay(year, month, date);
-  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+  return SET_LOCAL_DATE_VALUE(this, MakeDate(day, time));
 }
 
 
 // ECMA 262 - 15.9.5.41
 function DateSetUTCFullYear(year, month, date) {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) t = 0;
-  var argc = %_ArgumentsLength();
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   year = ToNumber(year);
-  month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
-  date = argc < 3 ? DateFromTime(t) : ToNumber(date);
+  var argc = %_ArgumentsLength();
+  var time;
+  if (NUMBER_IS_NAN(t)) {
+    month = argc < 2 ? 0 : ToNumber(month);
+    date = argc < 3 ? 1 : ToNumber(date);
+    time = 0;
+  } else {
+    month = argc < 2 ? UTC_MONTH(this) : ToNumber(month);
+    date = argc < 3 ? UTC_DAY(this) : ToNumber(date);
+    time = UTC_TIME_IN_DAY(this);
+  }
   var day = MakeDay(year, month, date);
-  return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
+  return SET_UTC_DATE_VALUE(this, MakeDate(day, time));
 }
 
 
 // ECMA 262 - 15.9.5.42
 function DateToUTCString() {
-  var t = DATE_VALUE(this);
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
   // Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
-  return WeekDays[WeekDay(t)] + ', '
-      + TwoDigitString(DateFromTime(t)) + ' '
-      + Months[MonthFromTime(t)] + ' '
-      + YearFromTime(t) + ' '
-      + TimeString(t) + ' GMT';
+  return WeekDays[UTC_WEEKDAY(this)] + ', '
+      + TwoDigitString(UTC_DAY(this)) + ' '
+      + Months[UTC_MONTH(this)] + ' '
+      + UTC_YEAR(this) + ' '
+      + TimeStringUTC(this) + ' GMT';
 }
 
 
 // ECMA 262 - B.2.4
 function DateGetYear() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return $NaN;
-  return YearFromTime(LocalTimeNoCheck(t)) - 1900;
+  CHECK_DATE(this);
+  return LOCAL_YEAR(this) - 1900;
 }
 
 
 // ECMA 262 - B.2.5
 function DateSetYear(year) {
-  var t = LocalTime(DATE_VALUE(this));
-  if (NUMBER_IS_NAN(t)) t = 0;
+  CHECK_DATE(this);
   year = ToNumber(year);
-  if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
+  if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, $NaN);
   year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
       ? 1900 + TO_INTEGER(year) : year;
-  var day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
-  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+  var t = LOCAL_DATE_VALUE(this);
+  var month, date, time;
+  if (NUMBER_IS_NAN(t)) {
+    month = 0;
+    date = 1;
+    time = 0;
+  } else {
+    month = LOCAL_MONTH(this);
+    date = LOCAL_DAY(this);
+    time = LOCAL_TIME_IN_DAY(this);
+  }
+  var day = MakeDay(year, month, date);
+  return SET_LOCAL_DATE_VALUE(this, MakeDate(day, time));
 }
 
 
@@ -978,9 +756,11 @@
 }
 
 
+// ECMA 262 - 15.9.5.43
 function DateToISOString() {
-  var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  CHECK_DATE(this);
+  var t = UTC_DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) throw MakeRangeError("invalid_time_value", []);
   var year = this.getUTCFullYear();
   var year_string;
   if (year >= 0 && year <= 9999) {
@@ -1014,34 +794,13 @@
 
 
 function ResetDateCache() {
-
-  // Reset the local_time_offset:
-  local_time_offset = %DateLocalTimeOffset();
-
-  // Reset the DST offset cache:
-  var cache = DST_offset_cache;
-  cache.offset = 0;
-  cache.start = 0;
-  cache.end = -1;
-  cache.increment = 0;
-  cache.initial_increment = 19 * msPerDay;
-
   // Reset the timezone cache:
   timezone_cache_time = $NaN;
   timezone_cache_timezone = undefined;
 
-  // Reset the ltcache:
-  ltcache.key = null;
-  ltcache.val = null;
-
-  // Reset the ymd_from_time_cache:
-  ymd_from_time_cache = [$NaN, $NaN, $NaN];
-  ymd_from_time_cached_time = $NaN;
-
   // Reset the date cache:
   cache = Date_cache;
   cache.time = $NaN;
-  cache.year = $NaN;
   cache.string = null;
 }
 
@@ -1062,7 +821,7 @@
 
   // Set up non-enumerable functions of the Date prototype object and
   // set their names.
-  InstallFunctionsOnHiddenPrototype($Date.prototype, DONT_ENUM, $Array(
+  InstallFunctions($Date.prototype, DONT_ENUM, $Array(
     "toString", DateToString,
     "toDateString", DateToDateString,
     "toTimeString", DateToTimeString,
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 591d0b3..511663d 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -229,8 +229,6 @@
 
 
 const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
-const int DebuggerAgentUtil::kContentLengthSize =
-    StrLength(kContentLength);
 
 
 SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
@@ -374,8 +372,11 @@
 
   // Calculate the message size in UTF-8 encoding.
   int utf8_len = 0;
+  int previous = unibrow::Utf16::kNoPreviousCharacter;
   for (int i = 0; i < message.length(); i++) {
-    utf8_len += unibrow::Utf8::Length(message[i]);
+    uint16_t character = message[i];
+    utf8_len += unibrow::Utf8::Length(character, previous);
+    previous = character;
   }
 
   // Send the header.
@@ -390,17 +391,33 @@
 
   // Send message body as UTF-8.
   int buffer_position = 0;  // Current buffer position.
+  previous = unibrow::Utf16::kNoPreviousCharacter;
   for (int i = 0; i < message.length(); i++) {
     // Write next UTF-8 encoded character to buffer.
+    uint16_t character = message[i];
     buffer_position +=
-        unibrow::Utf8::Encode(buffer + buffer_position, message[i]);
+        unibrow::Utf8::Encode(buffer + buffer_position, character, previous);
     ASSERT(buffer_position < kBufferSize);
 
     // Send buffer if full or last character is encoded.
-    if (kBufferSize - buffer_position < 3 || i == message.length() - 1) {
-      conn->Send(buffer, buffer_position);
-      buffer_position = 0;
+    if (kBufferSize - buffer_position <
+          unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit ||
+        i == message.length() - 1) {
+      if (unibrow::Utf16::IsLeadSurrogate(character)) {
+        const int kEncodedSurrogateLength =
+            unibrow::Utf16::kUtf8BytesToCodeASurrogate;
+        ASSERT(buffer_position >= kEncodedSurrogateLength);
+        conn->Send(buffer, buffer_position - kEncodedSurrogateLength);
+        for (int i = 0; i < kEncodedSurrogateLength; i++) {
+          buffer[i] = buffer[buffer_position + i];
+        }
+        buffer_position = kEncodedSurrogateLength;
+      } else {
+        conn->Send(buffer, buffer_position);
+        buffer_position = 0;
+      }
     }
+    previous = character;
   }
 
   return true;
diff --git a/src/debug-agent.h b/src/debug-agent.h
index a07fb0f..6115190 100644
--- a/src/debug-agent.h
+++ b/src/debug-agent.h
@@ -115,7 +115,6 @@
 class DebuggerAgentUtil {
  public:
   static const char* const kContentLength;
-  static const int kContentLengthSize;
 
   static SmartArrayPointer<char> ReceiveMessage(const Socket* conn);
   static bool SendConnectMessage(const Socket* conn,
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index d254ee5..802f622 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -26,14 +26,14 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Default number of frames to include in the response to backtrace request.
-const kDefaultBacktraceLength = 10;
+var kDefaultBacktraceLength = 10;
 
-const Debug = {};
+var Debug = {};
 
 // Regular expression to skip "crud" at the beginning of a source line which is
 // not really code. Currently the regular expression matches whitespace and
 // comments.
-const sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
+var sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
 
 // Debug events which can occour in the V8 JavaScript engine. These originate
 // from the API include file debug.h.
@@ -286,7 +286,7 @@
   copy.condition_ = this.condition_;
   copy.ignoreCount_ = this.ignoreCount_;
   return copy;
-}
+};
 
 
 ScriptBreakPoint.prototype.number = function() {
@@ -335,13 +335,13 @@
     locations.push(this.break_points_[i].actual_location);
   }
   return locations;
-}
+};
 
 
 ScriptBreakPoint.prototype.update_positions = function(line, column) {
   this.line_ = line;
   this.column_ = column;
-}
+};
 
 
 ScriptBreakPoint.prototype.hit_count = function() {
@@ -477,9 +477,11 @@
 // break points set in this script.
 function UpdateScriptBreakPoints(script) {
   for (var i = 0; i < script_break_points.length; i++) {
-    if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName &&
-        script_break_points[i].matchesScript(script)) {
-      script_break_points[i].set(script);
+    var break_point = script_break_points[i];
+    if ((break_point.type() == Debug.ScriptBreakPointType.ScriptName ||
+         break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) &&
+        break_point.matchesScript(script)) {
+      break_point.set(script);
     }
   }
 }
@@ -585,7 +587,7 @@
   var script = %FunctionGetScript(func);
   var script_offset = %FunctionGetScriptSourcePosition(func);
   return script.locationFromLine(opt_line, opt_column, script_offset);
-}
+};
 
 
 // Returns the character position in a script based on a line number and an
@@ -593,7 +595,7 @@
 Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
   var location = script.locationFromLine(opt_line, opt_column);
   return location ? location.position : null;
-}
+};
 
 
 Debug.findBreakPoint = function(break_point_number, remove) {
@@ -627,7 +629,7 @@
     }
   }
   return [];
-}
+};
 
 Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
   if (!IS_FUNCTION(func)) throw new Error('Parameters have wrong types.');
@@ -677,8 +679,9 @@
 {
   break_point = MakeBreakPoint(position);
   break_point.setCondition(condition);
-  if (!enabled)
+  if (!enabled) {
     break_point.disable();
+  }
   var scripts = this.scripts();
   for (var i = 0; i < scripts.length; i++) {
     if (script_id == scripts[i].id) {
@@ -771,7 +774,7 @@
     }
   }
   return script_break_point;
-}
+};
 
 
 // Sets a breakpoint in a script identified through id or name at the
@@ -799,7 +802,7 @@
   }
 
   return script_break_point.number();
-}
+};
 
 
 Debug.setScriptBreakPointById = function(script_id,
@@ -808,7 +811,7 @@
   return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
                                   script_id, opt_line, opt_column,
                                   opt_condition, opt_groupId);
-}
+};
 
 
 Debug.setScriptBreakPointByName = function(script_name,
@@ -817,7 +820,7 @@
   return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName,
                                   script_name, opt_line, opt_column,
                                   opt_condition, opt_groupId);
-}
+};
 
 
 Debug.setScriptBreakPointByRegExp = function(script_regexp,
@@ -826,7 +829,7 @@
   return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptRegExp,
                                   script_regexp, opt_line, opt_column,
                                   opt_condition, opt_groupId);
-}
+};
 
 
 Debug.enableScriptBreakPoint = function(break_point_number) {
@@ -841,13 +844,15 @@
 };
 
 
-Debug.changeScriptBreakPointCondition = function(break_point_number, condition) {
+Debug.changeScriptBreakPointCondition = function(
+    break_point_number, condition) {
   var script_break_point = this.findScriptBreakPoint(break_point_number, false);
   script_break_point.setCondition(condition);
 };
 
 
-Debug.changeScriptBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
+Debug.changeScriptBreakPointIgnoreCount = function(
+    break_point_number, ignoreCount) {
   if (ignoreCount < 0) {
     throw new Error('Invalid argument');
   }
@@ -858,12 +863,12 @@
 
 Debug.scriptBreakPoints = function() {
   return script_break_points;
-}
+};
 
 
 Debug.clearStepping = function() {
   %ClearStepping();
-}
+};
 
 Debug.setBreakOnException = function() {
   return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, true);
@@ -940,7 +945,7 @@
   var count = opt_count ? %ToNumber(opt_count) : 1;
 
   return %PrepareStep(this.break_id, action, count);
-}
+};
 
 ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
     opt_additional_context) {
@@ -960,8 +965,9 @@
 ExecutionState.prototype.frame = function(opt_index) {
   // If no index supplied return the selected frame.
   if (opt_index == null) opt_index = this.selected_frame;
-  if (opt_index < 0 || opt_index >= this.frameCount())
+  if (opt_index < 0 || opt_index >= this.frameCount()) {
     throw new Error('Illegal frame index.');
+  }
   return new FrameMirror(this.break_id, opt_index);
 };
 
@@ -1088,12 +1094,12 @@
 
 ExceptionEvent.prototype.exception = function() {
   return this.exception_;
-}
+};
 
 
 ExceptionEvent.prototype.uncaught = function() {
   return this.uncaught_;
-}
+};
 
 
 ExceptionEvent.prototype.func = function() {
@@ -1185,7 +1191,7 @@
   o.body.script = this.script_;
 
   return o.toJSONProtocol();
-}
+};
 
 
 function MakeNewFunctionEvent(func) {
@@ -1241,7 +1247,7 @@
   o.body = {};
   o.body.script = { id: this.id() };
   return o.toJSONProtocol();
-}
+};
 
 
 function MakeScriptObject_(script, include_source) {
@@ -1258,18 +1264,18 @@
     o.source = script.source();
   }
   return o;
-};
+}
 
 
 function DebugCommandProcessor(exec_state, opt_is_running) {
   this.exec_state_ = exec_state;
   this.running_ = opt_is_running || false;
-};
+}
 
 
 DebugCommandProcessor.prototype.processDebugRequest = function (request) {
   return this.processDebugJSONRequest(request);
-}
+};
 
 
 function ProtocolMessage(request) {
@@ -1297,13 +1303,13 @@
     this.options_ = {};
   }
   this.options_[name] = value;
-}
+};
 
 
 ProtocolMessage.prototype.failed = function(message) {
   this.success = false;
   this.message = message;
-}
+};
 
 
 ProtocolMessage.prototype.toJSONProtocol = function() {
@@ -1351,7 +1357,7 @@
   }
   json.running = this.running;
   return JSON.stringify(json);
-}
+};
 
 
 DebugCommandProcessor.prototype.createResponse = function(request) {
@@ -1359,7 +1365,8 @@
 };
 
 
-DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) {
+DebugCommandProcessor.prototype.processDebugJSONRequest = function(
+    json_request) {
   var request;  // Current request.
   var response;  // Generated response.
   try {
@@ -1541,7 +1548,7 @@
       }
     }
 
-    // Setup the VM for stepping.
+    // Set up the VM for stepping.
     this.exec_state_.prepareStep(action, count);
   }
 
@@ -1646,7 +1653,7 @@
 
   // Add the break point number to the response.
   response.body = { type: type,
-                    breakpoint: break_point_number }
+                    breakpoint: break_point_number };
 
   // Add break point information to the response.
   if (break_point instanceof ScriptBreakPoint) {
@@ -1660,7 +1667,8 @@
       response.body.type = 'scriptRegExp';
       response.body.script_regexp = break_point.script_regexp_object().source;
     } else {
-      throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
+      throw new Error("Internal error: Unexpected breakpoint type: " +
+                      break_point.type());
     }
     response.body.line = break_point.line();
     response.body.column = break_point.column();
@@ -1672,7 +1680,8 @@
 };
 
 
-DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(
+    request, response) {
   // Check for legal request.
   if (!request.arguments) {
     response.failed('Missing arguments');
@@ -1709,10 +1718,11 @@
   if (!IS_UNDEFINED(ignoreCount)) {
     Debug.changeBreakPointIgnoreCount(break_point, ignoreCount);
   }
-}
+};
 
 
-DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(
+    request, response) {
   // Check for legal request.
   if (!request.arguments) {
     response.failed('Missing arguments');
@@ -1743,10 +1753,11 @@
 
   // Add the cleared break point numbers to the response.
   response.body = { breakpoints: cleared_break_points };
-}
+};
 
 
-DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(
+    request, response) {
   // Check for legal request.
   if (!request.arguments) {
     response.failed('Missing arguments');
@@ -1766,11 +1777,12 @@
   Debug.clearBreakPoint(break_point);
 
   // Add the cleared break point number to the response.
-  response.body = { breakpoint: break_point }
-}
+  response.body = { breakpoint: break_point };
+};
 
 
-DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(
+    request, response) {
   var array = [];
   for (var i = 0; i < script_break_points.length; i++) {
     var break_point = script_break_points[i];
@@ -1785,7 +1797,7 @@
       condition: break_point.condition(),
       ignoreCount: break_point.ignoreCount(),
       actual_locations: break_point.actual_locations()
-    }
+    };
 
     if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
       description.type = 'scriptId';
@@ -1797,7 +1809,8 @@
       description.type = 'scriptRegExp';
       description.script_regexp = break_point.script_regexp_object().source;
     } else {
-      throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
+      throw new Error("Internal error: Unexpected breakpoint type: " +
+                      break_point.type());
     }
     array.push(description);
   }
@@ -1806,15 +1819,15 @@
     breakpoints: array,
     breakOnExceptions: Debug.isBreakOnException(),
     breakOnUncaughtExceptions: Debug.isBreakOnUncaughtException()
-  }
-}
+  };
+};
 
 
 DebugCommandProcessor.prototype.disconnectRequest_ =
     function(request, response) {
   Debug.disableAllBreakPoints();
   this.continueRequest_(request, response);
-}
+};
 
 
 DebugCommandProcessor.prototype.setExceptionBreakRequest_ =
@@ -1859,10 +1872,11 @@
 
   // Add the cleared break point number to the response.
   response.body = { 'type': type, 'enabled': enabled };
-}
+};
 
 
-DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.backtraceRequest_ = function(
+    request, response) {
   // Get the number of frames.
   var total_frames = this.exec_state_.frameCount();
 
@@ -1870,12 +1884,12 @@
   if (total_frames == 0) {
     response.body = {
       totalFrames: total_frames
-    }
+    };
     return;
   }
 
   // Default frame range to include in backtrace.
-  var from_index = 0
+  var from_index = 0;
   var to_index = kDefaultBacktraceLength;
 
   // Get the range from the arguments.
@@ -1888,7 +1902,7 @@
     }
     if (request.arguments.bottom) {
       var tmp_index = total_frames - from_index;
-      from_index = total_frames - to_index
+      from_index = total_frames - to_index;
       to_index = tmp_index;
     }
     if (from_index < 0 || to_index < 0) {
@@ -1914,7 +1928,7 @@
     toFrame: to_index,
     totalFrames: total_frames,
     frames: frames
-  }
+  };
 };
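
When request.arguments.bottom is set, the fromFrame/toFrame window above is mirrored so it counts from the bottom of the stack instead of the top. A standalone sketch of that index arithmetic (ComputeRange is a hypothetical helper, not part of the protocol code):

    #include <cassert>

    // Mirror a [from, to) frame window when counting from the bottom of
    // the stack, as backtraceRequest_ does above.
    static void ComputeRange(int total_frames, bool bottom, int* from, int* to) {
      if (bottom) {
        int tmp = total_frames - *from;
        *from = total_frames - *to;
        *to = tmp;
      }
    }

    int main() {
      int from = 2, to = 5;
      ComputeRange(10, true, &from, &to);
      assert(from == 5 && to == 8);  // the window [2, 5) measured from the bottom
      return 0;
    }
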
 
 
@@ -1938,8 +1952,8 @@
 
 
 DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
-  // Get the frame for which the scope or scopes are requested. With no frameNumber
-  // argument use the currently selected frame.
+  // Get the frame for which the scope or scopes are requested.
+  // With no frameNumber argument use the currently selected frame.
   if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
     frame_index = request.arguments.frameNumber;
     if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
@@ -1949,7 +1963,7 @@
   } else {
     return this.exec_state_.frame();
   }
-}
+};
 
 
 DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
@@ -1972,7 +1986,7 @@
     toScope: total_scopes,
     totalScopes: total_scopes,
     scopes: scopes
-  }
+  };
 };
 
 
@@ -2217,7 +2231,8 @@
     if (!IS_UNDEFINED(request.arguments.types)) {
       types = %ToNumber(request.arguments.types);
       if (isNaN(types) || types < 0) {
-        return response.failed('Invalid types "' + request.arguments.types + '"');
+        return response.failed('Invalid types "' +
+                               request.arguments.types + '"');
       }
     }
 
@@ -2286,7 +2301,7 @@
     var details = %GetThreadDetails(this.exec_state_.break_id, i);
     var thread_info = { current: details[0],
                         id: details[1]
-                      }
+                      };
     threads.push(thread_info);
   }
 
@@ -2294,7 +2309,7 @@
   response.body = {
     totalThreads: total_threads,
     threads: threads
-  }
+  };
 };
 
 
@@ -2306,7 +2321,7 @@
 DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
   response.body = {
     V8Version: %GetV8Version()
-  }
+  };
 };
 
 
@@ -2322,7 +2337,8 @@
 };
 
 
-DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.changeLiveRequest_ = function(
+    request, response) {
   if (!Debug.LiveEdit) {
     return response.failed('LiveEdit feature is not supported');
   }
@@ -2393,7 +2409,7 @@
       response.body.flags.push({ name: name, value: value });
     }
   }
-}
+};
 
 
 DebugCommandProcessor.prototype.v8FlagsRequest_ = function(request, response) {
@@ -2499,7 +2515,7 @@
 // running.
 DebugCommandProcessor.prototype.isRunning = function() {
   return this.running_;
-}
+};
 
 
 DebugCommandProcessor.prototype.systemBreak = function(cmd, args) {
@@ -2515,7 +2531,7 @@
     n = n >>> 4;
   }
   return r;
-};
+}
 
 
 /**
@@ -2591,7 +2607,7 @@
     case 'string':
     case 'number':
       json = value;
-      break
+      break;
 
     default:
       json = null;
diff --git a/src/debug.cc b/src/debug.cc
index 20cd802..f8a1ecf 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,9 +37,11 @@
 #include "debug.h"
 #include "deoptimizer.h"
 #include "execution.h"
+#include "full-codegen.h"
 #include "global-handles.h"
 #include "ic.h"
 #include "ic-inl.h"
+#include "isolate-inl.h"
 #include "list.h"
 #include "messages.h"
 #include "natives.h"
@@ -84,21 +86,9 @@
 }
 
 
-static Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind) {
-  Isolate* isolate = Isolate::Current();
-  CALL_HEAP_FUNCTION(
-      isolate,
-      isolate->stub_cache()->ComputeCallDebugBreak(argc, kind),
-      Code);
-}
-
-
 static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
   Isolate* isolate = Isolate::Current();
-  CALL_HEAP_FUNCTION(
-      isolate,
-      isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind),
-      Code);
+  return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
 }
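
The deleted ComputeCallDebugBreak wrapper only existed to funnel a raw heap allocation through CALL_HEAP_FUNCTION, a macro that retries a failed allocation after a GC before reporting out-of-memory; now that the stub cache hands back a Handle<Code> directly, the wrapper is dead weight. A rough standalone model of that retry idiom (greatly simplified; the real macro operates on MaybeObject results, and these names are illustrative):

    #include <cstddef>

    // Simplified model of the retry-on-failed-allocation idiom behind
    // CALL_HEAP_FUNCTION: attempt the allocation, collect garbage on
    // failure, and try once more before declaring out-of-memory.
    template <typename T>
    T* AllocateWithGCRetry(T* (*allocate)(), void (*collect_garbage)()) {
      T* result = allocate();
      if (result != NULL) return result;
      collect_garbage();   // free up space, then retry once
      return allocate();   // NULL here means a genuine OOM
    }
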
 
 
@@ -401,15 +391,15 @@
   // Step in can only be prepared if currently positioned on an IC call,
   // construct call or CallFunction stub call.
   Address target = rinfo()->target_address();
-  Handle<Code> code(Code::GetCodeFromTargetAddress(target));
-  if (code->is_call_stub() || code->is_keyed_call_stub()) {
+  Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
+  if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) {
     // Step in through IC call is handled by the runtime system. Therefore make
     // sure that the any current IC is cleared and the runtime system is
     // called. If the executing code has a debug break at the location change
     // the call in the original code as it is the code there that will be
     // executed in place of the debug break call.
-    Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(),
-                                                      code->kind());
+    Handle<Code> stub = ComputeCallDebugPrepareStepIn(
+        target_code->arguments_count(), target_code->kind());
     if (IsDebugBreak()) {
       original_rinfo()->set_target_address(stub->entry());
     } else {
@@ -419,7 +409,7 @@
 #ifdef DEBUG
     // All the following stuff is needed only for assertion checks so the code
     // is wrapped in ifdef.
-    Handle<Code> maybe_call_function_stub = code;
+    Handle<Code> maybe_call_function_stub = target_code;
     if (IsDebugBreak()) {
       Address original_target = original_rinfo()->target_address();
       maybe_call_function_stub =
@@ -436,8 +426,9 @@
     // Stepping in through the CallFunction stub should also be prepared by
     // the caller of this function (Debug::PrepareStep), which should flood
     // the target function with breakpoints.
-    ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()
-           || is_call_function_stub);
+    ASSERT(RelocInfo::IsConstructCall(rmode()) ||
+           target_code->is_inline_cache_stub() ||
+           is_call_function_stub);
 #endif
   }
 }
@@ -474,11 +465,11 @@
   RelocInfo::Mode mode = rmode();
   if (RelocInfo::IsCodeTarget(mode)) {
     Address target = rinfo()->target_address();
-    Handle<Code> code(Code::GetCodeFromTargetAddress(target));
+    Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
 
     // Patch the code to invoke the builtin debug break function matching the
     // calling convention used by the call site.
-    Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
+    Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode));
     rinfo()->set_target_address(dbgbrk_code->entry());
   }
 }
@@ -686,7 +677,7 @@
 }
 
 
-void Debug::Setup(bool create_heap_objects) {
+void Debug::SetUp(bool create_heap_objects) {
   ThreadInit();
   if (create_heap_objects) {
     // Get code to handle debug break on return.
@@ -772,19 +763,26 @@
 
   // Execute the shared function in the debugger context.
   Handle<Context> context = isolate->global_context();
-  bool caught_exception = false;
+  bool caught_exception;
   Handle<JSFunction> function =
       factory->NewFunctionFromSharedFunctionInfo(function_info, context);
 
-  Execution::TryCall(function, Handle<Object>(context->global()),
-                     0, NULL, &caught_exception);
+  Handle<Object> exception =
+      Execution::TryCall(function, Handle<Object>(context->global()),
+                         0, NULL, &caught_exception);
 
   // Check for caught exceptions.
   if (caught_exception) {
+    ASSERT(!isolate->has_pending_exception());
+    MessageLocation computed_location;
+    isolate->ComputeLocation(&computed_location);
     Handle<Object> message = MessageHandler::MakeMessageObject(
-        "error_loading_debugger", NULL, Vector<Handle<Object> >::empty(),
-        Handle<String>(), Handle<JSArray>());
+        "error_loading_debugger", &computed_location,
+        Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
+    ASSERT(!isolate->has_pending_exception());
+    isolate->set_pending_exception(*exception);
     MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
+    isolate->clear_pending_exception();
     return false;
   }
 
@@ -822,6 +820,9 @@
           v8::Handle<ObjectTemplate>(),
           NULL);
 
+  // Fail if no context could be created.
+  if (context.is_null()) return false;
+
   // Use the debugger context.
   SaveContext save(isolate_);
   isolate_->set_context(*context);
@@ -831,8 +832,8 @@
   Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
   RETURN_IF_EMPTY_HANDLE_VALUE(
       isolate_,
-      SetProperty(global, key, Handle<Object>(global->builtins()),
-                  NONE, kNonStrictMode),
+      JSReceiver::SetProperty(global, key, Handle<Object>(global->builtins()),
+                              NONE, kNonStrictMode),
       false);
 
   // Compile the JavaScript for the debugger in the debugger context.
@@ -1103,14 +1104,13 @@
   Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
 
   // Call the break point check function.
-  bool caught_exception = false;
-  const int argc = 2;
-  Object** argv[argc] = {
-    break_id.location(),
-    reinterpret_cast<Object**>(break_point_object.location())
-  };
+  bool caught_exception;
+  Handle<Object> argv[] = { break_id, break_point_object };
   Handle<Object> result = Execution::TryCall(check_break_point,
-      isolate_->js_builtins_object(), argc, argv, &caught_exception);
+                                             isolate_->js_builtins_object(),
+                                             ARRAY_SIZE(argv),
+                                             argv,
+                                             &caught_exception);
 
   // If an exception was thrown or the result is not a boolean, handle the
   // break point as not triggered.
   if (caught_exception || !result->IsBoolean()) {
@@ -1151,7 +1151,7 @@
 
   Handle<DebugInfo> debug_info = GetDebugInfo(shared);
   // Source positions start at zero.
-  ASSERT(source_position >= 0);
+  ASSERT(*source_position >= 0);
 
   // Find the break point and change it.
   BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
@@ -1218,7 +1218,7 @@
 
 void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
   PrepareForBreakPoints();
-  // Make sure the function has setup the debug info.
+  // Make sure the function has set up the debug info.
   if (!EnsureDebugInfo(shared)) {
     // Return if we failed to retrieve the debug info.
     return;
@@ -1233,6 +1233,18 @@
 }
 
 
+void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
+  Handle<FixedArray> new_bindings(function->function_bindings());
+  Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex));
+
+  if (!bindee.is_null() && bindee->IsJSFunction() &&
+      !JSFunction::cast(*bindee)->IsBuiltin()) {
+    Handle<SharedFunctionInfo> shared_info(JSFunction::cast(*bindee)->shared());
+    Debug::FloodWithOneShot(shared_info);
+  }
+}
+
+
 void Debug::FloodHandlerWithOneShot() {
   // Iterate through the JavaScript stack looking for handlers.
   StackFrame::Id id = break_frame_id();
@@ -1452,8 +1464,10 @@
           expressions_count - 2 - call_function_arg_count);
       if (fun->IsJSFunction()) {
         Handle<JSFunction> js_function(JSFunction::cast(fun));
-        // Don't step into builtins.
-        if (!js_function->IsBuiltin()) {
+        if (js_function->shared()->bound()) {
+          Debug::FloodBoundFunctionWithOneShot(js_function);
+        } else if (!js_function->IsBuiltin()) {
+          // Don't step into builtins.
           // It will also compile target function if it's not compiled yet.
           FloodWithOneShot(Handle<SharedFunctionInfo>(js_function->shared()));
         }
@@ -1543,40 +1557,47 @@
 
 // Find the builtin to use for invoking the debug break
 Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
+  Isolate* isolate = Isolate::Current();
+
   // Find the builtin debug break function matching the calling convention
   // used by the call site.
   if (code->is_inline_cache_stub()) {
     switch (code->kind()) {
       case Code::CALL_IC:
       case Code::KEYED_CALL_IC:
-        return ComputeCallDebugBreak(code->arguments_count(), code->kind());
+        return isolate->stub_cache()->ComputeCallDebugBreak(
+            code->arguments_count(), code->kind());
 
       case Code::LOAD_IC:
-        return Isolate::Current()->builtins()->LoadIC_DebugBreak();
+        return isolate->builtins()->LoadIC_DebugBreak();
 
       case Code::STORE_IC:
-        return Isolate::Current()->builtins()->StoreIC_DebugBreak();
+        return isolate->builtins()->StoreIC_DebugBreak();
 
       case Code::KEYED_LOAD_IC:
-        return Isolate::Current()->builtins()->KeyedLoadIC_DebugBreak();
+        return isolate->builtins()->KeyedLoadIC_DebugBreak();
 
       case Code::KEYED_STORE_IC:
-        return Isolate::Current()->builtins()->KeyedStoreIC_DebugBreak();
+        return isolate->builtins()->KeyedStoreIC_DebugBreak();
 
       default:
         UNREACHABLE();
     }
   }
   if (RelocInfo::IsConstructCall(mode)) {
-    Handle<Code> result =
-        Isolate::Current()->builtins()->ConstructCall_DebugBreak();
-    return result;
+    if (code->has_function_cache()) {
+      return isolate->builtins()->CallConstructStub_Recording_DebugBreak();
+    } else {
+      return isolate->builtins()->CallConstructStub_DebugBreak();
+    }
   }
   if (code->kind() == Code::STUB) {
     ASSERT(code->major_key() == CodeStub::CallFunction);
-    Handle<Code> result =
-        Isolate::Current()->builtins()->StubNoRegisters_DebugBreak();
-    return result;
+    if (code->has_function_cache()) {
+      return isolate->builtins()->CallFunctionStub_Recording_DebugBreak();
+    } else {
+      return isolate->builtins()->CallFunctionStub_DebugBreak();
+    }
   }
 
   UNREACHABLE();
@@ -1642,8 +1663,11 @@
   // Flood the function with one-shot break points if it is called from where
   // step into was requested.
   if (fp == step_in_fp()) {
-    // Don't allow step into functions in the native context.
-    if (!function->IsBuiltin()) {
+    if (function->shared()->bound()) {
+      // Handle Function.prototype.bind
+      Debug::FloodBoundFunctionWithOneShot(function);
+    } else if (!function->IsBuiltin()) {
+      // Don't allow step into functions in the native context.
       if (function->shared()->code() ==
           Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply) ||
           function->shared()->code() ==
@@ -1726,11 +1750,289 @@
 }
 
 
+// Helper function to compile full code for debugging. This code will
+// have debug break slots and deoptimization information. Deoptimization
+// information is required in case an optimized version of this
+// function is still active on the stack. It will also make sure that
+// the full code is compiled with the same flags as the previous
+// version - that is, flags which can change the generated code. The
+// current method of mapping from already compiled full code without
+// debug break slots to full code with debug break slots depends on the
+// generated code being otherwise exactly the same.
+static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
+                                        Handle<Code> current_code) {
+  ASSERT(!current_code->has_debug_break_slots());
+
+  CompilationInfo info(shared);
+  info.MarkCompilingForDebugging(current_code);
+  ASSERT(!info.shared_info()->is_compiled());
+  ASSERT(!info.isolate()->has_pending_exception());
+
+  // Use compile lazy which will end up compiling the full code in the
+  // configuration configured above.
+  bool result = Compiler::CompileLazy(&info);
+  ASSERT(result != Isolate::Current()->has_pending_exception());
+  info.isolate()->clear_pending_exception();
+#if DEBUG
+  if (result) {
+    Handle<Code> new_code(shared->code());
+    ASSERT(new_code->has_debug_break_slots());
+    ASSERT(current_code->is_compiled_optimizable() ==
+           new_code->is_compiled_optimizable());
+  }
+#endif
+  return result;
+}
+
+
+static void CollectActiveFunctionsFromThread(
+    Isolate* isolate,
+    ThreadLocalTop* top,
+    List<Handle<JSFunction> >* active_functions,
+    Object* active_code_marker) {
+  // Find all non-optimized code functions with activation frames
+  // on the stack. This includes functions which have optimized
+  // activations (including inlined functions) on the stack as the
+  // non-optimized code is needed for the lazy deoptimization.
+  for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+    JavaScriptFrame* frame = it.frame();
+    if (frame->is_optimized()) {
+      List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
+      frame->GetFunctions(&functions);
+      for (int i = 0; i < functions.length(); i++) {
+        JSFunction* function = functions[i];
+        active_functions->Add(Handle<JSFunction>(function));
+        function->shared()->code()->set_gc_metadata(active_code_marker);
+      }
+    } else if (frame->function()->IsJSFunction()) {
+      JSFunction* function = JSFunction::cast(frame->function());
+      ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
+      active_functions->Add(Handle<JSFunction>(function));
+      function->shared()->code()->set_gc_metadata(active_code_marker);
+    }
+  }
+}
+
+
+static void RedirectActivationsToRecompiledCodeOnThread(
+    Isolate* isolate,
+    ThreadLocalTop* top) {
+  for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+    JavaScriptFrame* frame = it.frame();
+
+    if (frame->is_optimized() || !frame->function()->IsJSFunction()) continue;
+
+    JSFunction* function = JSFunction::cast(frame->function());
+
+    ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
+
+    Handle<Code> frame_code(frame->LookupCode());
+    if (frame_code->has_debug_break_slots()) continue;
+
+    Handle<Code> new_code(function->shared()->code());
+    if (new_code->kind() != Code::FUNCTION ||
+        !new_code->has_debug_break_slots()) {
+      continue;
+    }
+
+    intptr_t delta = frame->pc() - frame_code->instruction_start();
+    int debug_break_slot_count = 0;
+    int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
+    for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
+      // Check if the pc in the new code with debug break
+      // slots is before this slot.
+      RelocInfo* info = it.rinfo();
+      int debug_break_slot_bytes =
+          debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+      intptr_t new_delta =
+          info->pc() -
+          new_code->instruction_start() -
+          debug_break_slot_bytes;
+      if (new_delta > delta) {
+        break;
+      }
+
+      // Passed a debug break slot in the full code with debug
+      // break slots.
+      debug_break_slot_count++;
+    }
+    if (frame_code->has_self_optimization_header() &&
+        !new_code->has_self_optimization_header()) {
+      delta -= FullCodeGenerator::self_optimization_header_size();
+    } else {
+      ASSERT(frame_code->has_self_optimization_header() ==
+             new_code->has_self_optimization_header());
+    }
+    int debug_break_slot_bytes =
+        debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+    if (FLAG_trace_deopt) {
+      PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
+             "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
+             "for debugging, "
+             "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
+             reinterpret_cast<intptr_t>(
+                 frame_code->instruction_start()),
+             reinterpret_cast<intptr_t>(
+                 frame_code->instruction_start()) +
+             frame_code->instruction_size(),
+             frame_code->instruction_size(),
+             reinterpret_cast<intptr_t>(new_code->instruction_start()),
+             reinterpret_cast<intptr_t>(new_code->instruction_start()) +
+             new_code->instruction_size(),
+             new_code->instruction_size(),
+             reinterpret_cast<intptr_t>(frame->pc()),
+             reinterpret_cast<intptr_t>(new_code->instruction_start()) +
+             delta + debug_break_slot_bytes);
+    }
+
+    // Patch the return address to return into the code with
+    // debug break slots.
+    frame->set_pc(
+        new_code->instruction_start() + delta + debug_break_slot_bytes);
+  }
+}
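+
+Translating a return address from code without debug break slots into the
+recompiled code is pure offset arithmetic: count how many debug break slots
+precede the old pc offset, then shift the pc by that many slot lengths. A
+standalone model of the computation in the loop above (the slot length and
+offsets are made up):
+
+    #include <cassert>
+    #include <vector>
+
+    // Given the pc offset (delta) into the old code and the offsets of
+    // debug break slots in the new code (measured as if the slots were
+    // removed), compute the pc offset into the new code.
+    static long TranslatePc(long delta,
+                            const std::vector<long>& slot_offsets,
+                            long slot_length) {
+      long slots_before = 0;
+      for (long offset : slot_offsets) {
+        if (offset > delta) break;   // slot lies after the old pc
+        slots_before++;
+      }
+      return delta + slots_before * slot_length;
+    }
+
+    int main() {
+      // Two slots at logical offsets 10 and 30, each 5 bytes long.
+      assert(TranslatePc(20, {10, 30}, 5) == 25);  // one slot precedes pc
+      assert(TranslatePc(40, {10, 30}, 5) == 50);  // both slots precede pc
+      return 0;
+    }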
+
+
+class ActiveFunctionsCollector : public ThreadVisitor {
+ public:
+  explicit ActiveFunctionsCollector(List<Handle<JSFunction> >* active_functions,
+                                    Object* active_code_marker)
+      : active_functions_(active_functions),
+        active_code_marker_(active_code_marker) { }
+
+  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+    CollectActiveFunctionsFromThread(isolate,
+                                     top,
+                                     active_functions_,
+                                     active_code_marker_);
+  }
+
+ private:
+  List<Handle<JSFunction> >* active_functions_;
+  Object* active_code_marker_;
+};
+
+
+class ActiveFunctionsRedirector : public ThreadVisitor {
+ public:
+  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+    RedirectActivationsToRecompiledCodeOnThread(isolate, top);
+  }
+};
+
+
 void Debug::PrepareForBreakPoints() {
   // If preparing for the first break point make sure to deoptimize all
   // functions as debugging does not work with optimized code.
   if (!has_break_points_) {
     Deoptimizer::DeoptimizeAll();
+
+    Handle<Code> lazy_compile =
+        Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
+
+    // Keep the list of activated functions in a handlified list as it
+    // is used both in GC and non-GC code.
+    List<Handle<JSFunction> > active_functions(100);
+
+    {
+      // We are going to iterate the heap to find all functions without
+      // debug break slots.
+      isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                                          "preparing for breakpoints");
+
+      // Ensure no GC in this scope as we are going to use the gc_metadata
+      // field in the Code object to mark active functions.
+      AssertNoAllocation no_allocation;
+
+      Object* active_code_marker = isolate_->heap()->the_hole_value();
+
+      CollectActiveFunctionsFromThread(isolate_,
+                                       isolate_->thread_local_top(),
+                                       &active_functions,
+                                       active_code_marker);
+      ActiveFunctionsCollector active_functions_collector(&active_functions,
+                                                          active_code_marker);
+      isolate_->thread_manager()->IterateArchivedThreads(
+          &active_functions_collector);
+
+      // Scan the heap for all non-optimized functions which have no
+      // debug break slots and are not active or inlined into an active
+      // function and mark them for lazy compilation.
+      HeapIterator iterator;
+      HeapObject* obj = NULL;
+      while ((obj = iterator.next()) != NULL) {
+        if (obj->IsJSFunction()) {
+          JSFunction* function = JSFunction::cast(obj);
+          SharedFunctionInfo* shared = function->shared();
+          if (shared->allows_lazy_compilation() &&
+              shared->script()->IsScript() &&
+              function->code()->kind() == Code::FUNCTION &&
+              !function->code()->has_debug_break_slots() &&
+              shared->code()->gc_metadata() != active_code_marker) {
+            function->set_code(*lazy_compile);
+            function->shared()->set_code(*lazy_compile);
+          }
+        }
+      }
+
+      // Clear gc_metadata field.
+      for (int i = 0; i < active_functions.length(); i++) {
+        Handle<JSFunction> function = active_functions[i];
+        function->shared()->code()->set_gc_metadata(Smi::FromInt(0));
+      }
+    }
+
+    // Now recompile all functions with activation frames and patch the
+    // return address to run in the new compiled code.
+    for (int i = 0; i < active_functions.length(); i++) {
+      Handle<JSFunction> function = active_functions[i];
+
+      if (function->code()->kind() == Code::FUNCTION &&
+          function->code()->has_debug_break_slots()) {
+        // Nothing to do. Function code already had debug break slots.
+        continue;
+      }
+
+      Handle<SharedFunctionInfo> shared(function->shared());
+      // If recompilation is not possible just skip it.
+      if (shared->is_toplevel() ||
+          !shared->allows_lazy_compilation() ||
+          shared->code()->kind() == Code::BUILTIN) {
+        continue;
+      }
+
+      // Make sure that the shared full code is compiled with debug
+      // break slots.
+      if (!shared->code()->has_debug_break_slots()) {
+        // Try to compile the full code with debug break slots. If it
+        // fails just keep the current code.
+        Handle<Code> current_code(function->shared()->code());
+        ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
+        shared->set_code(*lazy_compile);
+        bool prev_force_debugger_active =
+            isolate_->debugger()->force_debugger_active();
+        isolate_->debugger()->set_force_debugger_active(true);
+        ASSERT(current_code->kind() == Code::FUNCTION);
+        CompileFullCodeForDebugging(shared, current_code);
+        isolate_->debugger()->set_force_debugger_active(
+            prev_force_debugger_active);
+        if (!shared->is_compiled()) {
+          shared->set_code(*current_code);
+          continue;
+        }
+      }
+
+      // Keep function code in sync with shared function info.
+      function->set_code(shared->code());
+    }
+
+    RedirectActivationsToRecompiledCodeOnThread(isolate_,
+                                                isolate_->thread_local_top());
+
+    ActiveFunctionsRedirector active_functions_redirector;
+    isolate_->thread_manager()->IterateArchivedThreads(
+          &active_functions_redirector);
   }
 }
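
PrepareForBreakPoints now runs in three phases: mark the code of every active (or inlined-into-active) function through the gc_metadata slot, reset all other lazily-compilable functions to the lazy-compile stub so their next call produces code with debug break slots, and eagerly recompile plus pc-redirect the functions that are already on a stack. A toy standalone model of the marking split (plain pointers stand in for heap objects):

    #include <set>
    #include <vector>

    struct Fn { bool recompile_on_next_call = false; };

    // Phase split in miniature: active functions keep their current code
    // and are handled eagerly; everything else is flagged for lazy
    // recompilation with debug break slots.
    static void MarkForDebugging(const std::vector<Fn*>& all_functions,
                                 const std::set<Fn*>& active) {
      for (Fn* f : all_functions) {
        if (active.count(f) == 0) f->recompile_on_next_call = true;
      }
    }
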
 
@@ -1744,7 +2046,9 @@
   }
 
   // Ensure shared is compiled. Return false if this failed.
-  if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
+  if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+    return false;
+  }
 
   // Create the debug info object.
   Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
@@ -1959,9 +2263,11 @@
 
   // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
   // rid of all the cached script wrappers and the second gets rid of the
-  // scripts which are no longer referenced.
-  heap->CollectAllGarbage(false);
-  heap->CollectAllGarbage(false);
+  // scripts which are no longer referenced.  The second also sweeps precisely,
+  // which saves us doing yet another GC to make the heap iterable.
+  heap->CollectAllGarbage(Heap::kNoGCFlags, "Debug::CreateScriptCache");
+  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                          "Debug::CreateScriptCache");
 
   ASSERT(script_cache_ == NULL);
   script_cache_ = new ScriptCache();
@@ -1969,6 +2275,8 @@
   // Scan heap for Script objects.
   int count = 0;
   HeapIterator iterator;
+  AssertNoAllocation no_allocation;
+
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
       script_cache_->Add(Handle<Script>(Script::cast(obj)));
@@ -2009,7 +2317,8 @@
 
   // Perform GC to get unreferenced scripts evicted from the cache before
   // returning the content.
-  isolate_->heap()->CollectAllGarbage(false);
+  isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+                                      "Debug::GetLoadedScripts");
 
   // Get the scripts from the cache.
   return script_cache_->GetScripts();
@@ -2031,6 +2340,7 @@
       compiling_natives_(false),
       is_loading_debugger_(false),
       never_unload_debugger_(false),
+      force_debugger_active_(false),
       message_handler_(NULL),
       debugger_unload_pending_(false),
       host_dispatch_handler_(NULL),
@@ -2055,7 +2365,8 @@
 
 
 Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
-                                      int argc, Object*** argv,
+                                      int argc,
+                                      Handle<Object> argv[],
                                       bool* caught_exception) {
   ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
 
@@ -2072,7 +2383,9 @@
   Handle<Object> js_object = Execution::TryCall(
       Handle<JSFunction>::cast(constructor),
       Handle<JSObject>(isolate_->debug()->debug_context()->global()),
-      argc, argv, caught_exception);
+      argc,
+      argv,
+      caught_exception);
   return js_object;
 }
 
@@ -2081,10 +2394,11 @@
   // Create the execution state object.
   Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
       isolate_->debug()->break_id());
-  const int argc = 1;
-  Object** argv[argc] = { break_id.location() };
+  Handle<Object> argv[] = { break_id };
   return MakeJSObject(CStrVector("MakeExecutionState"),
-                      argc, argv, caught_exception);
+                      ARRAY_SIZE(argv),
+                      argv,
+                      caught_exception);
 }
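
The switch from a hand-maintained argc constant to ARRAY_SIZE(argv) in these call sites removes a class of off-by-one bugs: the count is now derived from the array itself. ARRAY_SIZE is the usual sizeof quotient; a minimal illustration (the V8 version adds extra guards, so this is a sketch):

    #include <cstddef>

    // Compile-time element count in the style of V8's ARRAY_SIZE: valid
    // only for true arrays, not pointers.
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main() {
      const char* argv[] = { "exec_state", "break_points_hit" };
      static_assert(ARRAY_SIZE(argv) == 2, "count tracks the initializer list");
      return 0;
    }
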
 
 
@@ -2092,11 +2406,9 @@
                                         Handle<Object> break_points_hit,
                                         bool* caught_exception) {
   // Create the new break event object.
-  const int argc = 2;
-  Object** argv[argc] = { exec_state.location(),
-                          break_points_hit.location() };
+  Handle<Object> argv[] = { exec_state, break_points_hit };
   return MakeJSObject(CStrVector("MakeBreakEvent"),
-                      argc,
+                      ARRAY_SIZE(argv),
                       argv,
                       caught_exception);
 }
@@ -2108,23 +2420,24 @@
                                             bool* caught_exception) {
   Factory* factory = isolate_->factory();
   // Create the new exception event object.
-  const int argc = 3;
-  Object** argv[argc] = { exec_state.location(),
-                          exception.location(),
-                          uncaught ? factory->true_value().location() :
-                                     factory->false_value().location()};
+  Handle<Object> argv[] = { exec_state,
+                            exception,
+                            factory->ToBoolean(uncaught) };
   return MakeJSObject(CStrVector("MakeExceptionEvent"),
-                      argc, argv, caught_exception);
+                      ARRAY_SIZE(argv),
+                      argv,
+                      caught_exception);
 }
 
 
 Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
                                               bool* caught_exception) {
   // Create the new function event object.
-  const int argc = 1;
-  Object** argv[argc] = { function.location() };
+  Handle<Object> argv[] = { function };
   return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
-                      argc, argv, caught_exception);
+                      ARRAY_SIZE(argv),
+                      argv,
+                      caught_exception);
 }
 
 
@@ -2135,14 +2448,11 @@
   // Create the compile event object.
   Handle<Object> exec_state = MakeExecutionState(caught_exception);
   Handle<Object> script_wrapper = GetScriptWrapper(script);
-  const int argc = 3;
-  Object** argv[argc] = { exec_state.location(),
-                          script_wrapper.location(),
-                          before ? factory->true_value().location() :
-                                   factory->false_value().location() };
-
+  Handle<Object> argv[] = { exec_state,
+                            script_wrapper,
+                            factory->ToBoolean(before) };
   return MakeJSObject(CStrVector("MakeCompileEvent"),
-                      argc,
+                      ARRAY_SIZE(argv),
                       argv,
                       caught_exception);
 }
@@ -2153,11 +2463,10 @@
   // Create the script collected event object.
   Handle<Object> exec_state = MakeExecutionState(caught_exception);
   Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
-  const int argc = 2;
-  Object** argv[argc] = { exec_state.location(), id_object.location() };
+  Handle<Object> argv[] = { exec_state, id_object };
 
   return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
-                      argc,
+                      ARRAY_SIZE(argv),
                       argv,
                       caught_exception);
 }
@@ -2307,12 +2616,13 @@
   Handle<JSValue> wrapper = GetScriptWrapper(script);
 
   // Call UpdateScriptBreakPoints and expect no exceptions.
-  bool caught_exception = false;
-  const int argc = 1;
-  Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
+  bool caught_exception;
+  Handle<Object> argv[] = { wrapper };
   Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
-      Isolate::Current()->js_builtins_object(), argc, argv,
-      &caught_exception);
+                     Isolate::Current()->js_builtins_object(),
+                     ARRAY_SIZE(argv),
+                     argv,
+                     &caught_exception);
   if (caught_exception) {
     return;
   }
@@ -2425,7 +2735,8 @@
                                   v8::Debug::ClientData* client_data) {
   Handle<Foreign> callback_obj(Handle<Foreign>::cast(event_listener_));
   v8::Debug::EventCallback2 callback =
-      FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->address());
+      FUNCTION_CAST<v8::Debug::EventCallback2>(
+          callback_obj->foreign_address());
   EventDetailsImpl event_details(
       event,
       Handle<JSObject>::cast(exec_state),
@@ -2443,13 +2754,16 @@
   Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
 
   // Invoke the JavaScript debug event listener.
-  const int argc = 4;
-  Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
-                          exec_state.location(),
-                          Handle<Object>::cast(event_data).location(),
-                          event_listener_data_.location() };
-  bool caught_exception = false;
-  Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception);
+  Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)),
+                            exec_state,
+                            event_data,
+                            event_listener_data_ };
+  bool caught_exception;
+  Execution::TryCall(fun,
+                     isolate_->global(),
+                     ARRAY_SIZE(argv),
+                     argv,
+                     &caught_exception);
   // Silently ignore exceptions from debug event listeners.
 }
 
@@ -2640,7 +2954,7 @@
     command.Dispose();
 
     // Return from debug event processing if either the VM is put into the
-    // runnning state (through a continue command) or auto continue is active
+    // running state (through a continue command) or auto continue is active
     // and there are no more commands queued.
     if (running && !HasCommands()) {
       return;
@@ -2795,7 +3109,9 @@
 bool Debugger::IsDebuggerActive() {
   ScopedLock with(debugger_access_);
 
-  return message_handler_ != NULL || !event_listener_.is_null();
+  return message_handler_ != NULL ||
+      !event_listener_.is_null() ||
+      force_debugger_active_;
 }
 
 
@@ -2818,12 +3134,11 @@
     return isolate_->factory()->undefined_value();
   }
 
-  static const int kArgc = 2;
-  Object** argv[kArgc] = { exec_state.location(), data.location() };
+  Handle<Object> argv[] = { exec_state, data };
   Handle<Object> result = Execution::Call(
       fun,
       Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
-      kArgc,
+      ARRAY_SIZE(argv),
       argv,
       pending_exception);
   return result;
@@ -2849,7 +3164,7 @@
     v8::Debug::DebugBreak();
   }
 
-  if (Socket::Setup()) {
+  if (Socket::SetUp()) {
     if (agent_ == NULL) {
       agent_ = new DebuggerAgent(name, port);
       agent_->Start();
@@ -2891,6 +3206,94 @@
 }
 
 
+EnterDebugger::EnterDebugger()
+    : isolate_(Isolate::Current()),
+      prev_(isolate_->debug()->debugger_entry()),
+      it_(isolate_),
+      has_js_frames_(!it_.done()),
+      save_(isolate_) {
+  Debug* debug = isolate_->debug();
+  ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
+  ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
+
+  // Link recursive debugger entry.
+  debug->set_debugger_entry(this);
+
+  // Store the previous break id and frame id.
+  break_id_ = debug->break_id();
+  break_frame_id_ = debug->break_frame_id();
+
+  // Create the new break info. If there are no JavaScript frames there is
+  // no break frame id.
+  if (has_js_frames_) {
+    debug->NewBreak(it_.frame()->id());
+  } else {
+    debug->NewBreak(StackFrame::NO_ID);
+  }
+
+  // Make sure that the debugger is loaded and enter the debugger context.
+  load_failed_ = !debug->Load();
+  if (!load_failed_) {
+    // NOTE: the member variable save_ saves the previous context before
+    // this change.
+    isolate_->set_context(*debug->debug_context());
+  }
+}
+
+
+EnterDebugger::~EnterDebugger() {
+  ASSERT(Isolate::Current() == isolate_);
+  Debug* debug = isolate_->debug();
+
+  // Restore to the previous break state.
+  debug->SetBreak(break_frame_id_, break_id_);
+
+  // Check for leaving the debugger.
+  if (!load_failed_ && prev_ == NULL) {
+    // Clear mirror cache when leaving the debugger. Skip this if there is a
+    // pending exception as clearing the mirror cache calls back into
+    // JavaScript. This can happen if the v8::Debug::Call is used in which
+    // case the exception should end up in the calling code.
+    if (!isolate_->has_pending_exception()) {
+      // Try to avoid any pending debug break breaking in the clear mirror
+      // cache JavaScript code.
+      if (isolate_->stack_guard()->IsDebugBreak()) {
+        debug->set_interrupts_pending(DEBUGBREAK);
+        isolate_->stack_guard()->Continue(DEBUGBREAK);
+      }
+      debug->ClearMirrorCache();
+    }
+
+    // Request preemption and debug break when leaving the last debugger entry
+    // if any of these were recorded while debugging.
+    if (debug->is_interrupt_pending(PREEMPT)) {
+      // This re-scheduling of preemption is to avoid starvation in some
+      // debugging scenarios.
+      debug->clear_interrupt_pending(PREEMPT);
+      isolate_->stack_guard()->Preempt();
+    }
+    if (debug->is_interrupt_pending(DEBUGBREAK)) {
+      debug->clear_interrupt_pending(DEBUGBREAK);
+      isolate_->stack_guard()->DebugBreak();
+    }
+
+    // If there are commands in the queue when leaving the debugger, request
+    // that these commands are processed.
+    if (isolate_->debugger()->HasCommands()) {
+      isolate_->stack_guard()->DebugCommand();
+    }
+
+    // If leaving the debugger with the debugger no longer active, unload it.
+    if (!isolate_->debugger()->IsDebuggerActive()) {
+      isolate_->debugger()->UnloadDebugger();
+    }
+  }
+
+  // Leaving this debugger entry.
+  debug->set_debugger_entry(prev_);
+}
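+
+Moving the EnterDebugger constructor and destructor out of the header (see
+the debug.h hunk below) keeps the scope-guard idiom the same: entering
+links the debugger state in the constructor, leaving restores it in the
+destructor, so every exit path unwinds correctly. A generic standalone
+sketch of that idiom (names are illustrative, not V8's):
+
+    #include <cstdio>
+
+    // RAII scope guard in the style of EnterDebugger: side effects on
+    // entry are undone on every exit path, including early returns.
+    class ScopedEnter {
+     public:
+      explicit ScopedEnter(int* depth) : depth_(depth) { ++*depth_; }
+      ~ScopedEnter() { --*depth_; }
+     private:
+      int* depth_;
+    };
+
+    int main() {
+      int depth = 0;
+      {
+        ScopedEnter enter(&depth);
+        std::printf("inside: depth=%d\n", depth);   // 1
+      }
+      std::printf("outside: depth=%d\n", depth);    // 0
+      return 0;
+    }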
+
+
 MessageImpl MessageImpl::NewEvent(DebugEvent event,
                                   bool running,
                                   Handle<JSObject> exec_state,
diff --git a/src/debug.h b/src/debug.h
index a5083eb..474b90b 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -224,7 +224,7 @@
 // DebugInfo.
 class Debug {
  public:
-  void Setup(bool create_heap_objects);
+  void SetUp(bool create_heap_objects);
   bool Load();
   void Unload();
   bool IsLoaded() { return !debug_context_.is_null(); }
@@ -239,6 +239,7 @@
   void ClearBreakPoint(Handle<Object> break_point_object);
   void ClearAllBreakPoints();
   void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
+  void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
   void FloodHandlerWithOneShot();
   void ChangeBreakOnException(ExceptionBreakType type, bool enable);
   bool IsBreakOnException(ExceptionBreakType type);
@@ -402,9 +403,11 @@
   static void GenerateStoreICDebugBreak(MacroAssembler* masm);
   static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
   static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
-  static void GenerateConstructCallDebugBreak(MacroAssembler* masm);
   static void GenerateReturnDebugBreak(MacroAssembler* masm);
-  static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
+  static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
+  static void GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm);
+  static void GenerateCallConstructStubDebugBreak(MacroAssembler* masm);
+  static void GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm);
   static void GenerateSlotDebugBreak(MacroAssembler* masm);
   static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
 
@@ -707,7 +710,8 @@
   void DebugRequest(const uint16_t* json_request, int length);
 
   Handle<Object> MakeJSObject(Vector<const char> constructor_name,
-                              int argc, Object*** argv,
+                              int argc,
+                              Handle<Object> argv[],
                               bool* caught_exception);
   Handle<Object> MakeExecutionState(bool* caught_exception);
   Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
@@ -811,11 +815,15 @@
   }
 
   void set_compiling_natives(bool compiling_natives) {
-    Debugger::compiling_natives_ = compiling_natives;
+    compiling_natives_ = compiling_natives;
   }
   bool compiling_natives() const { return compiling_natives_; }
   void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
   bool is_loading_debugger() const { return is_loading_debugger_; }
+  void set_force_debugger_active(bool force_debugger_active) {
+    force_debugger_active_ = force_debugger_active;
+  }
+  bool force_debugger_active() const { return force_debugger_active_; }
 
   bool IsDebuggerActive();
 
@@ -841,6 +849,7 @@
   bool compiling_natives_;  // Are we compiling natives?
   bool is_loading_debugger_;  // Are we loading the debugger?
   bool never_unload_debugger_;  // Can we unload the debugger?
+  bool force_debugger_active_;  // Activate debugger without event listeners.
   v8::Debug::MessageHandler2 message_handler_;
   bool debugger_unload_pending_;  // Was message handler cleared?
   v8::Debug::HostDispatchHandler host_dispatch_handler_;
@@ -871,91 +880,8 @@
 // some reason could not be entered FailedToEnter will return true.
 class EnterDebugger BASE_EMBEDDED {
  public:
-  EnterDebugger()
-      : isolate_(Isolate::Current()),
-        prev_(isolate_->debug()->debugger_entry()),
-        it_(isolate_),
-        has_js_frames_(!it_.done()),
-        save_(isolate_) {
-    Debug* debug = isolate_->debug();
-    ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
-    ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
-
-    // Link recursive debugger entry.
-    debug->set_debugger_entry(this);
-
-    // Store the previous break id and frame id.
-    break_id_ = debug->break_id();
-    break_frame_id_ = debug->break_frame_id();
-
-    // Create the new break info. If there is no JavaScript frames there is no
-    // break frame id.
-    if (has_js_frames_) {
-      debug->NewBreak(it_.frame()->id());
-    } else {
-      debug->NewBreak(StackFrame::NO_ID);
-    }
-
-    // Make sure that debugger is loaded and enter the debugger context.
-    load_failed_ = !debug->Load();
-    if (!load_failed_) {
-      // NOTE the member variable save which saves the previous context before
-      // this change.
-      isolate_->set_context(*debug->debug_context());
-    }
-  }
-
-  ~EnterDebugger() {
-    ASSERT(Isolate::Current() == isolate_);
-    Debug* debug = isolate_->debug();
-
-    // Restore to the previous break state.
-    debug->SetBreak(break_frame_id_, break_id_);
-
-    // Check for leaving the debugger.
-    if (prev_ == NULL) {
-      // Clear mirror cache when leaving the debugger. Skip this if there is a
-      // pending exception as clearing the mirror cache calls back into
-      // JavaScript. This can happen if the v8::Debug::Call is used in which
-      // case the exception should end up in the calling code.
-      if (!isolate_->has_pending_exception()) {
-        // Try to avoid any pending debug break breaking in the clear mirror
-        // cache JavaScript code.
-        if (isolate_->stack_guard()->IsDebugBreak()) {
-          debug->set_interrupts_pending(DEBUGBREAK);
-          isolate_->stack_guard()->Continue(DEBUGBREAK);
-        }
-        debug->ClearMirrorCache();
-      }
-
-      // Request preemption and debug break when leaving the last debugger entry
-      // if any of these where recorded while debugging.
-      if (debug->is_interrupt_pending(PREEMPT)) {
-        // This re-scheduling of preemption is to avoid starvation in some
-        // debugging scenarios.
-        debug->clear_interrupt_pending(PREEMPT);
-        isolate_->stack_guard()->Preempt();
-      }
-      if (debug->is_interrupt_pending(DEBUGBREAK)) {
-        debug->clear_interrupt_pending(DEBUGBREAK);
-        isolate_->stack_guard()->DebugBreak();
-      }
-
-      // If there are commands in the queue when leaving the debugger request
-      // that these commands are processed.
-      if (isolate_->debugger()->HasCommands()) {
-        isolate_->stack_guard()->DebugCommand();
-      }
-
-      // If leaving the debugger with the debugger no longer active unload it.
-      if (!isolate_->debugger()->IsDebuggerActive()) {
-        isolate_->debugger()->UnloadDebugger();
-      }
-    }
-
-    // Leaving this debugger entry.
-    debug->set_debugger_entry(prev_);
-  }
+  EnterDebugger();
+  ~EnterDebugger();
 
   // Check whether the debugger could be entered.
   inline bool FailedToEnter() { return load_failed_; }
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 0ada28b..2a30ddd 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,11 +52,13 @@
 
 DeoptimizerData::~DeoptimizerData() {
   if (eager_deoptimization_entry_code_ != NULL) {
-    eager_deoptimization_entry_code_->Free(EXECUTABLE);
+    Isolate::Current()->memory_allocator()->Free(
+        eager_deoptimization_entry_code_);
     eager_deoptimization_entry_code_ = NULL;
   }
   if (lazy_deoptimization_entry_code_ != NULL) {
-    lazy_deoptimization_entry_code_->Free(EXECUTABLE);
+    Isolate::Current()->memory_allocator()->Free(
+        lazy_deoptimization_entry_code_);
     lazy_deoptimization_entry_code_ = NULL;
   }
 }
@@ -71,6 +73,8 @@
 #endif
 
 
+// We rely on this function not causing a GC.  It is called from generated code
+// without having a real stack frame in place.
 Deoptimizer* Deoptimizer::New(JSFunction* function,
                               BailoutType type,
                               unsigned bailout_id,
@@ -100,10 +104,27 @@
   return result;
 }
 
+
+int Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index) {
+  if (jsframe_index == 0) return 0;
+
+  int frame_index = 0;
+  while (jsframe_index >= 0) {
+    FrameDescription* frame = output_[frame_index];
+    if (frame->GetFrameType() == StackFrame::JAVA_SCRIPT) {
+      jsframe_index--;
+    }
+    frame_index++;
+  }
+
+  return frame_index - 1;
+}
+
+
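
The conversion above counts only JAVA_SCRIPT frames while walking the full output frame list, since arguments-adaptor and construct-stub frames occupy output slots of their own. A minimal standalone model of that walk (toy FrameType enum and std::vector instead of V8's types; an illustration, not the V8 code):

  #include <cassert>
  #include <vector>

  enum FrameType { JAVA_SCRIPT, ARGUMENTS_ADAPTOR, CONSTRUCT };

  // Returns the output-frame index of the jsframe_index-th JAVA_SCRIPT frame,
  // mirroring ConvertJSFrameIndexToFrameIndex above.
  int ToFrameIndex(const std::vector<FrameType>& output, int jsframe_index) {
    if (jsframe_index == 0) return 0;
    int frame_index = 0;
    while (jsframe_index >= 0) {
      if (output[frame_index] == JAVA_SCRIPT) jsframe_index--;
      frame_index++;
    }
    return frame_index - 1;
  }

  int main() {
    // Outermost frame first: a JS frame, an adaptor, then an inlined JS frame.
    std::vector<FrameType> output = {JAVA_SCRIPT, ARGUMENTS_ADAPTOR, JAVA_SCRIPT};
    assert(ToFrameIndex(output, 1) == 2);  // The adaptor slot is skipped over.
    return 0;
  }
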
 #ifdef ENABLE_DEBUGGER_SUPPORT
 DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
     JavaScriptFrame* frame,
-    int frame_index,
+    int jsframe_index,
     Isolate* isolate) {
   ASSERT(isolate == Isolate::Current());
   ASSERT(frame->is_optimized());
@@ -139,22 +160,48 @@
 
   // Create the GC safe output frame information and register it for GC
   // handling.
-  ASSERT_LT(frame_index, deoptimizer->output_count());
-  DeoptimizedFrameInfo* info =
-      new DeoptimizedFrameInfo(deoptimizer, frame_index);
+  ASSERT_LT(jsframe_index, deoptimizer->jsframe_count());
+
+  // Convert JS frame index into frame index.
+  int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index);
+
+  bool has_arguments_adaptor =
+      frame_index > 0 &&
+      deoptimizer->output_[frame_index - 1]->GetFrameType() ==
+      StackFrame::ARGUMENTS_ADAPTOR;
+
+  int construct_offset = has_arguments_adaptor ? 2 : 1;
+  bool has_construct_stub =
+      frame_index >= construct_offset &&
+      deoptimizer->output_[frame_index - construct_offset]->GetFrameType() ==
+      StackFrame::CONSTRUCT;
+
+  DeoptimizedFrameInfo* info = new DeoptimizedFrameInfo(deoptimizer,
+                                                        frame_index,
+                                                        has_arguments_adaptor,
+                                                        has_construct_stub);
   isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
 
   // Get the "simulated" top and size for the requested frame.
-  Address top =
-      reinterpret_cast<Address>(deoptimizer->output_[frame_index]->GetTop());
-  uint32_t size = deoptimizer->output_[frame_index]->GetFrameSize();
+  FrameDescription* parameters_frame =
+      deoptimizer->output_[
+          has_arguments_adaptor ? (frame_index - 1) : frame_index];
+
+  uint32_t parameters_size = (info->parameters_count() + 1) * kPointerSize;
+  Address parameters_top = reinterpret_cast<Address>(
+      parameters_frame->GetTop() + (parameters_frame->GetFrameSize() -
+                                    parameters_size));
+
+  uint32_t expressions_size = info->expression_count() * kPointerSize;
+  Address expressions_top = reinterpret_cast<Address>(
+      deoptimizer->output_[frame_index]->GetTop());
 
   // Done with the GC-unsafe frame descriptions. This re-enables allocation.
   deoptimizer->DeleteFrameDescriptions();
 
   // Allocate a heap number for the doubles belonging to this frame.
   deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
-      top, size, info);
+      parameters_top, parameters_size, expressions_top, expressions_size, info);
 
   // Finished using the deoptimizer instance.
   delete deoptimizer;
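
The parameter area sits at the high end of the frame that carries the parameters (the adaptor frame, if one precedes the JS frame), and its size includes one extra slot for the receiver. A quick arithmetic check with invented numbers (kPointerSize, the top address, and the sizes are all assumptions for illustration):

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t kPointerSize = 8;          // assumed 64-bit target
    uint64_t frame_top = 0x1000;              // hypothetical GetTop() value
    uint32_t frame_size = 10 * kPointerSize;  // hypothetical GetFrameSize()
    int parameters_count = 2;

    // One extra slot for the receiver, as in the code above.
    uint32_t parameters_size = (parameters_count + 1) * kPointerSize;
    uint64_t parameters_top = frame_top + (frame_size - parameters_size);

    // The parameters occupy the last three slots of the ten-slot frame.
    assert(parameters_top == frame_top + 7 * kPointerSize);
    return 0;
  }
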
@@ -260,11 +307,16 @@
   AssertNoAllocation no_allocation;
 
   // Run through the list of all global contexts and deoptimize.
-  Object* global = Isolate::Current()->heap()->global_contexts_list();
-  while (!global->IsUndefined()) {
-    VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(),
-                                              visitor);
-    global = Context::cast(global)->get(Context::NEXT_CONTEXT_LINK);
+  Object* context = Isolate::Current()->heap()->global_contexts_list();
+  while (!context->IsUndefined()) {
+    // GC can happen when the context is not fully initialized,
+    // so the global field of the context can be undefined.
+    Object* global = Context::cast(context)->get(Context::GLOBAL_INDEX);
+    if (!global->IsUndefined()) {
+      VisitAllOptimizedFunctionsForGlobalObject(JSObject::cast(global),
+                                                visitor);
+    }
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
   }
 }
 
@@ -304,6 +356,7 @@
       fp_to_sp_delta_(fp_to_sp_delta),
       input_(NULL),
       output_count_(0),
+      jsframe_count_(0),
       output_(NULL),
       deferred_heap_numbers_(0) {
   if (FLAG_trace_deopt && type != OSR) {
@@ -329,6 +382,26 @@
   if (type == EAGER) {
     ASSERT(from == NULL);
     optimized_code_ = function_->code();
+    if (FLAG_trace_deopt && FLAG_code_comments) {
+      // Print instruction associated with this bailout.
+      const char* last_comment = NULL;
+      int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
+          | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+      for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
+        RelocInfo* info = it.rinfo();
+        if (info->rmode() == RelocInfo::COMMENT) {
+          last_comment = reinterpret_cast<const char*>(info->data());
+        }
+        if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
+          unsigned id = Deoptimizer::GetDeoptimizationId(
+              info->target_address(), Deoptimizer::EAGER);
+          if (id == bailout_id && last_comment != NULL) {
+            PrintF("            %s\n", last_comment);
+            break;
+          }
+        }
+      }
+    }
   } else if (type == LAZY) {
     optimized_code_ = FindDeoptimizingCodeFromAddress(from);
     ASSERT(optimized_code_ != NULL);
@@ -346,9 +419,7 @@
   ASSERT(HEAP->allow_allocation(false));
   unsigned size = ComputeInputFrameSize();
   input_ = new(size) FrameDescription(size, function);
-#ifdef DEBUG
-  input_->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif
+  input_->SetFrameType(StackFrame::JAVA_SCRIPT);
 }
 
 
@@ -372,7 +443,7 @@
 Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
   ASSERT(id >= 0);
   if (id >= kNumberOfEntries) return NULL;
-  LargeObjectChunk* base = NULL;
+  MemoryChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     if (data->eager_deoptimization_entry_code_ == NULL) {
@@ -386,12 +457,12 @@
     base = data->lazy_deoptimization_entry_code_;
   }
   return
-      static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
+      static_cast<Address>(base->area_start()) + (id * table_entry_size_);
 }
 
 
 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
-  LargeObjectChunk* base = NULL;
+  MemoryChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     base = data->eager_deoptimization_entry_code_;
@@ -399,14 +470,14 @@
     base = data->lazy_deoptimization_entry_code_;
   }
   if (base == NULL ||
-      addr < base->GetStartAddress() ||
-      addr >= base->GetStartAddress() +
+      addr < base->area_start() ||
+      addr >= base->area_start() +
           (kNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-      static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
-  return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
+      static_cast<int>(addr - base->area_start()) % table_entry_size_);
+  return static_cast<int>(addr - base->area_start()) / table_entry_size_;
 }
 
 
@@ -448,6 +519,8 @@
 }
 
 
+// We rely on this function not causing a GC.  It is called from generated code
+// without having a real stack frame in place.
 void Deoptimizer::DoComputeOutputFrames() {
   if (bailout_type_ == OSR) {
     DoComputeOsrOutputFrame();
@@ -482,6 +555,7 @@
   // Read the number of output frames and allocate an array for their
   // descriptions.
   int count = iterator.Next();
+  iterator.Next();  // Drop JS frames count.
   ASSERT(output_ == NULL);
   output_ = new FrameDescription*[count];
   for (int i = 0; i < count; ++i) {
@@ -491,7 +565,24 @@
 
   // Translate each output frame.
   for (int i = 0; i < count; ++i) {
-    DoComputeFrame(&iterator, i);
+    // Read the ast node id, function, and frame height for this output frame.
+    Translation::Opcode opcode =
+        static_cast<Translation::Opcode>(iterator.Next());
+    switch (opcode) {
+      case Translation::JS_FRAME:
+        DoComputeJSFrame(&iterator, i);
+        jsframe_count_++;
+        break;
+      case Translation::ARGUMENTS_ADAPTOR_FRAME:
+        DoComputeArgumentsAdaptorFrame(&iterator, i);
+        break;
+      case Translation::CONSTRUCT_STUB_FRAME:
+        DoComputeConstructStubFrame(&iterator, i);
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
   }
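
After this change the translation stream starts with BEGIN followed by the total frame count and the JS frame count, and every frame record leads with its own opcode. A toy decoder over a flat integer stream shows the shape; the opcode values and flat int encoding here are simplified assumptions, not V8's actual byte encoding, but the operand counts (3 for JS_FRAME, 2 for the adaptor and construct-stub records) match NumberOfOperandsFor below:

  #include <cassert>
  #include <vector>

  enum Opcode { BEGIN, JS_FRAME, ARGUMENTS_ADAPTOR_FRAME, CONSTRUCT_STUB_FRAME };

  int main() {
    // BEGIN, frame_count = 2, jsframe_count = 1, then one adaptor record
    // (literal id, height) and one JS frame record (ast id, literal id, height).
    std::vector<int> stream = {BEGIN, 2, 1,
                               ARGUMENTS_ADAPTOR_FRAME, 0, 3,
                               JS_FRAME, 7, 1, 2};
    int pos = 0;
    assert(stream[pos++] == BEGIN);
    int count = stream[pos++];
    pos++;  // Drop the JS frame count, as DoComputeOutputFrames does.
    int jsframe_count = 0;
    for (int i = 0; i < count; ++i) {
      switch (stream[pos++]) {
        case JS_FRAME:                pos += 3; jsframe_count++; break;
        case ARGUMENTS_ADAPTOR_FRAME: pos += 2; break;
        case CONSTRUCT_STUB_FRAME:    pos += 2; break;
      }
    }
    assert(count == 2 && jsframe_count == 1);
    assert(pos == static_cast<int>(stream.size()));
    return 0;
  }
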
 
   // Print some helpful diagnostic information.
@@ -532,39 +623,52 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
-    Address top, uint32_t size, DeoptimizedFrameInfo* info) {
+    Address parameters_top,
+    uint32_t parameters_size,
+    Address expressions_top,
+    uint32_t expressions_size,
+    DeoptimizedFrameInfo* info) {
   ASSERT_EQ(DEBUGGER, bailout_type_);
+  Address parameters_bottom = parameters_top + parameters_size;
+  Address expressions_bottom = expressions_top + expressions_size;
   for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
     HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
 
     // Check if the heap number to materialize actually belongs to the frame
     // being extracted.
     Address slot = d.slot_address();
-    if (top <= slot && slot < top + size) {
+    if (parameters_top <= slot && slot < parameters_bottom) {
       Handle<Object> num = isolate_->factory()->NewNumber(d.value());
-      // Calculate the index with the bottom of the expression stack
-      // at index 0, and the fixed part (including incoming arguments)
-      // at negative indexes.
-      int index = static_cast<int>(
-          info->expression_count_ - (slot - top) / kPointerSize - 1);
+
+      int index = (info->parameters_count() - 1) -
+          static_cast<int>(slot - parameters_top) / kPointerSize;
+
       if (FLAG_trace_deopt) {
         PrintF("Materializing a new heap number %p [%e] in slot %p"
-               "for stack index %d\n",
+               "for parameter slot #%d\n",
                reinterpret_cast<void*>(*num),
                d.value(),
                d.slot_address(),
                index);
       }
-      if (index >=0) {
-        info->SetExpression(index, *num);
-      } else {
-        // Calculate parameter index subtracting one for the receiver.
-        int parameter_index =
-            index +
-            static_cast<int>(size) / kPointerSize -
-            info->expression_count_ - 1;
-        info->SetParameter(parameter_index, *num);
+
+      info->SetParameter(index, *num);
+    } else if (expressions_top <= slot && slot < expressions_bottom) {
+      Handle<Object> num = isolate_->factory()->NewNumber(d.value());
+
+      int index = info->expression_count() - 1 -
+          static_cast<int>(slot - expressions_top) / kPointerSize;
+
+      if (FLAG_trace_deopt) {
+        PrintF("Materializing a new heap number %p [%e] in slot %p"
+               "for expression slot #%d\n",
+               reinterpret_cast<void*>(*num),
+               d.value(),
+               d.slot_address(),
+               index);
       }
+
+      info->SetExpression(index, *num);
     }
   }
 }
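
The index computation maps a slot address back to a parameter or expression index: the slot at the area's top corresponds to the highest index, and indices fall as addresses rise toward the bottom. A small check with invented values (kPointerSize and the addresses are assumptions):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int kPointerSize = 8;        // assumed
    uint64_t parameters_top = 0x2000;  // hypothetical area start
    int parameters_count = 3;

    // Same mapping as above: (count - 1) minus the slot's distance from top.
    auto index_for = [&](uint64_t slot) {
      return (parameters_count - 1) -
             static_cast<int>((slot - parameters_top) / kPointerSize);
    };

    assert(index_for(parameters_top) == 2);                     // last parameter
    assert(index_for(parameters_top + 2 * kPointerSize) == 0);  // first parameter
    return 0;
  }
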
@@ -589,7 +693,9 @@
 
   switch (opcode) {
     case Translation::BEGIN:
-    case Translation::FRAME:
+    case Translation::JS_FRAME:
+    case Translation::ARGUMENTS_ADAPTOR_FRAME:
+    case Translation::CONSTRUCT_STUB_FRAME:
     case Translation::DUPLICATE:
       UNREACHABLE();
       return;
@@ -599,11 +705,13 @@
       intptr_t input_value = input_->GetRegister(input_reg);
       if (FLAG_trace_deopt) {
         PrintF(
-            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
+            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
             output_[frame_index]->GetTop() + output_offset,
             output_offset,
             input_value,
             converter.NameOfCPURegister(input_reg));
+        reinterpret_cast<Object*>(input_value)->ShortPrint();
+        PrintF("\n");
       }
       output_[frame_index]->SetFrameSlot(output_offset, input_value);
       return;
@@ -656,15 +764,17 @@
     case Translation::STACK_SLOT: {
       int input_slot_index = iterator->Next();
       unsigned input_offset =
-          input_->GetOffsetFromSlotIndex(this, input_slot_index);
+          input_->GetOffsetFromSlotIndex(input_slot_index);
       intptr_t input_value = input_->GetFrameSlot(input_offset);
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
-        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
+        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
                output_offset,
                input_value,
                input_offset);
+        reinterpret_cast<Object*>(input_value)->ShortPrint();
+        PrintF("\n");
       }
       output_[frame_index]->SetFrameSlot(output_offset, input_value);
       return;
@@ -673,7 +783,7 @@
     case Translation::INT32_STACK_SLOT: {
       int input_slot_index = iterator->Next();
       unsigned input_offset =
-          input_->GetOffsetFromSlotIndex(this, input_slot_index);
+          input_->GetOffsetFromSlotIndex(input_slot_index);
       intptr_t value = input_->GetFrameSlot(input_offset);
       bool is_smi = Smi::IsValid(value);
       if (FLAG_trace_deopt) {
@@ -702,7 +812,7 @@
     case Translation::DOUBLE_STACK_SLOT: {
       int input_slot_index = iterator->Next();
       unsigned input_offset =
-          input_->GetOffsetFromSlotIndex(this, input_slot_index);
+          input_->GetOffsetFromSlotIndex(input_slot_index);
       double value = input_->GetDoubleFrameSlot(input_offset);
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
@@ -735,7 +845,6 @@
     case Translation::ARGUMENTS_OBJECT: {
       // Use the arguments marker value as a sentinel and fill in the arguments
       // object after the deoptimized frame is built.
-      ASSERT(frame_index == 0);  // Only supported for first frame.
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- ",
                output_[frame_index]->GetTop() + output_offset,
@@ -771,7 +880,9 @@
 
   switch (opcode) {
     case Translation::BEGIN:
-    case Translation::FRAME:
+    case Translation::JS_FRAME:
+    case Translation::ARGUMENTS_ADAPTOR_FRAME:
+    case Translation::CONSTRUCT_STUB_FRAME:
     case Translation::DUPLICATE:
       UNREACHABLE();  // Malformed input.
        return false;
@@ -834,12 +945,14 @@
     case Translation::STACK_SLOT: {
       int output_index = iterator->Next();
       unsigned output_offset =
-          output->GetOffsetFromSlotIndex(this, output_index);
+          output->GetOffsetFromSlotIndex(output_index);
       if (FLAG_trace_osr) {
-        PrintF("    [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
+        PrintF("    [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
                output_offset,
                input_value,
                *input_offset);
+        reinterpret_cast<Object*>(input_value)->ShortPrint();
+        PrintF("\n");
       }
       output->SetFrameSlot(output_offset, input_value);
       break;
@@ -851,7 +964,7 @@
 
       int output_index = iterator->Next();
       unsigned output_offset =
-          output->GetOffsetFromSlotIndex(this, output_index);
+          output->GetOffsetFromSlotIndex(output_index);
       int int32_value = input_object->IsSmi()
           ? Smi::cast(input_object)->value()
           : DoubleToInt32(input_object->Number());
@@ -883,7 +996,7 @@
 
       int output_index = iterator->Next();
       unsigned output_offset =
-          output->GetOffsetFromSlotIndex(this, output_index);
+          output->GetOffsetFromSlotIndex(output_index);
       double double_value = input_object->Number();
       uint64_t int_value = BitCast<uint64_t, double>(double_value);
       int32_t lower = static_cast<int32_t>(int_value);
@@ -939,7 +1052,10 @@
   for (uint32_t i = 0; i < table_length; ++i) {
     uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
     Address pc_after = unoptimized_code->instruction_start() + pc_offset;
-    PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
+    PatchStackCheckCodeAt(unoptimized_code,
+                          pc_after,
+                          check_code,
+                          replacement_code);
     stack_check_cursor += 2 * kIntSize;
   }
 }
@@ -958,7 +1074,10 @@
   for (uint32_t i = 0; i < table_length; ++i) {
     uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
     Address pc_after = unoptimized_code->instruction_start() + pc_offset;
-    RevertStackCheckCodeAt(pc_after, check_code, replacement_code);
+    RevertStackCheckCodeAt(unoptimized_code,
+                           pc_after,
+                           check_code,
+                           replacement_code);
     stack_check_cursor += 2 * kIntSize;
   }
 }
@@ -988,8 +1107,8 @@
 unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
   // The fixed part of the frame consists of the return address, frame
   // pointer, function, context, and all the incoming arguments.
-  static const unsigned kFixedSlotSize = 4 * kPointerSize;
-  return ComputeIncomingArgumentSize(function) + kFixedSlotSize;
+  return ComputeIncomingArgumentSize(function) +
+      StandardFrameConstants::kFixedFrameSize;
 }
 
 
@@ -1025,7 +1144,7 @@
 }
 
 
-LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
+MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
   // We cannot run this if the serializer is enabled because this will
   // cause us to emit relocation information for the external
   // references. This is fine because the deoptimizer's code section
@@ -1039,12 +1158,16 @@
   masm.GetCode(&desc);
   ASSERT(desc.reloc_size == 0);
 
-  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
+  MemoryChunk* chunk =
+      Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
+                                                            EXECUTABLE,
+                                                            NULL);
+  ASSERT(chunk->area_size() >= desc.instr_size);
   if (chunk == NULL) {
     V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
   }
-  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
+  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->area_start(), desc.instr_size);
   return chunk;
 }
 
@@ -1093,7 +1216,8 @@
       function_(function),
       top_(kZapUint32),
       pc_(kZapUint32),
-      fp_(kZapUint32) {
+      fp_(kZapUint32),
+      context_(kZapUint32) {
   // Zap all the registers.
   for (int r = 0; r < Register::kNumRegisters; r++) {
     SetRegister(r, kZapUint32);
@@ -1106,49 +1230,62 @@
 }
 
 
-unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
-                                                  int slot_index) {
+int FrameDescription::ComputeFixedSize() {
+  return StandardFrameConstants::kFixedFrameSize +
+      (ComputeParametersCount() + 1) * kPointerSize;
+}
+
+
+unsigned FrameDescription::GetOffsetFromSlotIndex(int slot_index) {
   if (slot_index >= 0) {
     // Local or spill slots. Skip the fixed part of the frame
     // including all arguments.
-    unsigned base =
-        GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
+    unsigned base = GetFrameSize() - ComputeFixedSize();
     return base - ((slot_index + 1) * kPointerSize);
   } else {
     // Incoming parameter.
-    unsigned base = GetFrameSize() -
-        deoptimizer->ComputeIncomingArgumentSize(GetFunction());
+    int arg_size = (ComputeParametersCount() + 1) * kPointerSize;
+    unsigned base = GetFrameSize() - arg_size;
     return base - ((slot_index + 1) * kPointerSize);
   }
 }
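
Slot indexing here is two-sided: non-negative indices address locals and spill slots below the fixed frame part, while negative indices reach the incoming parameters above it. A worked example with invented sizes (kPointerSize, the fixed-frame layout, and the counts are assumptions):

  #include <cassert>

  int main() {
    const unsigned kPointerSize = 8;                    // assumed
    const unsigned kFixedFrameSize = 4 * kPointerSize;  // assumed fixed part
    unsigned frame_size = 12 * kPointerSize;            // hypothetical frame
    int parameters_count = 2;

    // ComputeFixedSize: fixed part plus parameters plus the receiver.
    unsigned fixed_size =
        kFixedFrameSize + (parameters_count + 1) * kPointerSize;

    // Mirrors GetOffsetFromSlotIndex above.
    auto offset_for = [&](int slot_index) -> unsigned {
      if (slot_index >= 0) {
        unsigned base = frame_size - fixed_size;        // skip fixed part
        return base - (slot_index + 1) * kPointerSize;  // locals / spills
      }
      unsigned arg_size = (parameters_count + 1) * kPointerSize;
      unsigned base = frame_size - arg_size;
      return base - (slot_index + 1) * kPointerSize;    // incoming parameters
    };

    assert(offset_for(0) == frame_size - fixed_size - kPointerSize);
    assert(offset_for(-1) == frame_size - (parameters_count + 1) * kPointerSize);
    return 0;
  }
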
 
 
 int FrameDescription::ComputeParametersCount() {
-  return function_->shared()->formal_parameter_count();
+  switch (type_) {
+    case StackFrame::JAVA_SCRIPT:
+      return function_->shared()->formal_parameter_count();
+    case StackFrame::ARGUMENTS_ADAPTOR: {
+      // Last slot contains the number of incoming arguments as a smi.
+      // Can't use GetExpression(0) because it would cause infinite recursion.
+      return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
+    }
+    default:
+      UNREACHABLE();
+      return 0;
+  }
 }
 
 
-Object* FrameDescription::GetParameter(Deoptimizer* deoptimizer, int index) {
-  ASSERT_EQ(Code::FUNCTION, kind_);
+Object* FrameDescription::GetParameter(int index) {
   ASSERT(index >= 0);
   ASSERT(index < ComputeParametersCount());
   // The slot indexes for incoming arguments are negative.
-  unsigned offset = GetOffsetFromSlotIndex(deoptimizer,
-                                           index - ComputeParametersCount());
+  unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount());
   return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
 }
 
 
-unsigned FrameDescription::GetExpressionCount(Deoptimizer* deoptimizer) {
-  ASSERT_EQ(Code::FUNCTION, kind_);
-  unsigned size = GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
+unsigned FrameDescription::GetExpressionCount() {
+  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
+  unsigned size = GetFrameSize() - ComputeFixedSize();
   return size / kPointerSize;
 }
 
 
-Object* FrameDescription::GetExpression(Deoptimizer* deoptimizer, int index) {
-  ASSERT_EQ(Code::FUNCTION, kind_);
-  unsigned offset = GetOffsetFromSlotIndex(deoptimizer, index);
+Object* FrameDescription::GetExpression(int index) {
+  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
+  unsigned offset = GetOffsetFromSlotIndex(index);
   return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
 }
 
@@ -1194,8 +1331,22 @@
 }
 
 
-void Translation::BeginFrame(int node_id, int literal_id, unsigned height) {
-  buffer_->Add(FRAME);
+void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
+  buffer_->Add(CONSTRUCT_STUB_FRAME);
+  buffer_->Add(literal_id);
+  buffer_->Add(height);
+}
+
+
+void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
+  buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
+  buffer_->Add(literal_id);
+  buffer_->Add(height);
+}
+
+
+void Translation::BeginJSFrame(int node_id, int literal_id, unsigned height) {
+  buffer_->Add(JS_FRAME);
   buffer_->Add(node_id);
   buffer_->Add(literal_id);
   buffer_->Add(height);
@@ -1259,7 +1410,6 @@
     case ARGUMENTS_OBJECT:
     case DUPLICATE:
       return 0;
-    case BEGIN:
     case REGISTER:
     case INT32_REGISTER:
     case DOUBLE_REGISTER:
@@ -1268,7 +1418,11 @@
     case DOUBLE_STACK_SLOT:
     case LITERAL:
       return 1;
-    case FRAME:
+    case BEGIN:
+    case ARGUMENTS_ADAPTOR_FRAME:
+    case CONSTRUCT_STUB_FRAME:
+      return 2;
+    case JS_FRAME:
       return 3;
   }
   UNREACHABLE();
@@ -1282,8 +1436,12 @@
   switch (opcode) {
     case BEGIN:
       return "BEGIN";
-    case FRAME:
-      return "FRAME";
+    case JS_FRAME:
+      return "JS_FRAME";
+    case ARGUMENTS_ADAPTOR_FRAME:
+      return "ARGUMENTS_ADAPTOR_FRAME";
+    case CONSTRUCT_STUB_FRAME:
+      return "CONSTRUCT_STUB_FRAME";
     case REGISTER:
       return "REGISTER";
     case INT32_REGISTER:
@@ -1337,7 +1495,9 @@
 
   switch (opcode) {
     case Translation::BEGIN:
-    case Translation::FRAME:
+    case Translation::JS_FRAME:
+    case Translation::ARGUMENTS_ADAPTOR_FRAME:
+    case Translation::CONSTRUCT_STUB_FRAME:
       // Peeled off before getting here.
       break;
 
@@ -1383,9 +1543,27 @@
 }
 
 
-void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
-                                             int inlined_frame_index,
-                                             Vector<SlotRef>* args_slots) {
+void SlotRef::ComputeSlotsForArguments(Vector<SlotRef>* args_slots,
+                                       TranslationIterator* it,
+                                       DeoptimizationInputData* data,
+                                       JavaScriptFrame* frame) {
+  // Process the translation commands for the arguments.
+
+  // Skip the translation command for the receiver.
+  it->Skip(Translation::NumberOfOperandsFor(
+      static_cast<Translation::Opcode>(it->Next())));
+
+  // Compute slots for arguments.
+  for (int i = 0; i < args_slots->length(); ++i) {
+    (*args_slots)[i] = ComputeSlotForNextArgument(it, data, frame);
+  }
+}
+
+
+Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
+    JavaScriptFrame* frame,
+    int inlined_jsframe_index,
+    int formal_parameter_count) {
   AssertNoAllocation no_gc;
   int deopt_index = AstNode::kNoNumber;
   DeoptimizationInputData* data =
@@ -1394,51 +1572,81 @@
                          data->TranslationIndex(deopt_index)->value());
   Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
   ASSERT(opcode == Translation::BEGIN);
-  int frame_count = it.Next();
-  USE(frame_count);
-  ASSERT(frame_count > inlined_frame_index);
-  int frames_to_skip = inlined_frame_index;
+  it.Next();  // Drop frame count.
+  int jsframe_count = it.Next();
+  USE(jsframe_count);
+  ASSERT(jsframe_count > inlined_jsframe_index);
+  int jsframes_to_skip = inlined_jsframe_index;
   while (true) {
     opcode = static_cast<Translation::Opcode>(it.Next());
-    // Skip over operands to advance to the next opcode.
-    it.Skip(Translation::NumberOfOperandsFor(opcode));
-    if (opcode == Translation::FRAME) {
-      if (frames_to_skip == 0) {
+    if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
+      if (jsframes_to_skip == 0) {
+        ASSERT(Translation::NumberOfOperandsFor(opcode) == 2);
+
+        it.Skip(1);  // literal id
+        int height = it.Next();
+
+        // We reached the arguments adaptor frame corresponding to the
+        // inlined function in question.  Number of arguments is height - 1.
+        Vector<SlotRef> args_slots =
+            Vector<SlotRef>::New(height - 1);  // Minus receiver.
+        ComputeSlotsForArguments(&args_slots, &it, data, frame);
+        return args_slots;
+      }
+    } else if (opcode == Translation::JS_FRAME) {
+      if (jsframes_to_skip == 0) {
+        // Skip over operands to advance to the next opcode.
+        it.Skip(Translation::NumberOfOperandsFor(opcode));
+
         // We reached the frame corresponding to the inlined function
         // in question.  Process the translation commands for the
-        // arguments.
-        //
-        // Skip the translation command for the receiver.
-        it.Skip(Translation::NumberOfOperandsFor(
-            static_cast<Translation::Opcode>(it.Next())));
-        // Compute slots for arguments.
-        for (int i = 0; i < args_slots->length(); ++i) {
-          (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
-        }
-        return;
+        // arguments.  The number of arguments is equal to the
+        // formal parameter count.
+        Vector<SlotRef> args_slots =
+            Vector<SlotRef>::New(formal_parameter_count);
+        ComputeSlotsForArguments(&args_slots, &it, data, frame);
+        return args_slots;
       }
-      frames_to_skip--;
+      jsframes_to_skip--;
     }
+
+    // Skip over operands to advance to the next opcode.
+    it.Skip(Translation::NumberOfOperandsFor(opcode));
   }
 
   UNREACHABLE();
+  return Vector<SlotRef>();
 }
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
-DeoptimizedFrameInfo::DeoptimizedFrameInfo(
-    Deoptimizer* deoptimizer, int frame_index) {
+DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
+                                           int frame_index,
+                                           bool has_arguments_adaptor,
+                                           bool has_construct_stub) {
   FrameDescription* output_frame = deoptimizer->output_[frame_index];
-  SetFunction(output_frame->GetFunction());
-  expression_count_ = output_frame->GetExpressionCount(deoptimizer);
+  function_ = output_frame->GetFunction();
+  has_construct_stub_ = has_construct_stub;
+  expression_count_ = output_frame->GetExpressionCount();
+  expression_stack_ = new Object*[expression_count_];
+  // Get the source position using the unoptimized code.
+  Address pc = reinterpret_cast<Address>(output_frame->GetPc());
+  Code* code = Code::cast(Isolate::Current()->heap()->FindCodeObject(pc));
+  source_position_ = code->SourcePosition(pc);
+
+  for (int i = 0; i < expression_count_; i++) {
+    SetExpression(i, output_frame->GetExpression(i));
+  }
+
+  if (has_arguments_adaptor) {
+    output_frame = deoptimizer->output_[frame_index - 1];
+    ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR);
+  }
+
   parameters_count_ = output_frame->ComputeParametersCount();
   parameters_ = new Object*[parameters_count_];
   for (int i = 0; i < parameters_count_; i++) {
-    SetParameter(i, output_frame->GetParameter(deoptimizer, i));
-  }
-  expression_stack_ = new Object*[expression_count_];
-  for (int i = 0; i < expression_count_; i++) {
-    SetExpression(i, output_frame->GetExpression(deoptimizer, i));
+    SetParameter(i, output_frame->GetParameter(i));
   }
 }
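
Because the bottom frame may sit on top of an arguments adaptor, the constructor reads the parameters from the adaptor frame one slot earlier, which is what makes the reported parameters "unadapted". A toy version of the frame selection (invented Frame struct, not V8's FrameDescription):

  #include <cassert>
  #include <vector>

  enum FrameType { JAVA_SCRIPT, ARGUMENTS_ADAPTOR };
  struct Frame { FrameType type; int parameters_count; };

  // Pick the frame whose parameter area describes the JS frame at frame_index,
  // mirroring the has_arguments_adaptor handling above.
  const Frame& ParameterFrame(const std::vector<Frame>& output, int frame_index) {
    bool has_adaptor =
        frame_index > 0 && output[frame_index - 1].type == ARGUMENTS_ADAPTOR;
    return output[has_adaptor ? frame_index - 1 : frame_index];
  }

  int main() {
    // The inlined callee was declared with 1 formal parameter but called with
    // 3 arguments, so an adaptor frame with 3 parameters precedes its frame.
    std::vector<Frame> output = {{JAVA_SCRIPT, 1},
                                 {ARGUMENTS_ADAPTOR, 3},
                                 {JAVA_SCRIPT, 1}};
    assert(ParameterFrame(output, 2).parameters_count == 3);
    return 0;
  }
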
 
@@ -1448,6 +1656,7 @@
   delete[] parameters_;
 }
 
+
 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
   v->VisitPointer(BitCast<Object**>(&function_));
   v->VisitPointers(parameters_, parameters_ + parameters_count_);
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 8641261..6bc4a51 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -86,8 +86,8 @@
 #endif
 
  private:
-  LargeObjectChunk* eager_deoptimization_entry_code_;
-  LargeObjectChunk* lazy_deoptimization_entry_code_;
+  MemoryChunk* eager_deoptimization_entry_code_;
+  MemoryChunk* lazy_deoptimization_entry_code_;
   Deoptimizer* current_;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -119,6 +119,9 @@
 
   int output_count() const { return output_count_; }
 
+  // Number of created JS frames. Not all created frames are necessarily JS.
+  int jsframe_count() const { return jsframe_count_; }
+
   static Deoptimizer* New(JSFunction* function,
                           BailoutType type,
                           unsigned bailout_id,
@@ -131,7 +134,7 @@
   // The returned object with information on the optimized frame needs to be
   // freed before another one can be generated.
   static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
-                                                        int frame_index,
+                                                        int jsframe_index,
                                                         Isolate* isolate);
   static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
                                              Isolate* isolate);
@@ -173,7 +176,8 @@
 
   // Patch stack guard check at instruction before pc_after in
   // the unoptimized code to unconditionally call replacement_code.
-  static void PatchStackCheckCodeAt(Address pc_after,
+  static void PatchStackCheckCodeAt(Code* unoptimized_code,
+                                    Address pc_after,
                                     Code* check_code,
                                     Code* replacement_code);
 
@@ -185,7 +189,8 @@
 
   // Change all patched stack guard checks in the unoptimized code
   // back to a normal stack guard check.
-  static void RevertStackCheckCodeAt(Address pc_after,
+  static void RevertStackCheckCodeAt(Code* unoptimized_code,
+                                     Address pc_after,
                                      Code* check_code,
                                      Code* replacement_code);
 
@@ -194,7 +199,11 @@
   void MaterializeHeapNumbers();
 #ifdef ENABLE_DEBUGGER_SUPPORT
   void MaterializeHeapNumbersForDebuggerInspectableFrame(
-      Address top, uint32_t size, DeoptimizedFrameInfo* info);
+      Address parameters_top,
+      uint32_t parameters_size,
+      Address expressions_top,
+      uint32_t expressions_size,
+      DeoptimizedFrameInfo* info);
 #endif
 
   static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@@ -250,8 +259,10 @@
     int count_;
   };
 
+  int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
+
  private:
-  static const int kNumberOfEntries = 4096;
+  static const int kNumberOfEntries = 16384;
 
   Deoptimizer(Isolate* isolate,
               JSFunction* function,
@@ -264,7 +275,11 @@
 
   void DoComputeOutputFrames();
   void DoComputeOsrOutputFrame();
-  void DoComputeFrame(TranslationIterator* iterator, int frame_index);
+  void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
+  void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+                                      int frame_index);
+  void DoComputeConstructStubFrame(TranslationIterator* iterator,
+                                   int frame_index);
   void DoTranslateCommand(TranslationIterator* iterator,
                           int frame_index,
                           unsigned output_offset);
@@ -285,7 +300,7 @@
 
   void AddDoubleValue(intptr_t slot_address, double value);
 
-  static LargeObjectChunk* CreateCode(BailoutType type);
+  static MemoryChunk* CreateCode(BailoutType type);
   static void GenerateDeoptimizationEntries(
       MacroAssembler* masm, int count, BailoutType type);
 
@@ -312,6 +327,8 @@
   FrameDescription* input_;
   // Number of output frames.
   int output_count_;
+  // Number of output js frames.
+  int jsframe_count_;
   // Array of output frame descriptions.
   FrameDescription** output_;
 
@@ -351,14 +368,27 @@
 
   JSFunction* GetFunction() const { return function_; }
 
-  unsigned GetOffsetFromSlotIndex(Deoptimizer* deoptimizer, int slot_index);
+  unsigned GetOffsetFromSlotIndex(int slot_index);
 
   intptr_t GetFrameSlot(unsigned offset) {
     return *GetFrameSlotPointer(offset);
   }
 
   double GetDoubleFrameSlot(unsigned offset) {
-    return *reinterpret_cast<double*>(GetFrameSlotPointer(offset));
+    intptr_t* ptr = GetFrameSlotPointer(offset);
+#if V8_TARGET_ARCH_MIPS
+    // Prevent gcc from using load-double (mips ldc1) on (possibly)
+    // non-64-bit aligned double. Uses two lwc1 instructions.
+    union conversion {
+      double d;
+      uint32_t u[2];
+    } c;
+    c.u[0] = *reinterpret_cast<uint32_t*>(ptr);
+    c.u[1] = *(reinterpret_cast<uint32_t*>(ptr) + 1);
+    return c.d;
+#else
+    return *reinterpret_cast<double*>(ptr);
+#endif
   }
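
The MIPS special case forces two 32-bit loads because a direct 64-bit ldc1 would fault on a slot that is only 4-byte aligned. std::memcpy is the other standard way to spell such an access; a standalone sketch of the same idea (not the V8 code):

  #include <cassert>
  #include <cstring>

  // Read a double from a possibly 4-byte-aligned address without issuing a
  // direct 64-bit load; same effect as the union used above.
  double ReadUnalignedDouble(const void* p) {
    double d;
    std::memcpy(&d, p, sizeof(d));
    return d;
  }

  int main() {
    // Store a double at a 4-byte (not 8-byte) offset into a byte buffer.
    alignas(8) unsigned char buf[16] = {0};
    double v = 3.5;
    std::memcpy(buf + 4, &v, sizeof(v));
    assert(ReadUnalignedDouble(buf + 4) == 3.5);
    return 0;
  }
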
 
   void SetFrameSlot(unsigned offset, intptr_t value) {
@@ -394,27 +424,28 @@
   intptr_t GetFp() const { return fp_; }
   void SetFp(intptr_t fp) { fp_ = fp; }
 
+  intptr_t GetContext() const { return context_; }
+  void SetContext(intptr_t context) { context_ = context; }
+
   Smi* GetState() const { return state_; }
   void SetState(Smi* state) { state_ = state; }
 
   void SetContinuation(intptr_t pc) { continuation_ = pc; }
 
-#ifdef DEBUG
-  Code::Kind GetKind() const { return kind_; }
-  void SetKind(Code::Kind kind) { kind_ = kind; }
-#endif
+  StackFrame::Type GetFrameType() const { return type_; }
+  void SetFrameType(StackFrame::Type type) { type_ = type; }
 
   // Get the incoming arguments count.
   int ComputeParametersCount();
 
   // Get a parameter value for an unoptimized frame.
-  Object* GetParameter(Deoptimizer* deoptimizer, int index);
+  Object* GetParameter(int index);
 
   // Get the expression stack height for an unoptimized frame.
-  unsigned GetExpressionCount(Deoptimizer* deoptimizer);
+  unsigned GetExpressionCount();
 
   // Get the expression stack value for an unoptimized frame.
-  Object* GetExpression(Deoptimizer* deoptimizer, int index);
+  Object* GetExpression(int index);
 
   static int registers_offset() {
     return OFFSET_OF(FrameDescription, registers_);
@@ -457,6 +488,8 @@
   intptr_t top_;
   intptr_t pc_;
   intptr_t fp_;
+  intptr_t context_;
+  StackFrame::Type type_;
   Smi* state_;
 #ifdef DEBUG
   Code::Kind kind_;
@@ -475,6 +508,8 @@
     return reinterpret_cast<intptr_t*>(
         reinterpret_cast<Address>(this) + frame_content_offset() + offset);
   }
+
+  int ComputeFixedSize();
 };
 
 
@@ -517,7 +552,9 @@
  public:
   enum Opcode {
     BEGIN,
-    FRAME,
+    JS_FRAME,
+    CONSTRUCT_STUB_FRAME,
+    ARGUMENTS_ADAPTOR_FRAME,
     REGISTER,
     INT32_REGISTER,
     DOUBLE_REGISTER,
@@ -532,17 +569,20 @@
     DUPLICATE
   };
 
-  Translation(TranslationBuffer* buffer, int frame_count)
+  Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count)
       : buffer_(buffer),
         index_(buffer->CurrentIndex()) {
     buffer_->Add(BEGIN);
     buffer_->Add(frame_count);
+    buffer_->Add(jsframe_count);
   }
 
   int index() const { return index_; }
 
   // Commands.
-  void BeginFrame(int node_id, int literal_id, unsigned height);
+  void BeginJSFrame(int node_id, int literal_id, unsigned height);
+  void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
+  void BeginConstructStubFrame(int literal_id, unsigned height);
   void StoreRegister(Register reg);
   void StoreInt32Register(Register reg);
   void StoreDoubleRegister(DoubleRegister reg);
@@ -632,9 +672,10 @@
     }
   }
 
-  static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
-                                             int inlined_frame_index,
-                                             Vector<SlotRef>* args_slots);
+  static Vector<SlotRef> ComputeSlotMappingForArguments(
+      JavaScriptFrame* frame,
+      int inlined_frame_index,
+      int formal_parameter_count);
 
  private:
   Address addr_;
@@ -654,6 +695,12 @@
   static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
                                             DeoptimizationInputData* data,
                                             JavaScriptFrame* frame);
+
+  static void ComputeSlotsForArguments(
+      Vector<SlotRef>* args_slots,
+      TranslationIterator* iterator,
+      DeoptimizationInputData* data,
+      JavaScriptFrame* frame);
 };
 
 
@@ -662,9 +709,14 @@
 // needs to inspect a frame that is part of an optimized frame. The
 // internally used FrameDescription objects are not GC safe so for use
 // by the debugger frame information is copied to an object of this type.
+// Represents parameters in unadapted form, so their number may not match the
+// formal parameter count.
 class DeoptimizedFrameInfo : public Malloced {
  public:
-  DeoptimizedFrameInfo(Deoptimizer* deoptimizer, int frame_index);
+  DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
+                       int frame_index,
+                       bool has_arguments_adaptor,
+                       bool has_construct_stub);
   virtual ~DeoptimizedFrameInfo();
 
   // GC support.
@@ -681,6 +733,12 @@
     return function_;
   }
 
+  // Check if this frame is preceded by a construct stub frame.  The bottom-most
+  // inlined frame might still be called by an uninlined construct stub.
+  bool HasConstructStub() {
+    return has_construct_stub_;
+  }
+
   // Get an incoming argument.
   Object* GetParameter(int index) {
     ASSERT(0 <= index && index < parameters_count());
@@ -693,12 +751,11 @@
     return expression_stack_[index];
   }
 
- private:
-  // Set the frame function.
-  void SetFunction(JSFunction* function) {
-    function_ = function;
+  int GetSourcePosition() {
+    return source_position_;
   }
 
+ private:
   // Set an incoming argument.
   void SetParameter(int index, Object* obj) {
     ASSERT(0 <= index && index < parameters_count());
@@ -712,10 +769,12 @@
   }
 
   JSFunction* function_;
+  bool has_construct_stub_;
   int parameters_count_;
   int expression_count_;
   Object** parameters_;
   Object** expression_stack_;
+  int source_position_;
 
   friend class Deoptimizer;
 };
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 1e67b4c..e3b40ab 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -200,7 +200,7 @@
     // Print all the reloc info for this instruction which are not comments.
     for (int i = 0; i < pcs.length(); i++) {
       // Put together the reloc info
-      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
+      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
 
       // Indent the printing of the reloc info.
       if (i == 0) {
diff --git a/src/double.h b/src/double.h
index 65eded9..16a3245 100644
--- a/src/double.h
+++ b/src/double.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,8 +34,8 @@
 namespace internal {
 
 // We assume that doubles and uint64_t have the same endianness.
-static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
-static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
+inline uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
+inline double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
 
 // Helper functions for doubles.
 class Double {
diff --git a/src/dtoa.h b/src/dtoa.h
index b3e79af..948a079 100644
--- a/src/dtoa.h
+++ b/src/dtoa.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,9 +47,9 @@
 // The maximal length of digits a double can have in base 10.
 // Note that DoubleToAscii null-terminates its input. So the given buffer should
 // be at least kBase10MaximalLength + 1 characters long.
-static const int kBase10MaximalLength = 17;
+const int kBase10MaximalLength = 17;
 
-// Converts the given double 'v' to ascii.
+// Converts the given double 'v' to ASCII.
 // The result should be interpreted as buffer * 10^(point-length).
 //
 // The output depends on the given mode:
diff --git a/src/elements.cc b/src/elements.cc
index 0454644..aa51ea9 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,14 +31,85 @@
 #include "elements.h"
 #include "utils.h"
 
+
+// Each concrete ElementsAccessor can handle exactly one ElementsKind;
+// several abstract ElementsAccessor classes are used to allow sharing
+// common code.
+//
+// Inheritance hierarchy:
+// - ElementsAccessorBase                        (abstract)
+//   - FastElementsAccessor                      (abstract)
+//     - FastObjectElementsAccessor
+//     - FastDoubleElementsAccessor
+//   - ExternalElementsAccessor                  (abstract)
+//     - ExternalByteElementsAccessor
+//     - ExternalUnsignedByteElementsAccessor
+//     - ExternalShortElementsAccessor
+//     - ExternalUnsignedShortElementsAccessor
+//     - ExternalIntElementsAccessor
+//     - ExternalUnsignedIntElementsAccessor
+//     - ExternalFloatElementsAccessor
+//     - ExternalDoubleElementsAccessor
+//     - PixelElementsAccessor
+//   - DictionaryElementsAccessor
+//   - NonStrictArgumentsElementsAccessor
+
+
 namespace v8 {
 namespace internal {
 
 
+// First argument in list is the accessor class, the second argument is the
+// accessor ElementsKind, and the third is the backing store class.  Use the
+// fast element handler for smi-only arrays.  The implementation is currently
+// identical.  Note that the order must match that of the ElementsKind enum for
+// the |accessor_array[]| below to work.
+#define ELEMENTS_LIST(V)                                                \
+  V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS, FixedArray)     \
+  V(FastObjectElementsAccessor, FAST_ELEMENTS, FixedArray)              \
+  V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \
+  V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS,                    \
+    SeededNumberDictionary)                                             \
+  V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS,  \
+    FixedArray)                                                         \
+  V(ExternalByteElementsAccessor, EXTERNAL_BYTE_ELEMENTS,               \
+    ExternalByteArray)                                                  \
+  V(ExternalUnsignedByteElementsAccessor,                               \
+    EXTERNAL_UNSIGNED_BYTE_ELEMENTS, ExternalUnsignedByteArray)         \
+  V(ExternalShortElementsAccessor, EXTERNAL_SHORT_ELEMENTS,             \
+    ExternalShortArray)                                                 \
+  V(ExternalUnsignedShortElementsAccessor,                              \
+    EXTERNAL_UNSIGNED_SHORT_ELEMENTS, ExternalUnsignedShortArray)       \
+  V(ExternalIntElementsAccessor, EXTERNAL_INT_ELEMENTS,                 \
+    ExternalIntArray)                                                   \
+  V(ExternalUnsignedIntElementsAccessor,                                \
+    EXTERNAL_UNSIGNED_INT_ELEMENTS, ExternalUnsignedIntArray)           \
+  V(ExternalFloatElementsAccessor,                                      \
+    EXTERNAL_FLOAT_ELEMENTS, ExternalFloatArray)                        \
+  V(ExternalDoubleElementsAccessor,                                     \
+    EXTERNAL_DOUBLE_ELEMENTS, ExternalDoubleArray)                      \
+  V(PixelElementsAccessor, EXTERNAL_PIXEL_ELEMENTS, ExternalPixelArray)
+
+
+template<ElementsKind Kind> class ElementsKindTraits {
+ public:
+  typedef FixedArrayBase BackingStore;
+};
+
+#define ELEMENTS_TRAITS(Class, KindParam, Store)               \
+template<> class ElementsKindTraits<KindParam> {               \
+  public:                                                      \
+  static const ElementsKind Kind = KindParam;                  \
+  typedef Store BackingStore;                                  \
+};
+ELEMENTS_LIST(ELEMENTS_TRAITS)
+#undef ELEMENTS_TRAITS
+
+
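
ELEMENTS_LIST is an X-macro: each V(...) row expands once per use site, here stamping out one ElementsKindTraits specialization per kind. A stripped-down version of the same pattern, keeping only the kind and backing-store columns (toy names, not the V8 ones):

  #include <type_traits>

  enum Kind { SMALL_KIND, BIG_KIND };
  struct SmallStore {};
  struct BigStore {};

  // Each V(kind, store) row in the list becomes one traits specialization.
  #define TOY_LIST(V)         \
    V(SMALL_KIND, SmallStore) \
    V(BIG_KIND, BigStore)

  template <Kind kind> struct KindTraits;

  #define TOY_TRAITS(KindParam, Store)         \
    template <> struct KindTraits<KindParam> { \
      typedef Store BackingStore;              \
    };
  TOY_LIST(TOY_TRAITS)
  #undef TOY_TRAITS

  static_assert(
      std::is_same<KindTraits<BIG_KIND>::BackingStore, BigStore>::value,
      "traits stamped out by the X-macro");

  int main() { return 0; }
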
 ElementsAccessor** ElementsAccessor::elements_accessors_;
 
 
-bool HasKey(FixedArray* array, Object* key) {
+static bool HasKey(FixedArray* array, Object* key) {
   int len0 = array->length();
   for (int i = 0; i < len0; i++) {
     Object* element = array->get(i);
@@ -52,6 +123,258 @@
 }
 
 
+static Failure* ThrowArrayLengthRangeError(Heap* heap) {
+  HandleScope scope(heap->isolate());
+  return heap->isolate()->Throw(
+      *heap->isolate()->factory()->NewRangeError("invalid_array_length",
+          HandleVector<Object>(NULL, 0)));
+}
+
+
+void CopyObjectToObjectElements(FixedArray* from,
+                                ElementsKind from_kind,
+                                uint32_t from_start,
+                                FixedArray* to,
+                                ElementsKind to_kind,
+                                uint32_t to_start,
+                                int raw_copy_size) {
+  ASSERT(to->map() != HEAP->fixed_cow_array_map());
+  ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS);
+  ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
+  int copy_size = raw_copy_size;
+  if (raw_copy_size < 0) {
+    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+    copy_size = Min(from->length() - from_start,
+                    to->length() - to_start);
+#ifdef DEBUG
+    // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
+    // marked with the hole.
+    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+      for (int i = to_start + copy_size; i < to->length(); ++i) {
+        ASSERT(to->get(i)->IsTheHole());
+      }
+    }
+#endif
+  }
+  ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+         (copy_size + static_cast<int>(from_start)) <= from->length());
+  if (copy_size == 0) return;
+  Address to_address = to->address() + FixedArray::kHeaderSize;
+  Address from_address = from->address() + FixedArray::kHeaderSize;
+  CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
+            reinterpret_cast<Object**>(from_address) + from_start,
+            copy_size);
+  if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    Heap* heap = from->GetHeap();
+    if (!heap->InNewSpace(to)) {
+      heap->RecordWrites(to->address(),
+                         to->OffsetOfElementAt(to_start),
+                         copy_size);
+    }
+    heap->incremental_marking()->RecordWrites(to);
+  }
+}
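
The negative raw_copy_size values are sentinels: kCopyToEnd means "copy as much as fits", and kCopyToEndAndInitializeToHole additionally expects (or writes) holes past the copied region. A toy version of the size resolution (the sentinel values here are assumptions; V8 defines the real ones on ElementsAccessor):

  #include <algorithm>
  #include <cassert>

  // Assumed sentinel values for illustration only.
  const int kCopyToEnd = -1;
  const int kCopyToEndAndInitializeToHole = -2;

  // Resolve a raw copy size the way the element copiers above do.
  int ResolveCopySize(int raw_copy_size, int from_length, int from_start,
                      int to_length, int to_start) {
    if (raw_copy_size >= 0) return raw_copy_size;
    assert(raw_copy_size == kCopyToEnd ||
           raw_copy_size == kCopyToEndAndInitializeToHole);
    return std::min(from_length - from_start, to_length - to_start);
  }

  int main() {
    assert(ResolveCopySize(3, 10, 0, 10, 0) == 3);          // explicit size
    assert(ResolveCopySize(kCopyToEnd, 10, 4, 8, 2) == 6);  // min(6, 6)
    return 0;
  }
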
+
+
+static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
+                                           uint32_t from_start,
+                                           FixedArray* to,
+                                           ElementsKind to_kind,
+                                           uint32_t to_start,
+                                           int raw_copy_size) {
+  int copy_size = raw_copy_size;
+  Heap* heap = from->GetHeap();
+  if (raw_copy_size < 0) {
+    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+    copy_size = from->max_number_key() + 1 - from_start;
+#ifdef DEBUG
+    // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
+    // marked with the hole.
+    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+      for (int i = to_start + copy_size; i < to->length(); ++i) {
+        ASSERT(to->get(i)->IsTheHole());
+      }
+    }
+#endif
+  }
+  ASSERT(to != from);
+  ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
+  if (copy_size == 0) return;
+  uint32_t to_length = to->length();
+  if (to_start + copy_size > to_length) {
+    copy_size = to_length - to_start;
+  }
+  for (int i = 0; i < copy_size; i++) {
+    int entry = from->FindEntry(i + from_start);
+    if (entry != SeededNumberDictionary::kNotFound) {
+      Object* value = from->ValueAt(entry);
+      ASSERT(!value->IsTheHole());
+      to->set(i + to_start, value, SKIP_WRITE_BARRIER);
+    } else {
+      to->set_the_hole(i + to_start);
+    }
+  }
+  if (to_kind == FAST_ELEMENTS) {
+    if (!heap->InNewSpace(to)) {
+      heap->RecordWrites(to->address(),
+                         to->OffsetOfElementAt(to_start),
+                         copy_size);
+    }
+    heap->incremental_marking()->RecordWrites(to);
+  }
+}
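
Copying out of a dictionary backing store is a sparse-to-dense conversion: keys present in the dictionary are copied, absent keys become holes. Modeled with a std::map standing in for SeededNumberDictionary (a simplification, not the V8 structure):

  #include <cassert>
  #include <cstdint>
  #include <map>
  #include <vector>

  const int kTheHole = -1;  // stand-in for V8's hole value

  // Densify copy_size entries of a sparse store, writing holes for gaps.
  void CopySparseToDense(const std::map<uint32_t, int>& from,
                         uint32_t from_start, std::vector<int>* to,
                         uint32_t to_start, int copy_size) {
    for (int i = 0; i < copy_size; i++) {
      std::map<uint32_t, int>::const_iterator it = from.find(i + from_start);
      (*to)[i + to_start] = (it != from.end()) ? it->second : kTheHole;
    }
  }

  int main() {
    std::map<uint32_t, int> dict;
    dict[0] = 10;
    dict[2] = 30;
    std::vector<int> dense(3, 0);
    CopySparseToDense(dict, 0, &dense, 0, 3);
    assert(dense[0] == 10 && dense[1] == kTheHole && dense[2] == 30);
    return 0;
  }
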
+
+
+MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
+    FixedDoubleArray* from,
+    uint32_t from_start,
+    FixedArray* to,
+    ElementsKind to_kind,
+    uint32_t to_start,
+    int raw_copy_size) {
+  ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
+  int copy_size = raw_copy_size;
+  if (raw_copy_size < 0) {
+    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+    copy_size = Min(from->length() - from_start,
+                    to->length() - to_start);
+#ifdef DEBUG
+    // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
+    // marked with the hole.
+    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+      for (int i = to_start + copy_size; i < to->length(); ++i) {
+        ASSERT(to->get(i)->IsTheHole());
+      }
+    }
+#endif
+  }
+  ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+         (copy_size + static_cast<int>(from_start)) <= from->length());
+  if (copy_size == 0) return from;
+  for (int i = 0; i < copy_size; ++i) {
+    if (to_kind == FAST_SMI_ONLY_ELEMENTS) {
+      UNIMPLEMENTED();
+      return Failure::Exception();
+    } else {
+      MaybeObject* maybe_value = from->get(i + from_start);
+      Object* value;
+      ASSERT(to_kind == FAST_ELEMENTS);
+      // Because the FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS conversion
+      // allocates HeapObjects iteratively, each allocation must succeed
+      // within a single GC cycle; otherwise the retry after the GC will also
+      // fail. To ensure that no GC is triggered, allocate HeapNumbers from
+      // old space if they can't be taken from new space.
+      if (!maybe_value->ToObject(&value)) {
+        ASSERT(maybe_value->IsRetryAfterGC() || maybe_value->IsOutOfMemory());
+        Heap* heap = from->GetHeap();
+        MaybeObject* maybe_value_object =
+            heap->AllocateHeapNumber(from->get_scalar(i + from_start),
+                                     TENURED);
+        if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
+      }
+      to->set(i + to_start, value, UPDATE_WRITE_BARRIER);
+    }
+  }
+  return to;
+}
+
+
+static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
+                                       uint32_t from_start,
+                                       FixedDoubleArray* to,
+                                       uint32_t to_start,
+                                       int raw_copy_size) {
+  int copy_size = raw_copy_size;
+  if (raw_copy_size < 0) {
+    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+    copy_size = Min(from->length() - from_start,
+                    to->length() - to_start);
+    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+      for (int i = to_start + copy_size; i < to->length(); ++i) {
+        to->set_the_hole(i);
+      }
+    }
+  }
+  ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+         (copy_size + static_cast<int>(from_start)) <= from->length());
+  if (copy_size == 0) return;
+  Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
+  Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
+  to_address += kDoubleSize * to_start;
+  from_address += kDoubleSize * from_start;
+  int words_per_double = (kDoubleSize / kPointerSize);
+  CopyWords(reinterpret_cast<Object**>(to_address),
+            reinterpret_cast<Object**>(from_address),
+            words_per_double * copy_size);
+}
+
+
+static void CopyObjectToDoubleElements(FixedArray* from,
+                                       uint32_t from_start,
+                                       FixedDoubleArray* to,
+                                       uint32_t to_start,
+                                       int raw_copy_size) {
+  int copy_size = raw_copy_size;
+  if (raw_copy_size < 0) {
+    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+    copy_size = from->length() - from_start;
+    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+      for (int i = to_start + copy_size; i < to->length(); ++i) {
+        to->set_the_hole(i);
+      }
+    }
+  }
+  ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+         (copy_size + static_cast<int>(from_start)) <= from->length());
+  if (copy_size == 0) return;
+  for (int i = 0; i < copy_size; i++) {
+    Object* hole_or_object = from->get(i + from_start);
+    if (hole_or_object->IsTheHole()) {
+      to->set_the_hole(i + to_start);
+    } else {
+      to->set(i + to_start, hole_or_object->Number());
+    }
+  }
+}
+
+
+static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from,
+                                           uint32_t from_start,
+                                           FixedDoubleArray* to,
+                                           uint32_t to_start,
+                                           int raw_copy_size) {
+  int copy_size = raw_copy_size;
+  if (copy_size < 0) {
+    ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
+           copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+    copy_size = from->max_number_key() + 1 - from_start;
+    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+      for (int i = to_start + copy_size; i < to->length(); ++i) {
+        to->set_the_hole(i);
+      }
+    }
+  }
+  if (copy_size == 0) return;
+  uint32_t to_length = to->length();
+  if (to_start + copy_size > to_length) {
+    copy_size = to_length - to_start;
+  }
+  for (int i = 0; i < copy_size; i++) {
+    int entry = from->FindEntry(i + from_start);
+    if (entry != SeededNumberDictionary::kNotFound) {
+      to->set(i + to_start, from->ValueAt(entry)->Number());
+    } else {
+      to->set_the_hole(i + to_start);
+    }
+  }
+}
+
+
 // Base class for element handler implementations. Contains the
 // common logic for objects with different ElementsKinds.
 // Subclasses must specialize methods for which the element
@@ -69,36 +392,119 @@
 // http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).  We use
 // CRTP to guarantee aggressive compile time optimizations (i.e.  inlining and
 // specialization of SomeElementsAccessor methods).
-template <typename ElementsAccessorSubclass, typename BackingStoreClass>
+template <typename ElementsAccessorSubclass,
+          typename ElementsTraitsParam>
 class ElementsAccessorBase : public ElementsAccessor {
  protected:
-  ElementsAccessorBase() { }
-  virtual MaybeObject* Get(FixedArrayBase* backing_store,
-                           uint32_t key,
-                           JSObject* obj,
-                           Object* receiver) {
-    return ElementsAccessorSubclass::Get(
-        BackingStoreClass::cast(backing_store), key, obj, receiver);
+  explicit ElementsAccessorBase(const char* name)
+      : ElementsAccessor(name) { }
+
+  typedef ElementsTraitsParam ElementsTraits;
+  typedef typename ElementsTraitsParam::BackingStore BackingStore;
+
+  virtual ElementsKind kind() const { return ElementsTraits::Kind; }
+
+  static bool HasElementImpl(Object* receiver,
+                             JSObject* holder,
+                             uint32_t key,
+                             BackingStore* backing_store) {
+    MaybeObject* element =
+        ElementsAccessorSubclass::GetImpl(receiver, holder, key, backing_store);
+    return !element->IsTheHole();
   }
 
-  static MaybeObject* Get(BackingStoreClass* backing_store,
+  virtual bool HasElement(Object* receiver,
+                          JSObject* holder,
                           uint32_t key,
-                          JSObject* obj,
-                          Object* receiver) {
-    if (key < ElementsAccessorSubclass::GetCapacity(backing_store)) {
-      return backing_store->get(key);
+                          FixedArrayBase* backing_store) {
+    if (backing_store == NULL) {
+      backing_store = holder->elements();
     }
-    return backing_store->GetHeap()->the_hole_value();
+    return ElementsAccessorSubclass::HasElementImpl(
+        receiver, holder, key, BackingStore::cast(backing_store));
+  }
+
+  virtual MaybeObject* Get(Object* receiver,
+                           JSObject* holder,
+                           uint32_t key,
+                           FixedArrayBase* backing_store) {
+    if (backing_store == NULL) {
+      backing_store = holder->elements();
+    }
+    return ElementsAccessorSubclass::GetImpl(
+        receiver, holder, key, BackingStore::cast(backing_store));
+  }
+
+  static MaybeObject* GetImpl(Object* receiver,
+                              JSObject* obj,
+                              uint32_t key,
+                              BackingStore* backing_store) {
+    return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
+           ? backing_store->get(key)
+           : backing_store->GetHeap()->the_hole_value();
+  }
+
+  virtual MaybeObject* SetLength(JSArray* array,
+                                 Object* length) {
+    return ElementsAccessorSubclass::SetLengthImpl(
+        array, length, BackingStore::cast(array->elements()));
+  }
+
+  static MaybeObject* SetLengthImpl(JSObject* obj,
+                                    Object* length,
+                                    BackingStore* backing_store);
+
+  virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+                                            int capacity,
+                                            int length) {
+    return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
+        array,
+        capacity,
+        length);
+  }
+
+  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+                                                       int capacity,
+                                                       int length) {
+    UNIMPLEMENTED();
+    return obj;
   }
 
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) = 0;
 
-  virtual MaybeObject* AddElementsToFixedArray(FixedArrayBase* from,
-                                               FixedArray* to,
+  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+                                       uint32_t from_start,
+                                       FixedArrayBase* to,
+                                       ElementsKind to_kind,
+                                       uint32_t to_start,
+                                       int copy_size) {
+    UNREACHABLE();
+    return NULL;
+  }
+
+  virtual MaybeObject* CopyElements(JSObject* from_holder,
+                                    uint32_t from_start,
+                                    FixedArrayBase* to,
+                                    ElementsKind to_kind,
+                                    uint32_t to_start,
+                                    int copy_size,
+                                    FixedArrayBase* from) {
+    if (from == NULL) {
+      from = from_holder->elements();
+    }
+    if (from->length() == 0) {
+      return from;
+    }
+    return ElementsAccessorSubclass::CopyElementsImpl(
+        from, from_start, to, to_kind, to_start, copy_size);
+  }
+
+  virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
                                                JSObject* holder,
-                                               Object* receiver) {
+                                               FixedArray* to,
+                                               FixedArrayBase* from) {
     int len0 = to->length();
 #ifdef DEBUG
     if (FLAG_enable_slow_asserts) {
@@ -107,24 +513,26 @@
       }
     }
 #endif
-    BackingStoreClass* backing_store = BackingStoreClass::cast(from);
-    uint32_t len1 = ElementsAccessorSubclass::GetCapacity(backing_store);
+    if (from == NULL) {
+      from = holder->elements();
+    }
+    BackingStore* backing_store = BackingStore::cast(from);
+    uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(backing_store);
 
     // Optimize if 'other' is empty.
     // We cannot optimize if 'this' is empty, as other may have holes.
     if (len1 == 0) return to;
 
     // Compute how many elements are not in other.
-    int extra = 0;
+    uint32_t extra = 0;
     for (uint32_t y = 0; y < len1; y++) {
-      if (ElementsAccessorSubclass::HasElementAtIndex(backing_store,
-                                                      y,
-                                                      holder,
-                                                      receiver)) {
-        uint32_t key =
-            ElementsAccessorSubclass::GetKeyForIndex(backing_store, y);
+      uint32_t key =
+          ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
+      if (ElementsAccessorSubclass::HasElementImpl(
+              receiver, holder, key, backing_store)) {
         MaybeObject* maybe_value =
-            ElementsAccessorSubclass::Get(backing_store, key, holder, receiver);
+            ElementsAccessorSubclass::GetImpl(receiver, holder,
+                                              key, backing_store);
         Object* value;
         if (!maybe_value->ToObject(&value)) return maybe_value;
         ASSERT(!value->IsTheHole());
@@ -153,16 +561,15 @@
       }
     }
     // Fill in the extra values.
-    int index = 0;
+    uint32_t index = 0;
     for (uint32_t y = 0; y < len1; y++) {
-      if (ElementsAccessorSubclass::HasElementAtIndex(backing_store,
-                                                      y,
-                                                      holder,
-                                                      receiver)) {
-        uint32_t key =
-            ElementsAccessorSubclass::GetKeyForIndex(backing_store, y);
+      uint32_t key =
+          ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
+      if (ElementsAccessorSubclass::HasElementImpl(
+              receiver, holder, key, backing_store)) {
         MaybeObject* maybe_value =
-            ElementsAccessorSubclass::Get(backing_store, key, holder, receiver);
+            ElementsAccessorSubclass::GetImpl(receiver, holder,
+                                              key, backing_store);
         Object* value;
         if (!maybe_value->ToObject(&value)) return maybe_value;
         if (!value->IsTheHole() && !HasKey(to, value)) {
@@ -176,45 +583,24 @@
   }
 
  protected:
-  static uint32_t GetCapacity(BackingStoreClass* backing_store) {
+  static uint32_t GetCapacityImpl(BackingStore* backing_store) {
     return backing_store->length();
   }
 
   virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
-    return ElementsAccessorSubclass::GetCapacity(
-        BackingStoreClass::cast(backing_store));
+    return ElementsAccessorSubclass::GetCapacityImpl(
+        BackingStore::cast(backing_store));
   }
 
-  static bool HasElementAtIndex(BackingStoreClass* backing_store,
-                                uint32_t index,
-                                JSObject* holder,
-                                Object* receiver) {
-    uint32_t key =
-        ElementsAccessorSubclass::GetKeyForIndex(backing_store, index);
-    MaybeObject* element = ElementsAccessorSubclass::Get(backing_store,
-                                                         key,
-                                                         holder,
-                                                         receiver);
-    return !element->IsTheHole();
-  }
-
-  virtual bool HasElementAtIndex(FixedArrayBase* backing_store,
-                                 uint32_t index,
-                                 JSObject* holder,
-                                 Object* receiver) {
-    return ElementsAccessorSubclass::HasElementAtIndex(
-        BackingStoreClass::cast(backing_store), index, holder, receiver);
-  }
-
-  static uint32_t GetKeyForIndex(BackingStoreClass* backing_store,
-                                 uint32_t index) {
+  static uint32_t GetKeyForIndexImpl(BackingStore* backing_store,
+                                     uint32_t index) {
     return index;
   }
 
   virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
-                                              uint32_t index) {
-    return ElementsAccessorSubclass::GetKeyForIndex(
-        BackingStoreClass::cast(backing_store), index);
+                                  uint32_t index) {
+    return ElementsAccessorSubclass::GetKeyForIndexImpl(
+        BackingStore::cast(backing_store), index);
   }
 
  private:
@@ -222,12 +608,87 @@
 };
 
 
+// Super class for all fast element arrays.
+template<typename FastElementsAccessorSubclass,
+         typename KindTraits,
+         int ElementSize>
 class FastElementsAccessor
-    : public ElementsAccessorBase<FastElementsAccessor, FixedArray> {
+    : public ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits> {
  public:
+  explicit FastElementsAccessor(const char* name)
+      : ElementsAccessorBase<FastElementsAccessorSubclass,
+                             KindTraits>(name) {}
+ protected:
+  friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
+
+  typedef typename KindTraits::BackingStore BackingStore;
+
+  // Adjusts the length of the fast backing store, returning either the new
+  // length or undefined when conversion to a slow backing store should be
+  // performed instead.
+  static MaybeObject* SetLengthWithoutNormalize(BackingStore* backing_store,
+                                                JSArray* array,
+                                                Object* length_object,
+                                                uint32_t length) {
+    uint32_t old_capacity = backing_store->length();
+
+    // Check whether the backing store should be shrunk.
+    if (length <= old_capacity) {
+      if (array->HasFastTypeElements()) {
+        MaybeObject* maybe_obj = array->EnsureWritableFastElements();
+        if (!maybe_obj->To(&backing_store)) return maybe_obj;
+      }
+      if (2 * length <= old_capacity) {
+        // If more than half the elements won't be used, trim the array.
+        if (length == 0) {
+          array->initialize_elements();
+        } else {
+          backing_store->set_length(length);
+          Address filler_start = backing_store->address() +
+              BackingStore::OffsetOfElementAt(length);
+          int filler_size = (old_capacity - length) * ElementSize;
+          array->GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
+        }
+      } else {
+        // Otherwise, fill the unused tail with holes.
+        int old_length = FastD2I(array->length()->Number());
+        for (int i = length; i < old_length; i++) {
+          backing_store->set_the_hole(i);
+        }
+      }
+      return length_object;
+    }
+
+    // Check whether the backing store should be expanded.
+    uint32_t min = JSObject::NewElementsCapacity(old_capacity);
+    uint32_t new_capacity = length > min ? length : min;
+    if (!array->ShouldConvertToSlowElements(new_capacity)) {
+      MaybeObject* result = FastElementsAccessorSubclass::
+          SetFastElementsCapacityAndLength(array, new_capacity, length);
+      if (result->IsFailure()) return result;
+      return length_object;
+    }
+
+    // Request conversion to slow elements.
+    return array->GetHeap()->undefined_value();
+  }
+};
+
+
+class FastObjectElementsAccessor
+    : public FastElementsAccessor<FastObjectElementsAccessor,
+                                  ElementsKindTraits<FAST_ELEMENTS>,
+                                  kPointerSize> {
+ public:
+  explicit FastObjectElementsAccessor(const char* name)
+      : FastElementsAccessor<FastObjectElementsAccessor,
+                             ElementsKindTraits<FAST_ELEMENTS>,
+                             kPointerSize>(name) {}
+
   static MaybeObject* DeleteCommon(JSObject* obj,
                                    uint32_t key) {
-    ASSERT(obj->HasFastElements() || obj->HasFastArgumentsElements());
+    ASSERT(obj->HasFastElements() ||
+           obj->HasFastSmiOnlyElements() ||
+           obj->HasFastArgumentsElements());
     Heap* heap = obj->GetHeap();
     FixedArray* backing_store = FixedArray::cast(obj->elements());
     if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
@@ -269,7 +730,49 @@
     return heap->true_value();
   }
 
+  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+                                       uint32_t from_start,
+                                       FixedArrayBase* to,
+                                       ElementsKind to_kind,
+                                       uint32_t to_start,
+                                       int copy_size) {
+    switch (to_kind) {
+      case FAST_SMI_ONLY_ELEMENTS:
+      case FAST_ELEMENTS: {
+        CopyObjectToObjectElements(
+            FixedArray::cast(from), ElementsTraits::Kind, from_start,
+            FixedArray::cast(to), to_kind, to_start, copy_size);
+        return from;
+      }
+      case FAST_DOUBLE_ELEMENTS:
+        CopyObjectToDoubleElements(
+            FixedArray::cast(from), from_start,
+            FixedDoubleArray::cast(to), to_start, copy_size);
+        return from;
+      default:
+        UNREACHABLE();
+    }
+    return to->GetHeap()->undefined_value();
+  }
+
+
+  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+                                                       uint32_t capacity,
+                                                       uint32_t length) {
+    JSObject::SetFastElementsCapacityMode set_capacity_mode =
+        obj->HasFastSmiOnlyElements()
+            ? JSObject::kAllowSmiOnlyElements
+            : JSObject::kDontAllowSmiOnlyElements;
+    return obj->SetFastElementsCapacityAndLength(capacity,
+                                                 length,
+                                                 set_capacity_mode);
+  }
+
  protected:
+  friend class FastElementsAccessor<FastObjectElementsAccessor,
+                                    ElementsKindTraits<FAST_ELEMENTS>,
+                                    kPointerSize>;
+
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) {
@@ -279,11 +782,50 @@
 
 
 class FastDoubleElementsAccessor
-    : public ElementsAccessorBase<FastDoubleElementsAccessor,
-                                  FixedDoubleArray> {
+    : public FastElementsAccessor<FastDoubleElementsAccessor,
+                                  ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
+                                  kDoubleSize> {
+ public:
+  explicit FastDoubleElementsAccessor(const char* name)
+      : FastElementsAccessor<FastDoubleElementsAccessor,
+                             ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
+                             kDoubleSize>(name) {}
+
+  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+                                                       uint32_t capacity,
+                                                       uint32_t length) {
+    return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
+  }
+
  protected:
   friend class ElementsAccessorBase<FastDoubleElementsAccessor,
-                                    FixedDoubleArray>;
+                                    ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
+  friend class FastElementsAccessor<FastDoubleElementsAccessor,
+                                    ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
+                                    kDoubleSize>;
+
+  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+                                       uint32_t from_start,
+                                       FixedArrayBase* to,
+                                       ElementsKind to_kind,
+                                       uint32_t to_start,
+                                       int copy_size) {
+    switch (to_kind) {
+      case FAST_SMI_ONLY_ELEMENTS:
+      case FAST_ELEMENTS:
+        return CopyDoubleToObjectElements(
+            FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
+            to_kind, to_start, copy_size);
+      case FAST_DOUBLE_ELEMENTS:
+        CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
+                                   FixedDoubleArray::cast(to),
+                                   to_start, copy_size);
+        return from;
+      default:
+        UNREACHABLE();
+    }
+    return to->GetHeap()->undefined_value();
+  }
 
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
@@ -297,34 +839,49 @@
     return obj->GetHeap()->true_value();
   }
 
-  static bool HasElementAtIndex(FixedDoubleArray* backing_store,
-                                uint32_t index,
-                                JSObject* holder,
-                                Object* receiver) {
-    return !backing_store->is_the_hole(index);
+  static bool HasElementImpl(Object* receiver,
+                             JSObject* holder,
+                             uint32_t key,
+                             FixedDoubleArray* backing_store) {
+    return key < static_cast<uint32_t>(backing_store->length()) &&
+        !backing_store->is_the_hole(key);
   }
 };
 
 
 // Super class for all external element arrays.
 template<typename ExternalElementsAccessorSubclass,
-         typename ExternalArray>
+         ElementsKind Kind>
 class ExternalElementsAccessor
     : public ElementsAccessorBase<ExternalElementsAccessorSubclass,
-                                  ExternalArray> {
- protected:
-  friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
-                                    ExternalArray>;
+                                  ElementsKindTraits<Kind> > {
+ public:
+  explicit ExternalElementsAccessor(const char* name)
+      : ElementsAccessorBase<ExternalElementsAccessorSubclass,
+                             ElementsKindTraits<Kind> >(name) {}
 
-  static MaybeObject* Get(ExternalArray* backing_store,
-                          uint32_t key,
-                          JSObject* obj,
-                          Object* receiver) {
-    if (key < ExternalElementsAccessorSubclass::GetCapacity(backing_store)) {
-      return backing_store->get(key);
-    } else {
-      return backing_store->GetHeap()->undefined_value();
-    }
+ protected:
+  typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
+
+  friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
+                                    ElementsKindTraits<Kind> >;
+
+  static MaybeObject* GetImpl(Object* receiver,
+                              JSObject* obj,
+                              uint32_t key,
+                              BackingStore* backing_store) {
+    return
+        key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+        ? backing_store->get(key)
+        : backing_store->GetHeap()->undefined_value();
+  }
+
+  static MaybeObject* SetLengthImpl(JSObject* obj,
+                                    Object* length,
+                                    BackingStore* backing_store) {
+    // External arrays do not support changing their length.
+    UNREACHABLE();
+    return obj;
   }
 
   virtual MaybeObject* Delete(JSObject* obj,
@@ -333,67 +890,173 @@
     // External arrays always ignore deletes.
     return obj->GetHeap()->true_value();
   }
+
+  static bool HasElementImpl(Object* receiver,
+                             JSObject* holder,
+                             uint32_t key,
+                             BackingStore* backing_store) {
+    uint32_t capacity =
+        ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store);
+    return key < capacity;
+  }
 };
 
 
 class ExternalByteElementsAccessor
     : public ExternalElementsAccessor<ExternalByteElementsAccessor,
-                                      ExternalByteArray> {
+                                      EXTERNAL_BYTE_ELEMENTS> {
+ public:
+  explicit ExternalByteElementsAccessor(const char* name)
+      : ExternalElementsAccessor<ExternalByteElementsAccessor,
+                                 EXTERNAL_BYTE_ELEMENTS>(name) {}
 };
 
 
 class ExternalUnsignedByteElementsAccessor
     : public ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
-                                      ExternalUnsignedByteArray> {
+                                      EXTERNAL_UNSIGNED_BYTE_ELEMENTS> {
+ public:
+  explicit ExternalUnsignedByteElementsAccessor(const char* name)
+      : ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
+                                 EXTERNAL_UNSIGNED_BYTE_ELEMENTS>(name) {}
 };
 
 
 class ExternalShortElementsAccessor
     : public ExternalElementsAccessor<ExternalShortElementsAccessor,
-                                      ExternalShortArray> {
+                                      EXTERNAL_SHORT_ELEMENTS> {
+ public:
+  explicit ExternalShortElementsAccessor(const char* name)
+      : ExternalElementsAccessor<ExternalShortElementsAccessor,
+                                 EXTERNAL_SHORT_ELEMENTS>(name) {}
 };
 
 
 class ExternalUnsignedShortElementsAccessor
     : public ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
-                                      ExternalUnsignedShortArray> {
+                                      EXTERNAL_UNSIGNED_SHORT_ELEMENTS> {
+ public:
+  explicit ExternalUnsignedShortElementsAccessor(const char* name)
+      : ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
+                                 EXTERNAL_UNSIGNED_SHORT_ELEMENTS>(name) {}
 };
 
 
 class ExternalIntElementsAccessor
     : public ExternalElementsAccessor<ExternalIntElementsAccessor,
-                                      ExternalIntArray> {
+                                      EXTERNAL_INT_ELEMENTS> {
+ public:
+  explicit ExternalIntElementsAccessor(const char* name)
+      : ExternalElementsAccessor<ExternalIntElementsAccessor,
+                                 EXTERNAL_INT_ELEMENTS>(name) {}
 };
 
 
 class ExternalUnsignedIntElementsAccessor
     : public ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
-                                      ExternalUnsignedIntArray> {
+                                      EXTERNAL_UNSIGNED_INT_ELEMENTS> {
+ public:
+  explicit ExternalUnsignedIntElementsAccessor(const char* name)
+      : ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
+                                 EXTERNAL_UNSIGNED_INT_ELEMENTS>(name) {}
 };
 
 
 class ExternalFloatElementsAccessor
     : public ExternalElementsAccessor<ExternalFloatElementsAccessor,
-                                      ExternalFloatArray> {
+                                      EXTERNAL_FLOAT_ELEMENTS> {
+ public:
+  explicit ExternalFloatElementsAccessor(const char* name)
+      : ExternalElementsAccessor<ExternalFloatElementsAccessor,
+                                 EXTERNAL_FLOAT_ELEMENTS>(name) {}
 };
 
 
 class ExternalDoubleElementsAccessor
     : public ExternalElementsAccessor<ExternalDoubleElementsAccessor,
-                                      ExternalDoubleArray> {
+                                      EXTERNAL_DOUBLE_ELEMENTS> {
+ public:
+  explicit ExternalDoubleElementsAccessor(const char* name)
+      : ExternalElementsAccessor<ExternalDoubleElementsAccessor,
+                                 EXTERNAL_DOUBLE_ELEMENTS>(name) {}
 };
 
 
 class PixelElementsAccessor
     : public ExternalElementsAccessor<PixelElementsAccessor,
-                                      ExternalPixelArray> {
+                                      EXTERNAL_PIXEL_ELEMENTS> {
+ public:
+  explicit PixelElementsAccessor(const char* name)
+      : ExternalElementsAccessor<PixelElementsAccessor,
+                                 EXTERNAL_PIXEL_ELEMENTS>(name) {}
 };
 
 
 class DictionaryElementsAccessor
     : public ElementsAccessorBase<DictionaryElementsAccessor,
-                                  SeededNumberDictionary> {
+                                  ElementsKindTraits<DICTIONARY_ELEMENTS> > {
  public:
+  explicit DictionaryElementsAccessor(const char* name)
+      : ElementsAccessorBase<DictionaryElementsAccessor,
+                             ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
+
+  // Adjusts the length of the dictionary backing store and returns the new
+  // length according to ES5 section 15.4.5.2 behavior.
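+  // For example: if the dictionary holds a non-deletable element at key 5 and
+  // the requested length is 2, the length is clamped to 6 and only entries
+  // with keys greater than or equal to 6 are removed.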
+  static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict,
+                                                JSArray* array,
+                                                Object* length_object,
+                                                uint32_t length) {
+    if (length == 0) {
+      // If the length of a slow array is reset to zero, we clear
+      // the array and flush backing storage. This has the added
+      // benefit that the array returns to fast mode.
+      Object* obj;
+      MaybeObject* maybe_obj = array->ResetElements();
+      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+    } else {
+      uint32_t new_length = length;
+      uint32_t old_length = static_cast<uint32_t>(array->length()->Number());
+      if (new_length < old_length) {
+        // Find last non-deletable element in range of elements to be
+        // deleted and adjust range accordingly.
+        Heap* heap = array->GetHeap();
+        int capacity = dict->Capacity();
+        for (int i = 0; i < capacity; i++) {
+          Object* key = dict->KeyAt(i);
+          if (key->IsNumber()) {
+            uint32_t number = static_cast<uint32_t>(key->Number());
+            if (new_length <= number && number < old_length) {
+              PropertyDetails details = dict->DetailsAt(i);
+              if (details.IsDontDelete()) new_length = number + 1;
+            }
+          }
+        }
+        if (new_length != length) {
+          MaybeObject* maybe_object = heap->NumberFromUint32(new_length);
+          if (!maybe_object->To(&length_object)) return maybe_object;
+        }
+
+        // Remove elements that should be deleted.
+        int removed_entries = 0;
+        Object* the_hole_value = heap->the_hole_value();
+        for (int i = 0; i < capacity; i++) {
+          Object* key = dict->KeyAt(i);
+          if (key->IsNumber()) {
+            uint32_t number = static_cast<uint32_t>(key->Number());
+            if (new_length <= number && number < old_length) {
+              dict->SetEntry(i, the_hole_value, the_hole_value);
+              removed_entries++;
+            }
+          }
+        }
+
+        // Update the number of elements.
+        dict->ElementsRemoved(removed_entries);
+      }
+    }
+    return length_object;
+  }
+
   static MaybeObject* DeleteCommon(JSObject* obj,
                                    uint32_t key,
                                    JSReceiver::DeleteMode mode) {
@@ -439,9 +1102,34 @@
     return heap->true_value();
   }
 
+  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+                                       uint32_t from_start,
+                                       FixedArrayBase* to,
+                                       ElementsKind to_kind,
+                                       uint32_t to_start,
+                                       int copy_size) {
+    switch (to_kind) {
+      case FAST_SMI_ONLY_ELEMENTS:
+      case FAST_ELEMENTS:
+        CopyDictionaryToObjectElements(
+            SeededNumberDictionary::cast(from), from_start,
+            FixedArray::cast(to), to_kind, to_start, copy_size);
+        return from;
+      case FAST_DOUBLE_ELEMENTS:
+        CopyDictionaryToDoubleElements(
+            SeededNumberDictionary::cast(from), from_start,
+            FixedDoubleArray::cast(to), to_start, copy_size);
+        return from;
+      default:
+        UNREACHABLE();
+    }
+    return to->GetHeap()->undefined_value();
+  }
+
+
  protected:
   friend class ElementsAccessorBase<DictionaryElementsAccessor,
-                                    SeededNumberDictionary>;
+                                    ElementsKindTraits<DICTIONARY_ELEMENTS> >;
 
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
@@ -449,10 +1137,10 @@
     return DeleteCommon(obj, key, mode);
   }
 
-  static MaybeObject* Get(SeededNumberDictionary* backing_store,
-                          uint32_t key,
-                          JSObject* obj,
-                          Object* receiver) {
+  static MaybeObject* GetImpl(Object* receiver,
+                              JSObject* obj,
+                              uint32_t key,
+                              SeededNumberDictionary* backing_store) {
     int entry = backing_store->FindEntry(key);
     if (entry != SeededNumberDictionary::kNotFound) {
       Object* element = backing_store->ValueAt(entry);
@@ -469,26 +1157,40 @@
     return obj->GetHeap()->the_hole_value();
   }
 
-  static uint32_t GetKeyForIndex(SeededNumberDictionary* dict,
-                                 uint32_t index) {
+  static bool HasElementImpl(Object* receiver,
+                             JSObject* holder,
+                             uint32_t key,
+                             SeededNumberDictionary* backing_store) {
+    return backing_store->FindEntry(key) !=
+        SeededNumberDictionary::kNotFound;
+  }
+
+  static uint32_t GetKeyForIndexImpl(SeededNumberDictionary* dict,
+                                     uint32_t index) {
     Object* key = dict->KeyAt(index);
     return Smi::cast(key)->value();
   }
 };
 
 
-class NonStrictArgumentsElementsAccessor
-    : public ElementsAccessorBase<NonStrictArgumentsElementsAccessor,
-                                  FixedArray> {
+class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
+    NonStrictArgumentsElementsAccessor,
+    ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> > {
+ public:
+  explicit NonStrictArgumentsElementsAccessor(const char* name)
+      : ElementsAccessorBase<
+          NonStrictArgumentsElementsAccessor,
+          ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >(name) {}
  protected:
-  friend class ElementsAccessorBase<NonStrictArgumentsElementsAccessor,
-                                    FixedArray>;
+  friend class ElementsAccessorBase<
+      NonStrictArgumentsElementsAccessor,
+      ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
 
-  static MaybeObject* Get(FixedArray* parameter_map,
-                          uint32_t key,
-                          JSObject* obj,
-                          Object* receiver) {
-    Object* probe = GetParameterMapArg(parameter_map, key);
+  static MaybeObject* GetImpl(Object* receiver,
+                              JSObject* obj,
+                              uint32_t key,
+                              FixedArray* parameter_map) {
+    Object* probe = GetParameterMapArg(obj, parameter_map, key);
     if (!probe->IsTheHole()) {
       Context* context = Context::cast(parameter_map->get(0));
       int context_index = Smi::cast(probe)->value();
@@ -497,19 +1199,37 @@
     } else {
       // Object is not mapped, defer to the arguments.
       FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-      return ElementsAccessor::ForArray(arguments)->Get(arguments,
-                                                        key,
-                                                        obj,
-                                                        receiver);
+      MaybeObject* maybe_result = ElementsAccessor::ForArray(arguments)->Get(
+          receiver, obj, key, arguments);
+      Object* result;
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+      // Elements of the arguments object in slow mode might be slow aliases.
+      if (result->IsAliasedArgumentsEntry()) {
+        AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(result);
+        Context* context = Context::cast(parameter_map->get(0));
+        int context_index = entry->aliased_context_slot();
+        ASSERT(!context->get(context_index)->IsTheHole());
+        return context->get(context_index);
+      } else {
+        return result;
+      }
     }
   }
 
+  static MaybeObject* SetLengthImpl(JSObject* obj,
+                                    Object* length,
+                                    FixedArray* parameter_map) {
+    // TODO(mstarzinger): This was never implemented but will be used once we
+    // correctly implement [[DefineOwnProperty]] on arrays.
+    UNIMPLEMENTED();
+    return obj;
+  }
+
   virtual MaybeObject* Delete(JSObject* obj,
-                              uint32_t key
-                              ,
+                              uint32_t key,
                               JSReceiver::DeleteMode mode) {
     FixedArray* parameter_map = FixedArray::cast(obj->elements());
-    Object* probe = GetParameterMapArg(parameter_map, key);
+    Object* probe = GetParameterMapArg(obj, parameter_map, key);
     if (!probe->IsTheHole()) {
       // TODO(kmillikin): We could check if this was the last aliased
       // parameter, and revert to normal elements in that case.  That
@@ -520,41 +1240,57 @@
       if (arguments->IsDictionary()) {
         return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
       } else {
-        return FastElementsAccessor::DeleteCommon(obj, key);
+        return FastObjectElementsAccessor::DeleteCommon(obj, key);
       }
     }
     return obj->GetHeap()->true_value();
   }
 
-  static uint32_t GetCapacity(FixedArray* parameter_map) {
+  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+                                       uint32_t from_start,
+                                       FixedArrayBase* to,
+                                       ElementsKind to_kind,
+                                       uint32_t to_start,
+                                       int copy_size) {
+    FixedArray* parameter_map = FixedArray::cast(from);
+    FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+    ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
+    return accessor->CopyElements(NULL, from_start, to, to_kind,
+                                  to_start, copy_size, arguments);
+  }
+
+  static uint32_t GetCapacityImpl(FixedArray* parameter_map) {
     FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
     return Max(static_cast<uint32_t>(parameter_map->length() - 2),
                ForArray(arguments)->GetCapacity(arguments));
   }
 
-  static uint32_t GetKeyForIndex(FixedArray* dict,
-                                 uint32_t index) {
+  static uint32_t GetKeyForIndexImpl(FixedArray* dict,
+                                     uint32_t index) {
     return index;
   }
 
-  static bool HasElementAtIndex(FixedArray* parameter_map,
-                                uint32_t index,
-                                JSObject* holder,
-                                Object* receiver) {
-    Object* probe = GetParameterMapArg(parameter_map, index);
+  static bool HasElementImpl(Object* receiver,
+                             JSObject* holder,
+                             uint32_t key,
+                             FixedArray* parameter_map) {
+    Object* probe = GetParameterMapArg(holder, parameter_map, key);
     if (!probe->IsTheHole()) {
       return true;
     } else {
       FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
       ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
-      return !accessor->Get(arguments, index, holder, receiver)->IsTheHole();
+      return !accessor->Get(receiver, holder, key, arguments)->IsTheHole();
     }
   }
 
  private:
-  static Object* GetParameterMapArg(FixedArray* parameter_map,
+  static Object* GetParameterMapArg(JSObject* holder,
+                                    FixedArray* parameter_map,
                                     uint32_t key) {
-    uint32_t length = parameter_map->length();
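+    // Slots 0 and 1 of the parameter map hold the context and the arguments
+    // backing store, so mapped parameters start at slot 2 (hence the key + 2
+    // lookup below and the length - 2 capacity computation above).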
+    uint32_t length = holder->IsJSArray()
+        ? Smi::cast(JSArray::cast(holder)->length())->value()
+        : parameter_map->length();
     return key < (length - 2)
         ? parameter_map->get(key + 2)
         : parameter_map->GetHeap()->the_hole_value();
@@ -597,39 +1333,87 @@
 
 void ElementsAccessor::InitializeOncePerProcess() {
   static struct ConcreteElementsAccessors {
-    FastElementsAccessor fast_elements_handler;
-    FastDoubleElementsAccessor fast_double_elements_handler;
-    DictionaryElementsAccessor dictionary_elements_handler;
-    NonStrictArgumentsElementsAccessor non_strict_arguments_elements_handler;
-    ExternalByteElementsAccessor byte_elements_handler;
-    ExternalUnsignedByteElementsAccessor unsigned_byte_elements_handler;
-    ExternalShortElementsAccessor short_elements_handler;
-    ExternalUnsignedShortElementsAccessor unsigned_short_elements_handler;
-    ExternalIntElementsAccessor int_elements_handler;
-    ExternalUnsignedIntElementsAccessor unsigned_int_elements_handler;
-    ExternalFloatElementsAccessor float_elements_handler;
-    ExternalDoubleElementsAccessor double_elements_handler;
-    PixelElementsAccessor pixel_elements_handler;
-  } element_accessors;
-
-  static ElementsAccessor* accessor_array[] = {
-    &element_accessors.fast_elements_handler,
-    &element_accessors.fast_double_elements_handler,
-    &element_accessors.dictionary_elements_handler,
-    &element_accessors.non_strict_arguments_elements_handler,
-    &element_accessors.byte_elements_handler,
-    &element_accessors.unsigned_byte_elements_handler,
-    &element_accessors.short_elements_handler,
-    &element_accessors.unsigned_short_elements_handler,
-    &element_accessors.int_elements_handler,
-    &element_accessors.unsigned_int_elements_handler,
-    &element_accessors.float_elements_handler,
-    &element_accessors.double_elements_handler,
-    &element_accessors.pixel_elements_handler
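+  // ELEMENTS_LIST is an X-macro (presumably defined earlier in this file)
+  // expanding to one (Class, Kind, Store) entry per ElementsKind, so this
+  // struct, its initializer, and the accessor table below stay in sync.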
+#define ACCESSOR_STRUCT(Class, Kind, Store) Class* Kind##_handler;
+    ELEMENTS_LIST(ACCESSOR_STRUCT)
+#undef ACCESSOR_STRUCT
+  } element_accessors = {
+#define ACCESSOR_INIT(Class, Kind, Store) new Class(#Kind),
+    ELEMENTS_LIST(ACCESSOR_INIT)
+#undef ACCESSOR_INIT
   };
 
+  static ElementsAccessor* accessor_array[] = {
+#define ACCESSOR_ARRAY(Class, Kind, Store) element_accessors.Kind##_handler,
+    ELEMENTS_LIST(ACCESSOR_ARRAY)
+#undef ACCESSOR_ARRAY
+  };
+
+  STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
+                kElementsKindCount);
+
   elements_accessors_ = accessor_array;
 }
 
 
+template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
+MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
+                                  ElementsKindTraits>::
+    SetLengthImpl(JSObject* obj,
+                  Object* length,
+                  typename ElementsKindTraits::BackingStore* backing_store) {
+  JSArray* array = JSArray::cast(obj);
+
+  // Fast case: The new length fits into a Smi.
+  MaybeObject* maybe_smi_length = length->ToSmi();
+  Object* smi_length = Smi::FromInt(0);
+  if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
+    const int value = Smi::cast(smi_length)->value();
+    if (value >= 0) {
+      Object* new_length;
+      MaybeObject* result = ElementsAccessorSubclass::
+          SetLengthWithoutNormalize(backing_store, array, smi_length, value);
+      if (!result->ToObject(&new_length)) return result;
+      ASSERT(new_length->IsSmi() || new_length->IsUndefined());
+      if (new_length->IsSmi()) {
+        array->set_length(Smi::cast(new_length));
+        return array;
+      }
+    } else {
+      return ThrowArrayLengthRangeError(array->GetHeap());
+    }
+  }
+
+  // Slow case: The new length does not fit into a Smi or conversion
+  // to slow elements is needed for other reasons.
+  if (length->IsNumber()) {
+    uint32_t value;
+    if (length->ToArrayIndex(&value)) {
+      SeededNumberDictionary* dictionary;
+      MaybeObject* maybe_object = array->NormalizeElements();
+      if (!maybe_object->To(&dictionary)) return maybe_object;
+      Object* new_length;
+      MaybeObject* result = DictionaryElementsAccessor::
+          SetLengthWithoutNormalize(dictionary, array, length, value);
+      if (!result->ToObject(&new_length)) return result;
+      ASSERT(new_length->IsNumber());
+      array->set_length(new_length);
+      return array;
+    } else {
+      return ThrowArrayLengthRangeError(array->GetHeap());
+    }
+  }
+
+  // Fall-back case: The new length is not a number, so allocate a backing
+  // store of size one and store the length value as its only element.
+  FixedArray* new_backing_store;
+  MaybeObject* maybe_obj = array->GetHeap()->AllocateFixedArray(1);
+  if (!maybe_obj->To(&new_backing_store)) return maybe_obj;
+  new_backing_store->set(0, length);
+  { MaybeObject* result = array->SetContent(new_backing_store);
+    if (result->IsFailure()) return result;
+  }
+  return array;
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/elements.h b/src/elements.h
index 851c8c3..ff97c08 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,8 @@
 #define V8_ELEMENTS_H_
 
 #include "objects.h"
+#include "heap.h"
+#include "isolate.h"
 
 namespace v8 {
 namespace internal {
@@ -37,21 +39,88 @@
 // ElementsKinds.
 class ElementsAccessor {
  public:
-  ElementsAccessor() { }
+  explicit ElementsAccessor(const char* name) : name_(name) { }
   virtual ~ElementsAccessor() { }
-  virtual MaybeObject* Get(FixedArrayBase* backing_store,
-                           uint32_t key,
-                           JSObject* holder,
-                           Object* receiver) = 0;
 
+  virtual ElementsKind kind() const = 0;
+  const char* name() const { return name_; }
+
+  // Returns true if a holder contains an element with the specified key
+  // without iterating up the prototype chain.  The caller can optionally pass
+  // in the backing store to use for the check, which must be compatible with
+  // the ElementsKind of the ElementsAccessor. If backing_store is NULL,
+  // holder->elements() is used as the backing store.
+  virtual bool HasElement(Object* receiver,
+                          JSObject* holder,
+                          uint32_t key,
+                          FixedArrayBase* backing_store = NULL) = 0;
+
+  // Returns the element with the specified key or undefined if there is no such
+  // element. This method doesn't iterate up the prototype chain.  The caller
+  // can optionally pass in the backing store to use for the check, which must
+  // be compatible with the ElementsKind of the ElementsAccessor. If
+  // backing_store is NULL, holder->elements() is used as the backing store.
+  virtual MaybeObject* Get(Object* receiver,
+                           JSObject* holder,
+                           uint32_t key,
+                           FixedArrayBase* backing_store = NULL) = 0;
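+  //
+  // A minimal usage sketch (illustrative only), assuming a holder with fast
+  // elements:
+  //
+  //   ElementsAccessor* accessor = ElementsAccessor::ForKind(FAST_ELEMENTS);
+  //   MaybeObject* maybe_element = accessor->Get(receiver, holder, key);
+  //   Object* element;
+  //   if (!maybe_element->ToObject(&element)) return maybe_element;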
+
+  // Modifies the length data property as specified for JSArrays and resizes the
+  // underlying backing store accordingly. The method honors the semantics of
+  // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. arrays
+  // that have non-deletable elements can only be shrunk to one past the index
+  // of the highest non-deletable element.
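+  // For example: if element 5 of an array was defined with
+  // { configurable: false }, setting the length to 2 leaves the length at 6.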
+  virtual MaybeObject* SetLength(JSArray* holder,
+                                 Object* new_length) = 0;
+
+  // Modifies both the length and capacity of a JSArray, resizing the underlying
+  // backing store as necessary. This method does NOT honor the semantics of
+  // EcmaScript 5.1 15.4.5.2: arrays can be shrunk beyond non-deletable
+  // elements. This method should only be called for array expansion or by
+  // runtime JavaScript code that uses InternalArrays and doesn't care about
+  // EcmaScript 5.1 semantics.
+  virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+                                            int capacity,
+                                            int length) = 0;
+
+  // Deletes an element in an object, returning a new elements backing store.
   virtual MaybeObject* Delete(JSObject* holder,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) = 0;
 
-  virtual MaybeObject* AddElementsToFixedArray(FixedArrayBase* from,
-                                               FixedArray* to,
+  // If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
+  // of the elements from source starting at source_start to the destination
+  // array.
+  static const int kCopyToEnd = -1;
+  // If kCopyToEndAndInitializeToHole is specified as the copy_size to
+  // CopyElements, it copies all of the elements from source starting at
+  // source_start to the destination array, padding any remaining
+  // uninitialized elements in the destination array with the hole.
+  static const int kCopyToEndAndInitializeToHole = -2;
+
+  // Copy elements from one backing store to another. Typically, callers specify
+  // the source JSObject or JSArray in source_holder. If the holder's backing
+  // store is available, it can be passed in source and source_holder is
+  // ignored.
+  virtual MaybeObject* CopyElements(JSObject* source_holder,
+                                    uint32_t source_start,
+                                    FixedArrayBase* destination,
+                                    ElementsKind destination_kind,
+                                    uint32_t destination_start,
+                                    int copy_size,
+                                    FixedArrayBase* source = NULL) = 0;
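+  //
+  // A typical call might look like this (illustrative only; new_backing_store
+  // stands for a hypothetical destination array):
+  //
+  //   accessor->CopyElements(holder, 0, new_backing_store, FAST_ELEMENTS, 0,
+  //                          kCopyToEndAndInitializeToHole);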
+
+  MaybeObject* CopyElements(JSObject* from_holder,
+                            FixedArrayBase* to,
+                            ElementsKind to_kind,
+                            FixedArrayBase* from = NULL) {
+    return CopyElements(from_holder, 0, to, to_kind, 0,
+                        kCopyToEndAndInitializeToHole, from);
+  }
+
+  virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
                                                JSObject* holder,
-                                               Object* receiver) = 0;
+                                               FixedArray* to,
+                                               FixedArrayBase* from = NULL) = 0;
 
   // Returns a shared ElementsAccessor for the specified ElementsKind.
   static ElementsAccessor* ForKind(ElementsKind elements_kind) {
@@ -68,28 +137,34 @@
 
   virtual uint32_t GetCapacity(FixedArrayBase* backing_store) = 0;
 
-  virtual bool HasElementAtIndex(FixedArrayBase* backing_store,
-                                 uint32_t index,
-                                 JSObject* holder,
-                                 Object* receiver) = 0;
-
-  // Element handlers distinguish between indexes and keys when the manipulate
+  // Element handlers distinguish between indexes and keys when they manipulate
   // elements.  Indexes refer to elements in terms of their location in the
-  // underlying storage's backing store representation, and are between 0
+  // underlying storage's backing store representation, and are between 0 and
   // GetCapacity.  Keys refer to elements in terms of the value that would be
-  // specific in JavaScript to access the element. In most implementations, keys
-  // are equivalent to indexes, and GetKeyForIndex returns the same value it is
-  // passed. In the NumberDictionary ElementsAccessor, GetKeyForIndex maps the
-  // index to a key using the KeyAt method on the NumberDictionary.
+  // specified in JavaScript to access the element. In most implementations,
+  // keys are equivalent to indexes, and GetKeyForIndex returns the same value
+  // it is passed. In the NumberDictionary ElementsAccessor, GetKeyForIndex maps
+  // the index to a key using the KeyAt method on the NumberDictionary.
   virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
                                   uint32_t index) = 0;
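+  //
+  // An illustrative sketch (using the declarations above): enumerating a
+  // backing store by index and mapping each index back to its JavaScript key:
+  //
+  //   for (uint32_t i = 0; i < GetCapacity(backing_store); ++i) {
+  //     uint32_t key = GetKeyForIndex(backing_store, i);
+  //     // ... use key ...
+  //   }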
 
  private:
   static ElementsAccessor** elements_accessors_;
+  const char* name_;
 
   DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
 };
 
+
+void CopyObjectToObjectElements(FixedArray* from_obj,
+                                ElementsKind from_kind,
+                                uint32_t from_start,
+                                FixedArray* to_obj,
+                                ElementsKind to_kind,
+                                uint32_t to_start,
+                                int copy_size);
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_ELEMENTS_H_
diff --git a/src/execution.cc b/src/execution.cc
index f36d4e4..5618975 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,6 +33,7 @@
 #include "bootstrapper.h"
 #include "codegen.h"
 #include "debug.h"
+#include "isolate-inl.h"
 #include "runtime-profiler.h"
 #include "simulator.h"
 #include "v8threads.h"
@@ -65,13 +66,13 @@
 }
 
 
-static Handle<Object> Invoke(bool construct,
-                             Handle<JSFunction> func,
+static Handle<Object> Invoke(bool is_construct,
+                             Handle<JSFunction> function,
                              Handle<Object> receiver,
                              int argc,
-                             Object*** args,
+                             Handle<Object> args[],
                              bool* has_pending_exception) {
-  Isolate* isolate = func->GetIsolate();
+  Isolate* isolate = function->GetIsolate();
 
   // Entering JavaScript.
   VMState state(isolate, JS);
@@ -79,21 +80,15 @@
   // Placeholder for return value.
   MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
 
-  typedef Object* (*JSEntryFunction)(
-    byte* entry,
-    Object* function,
-    Object* receiver,
-    int argc,
-    Object*** args);
+  typedef Object* (*JSEntryFunction)(byte* entry,
+                                     Object* function,
+                                     Object* receiver,
+                                     int argc,
+                                     Object*** args);
 
-  Handle<Code> code;
-  if (construct) {
-    JSConstructEntryStub stub;
-    code = stub.GetCode();
-  } else {
-    JSEntryStub stub;
-    code = stub.GetCode();
-  }
+  Handle<Code> code = is_construct
+      ? isolate->factory()->js_construct_entry_code()
+      : isolate->factory()->js_entry_code();
 
   // Convert calls on global objects to be calls on the global
   // receiver instead to avoid having a 'this' pointer which refers
@@ -105,21 +100,22 @@
 
   // Make sure that the global object of the context we're about to
   // make the current one is indeed a global object.
-  ASSERT(func->context()->global()->IsGlobalObject());
+  ASSERT(function->context()->global()->IsGlobalObject());
 
   {
     // Save and restore context around invocation and block the
     // allocation of handles without explicit handle scopes.
     SaveContext save(isolate);
     NoHandleAllocation na;
-    JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
+    JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
 
     // Call the function through the right JS entry stub.
-    byte* entry_address = func->code()->entry();
-    JSFunction* function = *func;
-    Object* receiver_pointer = *receiver;
-    value = CALL_GENERATED_CODE(entry, entry_address, function,
-                                receiver_pointer, argc, args);
+    byte* function_entry = function->code()->entry();
+    JSFunction* func = *function;
+    Object* recv = *receiver;
+    Object*** argv = reinterpret_cast<Object***>(args);
+    value =
+        CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
   }
 
 #ifdef DEBUG
@@ -148,9 +144,11 @@
 Handle<Object> Execution::Call(Handle<Object> callable,
                                Handle<Object> receiver,
                                int argc,
-                               Object*** args,
+                               Handle<Object> argv[],
                                bool* pending_exception,
                                bool convert_receiver) {
+  *pending_exception = false;
+
   if (!callable->IsJSFunction()) {
     callable = TryGetFunctionDelegate(callable, pending_exception);
     if (*pending_exception) return callable;
@@ -159,7 +157,7 @@
 
   // In non-strict mode, convert receiver.
   if (convert_receiver && !receiver->IsJSReceiver() &&
-      !func->shared()->native() && !func->shared()->strict_mode()) {
+      !func->shared()->native() && func->shared()->is_classic_mode()) {
     if (receiver->IsUndefined() || receiver->IsNull()) {
       Object* global = func->context()->global()->global_receiver();
       // Under some circumstances, 'global' can be the JSBuiltinsObject
@@ -172,13 +170,15 @@
     if (*pending_exception) return callable;
   }
 
-  return Invoke(false, func, receiver, argc, args, pending_exception);
+  return Invoke(false, func, receiver, argc, argv, pending_exception);
 }
 
 
-Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
-                              Object*** args, bool* pending_exception) {
-  return Invoke(true, func, Isolate::Current()->global(), argc, args,
+Handle<Object> Execution::New(Handle<JSFunction> func,
+                              int argc,
+                              Handle<Object> argv[],
+                              bool* pending_exception) {
+  return Invoke(true, func, Isolate::Current()->global(), argc, argv,
                 pending_exception);
 }
 
@@ -186,7 +186,7 @@
 Handle<Object> Execution::TryCall(Handle<JSFunction> func,
                                   Handle<Object> receiver,
                                   int argc,
-                                  Object*** args,
+                                  Handle<Object> args[],
                                   bool* caught_exception) {
   // Enter a try-block while executing the JavaScript code. To avoid
   // duplicate error printing it must be non-verbose.  Also, to avoid
@@ -195,6 +195,7 @@
   v8::TryCatch catcher;
   catcher.SetVerbose(false);
   catcher.SetCaptureMessage(false);
+  *caught_exception = false;
 
   Handle<Object> result = Invoke(false, func, receiver, argc, args,
                                  caught_exception);
@@ -355,7 +356,7 @@
 
 void StackGuard::SetStackLimit(uintptr_t limit) {
   ExecutionAccess access(isolate_);
-  // If the current limits are special (eg due to a pending interrupt) then
+  // If the current limits are special (e.g. due to a pending interrupt) then
   // leave them alone.
   uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
   if (thread_local_.jslimit_ == thread_local_.real_jslimit_) {
@@ -375,9 +376,15 @@
 }
 
 
+bool StackGuard::ShouldPostponeInterrupts() {
+  ExecutionAccess access(isolate_);
+  return should_postpone_interrupts(access);
+}
+
+
 bool StackGuard::IsInterrupted() {
   ExecutionAccess access(isolate_);
-  return thread_local_.interrupt_flags_ & INTERRUPT;
+  return (thread_local_.interrupt_flags_ & INTERRUPT) != 0;
 }
 
 
@@ -403,7 +410,7 @@
 
 bool StackGuard::IsTerminateExecution() {
   ExecutionAccess access(isolate_);
-  return thread_local_.interrupt_flags_ & TERMINATE;
+  return (thread_local_.interrupt_flags_ & TERMINATE) != 0;
 }
 
 
@@ -416,7 +423,7 @@
 
 bool StackGuard::IsRuntimeProfilerTick() {
   ExecutionAccess access(isolate_);
-  return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
+  return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
 }
 
 
@@ -433,6 +440,22 @@
 }
 
 
+bool StackGuard::IsGCRequest() {
+  ExecutionAccess access(isolate_);
+  return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
+}
+
+
+void StackGuard::RequestGC() {
+  ExecutionAccess access(isolate_);
+  thread_local_.interrupt_flags_ |= GC_REQUEST;
+  if (thread_local_.postpone_interrupts_nesting_ == 0) {
+    thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+    isolate_->heap()->SetStackLimits();
+  }
+}
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 bool StackGuard::IsDebugBreak() {
   ExecutionAccess access(isolate_);
@@ -555,14 +578,15 @@
 
 // --- C a l l s   t o   n a t i v e s ---
 
-#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception)            \
-  do {                                                                         \
-    Isolate* isolate = Isolate::Current();                                     \
-    Object** args[argc] = argv;                                                \
-    ASSERT(has_pending_exception != NULL);                                     \
-    return Call(isolate->name##_fun(),                                         \
-                isolate->js_builtins_object(), argc, args,                     \
-                has_pending_exception);                                        \
+#define RETURN_NATIVE_CALL(name, args, has_pending_exception)           \
+  do {                                                                  \
+    Isolate* isolate = Isolate::Current();                              \
+    Handle<Object> argv[] = args;                                       \
+    ASSERT(has_pending_exception != NULL);                              \
+    return Call(isolate->name##_fun(),                                  \
+                isolate->js_builtins_object(),                          \
+                ARRAY_SIZE(argv), argv,                                 \
+                has_pending_exception);                                 \
   } while (false)
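
For reference, a call such as RETURN_NATIVE_CALL(to_number, { obj }, exc)
now expands to roughly the following (a sketch, not part of the diff); the
argument count is no longer passed explicitly but derived with ARRAY_SIZE:

    do {
      Isolate* isolate = Isolate::Current();
      Handle<Object> argv[] = { obj };
      ASSERT(exc != NULL);
      // argc is ARRAY_SIZE(argv), i.e. 1 for this single-argument call.
      return Call(isolate->to_number_fun(),
                  isolate->js_builtins_object(),
                  ARRAY_SIZE(argv), argv,
                  exc);
    } while (false);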
 
 
@@ -583,44 +607,44 @@
 
 
 Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_number, { obj }, exc);
 }
 
 
 Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_string, { obj }, exc);
 }
 
 
 Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_detail_string, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
 }
 
 
 Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
   if (obj->IsSpecObject()) return obj;
-  RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_object, { obj }, exc);
 }
 
 
 Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_integer, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_integer, { obj }, exc);
 }
 
 
 Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_uint32, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
 }
 
 
 Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_int32, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_int32, { obj }, exc);
 }
 
 
 Handle<Object> Execution::NewDate(double time, bool* exc) {
   Handle<Object> time_obj = FACTORY->NewNumber(time);
-  RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
+  RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
 }
 
 
@@ -657,7 +681,7 @@
 
   bool caught_exception;
   Handle<Object> index_object = factory->NewNumberFromInt(int_index);
-  Object** index_arg[] = { index_object.location() };
+  Handle<Object> index_arg[] = { index_object };
   Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
                                   string,
                                   ARRAY_SIZE(index_arg),
@@ -671,7 +695,8 @@
 
 
 Handle<JSFunction> Execution::InstantiateFunction(
-    Handle<FunctionTemplateInfo> data, bool* exc) {
+    Handle<FunctionTemplateInfo> data,
+    bool* exc) {
   Isolate* isolate = data->GetIsolate();
   // Fast case: see if the function has already been instantiated
   int serial_number = Smi::cast(data->serial_number())->value();
@@ -680,10 +705,12 @@
           GetElementNoExceptionThrown(serial_number);
   if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
   // The function has not yet been instantiated in this context; do it.
-  Object** args[1] = { Handle<Object>::cast(data).location() };
-  Handle<Object> result =
-      Call(isolate->instantiate_fun(),
-           isolate->js_builtins_object(), 1, args, exc);
+  Handle<Object> args[] = { data };
+  Handle<Object> result = Call(isolate->instantiate_fun(),
+                               isolate->js_builtins_object(),
+                               ARRAY_SIZE(args),
+                               args,
+                               exc);
   if (*exc) return Handle<JSFunction>::null();
   return Handle<JSFunction>::cast(result);
 }
@@ -710,10 +737,12 @@
     ASSERT(!*exc);
     return Handle<JSObject>(JSObject::cast(result));
   } else {
-    Object** args[1] = { Handle<Object>::cast(data).location() };
-    Handle<Object> result =
-        Call(isolate->instantiate_fun(),
-             isolate->js_builtins_object(), 1, args, exc);
+    Handle<Object> args[] = { data };
+    Handle<Object> result = Call(isolate->instantiate_fun(),
+                                 isolate->js_builtins_object(),
+                                 ARRAY_SIZE(args),
+                                 args,
+                                 exc);
     if (*exc) return Handle<JSObject>::null();
     return Handle<JSObject>::cast(result);
   }
@@ -724,9 +753,12 @@
                                   Handle<Object> instance_template,
                                   bool* exc) {
   Isolate* isolate = Isolate::Current();
-  Object** args[2] = { instance.location(), instance_template.location() };
+  Handle<Object> args[] = { instance, instance_template };
   Execution::Call(isolate->configure_instance_fun(),
-                  isolate->js_builtins_object(), 2, args, exc);
+                  isolate->js_builtins_object(),
+                  ARRAY_SIZE(args),
+                  args,
+                  exc);
 }
 
 
@@ -735,16 +767,13 @@
                                             Handle<Object> pos,
                                             Handle<Object> is_global) {
   Isolate* isolate = fun->GetIsolate();
-  const int argc = 4;
-  Object** args[argc] = { recv.location(),
-                          Handle<Object>::cast(fun).location(),
-                          pos.location(),
-                          is_global.location() };
-  bool caught_exception = false;
-  Handle<Object> result =
-      TryCall(isolate->get_stack_trace_line_fun(),
-              isolate->js_builtins_object(), argc, args,
-              &caught_exception);
+  Handle<Object> args[] = { recv, fun, pos, is_global };
+  bool caught_exception;
+  Handle<Object> result = TryCall(isolate->get_stack_trace_line_fun(),
+                                  isolate->js_builtins_object(),
+                                  ARRAY_SIZE(args),
+                                  args,
+                                  &caught_exception);
   if (caught_exception || !result->IsString()) {
       return isolate->factory()->empty_symbol();
   }
@@ -797,6 +826,11 @@
     return isolate->heap()->undefined_value();
   }
 
+  StackLimitCheck check(isolate);
+  if (check.HasOverflowed()) {
+    return isolate->heap()->undefined_value();
+  }
+
   {
     JavaScriptFrameIterator it(isolate);
     ASSERT(!it.done());
@@ -822,17 +856,22 @@
   // Clear the debug break request flag.
   isolate->stack_guard()->Continue(DEBUGBREAK);
 
-  ProcessDebugMesssages(debug_command_only);
+  ProcessDebugMessages(debug_command_only);
 
   // Return to continue execution.
   return isolate->heap()->undefined_value();
 }
 
-void Execution::ProcessDebugMesssages(bool debug_command_only) {
+void Execution::ProcessDebugMessages(bool debug_command_only) {
   Isolate* isolate = Isolate::Current();
   // Clear the debug command request flag.
   isolate->stack_guard()->Continue(DEBUGCOMMAND);
 
+  StackLimitCheck check(isolate);
+  if (check.HasOverflowed()) {
+    return;
+  }
+
   HandleScope scope(isolate);
   // Enter the debugger. Just continue if we fail to enter the debugger.
   EnterDebugger debugger;
@@ -849,11 +888,22 @@
 
 #endif
 
-MaybeObject* Execution::HandleStackGuardInterrupt() {
-  Isolate* isolate = Isolate::Current();
+MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
   StackGuard* stack_guard = isolate->stack_guard();
+  if (stack_guard->ShouldPostponeInterrupts()) {
+    return isolate->heap()->undefined_value();
+  }
+
+  if (stack_guard->IsGCRequest()) {
+    isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+                                       "StackGuard GC request");
+    stack_guard->Continue(GC_REQUEST);
+  }
+
   isolate->counters()->stack_interrupts()->Increment();
-  if (stack_guard->IsRuntimeProfilerTick()) {
+  // If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
+  if (FLAG_count_based_interrupts ||
+      stack_guard->IsRuntimeProfilerTick()) {
     isolate->counters()->runtime_profiler_ticks()->Increment();
     stack_guard->Continue(RUNTIME_PROFILER_TICK);
     isolate->runtime_profiler()->OptimizeNow();
@@ -875,4 +925,5 @@
   return isolate->heap()->undefined_value();
 }
 
+
 } }  // namespace v8::internal
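
The net effect of the execution.cc changes above is a safer public calling
convention: callers now pass a stack-allocated array of handles rather than
raw Object*** location arrays. A minimal usage sketch (arg0 and arg1 are
hypothetical handles; everything else matches the new signatures):

    bool has_pending_exception;
    Handle<Object> argv[] = { arg0, arg1 };
    Handle<Object> result = Execution::Call(callable,
                                            receiver,
                                            ARRAY_SIZE(argv),
                                            argv,
                                            &has_pending_exception);

Invoke() then reinterprets the handle array as Object*** only at the
boundary where it enters generated code.
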
diff --git a/src/execution.h b/src/execution.h
index 5cd7141..01e4b9d 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,9 +41,14 @@
   DEBUGCOMMAND = 1 << 2,
   PREEMPT = 1 << 3,
   TERMINATE = 1 << 4,
-  RUNTIME_PROFILER_TICK = 1 << 5
+  RUNTIME_PROFILER_TICK = 1 << 5,
+  GC_REQUEST = 1 << 6
 };
 
+
+class Isolate;
+
+
 class Execution : public AllStatic {
  public:
   // Call a function; the caller supplies a receiver and an array
@@ -60,7 +65,7 @@
   static Handle<Object> Call(Handle<Object> callable,
                              Handle<Object> receiver,
                              int argc,
-                             Object*** args,
+                             Handle<Object> argv[],
                              bool* pending_exception,
                              bool convert_receiver = false);
 
@@ -73,7 +78,7 @@
   //
   static Handle<Object> New(Handle<JSFunction> func,
                             int argc,
-                            Object*** args,
+                            Handle<Object> argv[],
                             bool* pending_exception);
 
   // Call a function, just like Call(), but make sure to silently catch
@@ -83,7 +88,7 @@
   static Handle<Object> TryCall(Handle<JSFunction> func,
                                 Handle<Object> receiver,
                                 int argc,
-                                Object*** args,
+                                Handle<Object> argv[],
                                 bool* caught_exception);
 
   // ECMA-262 9.2
@@ -135,12 +140,13 @@
                                           Handle<Object> is_global);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   static Object* DebugBreakHelper();
-  static void ProcessDebugMesssages(bool debug_command_only);
+  static void ProcessDebugMessages(bool debug_command_only);
 #endif
 
   // If the stack guard is triggered, but it is not an actual
   // stack overflow, then handle the interruption accordingly.
-  MUST_USE_RESULT static MaybeObject* HandleStackGuardInterrupt();
+  MUST_USE_RESULT static MaybeObject* HandleStackGuardInterrupt(
+      Isolate* isolate);
 
   // Get a function delegate (or undefined) for the given non-function
   // object. Used for support calling objects as functions.
@@ -157,7 +163,6 @@
 
 
 class ExecutionAccess;
-class Isolate;
 
 
 // StackGuard contains the handling of the limits that are used to limit the
@@ -196,6 +201,8 @@
   bool IsDebugCommand();
   void DebugCommand();
 #endif
+  bool IsGCRequest();
+  void RequestGC();
   void Continue(InterruptFlag after_what);
 
   // This provides an asynchronous read of the stack limits for the current
@@ -219,6 +226,7 @@
   Address address_of_real_jslimit() {
     return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
   }
+  bool ShouldPostponeInterrupts();
 
  private:
   StackGuard();
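
The GC_REQUEST flag added above lets V8 request a full collection at the
next stack-guard check instead of collecting inline. A sketch of the flow,
condensed from the diff (the call site here is illustrative only):

    isolate->stack_guard()->RequestGC();
    // Sets GC_REQUEST and, unless interrupts are postponed, trips the
    // interrupt limits so generated code hits its next stack check early.
    // That check lands in Execution::HandleStackGuardInterrupt(isolate),
    // which runs:
    //   isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
    //                                      "StackGuard GC request");
    //   stack_guard->Continue(GC_REQUEST);  // clear the flag
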
diff --git a/src/extensions/experimental/break-iterator.cc b/src/extensions/experimental/break-iterator.cc
deleted file mode 100644
index e695a3e..0000000
--- a/src/extensions/experimental/break-iterator.cc
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/break-iterator.h"
-
-#include <string.h>
-
-#include "unicode/brkiter.h"
-#include "unicode/locid.h"
-#include "unicode/rbbi.h"
-
-namespace v8 {
-namespace internal {
-
-v8::Persistent<v8::FunctionTemplate> BreakIterator::break_iterator_template_;
-
-icu::BreakIterator* BreakIterator::UnpackBreakIterator(
-    v8::Handle<v8::Object> obj) {
-  if (break_iterator_template_->HasInstance(obj)) {
-    return static_cast<icu::BreakIterator*>(
-        obj->GetPointerFromInternalField(0));
-  }
-
-  return NULL;
-}
-
-icu::UnicodeString* BreakIterator::ResetAdoptedText(
-    v8::Handle<v8::Object> obj, v8::Handle<v8::Value> value) {
-  // Get the previous value from the internal field.
-  icu::UnicodeString* text = static_cast<icu::UnicodeString*>(
-      obj->GetPointerFromInternalField(1));
-  delete text;
-
-  // Assign new value to the internal pointer.
-  v8::String::Value text_value(value);
-  text = new icu::UnicodeString(
-      reinterpret_cast<const UChar*>(*text_value), text_value.length());
-  obj->SetPointerInInternalField(1, text);
-
-  // Return new unicode string pointer.
-  return text;
-}
-
-void BreakIterator::DeleteBreakIterator(v8::Persistent<v8::Value> object,
-                                        void* param) {
-  v8::Persistent<v8::Object> persistent_object =
-      v8::Persistent<v8::Object>::Cast(object);
-
-  // First delete the hidden C++ object.
-  // Unpacking should never return NULL here. That would only happen if
-  // this method is used as the weak callback for persistent handles not
-  // pointing to a break iterator.
-  delete UnpackBreakIterator(persistent_object);
-
-  delete static_cast<icu::UnicodeString*>(
-      persistent_object->GetPointerFromInternalField(1));
-
-  // Then dispose of the persistent handle to JS object.
-  persistent_object.Dispose();
-}
-
-// Throws a JavaScript exception.
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
-  // Returns undefined, and schedules an exception to be thrown.
-  return v8::ThrowException(v8::Exception::Error(
-      v8::String::New("BreakIterator method called on an object "
-                      "that is not a BreakIterator.")));
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorAdoptText(
-    const v8::Arguments& args) {
-  if (args.Length() != 1 || !args[0]->IsString()) {
-    return v8::ThrowException(v8::Exception::SyntaxError(
-        v8::String::New("Text input is required.")));
-  }
-
-  icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
-  if (!break_iterator) {
-    return ThrowUnexpectedObjectError();
-  }
-
-  break_iterator->setText(*ResetAdoptedText(args.Holder(), args[0]));
-
-  return v8::Undefined();
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorFirst(
-    const v8::Arguments& args) {
-  icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
-  if (!break_iterator) {
-    return ThrowUnexpectedObjectError();
-  }
-
-  return v8::Int32::New(break_iterator->first());
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorNext(
-    const v8::Arguments& args) {
-  icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
-  if (!break_iterator) {
-    return ThrowUnexpectedObjectError();
-  }
-
-  return v8::Int32::New(break_iterator->next());
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorCurrent(
-    const v8::Arguments& args) {
-  icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
-  if (!break_iterator) {
-    return ThrowUnexpectedObjectError();
-  }
-
-  return v8::Int32::New(break_iterator->current());
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorBreakType(
-    const v8::Arguments& args) {
-  icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
-  if (!break_iterator) {
-    return ThrowUnexpectedObjectError();
-  }
-
-  // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
-  icu::RuleBasedBreakIterator* rule_based_iterator =
-      static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
-  int32_t status = rule_based_iterator->getRuleStatus();
-  // Keep return values in sync with JavaScript BreakType enum.
-  if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
-    return v8::Int32::New(UBRK_WORD_NONE);
-  } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
-    return v8::Int32::New(UBRK_WORD_NUMBER);
-  } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
-    return v8::Int32::New(UBRK_WORD_LETTER);
-  } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
-    return v8::Int32::New(UBRK_WORD_KANA);
-  } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
-    return v8::Int32::New(UBRK_WORD_IDEO);
-  } else {
-    return v8::Int32::New(-1);
-  }
-}
-
-v8::Handle<v8::Value> BreakIterator::JSBreakIterator(
-    const v8::Arguments& args) {
-  v8::HandleScope handle_scope;
-
-  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
-    return v8::ThrowException(v8::Exception::SyntaxError(
-        v8::String::New("Locale and iterator type are required.")));
-  }
-
-  v8::String::Utf8Value locale(args[0]);
-  icu::Locale icu_locale(*locale);
-
-  UErrorCode status = U_ZERO_ERROR;
-  icu::BreakIterator* break_iterator = NULL;
-  v8::String::Utf8Value type(args[1]);
-  if (!strcmp(*type, "character")) {
-    break_iterator =
-        icu::BreakIterator::createCharacterInstance(icu_locale, status);
-  } else if (!strcmp(*type, "word")) {
-    break_iterator =
-        icu::BreakIterator::createWordInstance(icu_locale, status);
-  } else if (!strcmp(*type, "sentence")) {
-    break_iterator =
-        icu::BreakIterator::createSentenceInstance(icu_locale, status);
-  } else if (!strcmp(*type, "line")) {
-    break_iterator =
-        icu::BreakIterator::createLineInstance(icu_locale, status);
-  } else {
-    return v8::ThrowException(v8::Exception::SyntaxError(
-        v8::String::New("Invalid iterator type.")));
-  }
-
-  if (U_FAILURE(status)) {
-    delete break_iterator;
-    return v8::ThrowException(v8::Exception::Error(
-        v8::String::New("Failed to create break iterator.")));
-  }
-
-  if (break_iterator_template_.IsEmpty()) {
-    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
-
-    raw_template->SetClassName(v8::String::New("v8Locale.v8BreakIterator"));
-
-    // Define internal field count on instance template.
-    v8::Local<v8::ObjectTemplate> object_template =
-        raw_template->InstanceTemplate();
-
-    // Set aside internal fields for icu break iterator and adopted text.
-    object_template->SetInternalFieldCount(2);
-
-    // Define all of the prototype methods on prototype template.
-    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
-    proto->Set(v8::String::New("adoptText"),
-               v8::FunctionTemplate::New(BreakIteratorAdoptText));
-    proto->Set(v8::String::New("first"),
-               v8::FunctionTemplate::New(BreakIteratorFirst));
-    proto->Set(v8::String::New("next"),
-               v8::FunctionTemplate::New(BreakIteratorNext));
-    proto->Set(v8::String::New("current"),
-               v8::FunctionTemplate::New(BreakIteratorCurrent));
-    proto->Set(v8::String::New("breakType"),
-               v8::FunctionTemplate::New(BreakIteratorBreakType));
-
-    break_iterator_template_ =
-        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
-  }
-
-  // Create an empty object wrapper.
-  v8::Local<v8::Object> local_object =
-      break_iterator_template_->GetFunction()->NewInstance();
-  v8::Persistent<v8::Object> wrapper =
-      v8::Persistent<v8::Object>::New(local_object);
-
-  // Set break iterator as internal field of the resulting JS object.
-  wrapper->SetPointerInInternalField(0, break_iterator);
-  // Make sure that the pointer to adopted text is NULL.
-  wrapper->SetPointerInInternalField(1, NULL);
-
-  // Make object handle weak so we can delete iterator once GC kicks in.
-  wrapper.MakeWeak(NULL, DeleteBreakIterator);
-
-  return wrapper;
-}
-
-} }  // namespace v8::internal
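
Before its removal, break-iterator.cc tied the lifetime of each
icu::BreakIterator to its JS wrapper via a weak persistent handle. The core
of that pattern, condensed from the deleted code (V8 3.x API):

    v8::Persistent<v8::Object> wrapper =
        v8::Persistent<v8::Object>::New(local_object);
    wrapper->SetPointerInInternalField(0, break_iterator);  // hide C++ ptr
    wrapper->SetPointerInInternalField(1, NULL);            // adopted text
    wrapper.MakeWeak(NULL, DeleteBreakIterator);  // free both fields on GC
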
diff --git a/src/extensions/experimental/break-iterator.h b/src/extensions/experimental/break-iterator.h
deleted file mode 100644
index 73b9bbd..0000000
--- a/src/extensions/experimental/break-iterator.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
-
-#include "include/v8.h"
-
-#include "unicode/uversion.h"
-
-namespace U_ICU_NAMESPACE {
-class BreakIterator;
-class UnicodeString;
-}
-
-namespace v8 {
-namespace internal {
-
-class BreakIterator {
- public:
-  static v8::Handle<v8::Value> JSBreakIterator(const v8::Arguments& args);
-
-  // Helper methods for various bindings.
-
-  // Unpacks break iterator object from corresponding JavaScript object.
-  static icu::BreakIterator* UnpackBreakIterator(v8::Handle<v8::Object> obj);
-
-  // Deletes the old value and sets the adopted text in
-  // corresponding JavaScript object.
-  static icu::UnicodeString* ResetAdoptedText(v8::Handle<v8::Object> obj,
-                                              v8::Handle<v8::Value> text_value);
-
-  // Release memory we allocated for the BreakIterator once the JS object that
-  // holds the pointer gets garbage collected.
-  static void DeleteBreakIterator(v8::Persistent<v8::Value> object,
-                                  void* param);
-
-  // Assigns new text to the iterator.
-  static v8::Handle<v8::Value> BreakIteratorAdoptText(
-      const v8::Arguments& args);
-
-  // Moves iterator to the beginning of the string and returns new position.
-  static v8::Handle<v8::Value> BreakIteratorFirst(const v8::Arguments& args);
-
-  // Moves iterator to the next position and returns it.
-  static v8::Handle<v8::Value> BreakIteratorNext(const v8::Arguments& args);
-
-  // Returns current iterator's current position.
-  static v8::Handle<v8::Value> BreakIteratorCurrent(
-      const v8::Arguments& args);
-
-  // Returns type of the item from current position.
-  // This call is only valid for word break iterators. Others just return 0.
-  static v8::Handle<v8::Value> BreakIteratorBreakType(
-      const v8::Arguments& args);
-
- private:
-  BreakIterator() {}
-
-  static v8::Persistent<v8::FunctionTemplate> break_iterator_template_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
diff --git a/src/extensions/experimental/collator.cc b/src/extensions/experimental/collator.cc
deleted file mode 100644
index 5cf2192..0000000
--- a/src/extensions/experimental/collator.cc
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/collator.h"
-
-#include "unicode/coll.h"
-#include "unicode/locid.h"
-#include "unicode/ucol.h"
-
-namespace v8 {
-namespace internal {
-
-v8::Persistent<v8::FunctionTemplate> Collator::collator_template_;
-
-icu::Collator* Collator::UnpackCollator(v8::Handle<v8::Object> obj) {
-  if (collator_template_->HasInstance(obj)) {
-    return static_cast<icu::Collator*>(obj->GetPointerFromInternalField(0));
-  }
-
-  return NULL;
-}
-
-void Collator::DeleteCollator(v8::Persistent<v8::Value> object, void* param) {
-  v8::Persistent<v8::Object> persistent_object =
-      v8::Persistent<v8::Object>::Cast(object);
-
-  // First delete the hidden C++ object.
-  // Unpacking should never return NULL here. That would only happen if
-  // this method is used as the weak callback for persistent handles not
-  // pointing to a collator.
-  delete UnpackCollator(persistent_object);
-
-  // Then dispose of the persistent handle to JS object.
-  persistent_object.Dispose();
-}
-
-// Throws a JavaScript exception.
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
-  // Returns undefined, and schedules an exception to be thrown.
-  return v8::ThrowException(v8::Exception::Error(
-      v8::String::New("Collator method called on an object "
-                      "that is not a Collator.")));
-}
-
-// Extract a boolean option named in |option| and set it to |result|.
-// Return true if it's specified. Otherwise, return false.
-static bool ExtractBooleanOption(const v8::Local<v8::Object>& options,
-                                 const char* option,
-                                 bool* result) {
-  v8::HandleScope handle_scope;
-  v8::TryCatch try_catch;
-  v8::Handle<v8::Value> value = options->Get(v8::String::New(option));
-  if (try_catch.HasCaught()) {
-    return false;
-  }
-  // No need to check if |value| is empty because it's taken care of
-  // by TryCatch above.
-  if (!value->IsUndefined() && !value->IsNull()) {
-    if (value->IsBoolean()) {
-      *result = value->BooleanValue();
-      return true;
-    }
-  }
-  return false;
-}
-
-// When there's an ICU error, throw a JavaScript error with |message|.
-static v8::Handle<v8::Value> ThrowExceptionForICUError(const char* message) {
-  return v8::ThrowException(v8::Exception::Error(v8::String::New(message)));
-}
-
-v8::Handle<v8::Value> Collator::CollatorCompare(const v8::Arguments& args) {
-  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
-    return v8::ThrowException(v8::Exception::SyntaxError(
-        v8::String::New("Two string arguments are required.")));
-  }
-
-  icu::Collator* collator = UnpackCollator(args.Holder());
-  if (!collator) {
-    return ThrowUnexpectedObjectError();
-  }
-
-  v8::String::Value string_value1(args[0]);
-  v8::String::Value string_value2(args[1]);
-  const UChar* string1 = reinterpret_cast<const UChar*>(*string_value1);
-  const UChar* string2 = reinterpret_cast<const UChar*>(*string_value2);
-  UErrorCode status = U_ZERO_ERROR;
-  UCollationResult result = collator->compare(
-      string1, string_value1.length(), string2, string_value2.length(), status);
-
-  if (U_FAILURE(status)) {
-    return ThrowExceptionForICUError(
-        "Unexpected failure in Collator.compare.");
-  }
-
-  return v8::Int32::New(result);
-}
-
-v8::Handle<v8::Value> Collator::JSCollator(const v8::Arguments& args) {
-  v8::HandleScope handle_scope;
-
-  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
-    return v8::ThrowException(v8::Exception::SyntaxError(
-        v8::String::New("Locale and collation options are required.")));
-  }
-
-  v8::String::AsciiValue locale(args[0]);
-  icu::Locale icu_locale(*locale);
-
-  icu::Collator* collator = NULL;
-  UErrorCode status = U_ZERO_ERROR;
-  collator = icu::Collator::createInstance(icu_locale, status);
-
-  if (U_FAILURE(status)) {
-    delete collator;
-    return ThrowExceptionForICUError("Failed to create collator.");
-  }
-
-  v8::Local<v8::Object> options(args[1]->ToObject());
-
-  // Below, we only change collation options that the caller explicitly
-  // specified in JavaScript; otherwise we leave them alone, so as not to
-  // override locale-dependent default values.
-  // The three options below very likely share the same default across
-  // locales, but not all locales have been checked. Options we may add
-  // in the future certainly have locale-dependent defaults (e.g.
-  // caseFirst is upperFirst for Danish but off for most other locales).
-
-  bool ignore_case, ignore_accents, numeric;
-
-  if (ExtractBooleanOption(options, "ignoreCase", &ignore_case)) {
-    // We need to explicitly set the level to secondary to get case ignored.
-    // The default L3 ignores UCOL_CASE_LEVEL == UCOL_OFF !
-    if (ignore_case) {
-      collator->setStrength(icu::Collator::SECONDARY);
-    }
-    collator->setAttribute(UCOL_CASE_LEVEL, ignore_case ? UCOL_OFF : UCOL_ON,
-                           status);
-    if (U_FAILURE(status)) {
-      delete collator;
-      return ThrowExceptionForICUError("Failed to set ignoreCase.");
-    }
-  }
-
-  // Accents are taken into account with strength secondary or higher.
-  if (ExtractBooleanOption(options, "ignoreAccents", &ignore_accents)) {
-    if (!ignore_accents) {
-      collator->setStrength(icu::Collator::SECONDARY);
-    } else {
-      collator->setStrength(icu::Collator::PRIMARY);
-    }
-  }
-
-  if (ExtractBooleanOption(options, "numeric", &numeric)) {
-    collator->setAttribute(UCOL_NUMERIC_COLLATION,
-                           numeric ? UCOL_ON : UCOL_OFF, status);
-    if (U_FAILURE(status)) {
-      delete collator;
-      return ThrowExceptionForICUError("Failed to set numeric sort option.");
-    }
-  }
-
-  if (collator_template_.IsEmpty()) {
-    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
-    raw_template->SetClassName(v8::String::New("v8Locale.Collator"));
-
-    // Define internal field count on instance template.
-    v8::Local<v8::ObjectTemplate> object_template =
-        raw_template->InstanceTemplate();
-
-    // Set aside internal fields for icu collator.
-    object_template->SetInternalFieldCount(1);
-
-    // Define all of the prototype methods on prototype template.
-    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
-    proto->Set(v8::String::New("compare"),
-               v8::FunctionTemplate::New(CollatorCompare));
-
-    collator_template_ =
-        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
-  }
-
-  // Create an empty object wrapper.
-  v8::Local<v8::Object> local_object =
-      collator_template_->GetFunction()->NewInstance();
-  v8::Persistent<v8::Object> wrapper =
-      v8::Persistent<v8::Object>::New(local_object);
-
-  // Set collator as internal field of the resulting JS object.
-  wrapper->SetPointerInInternalField(0, collator);
-
-  // Make object handle weak so we can delete iterator once GC kicks in.
-  wrapper.MakeWeak(NULL, DeleteCollator);
-
-  return wrapper;
-}
-
-} }  // namespace v8::internal
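
The deleted collator bound JavaScript options onto ICU attributes only when
the caller specified them explicitly. Condensed from the code above, the
ignoreCase handling looked like this (status is a UErrorCode in scope):

    if (ignore_case) {
      // Secondary strength is required for case differences to be ignored.
      collator->setStrength(icu::Collator::SECONDARY);
    }
    collator->setAttribute(UCOL_CASE_LEVEL,
                           ignore_case ? UCOL_OFF : UCOL_ON, status);
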
diff --git a/src/extensions/experimental/collator.h b/src/extensions/experimental/collator.h
deleted file mode 100644
index ca7e4dc..0000000
--- a/src/extensions/experimental/collator.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
-
-#include "include/v8.h"
-
-#include "unicode/uversion.h"
-
-namespace U_ICU_NAMESPACE {
-class Collator;
-class UnicodeString;
-}
-
-namespace v8 {
-namespace internal {
-
-class Collator {
- public:
-  static v8::Handle<v8::Value> JSCollator(const v8::Arguments& args);
-
-  // Helper methods for various bindings.
-
-  // Unpacks collator object from corresponding JavaScript object.
-  static icu::Collator* UnpackCollator(v8::Handle<v8::Object> obj);
-
-  // Release memory we allocated for the Collator once the JS object that
-  // holds the pointer gets garbage collected.
-  static void DeleteCollator(v8::Persistent<v8::Value> object, void* param);
-
-  // Compare two strings and returns -1, 0 and 1 depending on
-  // whether string1 is smaller than, equal to or larger than string2.
-  static v8::Handle<v8::Value> CollatorCompare(const v8::Arguments& args);
-
- private:
-  Collator() {}
-
-  static v8::Persistent<v8::FunctionTemplate> collator_template_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
diff --git a/src/extensions/experimental/datetime-format.cc b/src/extensions/experimental/datetime-format.cc
deleted file mode 100644
index 94a29ac..0000000
--- a/src/extensions/experimental/datetime-format.cc
+++ /dev/null
@@ -1,384 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/datetime-format.h"
-
-#include <string.h>
-
-#include "src/extensions/experimental/i18n-utils.h"
-#include "unicode/dtfmtsym.h"
-#include "unicode/dtptngen.h"
-#include "unicode/locid.h"
-#include "unicode/smpdtfmt.h"
-
-namespace v8 {
-namespace internal {
-
-v8::Persistent<v8::FunctionTemplate> DateTimeFormat::datetime_format_template_;
-
-static icu::DateFormat* CreateDateTimeFormat(v8::Handle<v8::String>,
-                                             v8::Handle<v8::Object>);
-static v8::Handle<v8::Value> GetSymbols(
-    const v8::Arguments&,
-    const icu::UnicodeString*, int32_t,
-    const icu::UnicodeString*, int32_t,
-    const icu::UnicodeString*, int32_t);
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
-static icu::DateFormat::EStyle GetDateTimeStyle(const icu::UnicodeString&);
-
-icu::SimpleDateFormat* DateTimeFormat::UnpackDateTimeFormat(
-    v8::Handle<v8::Object> obj) {
-  if (datetime_format_template_->HasInstance(obj)) {
-    return static_cast<icu::SimpleDateFormat*>(
-        obj->GetPointerFromInternalField(0));
-  }
-
-  return NULL;
-}
-
-void DateTimeFormat::DeleteDateTimeFormat(v8::Persistent<v8::Value> object,
-                                          void* param) {
-  v8::Persistent<v8::Object> persistent_object =
-      v8::Persistent<v8::Object>::Cast(object);
-
-  // First delete the hidden C++ object.
-  // Unpacking should never return NULL here. That would only happen if
-  // this method is used as the weak callback for persistent handles not
-  // pointing to a date time formatter.
-  delete UnpackDateTimeFormat(persistent_object);
-
-  // Then dispose of the persistent handle to JS object.
-  persistent_object.Dispose();
-}
-
-v8::Handle<v8::Value> DateTimeFormat::Format(const v8::Arguments& args) {
-  v8::HandleScope handle_scope;
-
-  double millis = 0.0;
-  if (args.Length() != 1 || !args[0]->IsDate()) {
-    // Create a new date.
-    v8::TryCatch try_catch;
-    v8::Local<v8::Script> date_script =
-        v8::Script::Compile(v8::String::New("eval('new Date()')"));
-    millis = date_script->Run()->NumberValue();
-    if (try_catch.HasCaught()) {
-      return try_catch.ReThrow();
-    }
-  } else {
-    millis = v8::Date::Cast(*args[0])->NumberValue();
-  }
-
-  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
-  if (!date_format) {
-    return ThrowUnexpectedObjectError();
-  }
-
-  icu::UnicodeString result;
-  date_format->format(millis, result);
-
-  return v8::String::New(
-      reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
-}
-
-v8::Handle<v8::Value> DateTimeFormat::GetMonths(const v8::Arguments& args) {
-  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
-  if (!date_format) {
-    return ThrowUnexpectedObjectError();
-  }
-
-  const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
-
-  int32_t narrow_count;
-  const icu::UnicodeString* narrow = symbols->getMonths(
-      narrow_count,
-      icu::DateFormatSymbols::STANDALONE,
-      icu::DateFormatSymbols::NARROW);
-  int32_t abbrev_count;
-  const icu::UnicodeString* abbrev = symbols->getMonths(
-      abbrev_count,
-      icu::DateFormatSymbols::STANDALONE,
-      icu::DateFormatSymbols::ABBREVIATED);
-  int32_t wide_count;
-  const icu::UnicodeString* wide = symbols->getMonths(
-      wide_count,
-      icu::DateFormatSymbols::STANDALONE,
-      icu::DateFormatSymbols::WIDE);
-
-  return GetSymbols(
-      args, narrow, narrow_count, abbrev, abbrev_count, wide, wide_count);
-}
-
-v8::Handle<v8::Value> DateTimeFormat::GetWeekdays(const v8::Arguments& args) {
-  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
-  if (!date_format) {
-    return ThrowUnexpectedObjectError();
-  }
-
-  const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
-
-  int32_t narrow_count;
-  const icu::UnicodeString* narrow = symbols->getWeekdays(
-      narrow_count,
-      icu::DateFormatSymbols::STANDALONE,
-      icu::DateFormatSymbols::NARROW);
-  int32_t abbrev_count;
-  const icu::UnicodeString* abbrev = symbols->getWeekdays(
-      abbrev_count,
-      icu::DateFormatSymbols::STANDALONE,
-      icu::DateFormatSymbols::ABBREVIATED);
-  int32_t wide_count;
-  const icu::UnicodeString* wide = symbols->getWeekdays(
-      wide_count,
-      icu::DateFormatSymbols::STANDALONE,
-      icu::DateFormatSymbols::WIDE);
-
-  // getXXXWeekdays always returns 8 elements - ICU stable API.
-  // We can't use ASSERT_EQ(8, narrow_count) because ASSERT is internal to v8.
-  if (narrow_count != 8 || abbrev_count != 8 || wide_count != 8) {
-    return v8::ThrowException(v8::Exception::Error(
-        v8::String::New("Failed to get weekday information.")));
-  }
-
-  // ICU documentation says we should ignore element 0 of the returned array.
-  return GetSymbols(args, narrow + 1, narrow_count - 1, abbrev + 1,
-                    abbrev_count - 1, wide + 1, wide_count - 1);
-}
-
-v8::Handle<v8::Value> DateTimeFormat::GetEras(const v8::Arguments& args) {
-  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
-  if (!date_format) {
-    return ThrowUnexpectedObjectError();
-  }
-
-  const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
-
-  int32_t narrow_count;
-  const icu::UnicodeString* narrow = symbols->getNarrowEras(narrow_count);
-  int32_t abbrev_count;
-  const icu::UnicodeString* abbrev = symbols->getEras(abbrev_count);
-  int32_t wide_count;
-  const icu::UnicodeString* wide = symbols->getEraNames(wide_count);
-
-  return GetSymbols(
-      args, narrow, narrow_count, abbrev, abbrev_count, wide, wide_count);
-}
-
-v8::Handle<v8::Value> DateTimeFormat::GetAmPm(const v8::Arguments& args) {
-  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
-  if (!date_format) {
-    return ThrowUnexpectedObjectError();
-  }
-
-  const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
-
-  // In this case narrow == abbreviated == wide
-  int32_t count;
-  const icu::UnicodeString* wide = symbols->getAmPmStrings(count);
-
-  return GetSymbols(args, wide, count, wide, count, wide, count);
-}
-
-v8::Handle<v8::Value> DateTimeFormat::JSDateTimeFormat(
-    const v8::Arguments& args) {
-  v8::HandleScope handle_scope;
-
-  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
-    return v8::ThrowException(v8::Exception::SyntaxError(
-        v8::String::New("Locale and date/time options are required.")));
-  }
-
-  icu::SimpleDateFormat* date_format = static_cast<icu::SimpleDateFormat*>(
-      CreateDateTimeFormat(args[0]->ToString(), args[1]->ToObject()));
-
-  if (datetime_format_template_.IsEmpty()) {
-    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
-
-    raw_template->SetClassName(v8::String::New("v8Locale.DateTimeFormat"));
-
-    // Define internal field count on instance template.
-    v8::Local<v8::ObjectTemplate> object_template =
-        raw_template->InstanceTemplate();
-
-    // Set aside internal field for icu date time formatter.
-    object_template->SetInternalFieldCount(1);
-
-    // Define all of the prototype methods on prototype template.
-    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
-    proto->Set(v8::String::New("format"),
-               v8::FunctionTemplate::New(Format));
-    proto->Set(v8::String::New("getMonths"),
-               v8::FunctionTemplate::New(GetMonths));
-    proto->Set(v8::String::New("getWeekdays"),
-               v8::FunctionTemplate::New(GetWeekdays));
-    proto->Set(v8::String::New("getEras"),
-               v8::FunctionTemplate::New(GetEras));
-    proto->Set(v8::String::New("getAmPm"),
-               v8::FunctionTemplate::New(GetAmPm));
-
-    datetime_format_template_ =
-        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
-  }
-
-  // Create an empty object wrapper.
-  v8::Local<v8::Object> local_object =
-      datetime_format_template_->GetFunction()->NewInstance();
-  v8::Persistent<v8::Object> wrapper =
-      v8::Persistent<v8::Object>::New(local_object);
-
-  // Set date time formatter as internal field of the resulting JS object.
-  wrapper->SetPointerInInternalField(0, date_format);
-
-  // Set resolved pattern in options.pattern.
-  icu::UnicodeString pattern;
-  date_format->toPattern(pattern);
-  v8::Local<v8::Object> options = v8::Object::New();
-  options->Set(v8::String::New("pattern"),
-               v8::String::New(reinterpret_cast<const uint16_t*>(
-                   pattern.getBuffer()), pattern.length()));
-  wrapper->Set(v8::String::New("options"), options);
-
-  // Make object handle weak so we can delete iterator once GC kicks in.
-  wrapper.MakeWeak(NULL, DeleteDateTimeFormat);
-
-  return wrapper;
-}
-
-// Returns SimpleDateFormat.
-static icu::DateFormat* CreateDateTimeFormat(
-    v8::Handle<v8::String> locale, v8::Handle<v8::Object> settings) {
-  v8::HandleScope handle_scope;
-
-  v8::String::AsciiValue ascii_locale(locale);
-  icu::Locale icu_locale(*ascii_locale);
-
-  // Make formatter from skeleton.
-  icu::SimpleDateFormat* date_format = NULL;
-  UErrorCode status = U_ZERO_ERROR;
-  icu::UnicodeString skeleton;
-  if (I18NUtils::ExtractStringSetting(settings, "skeleton", &skeleton)) {
-    v8::Local<icu::DateTimePatternGenerator> generator(
-        icu::DateTimePatternGenerator::createInstance(icu_locale, status));
-    icu::UnicodeString pattern =
-        generator->getBestPattern(skeleton, status);
-
-    date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
-    if (U_SUCCESS(status)) {
-      return date_format;
-    } else {
-      delete date_format;
-    }
-  }
-
-  // Extract date style and time style from settings.
-  icu::UnicodeString date_style;
-  icu::DateFormat::EStyle icu_date_style = icu::DateFormat::kNone;
-  if (I18NUtils::ExtractStringSetting(settings, "dateStyle", &date_style)) {
-    icu_date_style = GetDateTimeStyle(date_style);
-  }
-
-  icu::UnicodeString time_style;
-  icu::DateFormat::EStyle icu_time_style = icu::DateFormat::kNone;
-  if (I18NUtils::ExtractStringSetting(settings, "timeStyle", &time_style)) {
-    icu_time_style = GetDateTimeStyle(time_style);
-  }
-
-  // Try all combinations of date/time styles.
-  if (icu_date_style == icu::DateFormat::kNone &&
-      icu_time_style == icu::DateFormat::kNone) {
-    // Return the default: short date, short time.
-    return icu::DateFormat::createDateTimeInstance(
-        icu::DateFormat::kShort, icu::DateFormat::kShort, icu_locale);
-  } else if (icu_date_style != icu::DateFormat::kNone &&
-             icu_time_style != icu::DateFormat::kNone) {
-    return icu::DateFormat::createDateTimeInstance(
-        icu_date_style, icu_time_style, icu_locale);
-  } else if (icu_date_style != icu::DateFormat::kNone) {
-    return icu::DateFormat::createDateInstance(icu_date_style, icu_locale);
-  } else {
-    // icu_time_style != icu::DateFormat::kNone
-    return icu::DateFormat::createTimeInstance(icu_time_style, icu_locale);
-  }
-}
-
-// Creates a v8::Array of narrow, abbrev or wide symbols.
-static v8::Handle<v8::Value> GetSymbols(const v8::Arguments& args,
-                                        const icu::UnicodeString* narrow,
-                                        int32_t narrow_count,
-                                        const icu::UnicodeString* abbrev,
-                                        int32_t abbrev_count,
-                                        const icu::UnicodeString* wide,
-                                        int32_t wide_count) {
-  v8::HandleScope handle_scope;
-
-  // Make wide width default.
-  const icu::UnicodeString* result = wide;
-  int32_t count = wide_count;
-
-  if (args.Length() == 1 && args[0]->IsString()) {
-    v8::String::AsciiValue ascii_value(args[0]);
-    if (strcmp(*ascii_value, "abbreviated") == 0) {
-      result = abbrev;
-      count = abbrev_count;
-    } else if (strcmp(*ascii_value, "narrow") == 0) {
-      result = narrow;
-      count = narrow_count;
-    }
-  }
-
-  v8::Handle<v8::Array> symbols = v8::Array::New();
-  for (int32_t i = 0; i < count; ++i) {
-    symbols->Set(i, v8::String::New(
-        reinterpret_cast<const uint16_t*>(result[i].getBuffer()),
-        result[i].length()));
-  }
-
-  return handle_scope.Close(symbols);
-}
-
-// Throws a JavaScript exception.
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
-  // Returns undefined, and schedules an exception to be thrown.
-  return v8::ThrowException(v8::Exception::Error(
-      v8::String::New("DateTimeFormat method called on an object "
-                      "that is not a DateTimeFormat.")));
-}
-
-// Returns icu date/time style.
-static icu::DateFormat::EStyle GetDateTimeStyle(
-    const icu::UnicodeString& type) {
-  if (type == UNICODE_STRING_SIMPLE("medium")) {
-    return icu::DateFormat::kMedium;
-  } else if (type == UNICODE_STRING_SIMPLE("long")) {
-    return icu::DateFormat::kLong;
-  } else if (type == UNICODE_STRING_SIMPLE("full")) {
-    return icu::DateFormat::kFull;
-  }
-
-  return icu::DateFormat::kShort;
-}
-
-} }  // namespace v8::internal
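
The removed CreateDateTimeFormat above boils down to a style lookup plus one of three ICU factory calls. As a reference, here is a minimal standalone sketch of that mapping against the public ICU4C API (the function names here are illustrative, not the original helpers):

```cpp
#include <unicode/datefmt.h>
#include <unicode/locid.h>
#include <unicode/unistr.h>

// Mirrors the deleted GetDateTimeStyle: unknown strings fall back to kShort.
static icu::DateFormat::EStyle StyleFromString(const icu::UnicodeString& s) {
  if (s == UNICODE_STRING_SIMPLE("medium")) return icu::DateFormat::kMedium;
  if (s == UNICODE_STRING_SIMPLE("long")) return icu::DateFormat::kLong;
  if (s == UNICODE_STRING_SIMPLE("full")) return icu::DateFormat::kFull;
  return icu::DateFormat::kShort;
}

// Picks the ICU factory the same way the deleted branch logic did.
static icu::DateFormat* MakeFormat(icu::DateFormat::EStyle date_style,
                                   icu::DateFormat::EStyle time_style,
                                   const icu::Locale& locale) {
  if (date_style == icu::DateFormat::kNone &&
      time_style == icu::DateFormat::kNone) {
    // Neither style given: default to short date, short time.
    return icu::DateFormat::createDateTimeInstance(
        icu::DateFormat::kShort, icu::DateFormat::kShort, locale);
  }
  if (date_style != icu::DateFormat::kNone &&
      time_style != icu::DateFormat::kNone) {
    return icu::DateFormat::createDateTimeInstance(
        date_style, time_style, locale);
  }
  if (date_style != icu::DateFormat::kNone) {
    return icu::DateFormat::createDateInstance(date_style, locale);
  }
  return icu::DateFormat::createTimeInstance(time_style, locale);
}
```
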
diff --git a/src/extensions/experimental/datetime-format.h b/src/extensions/experimental/datetime-format.h
deleted file mode 100644
index a6a228c..0000000
--- a/src/extensions/experimental/datetime-format.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
-
-#include "include/v8.h"
-
-#include "unicode/uversion.h"
-
-namespace U_ICU_NAMESPACE {
-class SimpleDateFormat;
-}
-
-namespace v8 {
-namespace internal {
-
-class DateTimeFormat {
- public:
-  static v8::Handle<v8::Value> JSDateTimeFormat(const v8::Arguments& args);
-
-  // Helper methods for various bindings.
-
-  // Unpacks date format object from corresponding JavaScript object.
-  static icu::SimpleDateFormat* UnpackDateTimeFormat(
-      v8::Handle<v8::Object> obj);
-
-  // Release memory we allocated for the DateFormat once the JS object that
-  // holds the pointer gets garbage collected.
-  static void DeleteDateTimeFormat(v8::Persistent<v8::Value> object,
-                                   void* param);
-
-  // Formats date and returns corresponding string.
-  static v8::Handle<v8::Value> Format(const v8::Arguments& args);
-
-  // All date time symbol methods below return stand-alone names in
-  // either narrow, abbreviated or wide width.
-
-  // Get list of months.
-  static v8::Handle<v8::Value> GetMonths(const v8::Arguments& args);
-
-  // Get list of weekdays.
-  static v8::Handle<v8::Value> GetWeekdays(const v8::Arguments& args);
-
-  // Get list of eras.
-  static v8::Handle<v8::Value> GetEras(const v8::Arguments& args);
-
-  // Get list of day periods.
-  static v8::Handle<v8::Value> GetAmPm(const v8::Arguments& args);
-
- private:
-  DateTimeFormat();
-
-  static v8::Persistent<v8::FunctionTemplate> datetime_format_template_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
diff --git a/src/extensions/experimental/experimental.gyp b/src/extensions/experimental/experimental.gyp
deleted file mode 100644
index 24fb683..0000000
--- a/src/extensions/experimental/experimental.gyp
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of Google Inc. nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
-  'variables': {
-    # TODO(cira): Find out how to pass this value for an arbitrary embedder.
-    # Chromium sets it in common.gypi and force-includes that file for all
-    # sub-projects.
-    'icu_src_dir%': '../../../../third_party/icu',
-  },
-  'targets': [
-    {
-      'target_name': 'i18n_api',
-      'type': 'static_library',
-      'sources': [
-        'break-iterator.cc',
-        'break-iterator.h',
-        'collator.cc',
-        'collator.h',
-        'datetime-format.cc',
-        'datetime-format.h',
-        'i18n-extension.cc',
-        'i18n-extension.h',
-        'i18n-locale.cc',
-        'i18n-locale.h',
-        'i18n-natives.h',
-        'i18n-utils.cc',
-        'i18n-utils.h',
-        'language-matcher.cc',
-        'language-matcher.h',
-        'number-format.cc',
-        'number-format.h',
-        '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
-      ],
-      'include_dirs': [
-        '<(icu_src_dir)/public/common',
-        # v8/ is root for all includes.
-        '../../..'
-      ],
-      'dependencies': [
-        '<(icu_src_dir)/icu.gyp:*',
-        'js2c_i18n#host',
-        '../../../tools/gyp/v8.gyp:v8',
-      ],
-      'direct_dependent_settings': {
-        # Adds -Iv8 for embedders.
-        'include_dirs': [
-          '../../..'
-        ],
-      },
-    },
-    {
-      'target_name': 'js2c_i18n',
-      'type': 'none',
-      'toolsets': ['host'],
-      'variables': {
-        'js_files': [
-          'i18n.js'
-        ],
-      },
-      'actions': [
-        {
-          'action_name': 'js2c_i18n',
-          'inputs': [
-            'i18n-js2c.py',
-            '<@(js_files)',
-          ],
-          'outputs': [
-            '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
-          ],
-          'action': [
-            'python',
-            'i18n-js2c.py',
-            '<@(_outputs)',
-            '<@(js_files)'
-          ],
-        },
-      ],
-    },
-  ],  # targets
-}
diff --git a/src/extensions/experimental/i18n-extension.cc b/src/extensions/experimental/i18n-extension.cc
deleted file mode 100644
index c5afcf0..0000000
--- a/src/extensions/experimental/i18n-extension.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/i18n-extension.h"
-
-#include "src/extensions/experimental/break-iterator.h"
-#include "src/extensions/experimental/collator.h"
-#include "src/extensions/experimental/datetime-format.h"
-#include "src/extensions/experimental/i18n-locale.h"
-#include "src/extensions/experimental/i18n-natives.h"
-#include "src/extensions/experimental/number-format.h"
-
-namespace v8 {
-namespace internal {
-
-I18NExtension* I18NExtension::extension_ = NULL;
-
-I18NExtension::I18NExtension()
-    : v8::Extension("v8/i18n", I18Natives::GetScriptSource()) {
-}
-
-v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction(
-    v8::Handle<v8::String> name) {
-  if (name->Equals(v8::String::New("NativeJSLocale"))) {
-    return v8::FunctionTemplate::New(I18NLocale::JSLocale);
-  } else if (name->Equals(v8::String::New("NativeJSBreakIterator"))) {
-    return v8::FunctionTemplate::New(BreakIterator::JSBreakIterator);
-  } else if (name->Equals(v8::String::New("NativeJSCollator"))) {
-    return v8::FunctionTemplate::New(Collator::JSCollator);
-  } else if (name->Equals(v8::String::New("NativeJSDateTimeFormat"))) {
-    return v8::FunctionTemplate::New(DateTimeFormat::JSDateTimeFormat);
-  } else if (name->Equals(v8::String::New("NativeJSNumberFormat"))) {
-    return v8::FunctionTemplate::New(NumberFormat::JSNumberFormat);
-  }
-
-  return v8::Handle<v8::FunctionTemplate>();
-}
-
-I18NExtension* I18NExtension::get() {
-  if (!extension_) {
-    extension_ = new I18NExtension();
-  }
-  return extension_;
-}
-
-void I18NExtension::Register() {
-  static v8::DeclareExtension i18n_extension_declaration(I18NExtension::get());
-}
-
-} }  // namespace v8::internal
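
The Register() call above only declares the extension; it is not active until an embedder opts in per context. A hedged sketch of that step, assuming the 2011-era V8 embedding API this tree ships:

```cpp
#include <v8.h>

// Creates a context with the "v8/i18n" extension enabled. Contexts created
// without this configuration do not see the extension's native functions.
v8::Persistent<v8::Context> CreateContextWithI18N() {
  const char* extension_names[] = { "v8/i18n" };
  v8::ExtensionConfiguration extensions(1, extension_names);
  return v8::Context::New(&extensions);
}
```
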
diff --git a/src/extensions/experimental/i18n-js2c.py b/src/extensions/experimental/i18n-js2c.py
deleted file mode 100644
index 9c3128b..0000000
--- a/src/extensions/experimental/i18n-js2c.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of Google Inc. nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# This is a utility for converting I18N JavaScript source code into C-style
-# char arrays. It is used to embed JavaScript code in the V8
-# library.
-# This is a pared-down copy of v8/tools/js2c.py that avoids use of
-# v8/src/natives.h and produces a different .cc template.
-
-import re, sys
-
-
-def ToCArray(lines):
-  result = []
-  for ch in lines:
-    value = ord(ch)
-    assert value < 128
-    result.append(str(value))
-  result.append("0")
-  return ", ".join(result)
-
-
-def RemoveCommentsAndTrailingWhitespace(lines):
-  lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
-  lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
-  lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
-  return lines
-
-
-def ReadFile(filename):
-  file = open(filename, "rt")
-  try:
-    lines = file.read()
-  finally:
-    file.close()
-  return lines
-
-
-EVAL_PATTERN = re.compile(r'\beval\s*\(')
-WITH_PATTERN = re.compile(r'\bwith\s*\(')
-
-
-def Validate(lines, file):
-  lines = RemoveCommentsAndTrailingWhitespace(lines)
-  # Because of simplified context setup, eval and with are not
-  # allowed in the natives files.
-  eval_match = EVAL_PATTERN.search(lines)
-  if eval_match:
-    raise ("Eval disallowed in natives: %s" % file)
-  with_match = WITH_PATTERN.search(lines)
-  if with_match:
-    raise ("With statements disallowed in natives: %s" % file)
-
-
-HEADER_TEMPLATE = """\
-// Copyright 2011 Google Inc. All Rights Reserved.
-
-// This file was generated from .js source files by gyp.  If you
-// want to make changes to this file, change either the JavaScript
-// source files or the i18n-js2c.py script.
-
-#include "src/extensions/experimental/i18n-natives.h"
-
-namespace v8 {
-namespace internal {
-
-// static
-const char* I18Natives::GetScriptSource() {
-  // JavaScript source gets injected here.
-  static const char i18n_source[] = {%s};
-
-  return i18n_source;
-}
-
-}  // internal
-}  // v8
-"""
-
-
-def JS2C(source, target):
-  filename = str(source)
-
-  lines = ReadFile(filename)
-  Validate(lines, filename)
-  data = ToCArray(lines)
-
-  # Emit result
-  output = open(target, "w")
-  output.write(HEADER_TEMPLATE % data)
-  output.close()
-
-
-def main():
-  target = sys.argv[1]
-  source = sys.argv[2]
-  JS2C(source, target)
-
-
-if __name__ == "__main__":
-  main()
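
For a concrete picture of what the HEADER_TEMPLATE expands to, a one-statement natives file such as `var x;` would come out roughly as the following generated C++ (illustrative only; the byte values are the ASCII codes of the source plus a trailing NUL):

```cpp
#include "src/extensions/experimental/i18n-natives.h"

namespace v8 {
namespace internal {

// static
const char* I18Natives::GetScriptSource() {
  // 'v' 'a' 'r' ' ' 'x' ';' '\0'
  static const char i18n_source[] = {118, 97, 114, 32, 120, 59, 0};
  return i18n_source;
}

}  // internal
}  // v8
```
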
diff --git a/src/extensions/experimental/i18n-locale.cc b/src/extensions/experimental/i18n-locale.cc
deleted file mode 100644
index 46a5f87..0000000
--- a/src/extensions/experimental/i18n-locale.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/i18n-locale.h"
-
-#include "src/extensions/experimental/i18n-utils.h"
-#include "src/extensions/experimental/language-matcher.h"
-#include "unicode/locid.h"
-#include "unicode/uloc.h"
-
-namespace v8 {
-namespace internal {
-
-const char* const I18NLocale::kLocaleID = "localeID";
-const char* const I18NLocale::kRegionID = "regionID";
-const char* const I18NLocale::kICULocaleID = "icuLocaleID";
-
-v8::Handle<v8::Value> I18NLocale::JSLocale(const v8::Arguments& args) {
-  v8::HandleScope handle_scope;
-
-  if (args.Length() != 1 || !args[0]->IsObject()) {
-    return v8::Undefined();
-  }
-
-  v8::Local<v8::Object> settings = args[0]->ToObject();
-
-  // Get best match for locale.
-  v8::TryCatch try_catch;
-  v8::Handle<v8::Value> locale_id = settings->Get(v8::String::New(kLocaleID));
-  if (try_catch.HasCaught()) {
-    return v8::Undefined();
-  }
-
-  LocaleIDMatch result;
-  if (locale_id->IsArray()) {
-    LanguageMatcher::GetBestMatchForPriorityList(
-        v8::Handle<v8::Array>::Cast(locale_id), &result);
-  } else if (locale_id->IsString()) {
-    LanguageMatcher::GetBestMatchForString(locale_id->ToString(), &result);
-  } else {
-    LanguageMatcher::GetBestMatchForString(v8::String::New(""), &result);
-  }
-
-  // Get best match for region.
-  char region_id[ULOC_COUNTRY_CAPACITY];
-  I18NUtils::StrNCopy(region_id, ULOC_COUNTRY_CAPACITY, "");
-
-  v8::Handle<v8::Value> region = settings->Get(v8::String::New(kRegionID));
-  if (try_catch.HasCaught()) {
-    return v8::Undefined();
-  }
-
-  if (!GetBestMatchForRegionID(result.icu_id, region, region_id)) {
-    // Set region id to empty string because region couldn't be inferred.
-    I18NUtils::StrNCopy(region_id, ULOC_COUNTRY_CAPACITY, "");
-  }
-
-  // Build JavaScript object that contains bcp and icu locale ID and region ID.
-  v8::Handle<v8::Object> locale = v8::Object::New();
-  locale->Set(v8::String::New(kLocaleID), v8::String::New(result.bcp47_id));
-  locale->Set(v8::String::New(kICULocaleID), v8::String::New(result.icu_id));
-  locale->Set(v8::String::New(kRegionID), v8::String::New(region_id));
-
-  return handle_scope.Close(locale);
-}
-
-bool I18NLocale::GetBestMatchForRegionID(
-    const char* locale_id, v8::Handle<v8::Value> region_id, char* result) {
-  if (region_id->IsString() && region_id->ToString()->Length() != 0) {
-    icu::Locale user_locale(
-        icu::Locale("und", *v8::String::Utf8Value(region_id->ToString())));
-    I18NUtils::StrNCopy(
-        result, ULOC_COUNTRY_CAPACITY, user_locale.getCountry());
-    return true;
-  }
-  // Maximize locale_id to infer the region (e.g. expand "de" to "de_Latn_DE"
-  // and grab "DE" from the result).
-  UErrorCode status = U_ZERO_ERROR;
-  char maximized_locale[ULOC_FULLNAME_CAPACITY];
-  uloc_addLikelySubtags(
-      locale_id, maximized_locale, ULOC_FULLNAME_CAPACITY, &status);
-  uloc_getCountry(maximized_locale, result, ULOC_COUNTRY_CAPACITY, &status);
-
-  return !U_FAILURE(status);
-}
-
-} }  // namespace v8::internal
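
The region-inference path above leans on ICU's likely-subtags data. A standalone sketch of just that step (the locale "de" is an example input, not from the original source):

```cpp
// Maximize a bare language ID with uloc_addLikelySubtags, then read the
// inferred country back out, as the deleted GetBestMatchForRegionID did.
#include <cstdio>
#include <unicode/uloc.h>

int main() {
  UErrorCode status = U_ZERO_ERROR;
  char maximized[ULOC_FULLNAME_CAPACITY];
  char region[ULOC_COUNTRY_CAPACITY];
  uloc_addLikelySubtags("de", maximized, ULOC_FULLNAME_CAPACITY, &status);
  uloc_getCountry(maximized, region, ULOC_COUNTRY_CAPACITY, &status);
  if (!U_FAILURE(status)) {
    // With stock ICU data: de -> de_Latn_DE, region DE.
    printf("%s -> %s (region %s)\n", "de", maximized, region);
  }
  return 0;
}
```
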
diff --git a/src/extensions/experimental/i18n-locale.h b/src/extensions/experimental/i18n-locale.h
deleted file mode 100644
index 607818c..0000000
--- a/src/extensions/experimental/i18n-locale.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
-
-#include "include/v8.h"
-
-namespace v8 {
-namespace internal {
-
-class I18NLocale {
- public:
-  I18NLocale() {}
-
-  // Implementations of window.Locale methods.
-  static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
-
-  // Infers region id from the locale id, or uses the user-specified region id.
-  // Result is canonicalized.
-  // Returns status of ICU operation (maximizing locale or get region call).
-  static bool GetBestMatchForRegionID(
-      const char* locale_id, v8::Handle<v8::Value> regions, char* result);
-
- private:
-  // Key name for localeID parameter.
-  static const char* const kLocaleID;
-  // Key name for regionID parameter.
-  static const char* const kRegionID;
-  // Key name for the icuLocaleID result.
-  static const char* const kICULocaleID;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
diff --git a/src/extensions/experimental/i18n-natives.h b/src/extensions/experimental/i18n-natives.h
deleted file mode 100644
index 37362d0..0000000
--- a/src/extensions/experimental/i18n-natives.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
-
-namespace v8 {
-namespace internal {
-
-class I18Natives {
- public:
-  // Gets script source from generated file.
-  // Source is statically allocated string.
-  static const char* GetScriptSource();
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
diff --git a/src/extensions/experimental/i18n-utils.cc b/src/extensions/experimental/i18n-utils.cc
deleted file mode 100644
index dc2be1a..0000000
--- a/src/extensions/experimental/i18n-utils.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/i18n-utils.h"
-
-#include <string.h>
-
-#include "unicode/unistr.h"
-
-namespace v8 {
-namespace internal {
-
-// static
-void I18NUtils::StrNCopy(char* dest, int length, const char* src) {
-  if (!dest || !src) return;
-
-  strncpy(dest, src, length);
-  dest[length - 1] = '\0';
-}
-
-// static
-bool I18NUtils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
-                                     const char* setting,
-                                     icu::UnicodeString* result) {
-  if (!setting || !result) return false;
-
-  v8::HandleScope handle_scope;
-  v8::TryCatch try_catch;
-  v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
-  if (try_catch.HasCaught()) {
-    return false;
-  }
-  // No need to check if |value| is empty because it's taken care of
-  // by TryCatch above.
-  if (!value->IsUndefined() && !value->IsNull() && value->IsString()) {
-    v8::String::Utf8Value utf8_value(value);
-    if (*utf8_value == NULL) return false;
-    result->setTo(icu::UnicodeString::fromUTF8(*utf8_value));
-    return true;
-  }
-  return false;
-}
-
-// static
-void I18NUtils::AsciiToUChar(const char* source,
-                             int32_t source_length,
-                             UChar* target,
-                             int32_t target_length) {
-  int32_t length =
-      source_length < target_length ? source_length : target_length;
-
-  if (length <= 0) {
-    return;
-  }
-
-  for (int32_t i = 0; i < length - 1; ++i) {
-    target[i] = static_cast<UChar>(source[i]);
-  }
-
-  target[length - 1] = 0x0u;
-}
-
-} }  // namespace v8::internal
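
One behavioral note worth preserving: unlike plain strncpy, the deleted helper always NUL-terminates the destination. A minimal usage sketch (buffer size and input string are illustrative):

```cpp
#include <cstdio>
#include <cstring>

// Same contract as the deleted I18NUtils::StrNCopy: copy at most
// (length - 1) content bytes and always NUL-terminate the destination.
static void StrNCopy(char* dest, int length, const char* src) {
  if (!dest || !src) return;
  strncpy(dest, src, length);
  dest[length - 1] = '\0';  // plain strncpy would leave this byte as 'L'
}

int main() {
  char buf[4];
  StrNCopy(buf, sizeof(buf), "de_Latn_DE");
  printf("%s\n", buf);  // prints "de_": truncated, but safely terminated
  return 0;
}
```
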
diff --git a/src/extensions/experimental/i18n-utils.h b/src/extensions/experimental/i18n-utils.h
deleted file mode 100644
index 7c31528..0000000
--- a/src/extensions/experimental/i18n-utils.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
-
-#include "include/v8.h"
-
-#include "unicode/uversion.h"
-
-namespace U_ICU_NAMESPACE {
-class UnicodeString;
-}
-
-namespace v8 {
-namespace internal {
-
-class I18NUtils {
- public:
-  // Safe string copy. Null terminates the destination. Copies at most
-  // (length - 1) bytes.
-  // We can't use snprintf since it's not supported on all relevant platforms.
-  // We can't use OS::SNPrintF, it's only for internal code.
-  static void StrNCopy(char* dest, int length, const char* src);
-
-  // Extract a string setting named in |settings| and set it to |result|.
-  // Return true if it's specified. Otherwise, return false.
-  static bool ExtractStringSetting(const v8::Handle<v8::Object>& settings,
-                                   const char* setting,
-                                   icu::UnicodeString* result);
-
-  // Converts ASCII array into UChar array.
-  // Target is always \0 terminated.
-  static void AsciiToUChar(const char* source,
-                           int32_t source_length,
-                           UChar* target,
-                           int32_t target_length);
-
- private:
-  I18NUtils() {}
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
diff --git a/src/extensions/experimental/i18n.js b/src/extensions/experimental/i18n.js
deleted file mode 100644
index 56bcf9e..0000000
--- a/src/extensions/experimental/i18n.js
+++ /dev/null
@@ -1,380 +0,0 @@
-// Copyright 2006-2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// TODO(cira): Rename v8Locale into LocaleInfo once we have stable API.
-/**
- * LocaleInfo class is an aggregate class of all i18n API calls.
- * @param {Object} settings - localeID and regionID to create LocaleInfo from.
- *   {Array.<string>|string} settings.localeID -
- *     Unicode identifier of the locale.
- *     See http://unicode.org/reports/tr35/#BCP_47_Conformance
- *   {string} settings.regionID - ISO3166 region ID with addition of
- *     invalid, undefined and reserved region codes.
- * @constructor
- */
-v8Locale = function(settings) {
-  native function NativeJSLocale();
-
-  // Assume user wanted to do v8Locale("sr");
-  if (typeof(settings) === "string") {
-    settings = {'localeID': settings};
-  }
-
-  var properties = NativeJSLocale(
-      v8Locale.__createSettingsOrDefault(settings, {'localeID': 'root'}));
-
-  // Keep the resolved ICU locale ID around to avoid resolving localeID to
-  // ICU locale ID every time BreakIterator, Collator and so forth are called.
-  this.__icuLocaleID = properties.icuLocaleID;
-  this.options = {'localeID': properties.localeID,
-                  'regionID': properties.regionID};
-};
-
-/**
- * Clones existing locale with possible overrides for some of the options.
- * @param {!Object} settings - overrides for current locale settings.
- * @returns {Object} - new LocaleInfo object.
- */
-v8Locale.prototype.derive = function(settings) {
-  return new v8Locale(
-      v8Locale.__createSettingsOrDefault(settings, this.options));
-};
-
-/**
- * v8BreakIterator class implements locale-aware segmentation.
- * It is not part of EcmaScript proposal.
- * @param {Object} locale - locale object to pass to break
- *   iterator implementation.
- * @param {string} type - type of segmentation:
- *   - character
- *   - word
- *   - sentence
- *   - line
- * @private
- * @constructor
- */
-v8Locale.v8BreakIterator = function(locale, type) {
-  native function NativeJSBreakIterator();
-
-  locale = v8Locale.__createLocaleOrDefault(locale);
-  // BCP47 ID would work in this case, but we use ICU locale for consistency.
-  var iterator = NativeJSBreakIterator(locale.__icuLocaleID, type);
-  iterator.type = type;
-  return iterator;
-};
-
-/**
- * Type of the break we encountered during previous iteration.
- * @type{Enum}
- */
-v8Locale.v8BreakIterator.BreakType = {
-  'unknown': -1,
-  'none': 0,
-  'number': 100,
-  'word': 200,
-  'kana': 300,
-  'ideo': 400
-};
-
-/**
- * Creates new v8BreakIterator based on current locale.
- * @param {string} - type of segmentation. See constructor.
- * @returns {Object} - new v8BreakIterator object.
- */
-v8Locale.prototype.v8CreateBreakIterator = function(type) {
-  return new v8Locale.v8BreakIterator(this, type);
-};
-
-// TODO(jungshik): Set |collator.options| to actually recognized / resolved
-// values.
-/**
- * Collator class implements locale-aware sort.
- * @param {Object} locale - locale object to pass to collator implementation.
- * @param {Object} settings - collation flags:
- *   - ignoreCase
- *   - ignoreAccents
- *   - numeric
- * @private
- * @constructor
- */
-v8Locale.Collator = function(locale, settings) {
-  native function NativeJSCollator();
-
-  locale = v8Locale.__createLocaleOrDefault(locale);
-  var collator = NativeJSCollator(
-      locale.__icuLocaleID, v8Locale.__createSettingsOrDefault(settings, {}));
-  return collator;
-};
-
-/**
- * Creates new Collator based on current locale.
- * @param {Object} - collation flags. See constructor.
- * @returns {Object} - new Collator object.
- */
-v8Locale.prototype.createCollator = function(settings) {
-  return new v8Locale.Collator(this, settings);
-};
-
-/**
- * DateTimeFormat class implements locale-aware date and time formatting.
- * Constructor is not part of public API.
- * @param {Object} locale - locale object to pass to formatter.
- * @param {Object} settings - formatting flags:
- *   - skeleton
- *   - dateStyle
- *   - timeStyle
- * @private
- * @constructor
- */
-v8Locale.__DateTimeFormat = function(locale, settings) {
-  native function NativeJSDateTimeFormat();
-
-  settings = v8Locale.__createSettingsOrDefault(settings, {});
-
-  var cleanSettings = {};
-  if (settings.hasOwnProperty('skeleton')) {
-    cleanSettings['skeleton'] = settings['skeleton'];
-  } else {
-    cleanSettings = {};
-    if (settings.hasOwnProperty('dateStyle')) {
-      var ds = settings['dateStyle'];
-      if (!/^(short|medium|long|full)$/.test(ds)) ds = 'short';
-      cleanSettings['dateStyle'] = ds;
-    } else if (settings.hasOwnProperty('dateType')) {
-      // Obsolete. New spec requires dateStyle, but we'll keep this around
-      // for current users.
-      // TODO(cira): Remove when all internal users switch to dateStyle.
-      var dt = settings['dateType'];
-      if (!/^(short|medium|long|full)$/.test(dt)) dt = 'short';
-      cleanSettings['dateStyle'] = dt;
-    }
-
-    if (settings.hasOwnProperty('timeStyle')) {
-      var ts = settings['timeStyle'];
-      if (!/^(short|medium|long|full)$/.test(ts)) ts = 'short';
-      cleanSettings['timeStyle'] = ts;
-    } else if (settings.hasOwnProperty('timeType')) {
-      // TODO(cira): Remove when all internal users switch to timeStyle.
-      var tt = settings['timeType'];
-      if (!/^(short|medium|long|full)$/.test(tt)) tt = 'short';
-      cleanSettings['timeStyle'] = tt;
-    }
-  }
-
-  // Default is to show short date and time.
-  if (!cleanSettings.hasOwnProperty('skeleton') &&
-      !cleanSettings.hasOwnProperty('dateStyle') &&
-      !cleanSettings.hasOwnProperty('timeStyle')) {
-    cleanSettings = {'dateStyle': 'short',
-                     'timeStyle': 'short'};
-  }
-
-  locale = v8Locale.__createLocaleOrDefault(locale);
-  var formatter = NativeJSDateTimeFormat(locale.__icuLocaleID, cleanSettings);
-
-  // NativeJSDateTimeFormat creates formatter.options for us, we just need
-  // to append actual settings to it.
-  for (var key in cleanSettings) {
-    formatter.options[key] = cleanSettings[key];
-  }
-
-  /**
-   * Clones existing date time format with possible overrides for some
-   * of the options.
-   * @param {!Object} overrideSettings - overrides for current format settings.
-   * @returns {Object} - new DateTimeFormat object.
-   * @public
-   */
-  formatter.derive = function(overrideSettings) {
-    // To remove a setting, the user can specify undefined as its value; we
-    // remove it from the map in that case.
-    for (var prop in overrideSettings) {
-      if (settings.hasOwnProperty(prop) && !overrideSettings[prop]) {
-        delete settings[prop];
-      }
-    }
-    return new v8Locale.__DateTimeFormat(
-        locale, v8Locale.__createSettingsOrDefault(overrideSettings, settings));
-  };
-
-  return formatter;
-};
-
-/**
- * Creates new DateTimeFormat based on current locale.
- * @param {Object} - formatting flags. See constructor.
- * @returns {Object} - new DateTimeFormat object.
- */
-v8Locale.prototype.createDateTimeFormat = function(settings) {
-  return new v8Locale.__DateTimeFormat(this, settings);
-};
-
-/**
- * NumberFormat class implements locale-aware number formatting.
- * Constructor is not part of public API.
- * @param {Object} locale - locale object to pass to formatter.
- * @param {Object} settings - formatting flags:
- *   - skeleton
- *   - pattern
- *   - style - decimal, currency, percent or scientific
- *   - currencyCode - ISO 4217 3-letter currency code
- * @private
- * @constructor
- */
-v8Locale.__NumberFormat = function(locale, settings) {
-  native function NativeJSNumberFormat();
-
-  settings = v8Locale.__createSettingsOrDefault(settings, {});
-
-  var cleanSettings = {};
-  if (settings.hasOwnProperty('skeleton')) {
-    // Assign skeleton to cleanSettings and fix invalid currency pattern
-    // if present ('o' stands for the currency sign): 'ooxo' becomes 'o'.
-    cleanSettings['skeleton'] =
-        settings['skeleton'].replace(/\u00a4+[^\u00a4]+\u00a4+/g, '\u00a4');
-  } else if (settings.hasOwnProperty('pattern')) {
-    cleanSettings['pattern'] = settings['pattern'];
-  } else if (settings.hasOwnProperty('style')) {
-    var style = settings['style'];
-    if (!/^(decimal|currency|percent|scientific)$/.test(style)) {
-      style = 'decimal';
-    }
-    cleanSettings['style'] = style;
-  }
-
-  // Default is to show decimal style.
-  if (!cleanSettings.hasOwnProperty('skeleton') &&
-      !cleanSettings.hasOwnProperty('pattern') &&
-      !cleanSettings.hasOwnProperty('style')) {
-    cleanSettings = {'style': 'decimal'};
-  }
-
-  // Add currency code if available and valid (3-letter ASCII code).
-  if (settings.hasOwnProperty('currencyCode') &&
-      /^[a-zA-Z]{3}$/.test(settings['currencyCode'])) {
-    cleanSettings['currencyCode'] = settings['currencyCode'].toUpperCase();
-  }
-
-  locale = v8Locale.__createLocaleOrDefault(locale);
-  // Pass in region ID for proper currency detection. Use ZZ if region is empty.
-  var region = locale.options.regionID !== '' ? locale.options.regionID : 'ZZ';
-  var formatter = NativeJSNumberFormat(
-      locale.__icuLocaleID, 'und_' + region, cleanSettings);
-
-  // ICU doesn't always uppercase the currency code.
-  if (formatter.options.hasOwnProperty('currencyCode')) {
-    formatter.options['currencyCode'] =
-        formatter.options['currencyCode'].toUpperCase();
-  }
-
-  for (var key in cleanSettings) {
-    // Don't overwrite keys that are already set.
-    if (formatter.options.hasOwnProperty(key)) continue;
-
-    formatter.options[key] = cleanSettings[key];
-  }
-
-  /**
-   * Clones existing number format with possible overrides for some
-   * of the options.
-   * @param {!Object} overrideSettings - overrides for current format settings.
-   * @returns {Object} - new or cached NumberFormat object.
-   * @public
-   */
-  formatter.derive = function(overrideSettings) {
-    // To remove a setting, the user can specify undefined as its value; we
-    // remove it from the map in that case.
-    for (var prop in overrideSettings) {
-      if (settings.hasOwnProperty(prop) && !overrideSettings[prop]) {
-        delete settings[prop];
-      }
-    }
-    return new v8Locale.__NumberFormat(
-        locale, v8Locale.__createSettingsOrDefault(overrideSettings, settings));
-  };
-
-  return formatter;
-};
-
-/**
- * Creates new NumberFormat based on current locale.
- * @param {Object} - formatting flags. See constructor.
- * @returns {Object} - new or cached NumberFormat object.
- */
-v8Locale.prototype.createNumberFormat = function(settings) {
-  return new v8Locale.__NumberFormat(this, settings);
-};
-
-/**
- * Merges user settings and defaults.
- * Settings that are not of object type are rejected.
- * Actual property values are not validated, but whitespace is trimmed if they
- * are strings.
- * @param {!Object} settings - user provided settings.
- * @param {!Object} defaults - default values for this type of settings.
- * @returns {Object} - valid settings object.
- * @private
- */
-v8Locale.__createSettingsOrDefault = function(settings, defaults) {
-  if (!settings || typeof(settings) !== 'object') {
-    return defaults;
-  }
-  for (var key in defaults) {
-    if (!settings.hasOwnProperty(key)) {
-      settings[key] = defaults[key];
-    }
-  }
-  // Clean up settings.
-  for (var key in settings) {
-    // Trim whitespace.
-    if (typeof(settings[key]) === "string") {
-      settings[key] = settings[key].trim();
-    }
-    // Remove all properties that are set to undefined/null. This allows the
-    // derive method to remove a setting we no longer need.
-    if (!settings[key]) {
-      delete settings[key];
-    }
-  }
-
-  return settings;
-};
-
-/**
- * If locale is valid (defined and of v8Locale type) we return it. If not
- * we create default locale and return it.
- * @param {!Object} locale - user provided locale.
- * @returns {Object} - v8Locale object.
- * @private
- */
-v8Locale.__createLocaleOrDefault = function(locale) {
-  if (!locale || !(locale instanceof v8Locale)) {
-    return new v8Locale();
-  } else {
-    return locale;
-  }
-};
diff --git a/src/extensions/experimental/language-matcher.cc b/src/extensions/experimental/language-matcher.cc
deleted file mode 100644
index 127e571..0000000
--- a/src/extensions/experimental/language-matcher.cc
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// TODO(cira): Remove LanguageMatcher from v8 when ICU implements
-// language matching API.
-
-#include "src/extensions/experimental/language-matcher.h"
-
-#include <string.h>
-
-#include "src/extensions/experimental/i18n-utils.h"
-#include "unicode/datefmt.h"  // For getAvailableLocales
-#include "unicode/locid.h"
-#include "unicode/uloc.h"
-
-namespace v8 {
-namespace internal {
-
-const unsigned int LanguageMatcher::kLanguageWeight = 75;
-const unsigned int LanguageMatcher::kScriptWeight = 20;
-const unsigned int LanguageMatcher::kRegionWeight = 5;
-const unsigned int LanguageMatcher::kThreshold = 50;
-const unsigned int LanguageMatcher::kPositionBonus = 1;
-const char* const LanguageMatcher::kDefaultLocale = "root";
-
-static const char* GetLanguageException(const char*);
-static bool BCP47ToICUFormat(const char*, char*);
-static int CompareLocaleSubtags(const char*, const char*);
-static bool BuildLocaleName(const char*, const char*, LocaleIDMatch*);
-
-LocaleIDMatch::LocaleIDMatch()
-    : score(-1) {
-  I18NUtils::StrNCopy(
-      bcp47_id, ULOC_FULLNAME_CAPACITY, LanguageMatcher::kDefaultLocale);
-
-  I18NUtils::StrNCopy(
-      icu_id, ULOC_FULLNAME_CAPACITY, LanguageMatcher::kDefaultLocale);
-}
-
-LocaleIDMatch& LocaleIDMatch::operator=(const LocaleIDMatch& rhs) {
-  I18NUtils::StrNCopy(this->bcp47_id, ULOC_FULLNAME_CAPACITY, rhs.bcp47_id);
-  I18NUtils::StrNCopy(this->icu_id, ULOC_FULLNAME_CAPACITY, rhs.icu_id);
-  this->score = rhs.score;
-
-  return *this;
-}
-
-// static
-void LanguageMatcher::GetBestMatchForPriorityList(
-    v8::Handle<v8::Array> locales, LocaleIDMatch* result) {
-  v8::HandleScope handle_scope;
-
-  unsigned int position_bonus = locales->Length() * kPositionBonus;
-
-  int max_score = 0;
-  LocaleIDMatch match;
-  for (unsigned int i = 0; i < locales->Length(); ++i) {
-    position_bonus -= kPositionBonus;
-
-    v8::TryCatch try_catch;
-    v8::Local<v8::Value> locale_id = locales->Get(v8::Integer::New(i));
-
-    // Return default if exception is raised when reading parameter.
-    if (try_catch.HasCaught()) break;
-
-    // JavaScript arrays can be heterogeneous, so check whether each
-    // item is a string.
-    if (!locale_id->IsString()) continue;
-
-    if (!CompareToSupportedLocaleIDList(locale_id->ToString(), &match)) {
-      continue;
-    }
-
-    // Skip items under threshold.
-    if (match.score < kThreshold) continue;
-
-    match.score += position_bonus;
-    if (match.score > max_score) {
-      *result = match;
-
-      max_score = match.score;
-    }
-  }
-}
-
-// static
-void LanguageMatcher::GetBestMatchForString(
-    v8::Handle<v8::String> locale, LocaleIDMatch* result) {
-  LocaleIDMatch match;
-
-  if (CompareToSupportedLocaleIDList(locale, &match) &&
-      match.score >= kThreshold) {
-    *result = match;
-  }
-}
-
-// static
-bool LanguageMatcher::CompareToSupportedLocaleIDList(
-    v8::Handle<v8::String> locale_id, LocaleIDMatch* result) {
-  static int32_t available_count = 0;
-  // Depending on how ICU data is built, locales returned by
-  // Locale::getAvailableLocales() are not guaranteed to support DateFormat,
-  // Collation and other services.  We can call getAvailableLocale() of all the
-  // services we want to support and take the intersection of them all, but
-  // using DateFormat::getAvailableLocales() should suffice.
-  // TODO(cira): Maybe make this thread-safe?
-  static const icu::Locale* available_locales =
-      icu::DateFormat::getAvailableLocales(available_count);
-
-  // Skip this locale_id if it's not in ASCII.
-  static LocaleIDMatch default_match;
-  v8::String::AsciiValue ascii_value(locale_id);
-  if (*ascii_value == NULL) return false;
-
-  char locale[ULOC_FULLNAME_CAPACITY];
-  if (!BCP47ToICUFormat(*ascii_value, locale)) return false;
-
-  icu::Locale input_locale(locale);
-
-  // Position of the best match locale in list of available locales.
-  int position = -1;
-  const char* language = GetLanguageException(input_locale.getLanguage());
-  const char* script = input_locale.getScript();
-  const char* region = input_locale.getCountry();
-  for (int32_t i = 0; i < available_count; ++i) {
-    int current_score = 0;
-    int sign =
-        CompareLocaleSubtags(language, available_locales[i].getLanguage());
-    current_score += sign * kLanguageWeight;
-
-    sign = CompareLocaleSubtags(script, available_locales[i].getScript());
-    current_score += sign * kScriptWeight;
-
-    sign = CompareLocaleSubtags(region, available_locales[i].getCountry());
-    current_score += sign * kRegionWeight;
-
-    if (current_score >= kThreshold && current_score > result->score) {
-      result->score = current_score;
-      position = i;
-    }
-  }
-
-  // Didn't find any good matches so use defaults.
-  if (position == -1) return false;
-
-  return BuildLocaleName(available_locales[position].getBaseName(),
-                         input_locale.getName(), result);
-}
-
-// For some unsupported language subtags it is better to fall back to a
-// related supported language than to the default.
-static const char* GetLanguageException(const char* language) {
-  // Serbo-croatian to Serbian.
-  if (!strcmp(language, "sh")) return "sr";
-
-  // Norwegian to Norwegian Bokmal.
-  if (!strcmp(language, "no")) return "nb";
-
-  // Moldavian to Romanian.
-  if (!strcmp(language, "mo")) return "ro";
-
-  // Tagalog to Filipino.
-  if (!strcmp(language, "tl")) return "fil";
-
-  return language;
-}
-
-// Converts user input from BCP47 locale id format to ICU compatible format.
-// Returns false if uloc_forLanguageTag call fails or if extension is too long.
-static bool BCP47ToICUFormat(const char* locale_id, char* result) {
-  UErrorCode status = U_ZERO_ERROR;
-  int32_t locale_size = 0;
-
-  char locale[ULOC_FULLNAME_CAPACITY];
-  I18NUtils::StrNCopy(locale, ULOC_FULLNAME_CAPACITY, locale_id);
-
-  // uloc_forLanguageTag has a bug where a long extension can crash the code.
-  // We need to check that the extension part of the language id fits the limit.
-  // ICU bug: http://bugs.icu-project.org/trac/ticket/8519
-  const char* extension = strstr(locale_id, "-u-");
-  if (extension != NULL &&
-      strlen(extension) > ULOC_KEYWORD_AND_VALUES_CAPACITY) {
-    // Truncate to get non-crashing string, but still preserve base language.
-    int base_length = strlen(locale_id) - strlen(extension);
-    locale[base_length] = '\0';
-  }
-
-  uloc_forLanguageTag(locale, result, ULOC_FULLNAME_CAPACITY,
-                      &locale_size, &status);
-  return !U_FAILURE(status);
-}
-
-// Compares locale id subtags.
-// Returns 1 for match or -1 for mismatch.
-static int CompareLocaleSubtags(const char* lsubtag, const char* rsubtag) {
-  return strcmp(lsubtag, rsubtag) == 0 ? 1 : -1;
-}
-
-// Builds a BCP47 compliant locale id from base name of matched locale and
-// full user specified locale.
-// Returns false if uloc_toLanguageTag failed to convert locale id.
-// Example:
-//   base_name of matched locale (ICU ID): de_DE
-//   input_locale_name (ICU ID): de_AT@collation=phonebk
-//   result (ICU ID): de_DE@collation=phonebk
-//   result (BCP47 ID): de-DE-u-co-phonebk
-static bool BuildLocaleName(const char* base_name,
-                            const char* input_locale_name,
-                            LocaleIDMatch* result) {
-  I18NUtils::StrNCopy(result->icu_id, ULOC_LANG_CAPACITY, base_name);
-
-  // Get extensions (if any) from the original locale.
-  const char* extension = strchr(input_locale_name, ULOC_KEYWORD_SEPARATOR);
-  if (extension != NULL) {
-    I18NUtils::StrNCopy(result->icu_id + strlen(base_name),
-                        ULOC_KEYWORD_AND_VALUES_CAPACITY, extension);
-  } else {
-    I18NUtils::StrNCopy(result->icu_id, ULOC_LANG_CAPACITY, base_name);
-  }
-
-  // Convert ICU locale name into BCP47 format.
-  UErrorCode status = U_ZERO_ERROR;
-  uloc_toLanguageTag(result->icu_id, result->bcp47_id,
-                     ULOC_FULLNAME_CAPACITY, false, &status);
-  return !U_FAILURE(status);
-}
-
-} }  // namespace v8::internal
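
The weights in the deleted matcher (75/20/5, threshold 50, position bonus 1) make the scoring arithmetic easy to check by hand. A minimal sketch of the core computation, with illustrative names (the original folds this into CompareToSupportedLocaleIDList):

```cpp
// Each matching subtag adds its weight, each mismatch subtracts it, and
// only totals at or above the threshold are accepted.
#include <cstring>

static const int kLanguageWeight = 75;
static const int kScriptWeight = 20;
static const int kRegionWeight = 5;
static const int kThreshold = 50;

// Returns +1 on subtag match, -1 on mismatch (as CompareLocaleSubtags did).
static int Sign(const char* lhs, const char* rhs) {
  return strcmp(lhs, rhs) == 0 ? 1 : -1;
}

static int Score(const char* lang, const char* script, const char* region,
                 const char* cand_lang, const char* cand_script,
                 const char* cand_region) {
  return Sign(lang, cand_lang) * kLanguageWeight +
         Sign(script, cand_script) * kScriptWeight +
         Sign(region, cand_region) * kRegionWeight;
}
// Example: "sr"/"Latn"/"RS" against a candidate "sr"/""/"" scores
// 75 - 20 - 5 = 50, exactly kThreshold, so the match is accepted; a
// language mismatch alone (-75 + 20 + 5 = -50) is always rejected.
```
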
diff --git a/src/extensions/experimental/language-matcher.h b/src/extensions/experimental/language-matcher.h
deleted file mode 100644
index dd29304..0000000
--- a/src/extensions/experimental/language-matcher.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
-
-#include "include/v8.h"
-
-#include "unicode/uloc.h"
-
-namespace v8 {
-namespace internal {
-
-struct LocaleIDMatch {
-  LocaleIDMatch();
-
-  LocaleIDMatch& operator=(const LocaleIDMatch& rhs);
-
-  // BCP47 locale id - "de-Latn-DE-u-co-phonebk".
-  char bcp47_id[ULOC_FULLNAME_CAPACITY];
-
-  // ICU locale id - "de_Latn_DE@collation=phonebk".
-  char icu_id[ULOC_FULLNAME_CAPACITY];
-
-  // Score for this locale.
-  int score;
-};
-
-class LanguageMatcher {
- public:
-  // Default locale.
-  static const char* const kDefaultLocale;
-
-  // Finds the best supported locale for a given list of locale identifiers.
-  // It preserves the extension for the locale id.
-  static void GetBestMatchForPriorityList(
-      v8::Handle<v8::Array> locale_list, LocaleIDMatch* result);
-
-  // Finds the best supported locale for a single locale identifier.
-  // It preserves the extension for the locale id.
-  static void GetBestMatchForString(
-      v8::Handle<v8::String> locale_id, LocaleIDMatch* result);
-
- private:
-  // If language subtags match add this amount to the score.
-  static const unsigned int kLanguageWeight;
-
-  // If script subtags match add this amount to the score.
-  static const unsigned int kScriptWeight;
-
-  // If region subtags match add this amount to the score.
-  static const unsigned int kRegionWeight;
-
-  // LocaleID match score has to be over this number to accept the match.
-  static const unsigned int kThreshold;
-
-  // For breaking ties in priority queue.
-  static const unsigned int kPositionBonus;
-
-  LanguageMatcher();
-
-  // Compares locale_id to the supported list of locales and returns best
-  // match.
-  // Returns false if it fails to convert locale id from ICU to BCP47 format.
-  static bool CompareToSupportedLocaleIDList(v8::Handle<v8::String> locale_id,
-                                             LocaleIDMatch* result);
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
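
The weight constants declared above were defined in the (likewise deleted)
language-matcher.cc, so their values are not visible in this hunk. A
self-contained sketch of the scoring idea, with illustrative values that are
assumptions rather than the ones V8 used:

  #include <cstring>

  // Assumed weights for illustration only.
  const unsigned kLanguageWeight = 75;
  const unsigned kScriptWeight = 20;
  const unsigned kRegionWeight = 5;
  const unsigned kThreshold = 75;

  // Scores a candidate locale against the requested one, subtag by subtag
  // (index 0 = language, 1 = script, 2 = region).
  unsigned Score(const char* requested[3], const char* candidate[3]) {
    const unsigned weights[3] = {kLanguageWeight, kScriptWeight, kRegionWeight};
    unsigned score = 0;
    for (int i = 0; i < 3; ++i) {
      if (std::strcmp(requested[i], candidate[i]) == 0) score += weights[i];
    }
    return score;  // A candidate is accepted only if score >= kThreshold.
  }
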
diff --git a/src/extensions/experimental/number-format.cc b/src/extensions/experimental/number-format.cc
deleted file mode 100644
index 2932c52..0000000
--- a/src/extensions/experimental/number-format.cc
+++ /dev/null
@@ -1,374 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/number-format.h"
-
-#include <string.h>
-
-#include "src/extensions/experimental/i18n-utils.h"
-#include "unicode/dcfmtsym.h"
-#include "unicode/decimfmt.h"
-#include "unicode/locid.h"
-#include "unicode/numfmt.h"
-#include "unicode/uchar.h"
-#include "unicode/ucurr.h"
-#include "unicode/unum.h"
-#include "unicode/uversion.h"
-
-namespace v8 {
-namespace internal {
-
-const int NumberFormat::kCurrencyCodeLength = 4;
-
-v8::Persistent<v8::FunctionTemplate> NumberFormat::number_format_template_;
-
-static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String>,
-                                              v8::Handle<v8::String>,
-                                              v8::Handle<v8::Object>);
-static icu::DecimalFormat* CreateFormatterFromSkeleton(
-    const icu::Locale&, const icu::UnicodeString&, UErrorCode*);
-static icu::DecimalFormatSymbols* GetFormatSymbols(const icu::Locale&);
-static bool GetCurrencyCode(const icu::Locale&,
-                            const char* const,
-                            v8::Handle<v8::Object>,
-                            UChar*);
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
-
-icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
-    v8::Handle<v8::Object> obj) {
-  if (number_format_template_->HasInstance(obj)) {
-    return static_cast<icu::DecimalFormat*>(
-        obj->GetPointerFromInternalField(0));
-  }
-
-  return NULL;
-}
-
-void NumberFormat::DeleteNumberFormat(v8::Persistent<v8::Value> object,
-                                      void* param) {
-  v8::Persistent<v8::Object> persistent_object =
-      v8::Persistent<v8::Object>::Cast(object);
-
-  // First delete the hidden C++ object.
-  // Unpacking should never return NULL here. That would only happen if
-  // this method is used as the weak callback for persistent handles not
-  // pointing to a number formatter.
-  delete UnpackNumberFormat(persistent_object);
-
-  // Then dispose of the persistent handle to JS object.
-  persistent_object.Dispose();
-}
-
-v8::Handle<v8::Value> NumberFormat::Format(const v8::Arguments& args) {
-  v8::HandleScope handle_scope;
-
-  if (args.Length() != 1 || !args[0]->IsNumber()) {
-    // Just return NaN on invalid input.
-    return v8::String::New("NaN");
-  }
-
-  icu::DecimalFormat* number_format = UnpackNumberFormat(args.Holder());
-  if (!number_format) {
-    return ThrowUnexpectedObjectError();
-  }
-
-  // ICU will handle actual NaN value properly and return NaN string.
-  icu::UnicodeString result;
-  number_format->format(args[0]->NumberValue(), result);
-
-  return v8::String::New(
-      reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
-}
-
-v8::Handle<v8::Value> NumberFormat::JSNumberFormat(const v8::Arguments& args) {
-  v8::HandleScope handle_scope;
-
-  // Expect locale id, region id and settings.
-  if (args.Length() != 3 ||
-      !args[0]->IsString() || !args[1]->IsString() || !args[2]->IsObject()) {
-    return v8::ThrowException(v8::Exception::SyntaxError(
-        v8::String::New("Locale, region and number settings are required.")));
-  }
-
-  icu::DecimalFormat* number_format = CreateNumberFormat(
-      args[0]->ToString(), args[1]->ToString(), args[2]->ToObject());
-
-  if (number_format_template_.IsEmpty()) {
-    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
-
-    raw_template->SetClassName(v8::String::New("v8Locale.NumberFormat"));
-
-    // Define internal field count on instance template.
-    v8::Local<v8::ObjectTemplate> object_template =
-        raw_template->InstanceTemplate();
-
-    // Set aside internal field for icu number formatter.
-    object_template->SetInternalFieldCount(1);
-
-    // Define all of the prototype methods on prototype template.
-    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
-    proto->Set(v8::String::New("format"),
-               v8::FunctionTemplate::New(Format));
-
-    number_format_template_ =
-        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
-  }
-
-  // Create an empty object wrapper.
-  v8::Local<v8::Object> local_object =
-      number_format_template_->GetFunction()->NewInstance();
-  v8::Persistent<v8::Object> wrapper =
-      v8::Persistent<v8::Object>::New(local_object);
-
-  // Set number formatter as internal field of the resulting JS object.
-  wrapper->SetPointerInInternalField(0, number_format);
-
-  // Create options key.
-  v8::Local<v8::Object> options = v8::Object::New();
-
-  // Show what ICU decided to use for easier problem tracking.
-  // Keep it as a v8-specific extension.
-  icu::UnicodeString pattern;
-  number_format->toPattern(pattern);
-  options->Set(v8::String::New("v8ResolvedPattern"),
-               v8::String::New(reinterpret_cast<const uint16_t*>(
-                   pattern.getBuffer()), pattern.length()));
-
-  // Set resolved currency code in options.currency if not empty.
-  icu::UnicodeString currency(number_format->getCurrency());
-  if (!currency.isEmpty()) {
-    options->Set(v8::String::New("currencyCode"),
-                 v8::String::New(reinterpret_cast<const uint16_t*>(
-                     currency.getBuffer()), currency.length()));
-  }
-
-  wrapper->Set(v8::String::New("options"), options);
-
-  // Make the object handle weak so the formatter is deleted once GC kicks in.
-  wrapper.MakeWeak(NULL, DeleteNumberFormat);
-
-  return wrapper;
-}
-
-// Returns DecimalFormat.
-static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String> locale,
-                                              v8::Handle<v8::String> region,
-                                              v8::Handle<v8::Object> settings) {
-  v8::HandleScope handle_scope;
-
-  v8::String::AsciiValue ascii_locale(locale);
-  icu::Locale icu_locale(*ascii_locale);
-
-  // Make formatter from skeleton.
-  icu::DecimalFormat* number_format = NULL;
-  UErrorCode status = U_ZERO_ERROR;
-  icu::UnicodeString setting;
-
-  if (I18NUtils::ExtractStringSetting(settings, "skeleton", &setting)) {
-    // TODO(cira): Use ICU skeleton once
-    // http://bugs.icu-project.org/trac/ticket/8610 is resolved.
-    number_format = CreateFormatterFromSkeleton(icu_locale, setting, &status);
-  } else if (I18NUtils::ExtractStringSetting(settings, "pattern", &setting)) {
-    number_format =
-        new icu::DecimalFormat(setting, GetFormatSymbols(icu_locale), status);
-  } else if (I18NUtils::ExtractStringSetting(settings, "style", &setting)) {
-    if (setting == UNICODE_STRING_SIMPLE("currency")) {
-      number_format = static_cast<icu::DecimalFormat*>(
-          icu::NumberFormat::createCurrencyInstance(icu_locale, status));
-    } else if (setting == UNICODE_STRING_SIMPLE("percent")) {
-      number_format = static_cast<icu::DecimalFormat*>(
-          icu::NumberFormat::createPercentInstance(icu_locale, status));
-    } else if (setting == UNICODE_STRING_SIMPLE("scientific")) {
-      number_format = static_cast<icu::DecimalFormat*>(
-          icu::NumberFormat::createScientificInstance(icu_locale, status));
-    } else {
-      // Make it decimal in any other case.
-      number_format = static_cast<icu::DecimalFormat*>(
-          icu::NumberFormat::createInstance(icu_locale, status));
-    }
-  }
-
-  if (U_FAILURE(status)) {
-    delete number_format;
-    status = U_ZERO_ERROR;
-    number_format = static_cast<icu::DecimalFormat*>(
-        icu::NumberFormat::createInstance(icu_locale, status));
-  }
-
-  // Attach appropriate currency code to the formatter.
-  // It affects currency formatters only.
-  // Region is a full language identifier of the form 'und_' + region id.
-  v8::String::AsciiValue ascii_region(region);
-
-  UChar currency_code[NumberFormat::kCurrencyCodeLength];
-  if (GetCurrencyCode(icu_locale, *ascii_region, settings, currency_code)) {
-    number_format->setCurrency(currency_code, status);
-  }
-
-  return number_format;
-}
-
-// Generates ICU number format pattern from given skeleton.
-// TODO(cira): Remove once ICU includes equivalent method
-// (see http://bugs.icu-project.org/trac/ticket/8610).
-static icu::DecimalFormat* CreateFormatterFromSkeleton(
-    const icu::Locale& icu_locale,
-    const icu::UnicodeString& skeleton,
-    UErrorCode* status) {
-  icu::DecimalFormat skeleton_format(
-      skeleton, GetFormatSymbols(icu_locale), *status);
-
-  // Find out if the skeleton contains a currency or percent symbol and
-  // create a proper instance to tweak.
-  icu::DecimalFormat* base_format = NULL;
-
-  // UChar representation of U+00A4 currency symbol.
-  const UChar currency_symbol = 0xA4u;
-
-  int32_t index = skeleton.indexOf(currency_symbol);
-  if (index != -1) {
-    // Find out how many U+00A4 there are. There is at least one.
-    // Case of non-consecutive U+00A4 is taken care of in i18n.js.
-    int32_t end_index = skeleton.lastIndexOf(currency_symbol, index);
-
-#if (U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM <= 6)
-    icu::NumberFormat::EStyles style;
-    switch (end_index - index) {
-      case 0:
-        style = icu::NumberFormat::kCurrencyStyle;
-        break;
-      case 1:
-        style = icu::NumberFormat::kIsoCurrencyStyle;
-        break;
-      default:
-        style = icu::NumberFormat::kPluralCurrencyStyle;
-    }
-#else  // ICU version is 4.8 or above (we ignore versions below 4.0).
-    UNumberFormatStyle style;
-    switch (end_index - index) {
-      case 0:
-        style = UNUM_CURRENCY;
-        break;
-      case 1:
-        style = UNUM_CURRENCY_ISO;
-        break;
-      default:
-        style = UNUM_CURRENCY_PLURAL;
-    }
-#endif
-
-    base_format = static_cast<icu::DecimalFormat*>(
-        icu::NumberFormat::createInstance(icu_locale, style, *status));
-  } else if (skeleton.indexOf('%') != -1) {
-    base_format = static_cast<icu::DecimalFormat*>(
-        icu::NumberFormat::createPercentInstance(icu_locale, *status));
-  } else {
-    // TODO(cira): Handle scientific skeleton.
-    base_format = static_cast<icu::DecimalFormat*>(
-        icu::NumberFormat::createInstance(icu_locale, *status));
-  }
-
-  if (U_FAILURE(*status)) {
-    delete base_format;
-    return NULL;
-  }
-
-  // Copy important information from skeleton to the new formatter.
-  // TODO(cira): copy rounding information from skeleton?
-  base_format->setGroupingUsed(skeleton_format.isGroupingUsed());
-
-  base_format->setMinimumIntegerDigits(
-      skeleton_format.getMinimumIntegerDigits());
-
-  base_format->setMinimumFractionDigits(
-      skeleton_format.getMinimumFractionDigits());
-
-  base_format->setMaximumFractionDigits(
-      skeleton_format.getMaximumFractionDigits());
-
-  return base_format;
-}
-
-// Gets decimal symbols for a locale.
-static icu::DecimalFormatSymbols* GetFormatSymbols(
-    const icu::Locale& icu_locale) {
-  UErrorCode status = U_ZERO_ERROR;
-  icu::DecimalFormatSymbols* symbols =
-      new icu::DecimalFormatSymbols(icu_locale, status);
-
-  if (U_FAILURE(status)) {
-    delete symbols;
-    // Use symbols from default locale.
-    symbols = new icu::DecimalFormatSymbols(status);
-  }
-
-  return symbols;
-}
-
-// Gets currency ISO 4217 3-letter code.
-// Check the currencyCode setting first, then @currency=code, and finally
-// try to infer the currency code from the 'und_' + region id locale.
-// Returns false in case of error.
-static bool GetCurrencyCode(const icu::Locale& icu_locale,
-                            const char* const und_region_locale,
-                            v8::Handle<v8::Object> settings,
-                            UChar* code) {
-  UErrorCode status = U_ZERO_ERROR;
-
-  // If there is user specified currency code, use it.
-  icu::UnicodeString currency;
-  if (I18NUtils::ExtractStringSetting(settings, "currencyCode", &currency)) {
-    currency.extract(code, NumberFormat::kCurrencyCodeLength, status);
-    return true;
-  }
-
-  // If ICU locale has -cu- currency code use it.
-  char currency_code[NumberFormat::kCurrencyCodeLength];
-  int32_t length = icu_locale.getKeywordValue(
-      "currency", currency_code, NumberFormat::kCurrencyCodeLength, status);
-  if (length != 0) {
-    I18NUtils::AsciiToUChar(currency_code, length + 1,
-                            code, NumberFormat::kCurrencyCodeLength);
-    return true;
-  }
-
-  // Otherwise infer currency code from the region id.
-  ucurr_forLocale(
-      und_region_locale, code, NumberFormat::kCurrencyCodeLength, &status);
-
-  return !!U_SUCCESS(status);
-}
-
-// Throws a JavaScript exception.
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
-  // Returns undefined, and schedules an exception to be thrown.
-  return v8::ThrowException(v8::Exception::Error(
-      v8::String::New("NumberFormat method called on an object "
-                      "that is not a NumberFormat.")));
-}
-
-} }  // namespace v8::internal
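
Two details in the deleted code above are worth pulling out. First,
CreateFormatterFromSkeleton() selects the currency style from the run length
of U+00A4 in the pattern skeleton. Second, GetCurrencyCode() resolves the
currency from three sources in priority order: the explicit currencyCode
setting, the locale's @currency (-u-cu-) keyword, and finally the region via
ucurr_forLocale(). A compilable sketch of the style heuristic, using the
ICU 4.8+ enum names from the #else branch:

  #include "unicode/unistr.h"
  #include "unicode/unum.h"

  UNumberFormatStyle StyleForSkeleton(const icu::UnicodeString& skeleton) {
    const UChar kCurrencySymbol = 0xA4;  // U+00A4, the generic currency sign.
    int32_t first = skeleton.indexOf(kCurrencySymbol);
    if (first == -1) return UNUM_DECIMAL;
    // lastIndexOf(c, start) scans from 'first' onward, so a consecutive run
    // yields last - first == count - 1 (non-consecutive signs are rejected
    // earlier, in i18n.js).
    int32_t last = skeleton.lastIndexOf(kCurrencySymbol, first);
    switch (last - first) {
      case 0:  return UNUM_CURRENCY;         // "¤"   -> e.g. $1.00
      case 1:  return UNUM_CURRENCY_ISO;     // "¤¤"  -> e.g. USD 1.00
      default: return UNUM_CURRENCY_PLURAL;  // "¤¤¤" -> e.g. 1.00 US dollars
    }
  }
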
diff --git a/src/extensions/experimental/number-format.h b/src/extensions/experimental/number-format.h
deleted file mode 100644
index bcfaed6..0000000
--- a/src/extensions/experimental/number-format.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
-
-#include "include/v8.h"
-
-#include "unicode/uversion.h"
-
-namespace U_ICU_NAMESPACE {
-class DecimalFormat;
-}
-
-namespace v8 {
-namespace internal {
-
-class NumberFormat {
- public:
-  // 3-letter ISO 4217 currency code plus \0.
-  static const int kCurrencyCodeLength;
-
-  static v8::Handle<v8::Value> JSNumberFormat(const v8::Arguments& args);
-
-  // Helper methods for various bindings.
-
-  // Unpacks the number format object from the corresponding JS object.
-  static icu::DecimalFormat* UnpackNumberFormat(
-      v8::Handle<v8::Object> obj);
-
-  // Release memory we allocated for the NumberFormat once the JS object that
-  // holds the pointer gets garbage collected.
-  static void DeleteNumberFormat(v8::Persistent<v8::Value> object,
-                                 void* param);
-
-  // Formats number and returns corresponding string.
-  static v8::Handle<v8::Value> Format(const v8::Arguments& args);
-
- private:
-  NumberFormat();
-
-  static v8::Persistent<v8::FunctionTemplate> number_format_template_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
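
All of the deleted bindings follow the same wrap/unwrap lifecycle: the ICU
object is stashed in internal field 0 of a JS wrapper object, and a
weak-handle callback deletes it once the wrapper is collected. A condensed
sketch, restricted to the pre-3.x-style V8 API calls that appear in this diff:

  #include "include/v8.h"
  #include "unicode/decimfmt.h"

  static void DeleteWrapped(v8::Persistent<v8::Value> object, void* param) {
    v8::Persistent<v8::Object> obj = v8::Persistent<v8::Object>::Cast(object);
    // Free the hidden C++ object first, then drop the persistent handle.
    delete static_cast<icu::DecimalFormat*>(
        obj->GetPointerFromInternalField(0));
    obj.Dispose();
  }

  v8::Persistent<v8::Object> Wrap(v8::Local<v8::Object> local,
                                  icu::DecimalFormat* formatter) {
    v8::Persistent<v8::Object> wrapper = v8::Persistent<v8::Object>::New(local);
    wrapper->SetPointerInInternalField(0, formatter);
    wrapper.MakeWeak(NULL, DeleteWrapped);  // GC of the JS object frees it.
    return wrapper;
  }
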
diff --git a/src/extensions/gc-extension.cc b/src/extensions/gc-extension.cc
index 3740c27..573797e 100644
--- a/src/extensions/gc-extension.cc
+++ b/src/extensions/gc-extension.cc
@@ -40,19 +40,15 @@
 
 
 v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
-  bool compact = false;
-  // All allocation spaces other than NEW_SPACE have the same effect.
-  if (args.Length() >= 1 && args[0]->IsBoolean()) {
-    compact = args[0]->BooleanValue();
-  }
-  HEAP->CollectAllGarbage(compact);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
   return v8::Undefined();
 }
 
 
 void GCExtension::Register() {
-  static GCExtension gc_extension;
-  static v8::DeclareExtension gc_extension_declaration(&gc_extension);
+  static GCExtension* gc_extension = NULL;
+  if (gc_extension == NULL) gc_extension = new GCExtension();
+  static v8::DeclareExtension gc_extension_declaration(gc_extension);
 }
 
 } }  // namespace v8::internal
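
The Register() change above swaps a function-local static object for a leaked
heap allocation. A static object's destructor runs at process shutdown,
potentially after V8 itself has been torn down; an intentionally leaked
singleton has no such destruction-order hazard. The pattern in isolation:

  class Extension { };  // Stand-in for the real v8::Extension subclass.

  Extension* GetExtension() {
    static Extension* instance = NULL;  // A pointer: no destructor to run.
    if (instance == NULL) instance = new Extension();  // Leaked on purpose.
    return instance;
  }
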
diff --git a/src/factory.cc b/src/factory.cc
index 971f9f9..143099c 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -59,13 +59,13 @@
 }
 
 
-Handle<FixedArray> Factory::NewFixedDoubleArray(int size,
-                                                PretenureFlag pretenure) {
+Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
+                                                      PretenureFlag pretenure) {
   ASSERT(0 <= size);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
-      FixedArray);
+      FixedDoubleArray);
 }
 
 
@@ -95,6 +95,14 @@
 }
 
 
+Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) {
+  ASSERT(0 <= at_least_space_for);
+  CALL_HEAP_FUNCTION(isolate(),
+                     ObjectHashSet::Allocate(at_least_space_for),
+                     ObjectHashSet);
+}
+
+
 Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
   ASSERT(0 <= at_least_space_for);
   CALL_HEAP_FUNCTION(isolate(),
@@ -133,6 +141,20 @@
 }
 
 
+Handle<AccessorPair> Factory::NewAccessorPair() {
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->AllocateAccessorPair(),
+                     AccessorPair);
+}
+
+
+Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->AllocateTypeFeedbackInfo(),
+                     TypeFeedbackInfo);
+}
+
+
 // Symbols are created in the old generation (data space).
 Handle<String> Factory::LookupSymbol(Vector<const char> string) {
   CALL_HEAP_FUNCTION(isolate(),
@@ -244,7 +266,7 @@
 
 
 Handle<String> Factory::NewExternalStringFromAscii(
-    ExternalAsciiString::Resource* resource) {
+    const ExternalAsciiString::Resource* resource) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateExternalStringFromAscii(resource),
@@ -253,7 +275,7 @@
 
 
 Handle<String> Factory::NewExternalStringFromTwoByte(
-    ExternalTwoByteString::Resource* resource) {
+    const ExternalTwoByteString::Resource* resource) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
@@ -305,7 +327,7 @@
 Handle<Context> Factory::NewBlockContext(
     Handle<JSFunction> function,
     Handle<Context> previous,
-    Handle<SerializedScopeInfo> scope_info) {
+    Handle<ScopeInfo> scope_info) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateBlockContext(*function,
@@ -360,6 +382,8 @@
   script->set_context_data(heap->undefined_value());
   script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
   script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
+  script->set_compilation_state(
+      Smi::FromInt(Script::COMPILATION_STATE_INITIAL));
   script->set_wrapper(*wrapper);
   script->set_line_ends(heap->undefined_value());
   script->set_eval_from_shared(heap->undefined_value());
@@ -414,10 +438,12 @@
 }
 
 
-Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
+Handle<Map> Factory::NewMap(InstanceType type,
+                            int instance_size,
+                            ElementsKind elements_kind) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateMap(type, instance_size),
+      isolate()->heap()->AllocateMap(type, instance_size, elements_kind),
       Map);
 }
 
@@ -465,23 +491,12 @@
 }
 
 
-Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
-  CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map);
-}
-
-
-Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
-  CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map);
-}
-
-
 Handle<Map> Factory::GetElementsTransitionMap(
-    Handle<Map> src,
-    ElementsKind elements_kind,
-    bool safe_to_add_transition) {
-  CALL_HEAP_FUNCTION(isolate(),
-                     src->GetElementsTransitionMap(elements_kind,
-                                                   safe_to_add_transition),
+    Handle<JSObject> src,
+    ElementsKind elements_kind) {
+  Isolate* i = isolate();
+  CALL_HEAP_FUNCTION(i,
+                     src->GetElementsTransitionMap(i, elements_kind),
                      Map);
 }
 
@@ -491,6 +506,12 @@
 }
 
 
+Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
+    Handle<FixedDoubleArray> array) {
+  CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray);
+}
+
+
 Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
     Handle<SharedFunctionInfo> function_info,
     Handle<Map> function_map,
@@ -511,29 +532,30 @@
     PretenureFlag pretenure) {
   Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
       function_info,
-      function_info->strict_mode()
-          ? isolate()->strict_mode_function_map()
-          : isolate()->function_map(),
+      function_info->is_classic_mode()
+          ? isolate()->function_map()
+          : isolate()->strict_mode_function_map(),
       pretenure);
 
   result->set_context(*context);
-  int number_of_literals = function_info->num_literals();
-  Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
-  if (number_of_literals > 0) {
-    // Store the object, regexp and array functions in the literals
-    // array prefix.  These functions will be used when creating
-    // object, regexp and array literals in this function.
-    literals->set(JSFunction::kLiteralGlobalContextIndex,
-                  context->global_context());
+  if (!function_info->bound()) {
+    int number_of_literals = function_info->num_literals();
+    Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
+    if (number_of_literals > 0) {
+      // Store the object, regexp and array functions in the literals
+      // array prefix.  These functions will be used when creating
+      // object, regexp and array literals in this function.
+      literals->set(JSFunction::kLiteralGlobalContextIndex,
+                    context->global_context());
+    }
+    result->set_literals(*literals);
   }
-  result->set_literals(*literals);
-  result->set_next_function_link(isolate()->heap()->undefined_value());
-
   if (V8::UseCrankshaft() &&
       FLAG_always_opt &&
       result->is_compiled() &&
       !function_info->is_toplevel() &&
-      function_info->allows_lazy_compilation()) {
+      function_info->allows_lazy_compilation() &&
+      !function_info->optimization_disabled()) {
     result->MarkForLazyRecompilation();
   }
   return result;
@@ -548,17 +570,19 @@
 }
 
 
-Handle<Object> Factory::NewNumberFromInt(int value) {
+Handle<Object> Factory::NewNumberFromInt(int32_t value,
+                                         PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->NumberFromInt32(value), Object);
+      isolate()->heap()->NumberFromInt32(value, pretenure), Object);
 }
 
 
-Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
+Handle<Object> Factory::NewNumberFromUint(uint32_t value,
+                                          PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->NumberFromUint32(value), Object);
+      isolate()->heap()->NumberFromUint32(value, pretenure), Object);
 }
 
 
@@ -651,14 +675,16 @@
     return undefined_value();
   Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
   Handle<Object> type_obj = LookupAsciiSymbol(type);
-  Object** argv[2] = { type_obj.location(),
-                       Handle<Object>::cast(args).location() };
+  Handle<Object> argv[] = { type_obj, args };
 
   // Invoke the JavaScript factory method. If an exception is thrown while
   // running the factory method, use the exception as the result.
   bool caught_exception;
   Handle<Object> result = Execution::TryCall(fun,
-      isolate()->js_builtins_object(), 2, argv, &caught_exception);
+                                             isolate()->js_builtins_object(),
+                                             ARRAY_SIZE(argv),
+                                             argv,
+                                             &caught_exception);
   return result;
 }
 
@@ -674,13 +700,16 @@
   Handle<JSFunction> fun = Handle<JSFunction>(
       JSFunction::cast(isolate()->js_builtins_object()->
                        GetPropertyNoExceptionThrown(*constr)));
-  Object** argv[1] = { Handle<Object>::cast(message).location() };
+  Handle<Object> argv[] = { message };
 
   // Invoke the JavaScript factory method. If an exception is thrown while
   // running the factory method, use the exception as the result.
   bool caught_exception;
   Handle<Object> result = Execution::TryCall(fun,
-      isolate()->js_builtins_object(), 1, argv, &caught_exception);
+                                             isolate()->js_builtins_object(),
+                                             ARRAY_SIZE(argv),
+                                             argv,
+                                             &caught_exception);
   return result;
 }
 
@@ -693,7 +722,7 @@
   // Allocate the function
   Handle<JSFunction> function = NewFunction(name, the_hole_value());
 
-  // Setup the code pointer in both the shared function info and in
+  // Set up the code pointer in both the shared function info and in
   // the function itself.
   function->shared()->set_code(*code);
   function->set_code(*code);
@@ -724,7 +753,7 @@
   // Allocate the function.
   Handle<JSFunction> function = NewFunction(name, prototype);
 
-  // Setup the code pointer in both the shared function info and in
+  // Set up the code pointer in both the shared function info and in
   // the function itself.
   function->shared()->set_code(*code);
   function->set_code(*code);
@@ -732,7 +761,9 @@
   if (force_initial_map ||
       type != JS_OBJECT_TYPE ||
       instance_size != JSObject::kHeaderSize) {
-    Handle<Map> initial_map = NewMap(type, instance_size);
+    Handle<Map> initial_map = NewMap(type,
+                                     instance_size,
+                                     FAST_SMI_ONLY_ELEMENTS);
     function->set_initial_map(*initial_map);
     initial_map->set_constructor(*function);
   }
@@ -741,7 +772,10 @@
   // property that refers to the function.
   SetPrototypeProperty(function, prototype);
   // Currently safe because it is only invoked from Genesis.
-  SetLocalPropertyNoThrow(prototype, constructor_symbol(), function, DONT_ENUM);
+  CHECK_NOT_EMPTY_HANDLE(isolate(),
+                         JSObject::SetLocalPropertyIgnoreAttributes(
+                             prototype, constructor_symbol(),
+                             function, DONT_ENUM));
   return function;
 }
 
@@ -749,7 +783,7 @@
 Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
                                                         Handle<Code> code) {
   Handle<JSFunction> function = NewFunctionWithoutPrototype(name,
-                                                            kNonStrictMode);
+                                                            CLASSIC_MODE);
   function->shared()->set_code(*code);
   function->set_code(*code);
   ASSERT(!function->has_initial_map());
@@ -758,11 +792,11 @@
 }
 
 
-Handle<SerializedScopeInfo> Factory::NewSerializedScopeInfo(int length) {
+Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateSerializedScopeInfo(length),
-      SerializedScopeInfo);
+      isolate()->heap()->AllocateScopeInfo(length),
+      ScopeInfo);
 }
 
 
@@ -831,10 +865,13 @@
   // Number of descriptors added to the result so far.
   int descriptor_count = 0;
 
+  // Ensure that marking will not progress and change color of objects.
+  DescriptorArray::WhitenessWitness witness(*result);
+
   // Copy the descriptors from the array.
   for (int i = 0; i < array->number_of_descriptors(); i++) {
-    if (array->GetType(i) != NULL_DESCRIPTOR) {
-      result->CopyFrom(descriptor_count++, *array, i);
+    if (!array->IsNullDescriptor(i)) {
+      DescriptorArray::CopyFrom(result, descriptor_count++, array, i, witness);
     }
   }
 
@@ -854,7 +891,7 @@
     if (result->LinearSearch(*key, descriptor_count) ==
         DescriptorArray::kNotFound) {
       CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
-      result->Set(descriptor_count, &desc);
+      result->Set(descriptor_count, &desc, witness);
       descriptor_count++;
     } else {
       duplicates++;
@@ -868,13 +905,13 @@
     Handle<DescriptorArray> new_result =
         NewDescriptorArray(number_of_descriptors);
     for (int i = 0; i < number_of_descriptors; i++) {
-      new_result->CopyFrom(i, *result, i);
+      DescriptorArray::CopyFrom(new_result, i, result, i, witness);
     }
     result = new_result;
   }
 
   // Sort the result before returning.
-  result->Sort();
+  result->Sort(witness);
   return result;
 }
 
@@ -905,21 +942,62 @@
 
 
 Handle<JSArray> Factory::NewJSArray(int capacity,
+                                    ElementsKind elements_kind,
                                     PretenureFlag pretenure) {
-  Handle<JSObject> obj = NewJSObject(isolate()->array_function(), pretenure);
   CALL_HEAP_FUNCTION(isolate(),
-                     Handle<JSArray>::cast(obj)->Initialize(capacity),
+                     isolate()->heap()->AllocateJSArrayAndStorage(
+                         elements_kind,
+                         0,
+                         capacity,
+                         INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
+                         pretenure),
                      JSArray);
 }
 
 
-Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
+Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
+                                                ElementsKind elements_kind,
                                                 PretenureFlag pretenure) {
-  Handle<JSArray> result =
-      Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
-                                        pretenure));
-  result->SetContent(*elements);
-  return result;
+  CALL_HEAP_FUNCTION(
+      isolate(),
+      isolate()->heap()->AllocateJSArrayWithElements(*elements,
+                                                     elements_kind,
+                                                     pretenure),
+      JSArray);
+}
+
+
+void Factory::SetElementsCapacityAndLength(Handle<JSArray> array,
+                                           int capacity,
+                                           int length) {
+  ElementsAccessor* accessor = array->GetElementsAccessor();
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      accessor->SetCapacityAndLength(*array, capacity, length));
+}
+
+
+void Factory::SetContent(Handle<JSArray> array,
+                         Handle<FixedArrayBase> elements) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      array->SetContent(*elements));
+}
+
+
+void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      array->EnsureCanContainHeapObjectElements());
+}
+
+
+void Factory::EnsureCanContainElements(Handle<JSArray> array,
+                                       Handle<FixedArrayBase> elements,
+                                       EnsureElementsMode mode) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      array->EnsureCanContainElements(*elements, mode));
 }
 
 
@@ -948,11 +1026,18 @@
 }
 
 
+void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      object->SetIdentityHash(hash, ALLOW_CREATION));
+}
+
+
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
     Handle<String> name,
     int number_of_literals,
     Handle<Code> code,
-    Handle<SerializedScopeInfo> scope_info) {
+    Handle<ScopeInfo> scope_info) {
   Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
   shared->set_code(*code);
   shared->set_scope_info(*scope_info);
@@ -1000,6 +1085,12 @@
 }
 
 
+Handle<String> Factory::Uint32ToString(uint32_t value) {
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->Uint32ToString(value), String);
+}
+
+
 Handle<SeededNumberDictionary> Factory::DictionaryAtNumberPut(
     Handle<SeededNumberDictionary> dictionary,
     uint32_t key,
@@ -1042,11 +1133,11 @@
 
 Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
     Handle<String> name,
-    StrictModeFlag strict_mode) {
+    LanguageMode language_mode) {
   Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
-  Handle<Map> map = strict_mode == kStrictMode
-      ? isolate()->strict_mode_function_without_prototype_map()
-      : isolate()->function_without_prototype_map();
+  Handle<Map> map = (language_mode == CLASSIC_MODE)
+      ? isolate()->function_without_prototype_map()
+      : isolate()->strict_mode_function_without_prototype_map();
   CALL_HEAP_FUNCTION(isolate(),
                      isolate()->heap()->AllocateFunction(
                          *map,
@@ -1058,8 +1149,9 @@
 
 Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
     Handle<String> name,
-    StrictModeFlag strict_mode) {
-  Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name, strict_mode);
+    LanguageMode language_mode) {
+  Handle<JSFunction> fun =
+      NewFunctionWithoutPrototypeHelper(name, language_mode);
   fun->set_context(isolate()->context()->global_context());
   return fun;
 }
@@ -1319,4 +1411,20 @@
 }
 
 
+Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
+  Heap* h = isolate()->heap();
+  if (name->Equals(h->undefined_symbol())) return undefined_value();
+  if (name->Equals(h->nan_symbol())) return nan_value();
+  if (name->Equals(h->infinity_symbol())) return infinity_value();
+  return Handle<Object>::null();
+}
+
+
+Handle<Object> Factory::ToBoolean(bool value) {
+  return Handle<Object>(value
+                        ? isolate()->heap()->true_value()
+                        : isolate()->heap()->false_value());
+}
+
+
 } }  // namespace v8::internal
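
The Execution::TryCall sites above move from raw Object** location arrays to
stack arrays of Handle<Object> whose length is computed with ARRAY_SIZE
instead of a hand-written count. The core of that macro is plain sizeof
arithmetic; this is a simplified sketch (V8's actual definition also divides
by !(sizeof(a) % sizeof(*(a))) so that passing a pointer fails to compile):

  #include <cstdio>

  #define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a)))

  int main() {
    const char* argv[] = { "type_obj", "args" };
    printf("%d\n", static_cast<int>(ARRAY_SIZE(argv)));  // Prints 2.
    return 0;
  }
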
diff --git a/src/factory.h b/src/factory.h
index c9817fe..786d4a9 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,7 +50,7 @@
       PretenureFlag pretenure = NOT_TENURED);
 
   // Allocate a new uninitialized fixed double array.
-  Handle<FixedArray> NewFixedDoubleArray(
+  Handle<FixedDoubleArray> NewFixedDoubleArray(
       int size,
       PretenureFlag pretenure = NOT_TENURED);
 
@@ -62,6 +62,8 @@
 
   Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
 
+  Handle<ObjectHashSet> NewObjectHashSet(int at_least_space_for);
+
   Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
 
   Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
@@ -71,6 +73,10 @@
   Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
       int deopt_entry_count,
       PretenureFlag pretenure);
+  // Allocates a pre-tenured empty AccessorPair.
+  Handle<AccessorPair> NewAccessorPair();
+
+  Handle<TypeFeedbackInfo> NewTypeFeedbackInfo();
 
   Handle<String> LookupSymbol(Vector<const char> str);
   Handle<String> LookupSymbol(Handle<String> str);
@@ -149,9 +155,9 @@
   // not make sense to have a UTF-8 factory function for external strings,
   // because we cannot change the underlying buffer.
   Handle<String> NewExternalStringFromAscii(
-      ExternalAsciiString::Resource* resource);
+      const ExternalAsciiString::Resource* resource);
   Handle<String> NewExternalStringFromTwoByte(
-      ExternalTwoByteString::Resource* resource);
+      const ExternalTwoByteString::Resource* resource);
 
   // Create a global (but otherwise uninitialized) context.
   Handle<Context> NewGlobalContext();
@@ -174,7 +180,7 @@
   // Create a 'block' context.
   Handle<Context> NewBlockContext(Handle<JSFunction> function,
                                   Handle<Context> previous,
-                                  Handle<SerializedScopeInfo> scope_info);
+                                  Handle<ScopeInfo> scope_info);
 
   // Return the Symbol matching the passed in string.
   Handle<String> SymbolFromString(Handle<String> value);
@@ -207,7 +213,9 @@
   Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
       Handle<Object> value);
 
-  Handle<Map> NewMap(InstanceType type, int instance_size);
+  Handle<Map> NewMap(InstanceType type,
+                     int instance_size,
+                     ElementsKind elements_kind = FAST_ELEMENTS);
 
   Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
 
@@ -219,22 +227,22 @@
 
   Handle<Map> CopyMapDropTransitions(Handle<Map> map);
 
-  Handle<Map> GetFastElementsMap(Handle<Map> map);
-
-  Handle<Map> GetSlowElementsMap(Handle<Map> map);
-
-  Handle<Map> GetElementsTransitionMap(Handle<Map> map,
-                                       ElementsKind elements_kind,
-                                       bool safe_to_add_transition);
+  Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
+                                       ElementsKind elements_kind);
 
   Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
 
-  // Numbers (eg, literals) are pretenured by the parser.
+  Handle<FixedDoubleArray> CopyFixedDoubleArray(
+      Handle<FixedDoubleArray> array);
+
+  // Numbers (e.g. literals) are pretenured by the parser.
   Handle<Object> NewNumber(double value,
                            PretenureFlag pretenure = NOT_TENURED);
 
-  Handle<Object> NewNumberFromInt(int value);
-  Handle<Object> NewNumberFromUint(uint32_t value);
+  Handle<Object> NewNumberFromInt(int32_t value,
+                                  PretenureFlag pretenure = NOT_TENURED);
+  Handle<Object> NewNumberFromUint(uint32_t value,
+                                   PretenureFlag pretenure = NOT_TENURED);
 
   // These objects are used by the api to create env-independent data
   // structures in the heap.
@@ -256,24 +264,39 @@
 
   // JS arrays are pretenured when allocated by the parser.
   Handle<JSArray> NewJSArray(int capacity,
+                             ElementsKind elements_kind = FAST_ELEMENTS,
                              PretenureFlag pretenure = NOT_TENURED);
 
   Handle<JSArray> NewJSArrayWithElements(
-      Handle<FixedArray> elements,
+      Handle<FixedArrayBase> elements,
+      ElementsKind elements_kind = FAST_ELEMENTS,
       PretenureFlag pretenure = NOT_TENURED);
 
+  void SetElementsCapacityAndLength(Handle<JSArray> array,
+                                    int capacity,
+                                    int length);
+
+  void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
+
+  void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
+  void EnsureCanContainElements(Handle<JSArray> array,
+                                Handle<FixedArrayBase> elements,
+                                EnsureElementsMode mode);
+
   Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
 
   // Change the type of the argument into a JS object/function and reinitialize.
   void BecomeJSObject(Handle<JSReceiver> object);
   void BecomeJSFunction(Handle<JSReceiver> object);
 
+  void SetIdentityHash(Handle<JSObject> object, Object* hash);
+
   Handle<JSFunction> NewFunction(Handle<String> name,
                                  Handle<Object> prototype);
 
   Handle<JSFunction> NewFunctionWithoutPrototype(
       Handle<String> name,
-      StrictModeFlag strict_mode);
+      LanguageMode language_mode);
 
   Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
 
@@ -287,7 +310,7 @@
       Handle<Context> context,
       PretenureFlag pretenure = TENURED);
 
-  Handle<SerializedScopeInfo> NewSerializedScopeInfo(int length);
+  Handle<ScopeInfo> NewScopeInfo(int length);
 
   Handle<Code> NewCode(const CodeDesc& desc,
                        Code::Flags flags,
@@ -360,6 +383,7 @@
       PropertyAttributes attributes);
 
   Handle<String> NumberToString(Handle<Object> number);
+  Handle<String> Uint32ToString(uint32_t value);
 
   enum ApiInstanceType {
     JavaScriptObject,
@@ -404,7 +428,7 @@
       Handle<String> name,
       int number_of_literals,
       Handle<Code> code,
-      Handle<SerializedScopeInfo> scope_info);
+      Handle<ScopeInfo> scope_info);
   Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
 
   Handle<JSMessageObject> NewJSMessageObject(
@@ -451,6 +475,14 @@
                              JSRegExp::Flags flags,
                              int capture_count);
 
+  // Returns the value for a known global constant (a property of the global
+  // object which is neither configurable nor writable) like 'undefined'.
+  // Returns a null handle when the given name is unknown.
+  Handle<Object> GlobalConstantFor(Handle<String> name);
+
+  // Converts the given boolean condition to JavaScript boolean value.
+  Handle<Object> ToBoolean(bool value);
+
  private:
   Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
 
@@ -459,7 +491,7 @@
 
   Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
       Handle<String> name,
-      StrictModeFlag strict_mode);
+      LanguageMode language_mode);
 
   Handle<DescriptorArray> CopyAppendCallbackDescriptors(
       Handle<DescriptorArray> array,
diff --git a/src/fast-dtoa.h b/src/fast-dtoa.h
index 94c22ec..ef28557 100644
--- a/src/fast-dtoa.h
+++ b/src/fast-dtoa.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,7 +43,7 @@
 
 // FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
 // include the terminating '\0' character.
-static const int kFastDtoaMaximalLength = 17;
+const int kFastDtoaMaximalLength = 17;
 
 // Provides a decimal representation of v.
 // The result should be interpreted as buffer * 10^(point - length).
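
Context for the fast-dtoa.h hunk: at namespace scope in C++, a const object
already has internal linkage, so each translation unit keeps its own private
copy of the constant either way; the edit only drops a redundant specifier.

  // In a header, both forms give the constant internal linkage in C++:
  //   static const int kFastDtoaMaximalLength = 17;  // old, redundant static
  const int kFastDtoaMaximalLength = 17;              // new, same semantics
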
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index e8f6349..75697a8 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,6 +41,7 @@
   extern ctype FLAG_##nam;
 #define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
   static ctype const FLAG_##nam = def;
+#define DEFINE_implication(whenflag, thenflag)
 
 // We want to supply the actual storage and value for the flag variable in the
 // .cc file.  We only do this for writable flags.
@@ -48,6 +49,7 @@
 #define FLAG_FULL(ftype, ctype, nam, def, cmt) \
   ctype FLAG_##nam = def;
 #define FLAG_READONLY(ftype, ctype, nam, def, cmt)
+#define DEFINE_implication(whenflag, thenflag)
 
 // We need to define all of our default values so that the Flag structure can
 // access them by pointer.  These are just used internally inside of one .cc,
@@ -56,7 +58,7 @@
 #define FLAG_FULL(ftype, ctype, nam, def, cmt) \
   static ctype const FLAGDEFAULT_##nam = def;
 #define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-
+#define DEFINE_implication(whenflag, thenflag)
 
 // We want to write entries into our meta data table, for internal parsing and
 // printing / etc in the flag parser code.  We only do this for writable flags.
@@ -64,6 +66,14 @@
 #define FLAG_FULL(ftype, ctype, nam, def, cmt) \
   { Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false },
 #define FLAG_READONLY(ftype, ctype, nam, def, cmt)
+#define DEFINE_implication(whenflag, thenflag)
+
+// We produce the code to set a flag when it is implied by another flag.
+#elif defined(FLAG_MODE_DEFINE_IMPLICATIONS)
+#define FLAG_FULL(ftype, ctype, nam, def, cmt)
+#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
+#define DEFINE_implication(whenflag, thenflag) \
+  if (FLAG_##whenflag) FLAG_##thenflag = true;
 
 #else
 #error No mode supplied when including flags.defs
@@ -71,17 +81,41 @@
 
 #ifdef FLAG_MODE_DECLARE
 // Structure used to hold a collection of arguments to the JavaScript code.
+#define JSARGUMENTS_INIT {{}}
 struct JSArguments {
 public:
-  JSArguments();
-  JSArguments(int argc, const char** argv);
-  int argc() const;
-  const char** argv();
-  const char*& operator[](int idx);
-  JSArguments& operator=(JSArguments args);
+  inline int argc() const {
+    return static_cast<int>(storage_[0]);
+  }
+  inline const char** argv() const {
+    return reinterpret_cast<const char**>(storage_[1]);
+  }
+  inline const char*& operator[] (int idx) const {
+    return argv()[idx];
+  }
+  inline JSArguments& operator=(JSArguments args) {
+    set_argc(args.argc());
+    set_argv(args.argv());
+    return *this;
+  }
+  static JSArguments Create(int argc, const char** argv) {
+    JSArguments args;
+    args.set_argc(argc);
+    args.set_argv(argv);
+    return args;
+  }
 private:
-  int argc_;
-  const char** argv_;
+  void set_argc(int argc) {
+    storage_[0] = argc;
+  }
+  void set_argv(const char** argv) {
+    storage_[1] = reinterpret_cast<AtomicWord>(argv);
+  }
+public:
+  // Contains argc and argv. Unfortunately we have to store these two fields
+  // in a single array to avoid making the initialization macro (which would
+  // be "{ 0, NULL }") contain a comma.
+  AtomicWord storage_[2];
 };
 #endif
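
The JSArguments rewrite above is driven by a preprocessor constraint: the flag
default value is pasted through several FLAG_* macro expansions, and a brace
initializer with a top-level comma ("{ 0, NULL }") would be split into two
macro arguments. Packing argc and argv into one AtomicWord array makes the
default expressible as the comma-free "{{}}". The constraint in miniature:

  struct Args { long storage_[2]; };

  #define DEFINE_ARGS(name, default_value) Args name = default_value;

  DEFINE_ARGS(js_arguments, {{}})       // OK: no top-level comma.
  // DEFINE_ARGS(js_arguments, {0, 0})  // Error: the comma makes the macro
  //                                    // see three arguments, not two.
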
 
@@ -96,37 +130,49 @@
 //
 #define FLAG FLAG_FULL
 
-// Flags for experimental language features.
+// Flags for language modes and experimental language features.
+DEFINE_bool(use_strict, false, "enforce strict mode")
+
 DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
+DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
+DEFINE_bool(harmony_modules, false,
+            "enable harmony modules (implies block scoping)")
 DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
-DEFINE_bool(harmony_weakmaps, false, "enable harmony weak maps")
-DEFINE_bool(harmony_block_scoping, false, "enable harmony block scoping")
+DEFINE_bool(harmony_collections, false,
+            "enable harmony collections (sets, maps, and weak maps)")
+DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
+DEFINE_implication(harmony, harmony_scoping)
+DEFINE_implication(harmony, harmony_modules)
+DEFINE_implication(harmony, harmony_proxies)
+DEFINE_implication(harmony, harmony_collections)
+DEFINE_implication(harmony_modules, harmony_scoping)
 
 // Flags for experimental implementation features.
+DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
+DEFINE_bool(clever_optimizations,
+            true,
+            "Optimize object size, Array shift, DOM strings and string +")
+
+// Flags for data representation optimizations
 DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
-DEFINE_bool(string_slices, false, "use string slices")
+DEFINE_bool(string_slices, true, "use string slices")
 
 // Flags for Crankshaft.
-#ifdef V8_TARGET_ARCH_MIPS
-  DEFINE_bool(crankshaft, false, "use crankshaft")
-#else
-  DEFINE_bool(crankshaft, true, "use crankshaft")
-#endif
-DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
-DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
-DEFINE_bool(build_lithium, true, "use lithium chunk builder")
-DEFINE_bool(alloc_lithium, true, "use lithium register allocator")
-DEFINE_bool(use_lithium, true, "use lithium code generator")
+DEFINE_bool(crankshaft, true, "use crankshaft")
+DEFINE_string(hydrogen_filter, "", "optimization filter")
 DEFINE_bool(use_range, true, "use hydrogen range analysis")
 DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
 DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
 DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
 DEFINE_bool(use_inlining, true, "use function inlining")
 DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
-DEFINE_bool(eliminate_empty_blocks, true, "eliminate empty blocks")
 DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
+DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
+            true,
+            "crankshaft harvests type feedback from stub cache")
 DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
 DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
+DEFINE_string(trace_phase, "Z", "trace generated IR for specified phases")
 DEFINE_bool(trace_inlining, false, "trace inlining decisions")
 DEFINE_bool(trace_alloc, false, "trace register allocator")
 DEFINE_bool(trace_all_uses, false, "trace all use positions")
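
DEFINE_implication, introduced in this hunk, follows the header's existing
X-macro scheme: the same DEFINE_* lines are re-expanded under each FLAG_MODE_*
setting, and only FLAG_MODE_DEFINE_IMPLICATIONS turns them into executable
statements, so re-including the header inside a function body applies every
implication at flag-parsing time. A standalone reduction of the mechanism:

  #include <cstdio>

  bool FLAG_harmony = true;
  bool FLAG_harmony_scoping = false;

  #define DEFINE_implication(whenflag, thenflag) \
    if (FLAG_##whenflag) FLAG_##thenflag = true;

  void ComputeFlagImplications() {
    // In V8 this body is generated by re-including flag-definitions.h with
    // FLAG_MODE_DEFINE_IMPLICATIONS defined.
    DEFINE_implication(harmony, harmony_scoping)
  }

  int main() {
    ComputeFlagImplications();
    printf("%d\n", FLAG_harmony_scoping);  // Prints 1.
    return 0;
  }
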
@@ -146,13 +192,49 @@
 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
 DEFINE_int(stress_runs, 0, "number of stress runs")
 DEFINE_bool(optimize_closures, true, "optimize closures")
+DEFINE_bool(inline_construct, true, "inline constructor calls")
+DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
+DEFINE_int(loop_weight, 1, "loop weight for representation inference")
+
+DEFINE_bool(optimize_for_in, true,
+            "optimize functions containing for-in loops")
+
+// Experimental profiler changes.
+DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
+DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
+DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler")
+DEFINE_bool(self_optimization, false,
+            "primitive functions trigger their own optimization")
+DEFINE_bool(direct_self_opt, false,
+            "call recompile stub directly when self-optimizing")
+DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
+DEFINE_bool(count_based_interrupts, false,
+            "trigger profiler ticks based on counting instead of timing")
+DEFINE_bool(interrupt_at_exit, false,
+            "insert an interrupt check at function exit")
+DEFINE_bool(weighted_back_edges, false,
+            "weight back edges by jump distance for interrupt triggering")
+DEFINE_int(interrupt_budget, 5900,
+           "execution budget before interrupt is triggered")
+DEFINE_int(type_info_threshold, 15,
+           "percentage of ICs that must have type info to allow optimization")
+DEFINE_int(self_opt_count, 130, "call count before self-optimization")
+
+DEFINE_implication(experimental_profiler, watch_ic_patching)
+DEFINE_implication(experimental_profiler, self_optimization)
+// Not implying direct_self_opt here because it seems to be a bad idea.
+DEFINE_implication(experimental_profiler, retry_self_opt)
+DEFINE_implication(experimental_profiler, count_based_interrupts)
+DEFINE_implication(experimental_profiler, interrupt_at_exit)
+DEFINE_implication(experimental_profiler, weighted_back_edges)
+
+DEFINE_bool(trace_opt_verbose, false, "extra verbose compilation tracing")
+DEFINE_implication(trace_opt_verbose, trace_opt)
 
 // assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
 DEFINE_bool(debug_code, false,
             "generate extra code (assertions) for debugging")
 DEFINE_bool(code_comments, false, "emit comments in code disassembly")
-DEFINE_bool(peephole_optimization, true,
-            "perform peephole optimizations in assembly code")
 DEFINE_bool(enable_sse2, true,
             "enable use of SSE2 instructions if available")
 DEFINE_bool(enable_sse3, true,
@@ -180,6 +262,8 @@
 DEFINE_bool(expose_externalize_string, false,
             "expose externalize string extension")
 DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
+DEFINE_bool(builtins_in_stack_traces, false,
+            "show built-in functions in stack traces")
 DEFINE_bool(disable_native_files, false, "disable builtin natives files")
 
 // builtins-ia32.cc
@@ -200,10 +284,8 @@
 DEFINE_bool(trace_opt, false, "trace lazy optimization")
 DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics")
 DEFINE_bool(opt, true, "use adaptive optimizations")
-DEFINE_bool(opt_eagerly, false, "be more eager when adaptively optimizing")
 DEFINE_bool(always_opt, false, "always try to optimize functions")
 DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
-DEFINE_bool(deopt, true, "support deoptimization")
 DEFINE_bool(trace_deopt, false, "trace deoptimization")
 
 // compiler.cc
@@ -225,10 +307,13 @@
             "automatically set the debug break flag when debugger commands are "
             "in the queue")
 DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
+DEFINE_bool(break_on_abort, true, "always cause a debug break before aborting")
 
 // execution.cc
-DEFINE_int(stack_size, kPointerSize * 128,
-           "default size of stack region v8 is allowed to use (in KkBytes)")
+// Slightly less than 1MB on 64-bit, since Windows' default stack size for
+// the main execution thread is 1MB for both 32 and 64-bit.
+DEFINE_int(stack_size, kPointerSize * 123,
+           "default size of stack region v8 is allowed to use (in kBytes)")
 
 // frames.cc
 DEFINE_int(max_stack_trace_source_length, 300,
@@ -253,14 +338,23 @@
             "print cumulative GC statistics in name=value format on exit")
 DEFINE_bool(trace_gc_verbose, false,
             "print more details following each garbage collection")
+DEFINE_bool(trace_fragmentation, false,
+            "report fragmentation for old pointer and data pages")
 DEFINE_bool(collect_maps, true,
             "garbage collect maps from which no objects can be reached")
 DEFINE_bool(flush_code, true,
             "flush code that we expect not to use again before full gc")
+DEFINE_bool(incremental_marking, true, "use incremental marking")
+DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
+DEFINE_bool(trace_incremental_marking, false,
+            "trace progress of the incremental marking")
 
 // v8.cc
 DEFINE_bool(use_idle_notification, true,
             "Use idle notification to reduce memory footprint.")
+
+DEFINE_bool(send_idle_notification, false,
+            "Send idle notifcation between stress runs.")
 // ic.cc
 DEFINE_bool(use_ic, true, "use inline caching")
 
@@ -276,8 +370,12 @@
 
 // mark-compact.cc
 DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
+DEFINE_bool(lazy_sweeping, true,
+            "Use lazy sweeping for old pointer and data spaces")
 DEFINE_bool(never_compact, false,
             "Never perform compaction on full GC - testing only")
+DEFINE_bool(compact_code_space, true,
+            "Compact code space on full non-incremental collections")
 DEFINE_bool(cleanup_code_caches_at_gc, true,
             "Flush inline caches prior to mark compact collection and "
             "flush code caches in maps during mark compact cycle.")
@@ -285,31 +383,16 @@
            "Default seed for initializing random generator "
            "(0, the default, means to use system random).")
 
-DEFINE_bool(canonicalize_object_literal_maps, true,
-            "Canonicalize maps for object literals.")
-
-DEFINE_bool(use_big_map_space, true,
-            "Use big map space, but don't compact if it grew too big.")
-
-DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1,
-           "Maximum number of pages in map space which still allows to encode "
-           "forwarding pointers.  That's actually a constant, but it's useful "
-           "to control it with a flag for better testing.")
-
-// mksnapshot.cc
-DEFINE_bool(h, false, "print this message")
-DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
-
 // objects.cc
 DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
 
 // parser.cc
 DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
-DEFINE_bool(strict_mode, true, "allow strict mode directives")
 
 // simulator-arm.cc and simulator-mips.cc
 DEFINE_bool(trace_sim, false, "Trace simulator execution")
-DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
+DEFINE_bool(check_icache, false,
+            "Check icache flushes in ARM and MIPS simulator")
 DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
 DEFINE_int(sim_stack_alignment, 8,
            "Stack alingment in bytes in simulator (4 or 8, 8 is default)")
@@ -334,7 +417,6 @@
 
 // Regexp
 DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
-DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
 
 // Testing flags test/cctest/test-{flags,api,serialization}.cc
 DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
@@ -356,13 +438,17 @@
 
 DEFINE_bool(help, false, "Print usage message, including flags, on console")
 DEFINE_bool(dump_counters, false, "Dump counters on exit")
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
 DEFINE_bool(debugger, false, "Enable JavaScript debugger")
 DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
                                     "debugger agent in another process")
 DEFINE_bool(debugger_agent, false, "Enable debugger agent")
 DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
 DEFINE_string(map_counters, "", "Map counters to a file")
-DEFINE_args(js_arguments, JSArguments(),
+DEFINE_args(js_arguments, JSARGUMENTS_INIT,
             "Pass all remaining arguments to the script. Alias for \"--\".")
 
 #if defined(WEBOS__)
@@ -386,6 +472,15 @@
 DEFINE_string(gdbjit_dump_filter, "",
               "dump only objects containing this substring")
 
+// mark-compact.cc
+DEFINE_bool(force_marking_deque_overflows, false,
+            "force overflows of marking deque by reducing it's size "
+            "to 64 words")
+
+DEFINE_bool(stress_compaction, false,
+            "stress the GC compactor to flush out bugs (implies "
+            "--force_marking_deque_overflows)")
+
 //
 // Debug only flags
 //
@@ -408,11 +503,7 @@
             "pretty print source code for builtins")
 DEFINE_bool(print_ast, false, "print source AST")
 DEFINE_bool(print_builtin_ast, false, "print source AST for builtins")
-DEFINE_bool(print_json_ast, false, "print source AST as JSON")
-DEFINE_bool(print_builtin_json_ast, false,
-            "print source AST for builtins as JSON")
 DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
-DEFINE_bool(verify_stack_height, false, "verify stack height tracing on ia32")
 
 // compiler.cc
 DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
@@ -433,6 +524,11 @@
 // ic.cc
 DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
 
+// interface.cc
+DEFINE_bool(print_interfaces, false, "print interfaces")
+DEFINE_bool(print_interface_details, false, "print interface inference details")
+DEFINE_int(print_interface_depth, 5, "depth for printing interfaces")
+
 // objects.cc
 DEFINE_bool(trace_normalization,
             false,
@@ -441,10 +537,6 @@
 // runtime.cc
 DEFINE_bool(trace_lazy, false, "trace lazy compilation")
 
-// serialize.cc
-DEFINE_bool(debug_serialization, false,
-            "write debug information into the snapshot.")
-
 // spaces.cc
 DEFINE_bool(collect_heap_spill_statistics, false,
             "report heap spill statistics along with heap_stats "
@@ -509,8 +601,18 @@
 #define FLAG FLAG_READONLY
 #endif
 
+// elements.cc
+DEFINE_bool(trace_elements_transitions, false, "trace elements transitions")
+
 // code-stubs.cc
 DEFINE_bool(print_code_stubs, false, "print code stubs")
+DEFINE_bool(test_secondary_stub_cache,
+            false,
+            "test secondary stub cache by disabling the primary one")
+
+DEFINE_bool(test_primary_stub_cache,
+            false,
+            "test primary stub cache by disabling the secondary one")
 
 // codegen-ia32.cc / codegen-arm.cc
 DEFINE_bool(print_code, false, "print generated code")
@@ -520,6 +622,20 @@
 DEFINE_bool(print_code_verbose, false, "print more information for code")
 DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
 
+#ifdef ENABLE_DISASSEMBLER
+DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
+DEFINE_implication(print_all_code, print_code)
+DEFINE_implication(print_all_code, print_opt_code)
+DEFINE_implication(print_all_code, print_unopt_code)
+DEFINE_implication(print_all_code, print_code_verbose)
+DEFINE_implication(print_all_code, print_builtin_code)
+DEFINE_implication(print_all_code, print_code_stubs)
+DEFINE_implication(print_all_code, code_comments)
+#ifdef DEBUG
+DEFINE_implication(print_all_code, trace_codegen)
+#endif
+#endif
+
 // Cleanup...
 #undef FLAG_FULL
 #undef FLAG_READONLY
@@ -528,8 +644,10 @@
 #undef DEFINE_bool
 #undef DEFINE_int
 #undef DEFINE_string
+#undef DEFINE_implication
 
 #undef FLAG_MODE_DECLARE
 #undef FLAG_MODE_DEFINE
 #undef FLAG_MODE_DEFINE_DEFAULTS
 #undef FLAG_MODE_META
+#undef FLAG_MODE_DEFINE_IMPLICATIONS
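
For context on the DEFINE_implication lines earlier in this file: when
flag-definitions.h is re-included under FLAG_MODE_DEFINE_IMPLICATIONS (see the
flags.cc hunk below), each implication expands to a conditional assignment. A
minimal sketch of that expansion, assuming the mode-selection block that sits
near the top of flag-definitions.h, outside this hunk:

#define DEFINE_implication(whenflag, thenflag) \
  if (FLAG_##whenflag) FLAG_##thenflag = true;

// Inside FlagList::EnforceFlagImplications() the profiler block above
// therefore compiles to statements such as:
//   if (FLAG_experimental_profiler) FLAG_watch_ic_patching = true;
//   if (FLAG_trace_opt_verbose) FLAG_trace_opt = true;
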
diff --git a/src/flags.cc b/src/flags.cc
index ab5b57c..5720cbd 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -411,7 +411,7 @@
           for (int k = i; k < *argc; k++) {
             js_argv[k - start_pos] = StrDup(argv[k]);
           }
-          *flag->args_variable() = JSArguments(js_argc, js_argv);
+          *flag->args_variable() = JSArguments::Create(js_argc, js_argv);
           i = *argc;  // Consume all arguments
           break;
         }
@@ -534,18 +534,10 @@
   }
 }
 
-JSArguments::JSArguments()
-    : argc_(0), argv_(NULL) {}
-JSArguments::JSArguments(int argc, const char** argv)
-    : argc_(argc), argv_(argv) {}
-int JSArguments::argc() const { return argc_; }
-const char** JSArguments::argv() { return argv_; }
-const char*& JSArguments::operator[](int idx) { return argv_[idx]; }
-JSArguments& JSArguments::operator=(JSArguments args) {
-    argc_ = args.argc_;
-    argv_ = args.argv_;
-    return *this;
-}
 
+void FlagList::EnforceFlagImplications() {
+#define FLAG_MODE_DEFINE_IMPLICATIONS
+#include "flag-definitions.h"
+}
 
 } }  // namespace v8::internal
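
EnforceFlagImplications relies on the "expand the list twice" X-macro idiom:
the same header is included once to define flag storage and once more, under
FLAG_MODE_DEFINE_IMPLICATIONS, to emit the implication assignments. The idiom
reduced to a self-contained sketch (flag names invented for illustration):

#include <cstdio>

#define FLAG_LIST(V)      \
  V(experimental, false)  \
  V(tracing, false)

// First expansion: define storage for each flag.
#define DEFINE_STORAGE(name, def) static bool FLAG_##name = def;
FLAG_LIST(DEFINE_STORAGE)
#undef DEFINE_STORAGE

int main() {
  // Second expansion of the same list: print the current values.
#define PRINT_FLAG(name, def) std::printf(#name " = %d\n", FLAG_##name);
  FLAG_LIST(PRINT_FLAG)
#undef PRINT_FLAG
  return 0;
}
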
diff --git a/src/flags.h b/src/flags.h
index f9cbde0..f0b239b 100644
--- a/src/flags.h
+++ b/src/flags.h
@@ -72,6 +72,9 @@
 
   // Print help to stdout with flags, types, and default values.
   static void PrintHelp();
+
+  // Set flags as consequence of being implied by another flag.
+  static void EnforceFlagImplications();
 };
 
 } }  // namespace v8::internal
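
A sketch of how the new hook is meant to be called, assuming the existing
FlagList::SetFlagsFromCommandLine entry point; the intent is that implications
run once, after all command-line flags have been parsed:

// Hypothetical embedder-side usage (not part of this patch).
#include "flags.h"

int main(int argc, char** argv) {
  using v8::internal::FlagList;
  // Parse and strip --flag style options from argv.
  FlagList::SetFlagsFromCommandLine(&argc, argv, true);
  // Apply rules such as --experimental_profiler => --watch_ic_patching.
  FlagList::EnforceFlagImplications();
  // ... initialize and run the VM ...
  return 0;
}
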
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 7ba79bf..27a526c 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -68,7 +68,7 @@
 
 inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
   v->VisitPointer(context_address());
-  StackFrame::IteratePc(v, pc_address(), holder);
+  v->VisitPointer(code_address());
 }
 
 
@@ -77,9 +77,24 @@
 }
 
 
-inline StackHandler::State StackHandler::state() const {
+inline bool StackHandler::is_js_entry() const {
+  return kind() == JS_ENTRY;
+}
+
+
+inline bool StackHandler::is_catch() const {
+  return kind() == CATCH;
+}
+
+
+inline bool StackHandler::is_finally() const {
+  return kind() == FINALLY;
+}
+
+
+inline StackHandler::Kind StackHandler::kind() const {
   const int offset = StackHandlerConstants::kStateOffset;
-  return static_cast<State>(Memory::int_at(address() + offset));
+  return KindField::decode(Memory::unsigned_at(address() + offset));
 }
 
 
@@ -89,9 +104,9 @@
 }
 
 
-inline Address* StackHandler::pc_address() const {
-  const int offset = StackHandlerConstants::kPCOffset;
-  return reinterpret_cast<Address*>(address() + offset);
+inline Object** StackHandler::code_address() const {
+  const int offset = StackHandlerConstants::kCodeOffset;
+  return reinterpret_cast<Object**>(address() + offset);
 }
 
 
@@ -105,8 +120,33 @@
 }
 
 
+inline Code* StackFrame::LookupCode() const {
+  return GetContainingCode(isolate(), pc());
+}
+
+
 inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
-  return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code;
+  return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
+}
+
+
+inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
+    : StackFrame(iterator) {
+}
+
+
+inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
+    : EntryFrame(iterator) {
+}
+
+
+inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
+    : StackFrame(iterator) {
+}
+
+
+inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
+    : StackFrame(iterator) {
 }
 
 
@@ -151,7 +191,12 @@
 inline bool StandardFrame::IsConstructFrame(Address fp) {
   Object* marker =
       Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset);
-  return marker == Smi::FromInt(CONSTRUCT);
+  return marker == Smi::FromInt(StackFrame::CONSTRUCT);
+}
+
+
+inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
+    : StandardFrame(iterator) {
 }
 
 
@@ -190,6 +235,26 @@
 }
 
 
+inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+    : JavaScriptFrame(iterator) {
+}
+
+
+inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
+    StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
+}
+
+
+inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
+    : StandardFrame(iterator) {
+}
+
+
+inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
+    : InternalFrame(iterator) {
+}
+
+
 template<typename Iterator>
 inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
     Isolate* isolate)
@@ -197,6 +262,15 @@
   if (!done()) Advance();
 }
 
+
+template<typename Iterator>
+inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+    Isolate* isolate, ThreadLocalTop* top)
+    : iterator_(isolate, top) {
+  if (!done()) Advance();
+}
+
+
 template<typename Iterator>
 inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
   // TODO(1233797): The frame hierarchy needs to change. It's
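
The kind() accessor above decodes the low bits of the handler's state word
with a BitField helper (KindField is declared in the frames.h hunk below). A
standalone sketch of that encoding, using the kKindWidth = 2 layout from this
patch (helper names invented for the example):

#include <cassert>
#include <cstdint>

enum Kind { JS_ENTRY, CATCH, FINALLY };

static const int kKindWidth = 2;
static const uint32_t kKindMask = (1u << kKindWidth) - 1;

uint32_t Encode(Kind kind, uint32_t index) {
  // Low kKindWidth bits hold the kind, the remaining bits an index.
  return static_cast<uint32_t>(kind) | (index << kKindWidth);
}

Kind DecodeKind(uint32_t state) { return static_cast<Kind>(state & kKindMask); }
uint32_t DecodeIndex(uint32_t state) { return state >> kKindWidth; }

int main() {
  uint32_t state = Encode(CATCH, 42);
  assert(DecodeKind(state) == CATCH);
  assert(DecodeIndex(state) == 42);
  return 0;
}
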
diff --git a/src/frames.cc b/src/frames.cc
index 60b1aad..0571a81 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,6 +31,7 @@
 #include "deoptimizer.h"
 #include "frames-inl.h"
 #include "full-codegen.h"
+#include "lazy-instance.h"
 #include "mark-compact.h"
 #include "safepoint-table.h"
 #include "scopeinfo.h"
@@ -41,6 +42,22 @@
 namespace v8 {
 namespace internal {
 
+
+static ReturnAddressLocationResolver return_address_location_resolver = NULL;
+
+
+// Resolves pc_address through the resolution address function if one is set.
+static inline Address* ResolveReturnAddressLocation(Address* pc_address) {
+  if (return_address_location_resolver == NULL) {
+    return pc_address;
+  } else {
+    return reinterpret_cast<Address*>(
+        return_address_location_resolver(
+            reinterpret_cast<uintptr_t>(pc_address)));
+  }
+}
+
+
 // Iterator that supports traversing the stack handlers of a
 // particular frame. Needs to know the top of the handler chain.
 class StackHandlerIterator BASE_EMBEDDED {
@@ -155,8 +172,8 @@
     ASSERT(fp_ != NULL);
     state.fp = fp_;
     state.sp = sp_;
-    state.pc_address =
-        reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_));
+    state.pc_address = ResolveReturnAddressLocation(
+        reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_)));
     type = StackFrame::ComputeType(isolate(), &state);
   }
   if (SingletonFor(type) == NULL) return;
@@ -366,16 +383,17 @@
 
 
 Code* StackFrame::GetSafepointData(Isolate* isolate,
-                                   Address pc,
+                                   Address inner_pointer,
                                    SafepointEntry* safepoint_entry,
                                    unsigned* stack_slots) {
-  PcToCodeCache::PcToCodeCacheEntry* entry =
-      isolate->pc_to_code_cache()->GetCacheEntry(pc);
+  InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
+      isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
   if (!entry->safepoint_entry.is_valid()) {
-    entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
+    entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
     ASSERT(entry->safepoint_entry.is_valid());
   } else {
-    ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
+    ASSERT(entry->safepoint_entry.Equals(
+        entry->code->GetSafepointEntry(inner_pointer)));
   }
 
   // Fill in the results and return the code.
@@ -392,11 +410,16 @@
 }
 
 
+#ifdef DEBUG
+static bool GcSafeCodeContains(HeapObject* object, Address addr);
+#endif
+
+
 void StackFrame::IteratePc(ObjectVisitor* v,
                            Address* pc_address,
                            Code* holder) {
   Address pc = *pc_address;
-  ASSERT(holder->contains(pc));
+  ASSERT(GcSafeCodeContains(holder, pc));
   unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
   Object* code = holder;
   v->VisitPointer(&code);
@@ -408,6 +431,13 @@
 }
 
 
+void StackFrame::SetReturnAddressLocationResolver(
+    ReturnAddressLocationResolver resolver) {
+  ASSERT(return_address_location_resolver == NULL);
+  return_address_location_resolver = resolver;
+}
+
+
 StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
   ASSERT(state->fp != NULL);
   if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
@@ -479,11 +509,11 @@
 
 
 void ExitFrame::ComputeCallerState(State* state) const {
-  // Setup the caller state.
+  // Set up the caller state.
   state->sp = caller_sp();
   state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
-  state->pc_address
-      = reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset);
+  state->pc_address = ResolveReturnAddressLocation(
+      reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset));
 }
 
 
@@ -517,7 +547,8 @@
 void ExitFrame::FillState(Address fp, Address sp, State* state) {
   state->sp = sp;
   state->fp = fp;
-  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+  state->pc_address = ResolveReturnAddressLocation(
+      reinterpret_cast<Address*>(sp - 1 * kPointerSize));
 }
 
 
@@ -552,7 +583,8 @@
 void StandardFrame::ComputeCallerState(State* state) const {
   state->sp = caller_sp();
   state->fp = caller_fp();
-  state->pc_address = reinterpret_cast<Address*>(ComputePCAddress(fp()));
+  state->pc_address = ResolveReturnAddressLocation(
+      reinterpret_cast<Address*>(ComputePCAddress(fp())));
 }
 
 
@@ -705,6 +737,74 @@
 }
 
 
+void JavaScriptFrame::PrintTop(FILE* file,
+                               bool print_args,
+                               bool print_line_number) {
+  // Find the top-most JavaScript frame and print it.
+  HandleScope scope;
+  AssertNoAllocation no_allocation;
+  JavaScriptFrameIterator it;
+  while (!it.done()) {
+    if (it.frame()->is_java_script()) {
+      JavaScriptFrame* frame = it.frame();
+      if (frame->IsConstructor()) PrintF(file, "new ");
+      // function name
+      Object* maybe_fun = frame->function();
+      if (maybe_fun->IsJSFunction()) {
+        JSFunction* fun = JSFunction::cast(maybe_fun);
+        fun->PrintName();
+        Code* js_code = frame->unchecked_code();
+        Address pc = frame->pc();
+        int code_offset =
+            static_cast<int>(pc - js_code->instruction_start());
+        PrintF("+%d", code_offset);
+        SharedFunctionInfo* shared = fun->shared();
+        if (print_line_number) {
+          Code* code = Code::cast(
+              v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
+          int source_pos = code->SourcePosition(pc);
+          Object* maybe_script = shared->script();
+          if (maybe_script->IsScript()) {
+            Handle<Script> script(Script::cast(maybe_script));
+            int line = GetScriptLineNumberSafe(script, source_pos) + 1;
+            Object* script_name_raw = script->name();
+            if (script_name_raw->IsString()) {
+              String* script_name = String::cast(script->name());
+              SmartArrayPointer<char> c_script_name =
+                  script_name->ToCString(DISALLOW_NULLS,
+                                         ROBUST_STRING_TRAVERSAL);
+              PrintF(file, " at %s:%d", *c_script_name, line);
+            } else {
+              PrintF(file, "at <unknown>:%d", line);
+            }
+          } else {
+            PrintF(file, " at <unknown>:<unknown>");
+          }
+        }
+      } else {
+        PrintF("<unknown>");
+      }
+
+      if (print_args) {
+        // function arguments
+        // (we intentionally print only the arguments that were
+        // actually supplied, not every formal parameter)
+        PrintF(file, "(this=");
+        frame->receiver()->ShortPrint(file);
+        const int length = frame->ComputeParametersCount();
+        for (int i = 0; i < length; i++) {
+          PrintF(file, ", ");
+          frame->GetParameter(i)->ShortPrint(file);
+        }
+        PrintF(file, ")");
+      }
+      break;
+    }
+    it.Advance();
+  }
+}
+
+
 void FrameSummary::Print() {
   PrintF("receiver: ");
   receiver_->ShortPrint();
@@ -739,18 +839,16 @@
                          data->TranslationIndex(deopt_index)->value());
   Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
   ASSERT(opcode == Translation::BEGIN);
-  int frame_count = it.Next();
+  it.Next();  // Drop frame count.
+  int jsframe_count = it.Next();
 
   // We create the summary in reverse order because the frames
   // in the deoptimization translation are ordered bottom-to-top.
-  int i = frame_count;
+  bool is_constructor = IsConstructor();
+  int i = jsframe_count;
   while (i > 0) {
     opcode = static_cast<Translation::Opcode>(it.Next());
-    if (opcode == Translation::FRAME) {
-      // We don't inline constructor calls, so only the first, outermost
-      // frame can be a constructor frame in case of inlining.
-      bool is_constructor = (i == frame_count) && IsConstructor();
-
+    if (opcode == Translation::JS_FRAME) {
       i--;
       int ast_id = it.Next();
       int function_id = it.Next();
@@ -800,11 +898,18 @@
 
       FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
       frames->Add(summary);
+      is_constructor = false;
+    } else if (opcode == Translation::CONSTRUCT_STUB_FRAME) {
+      // The next encountered JS_FRAME will be marked as a constructor call.
+      it.Skip(Translation::NumberOfOperandsFor(opcode));
+      ASSERT(!is_constructor);
+      is_constructor = true;
     } else {
       // Skip over operands to advance to the next opcode.
       it.Skip(Translation::NumberOfOperandsFor(opcode));
     }
   }
+  ASSERT(!is_constructor);
 }
 
 
@@ -819,7 +924,8 @@
   // back to a slow search in this case to find the original optimized
   // code object.
   if (!code->contains(pc())) {
-    code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
+    code = isolate()->inner_pointer_to_code_cache()->
+        GcSafeFindCodeForInnerPointer(pc());
   }
   ASSERT(code != NULL);
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
@@ -843,8 +949,9 @@
   Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
   ASSERT(opcode == Translation::BEGIN);
   USE(opcode);
-  int frame_count = it.Next();
-  return frame_count;
+  it.Next();  // Drop frame count.
+  int jsframe_count = it.Next();
+  return jsframe_count;
 }
 
 
@@ -859,14 +966,15 @@
                          data->TranslationIndex(deopt_index)->value());
   Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
   ASSERT(opcode == Translation::BEGIN);
-  int frame_count = it.Next();
+  it.Next();  // Drop frame count.
+  int jsframe_count = it.Next();
 
   // We insert the frames in reverse order because the frames
   // in the deoptimization translation are ordered bottom-to-top.
-  while (frame_count > 0) {
+  while (jsframe_count > 0) {
     opcode = static_cast<Translation::Opcode>(it.Next());
-    if (opcode == Translation::FRAME) {
-      frame_count--;
+    if (opcode == Translation::JS_FRAME) {
+      jsframe_count--;
       it.Next();  // Skip ast id.
       int function_id = it.Next();
       it.Next();  // Skip height.
@@ -881,6 +989,11 @@
 }
 
 
+int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
+  return Smi::cast(GetExpression(0))->value();
+}
+
+
 Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
   return fp() + StandardFrameConstants::kCallerSPOffset;
 }
@@ -927,11 +1040,15 @@
   if (IsConstructor()) accumulator->Add("new ");
   accumulator->PrintFunction(function, receiver, &code);
 
-  Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
+  // Get scope information for nicer output, if possible. If code is NULL
+  // or doesn't contain scope info, the empty scope_info used here reports
+  // zero parameters, stack locals, context locals, stack slots, and
+  // context slots.
+  Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
 
   if (function->IsJSFunction()) {
     Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
-    scope_info = Handle<SerializedScopeInfo>(shared->scope_info());
+    scope_info = Handle<ScopeInfo>(shared->scope_info());
     Object* script_obj = shared->script();
     if (script_obj->IsScript()) {
       Handle<Script> script(Script::cast(script_obj));
@@ -956,11 +1073,6 @@
 
   accumulator->Add("(this=%o", receiver);
 
-  // Get scope information for nicer output, if possible. If code is
-  // NULL, or doesn't contain scope info, info will return 0 for the
-  // number of parameters, stack slots, or context slots.
-  ScopeInfo<PreallocatedStorage> info(*scope_info);
-
   // Print the parameters.
   int parameters_count = ComputeParametersCount();
   for (int i = 0; i < parameters_count; i++) {
@@ -968,8 +1080,8 @@
     // If we have a name for the parameter we print it. Nameless
     // parameters are either because we have more actual parameters
     // than formal parameters or because we have no scope information.
-    if (i < info.number_of_parameters()) {
-      accumulator->PrintName(*info.parameter_name(i));
+    if (i < scope_info->ParameterCount()) {
+      accumulator->PrintName(scope_info->ParameterName(i));
       accumulator->Add("=");
     }
     accumulator->Add("%o", GetParameter(i));
@@ -987,8 +1099,8 @@
   accumulator->Add(" {\n");
 
   // Compute the number of locals and expression stack elements.
-  int stack_locals_count = info.number_of_stack_slots();
-  int heap_locals_count = info.number_of_context_slots();
+  int stack_locals_count = scope_info->StackLocalCount();
+  int heap_locals_count = scope_info->ContextLocalCount();
   int expressions_count = ComputeExpressionsCount();
 
   // Print stack-allocated local variables.
@@ -997,7 +1109,7 @@
   }
   for (int i = 0; i < stack_locals_count; i++) {
     accumulator->Add("  var ");
-    accumulator->PrintName(*info.stack_slot_name(i));
+    accumulator->PrintName(scope_info->StackLocalName(i));
     accumulator->Add(" = ");
     if (i < expressions_count) {
       accumulator->Add("%o", GetExpression(i));
@@ -1014,16 +1126,16 @@
   }
 
   // Print heap-allocated local variables.
-  if (heap_locals_count > Context::MIN_CONTEXT_SLOTS) {
+  if (heap_locals_count > 0) {
     accumulator->Add("  // heap-allocated locals\n");
   }
-  for (int i = Context::MIN_CONTEXT_SLOTS; i < heap_locals_count; i++) {
+  for (int i = 0; i < heap_locals_count; i++) {
     accumulator->Add("  var ");
-    accumulator->PrintName(*info.context_slot_name(i));
+    accumulator->PrintName(scope_info->ContextLocalName(i));
     accumulator->Add(" = ");
     if (context != NULL) {
       if (i < context->length()) {
-        accumulator->Add("%o", context->get(i));
+        accumulator->Add("%o", context->get(Context::MIN_CONTEXT_SLOTS + i));
       } else {
         accumulator->Add(
             "// warning: missing context slot - inconsistent frame?");
@@ -1092,7 +1204,7 @@
   StackHandlerIterator it(this, top_handler());
   ASSERT(!it.done());
   StackHandler* handler = it.handler();
-  ASSERT(handler->is_entry());
+  ASSERT(handler->is_js_entry());
   handler->Iterate(v, LookupCode());
 #ifdef DEBUG
   // Make sure that the entry frame does not contain more than one
@@ -1155,53 +1267,90 @@
 // -------------------------------------------------------------------------
 
 
-Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
+static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
+  MapWord map_word = object->map_word();
+  return map_word.IsForwardingAddress() ?
+      map_word.ToForwardingAddress()->map() : map_word.ToMap();
+}
+
+
+static int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
+  return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
+}
+
+
+#ifdef DEBUG
+static bool GcSafeCodeContains(HeapObject* code, Address addr) {
+  Map* map = GcSafeMapOfCodeSpaceObject(code);
+  ASSERT(map == code->GetHeap()->code_map());
+  Address start = code->address();
+  Address end = code->address() + code->SizeFromMap(map);
+  return start <= addr && addr < end;
+}
+#endif
+
+
+Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
+                                                Address inner_pointer) {
   Code* code = reinterpret_cast<Code*>(object);
-  ASSERT(code != NULL && code->contains(pc));
+  ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer));
   return code;
 }
 
 
-Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
+Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
+    Address inner_pointer) {
   Heap* heap = isolate_->heap();
-  // Check if the pc points into a large object chunk.
-  LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc);
-  if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
+  // Check if the inner pointer points into a large object chunk.
+  LargePage* large_page = heap->lo_space()->FindPage(inner_pointer);
+  if (large_page != NULL) {
+    return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
+  }
 
-  // Iterate through the 8K page until we reach the end or find an
-  // object starting after the pc.
-  Page* page = Page::FromAddress(pc);
-  HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction());
-  HeapObject* previous = NULL;
+  // Iterate through the page until we reach the end or find an object starting
+  // after the inner pointer.
+  Page* page = Page::FromAddress(inner_pointer);
+
+  Address addr = page->skip_list()->StartFor(inner_pointer);
+
+  Address top = heap->code_space()->top();
+  Address limit = heap->code_space()->limit();
+
   while (true) {
-    HeapObject* next = iterator.next();
-    if (next == NULL || next->address() >= pc) {
-      return GcSafeCastToCode(previous, pc);
+    if (addr == top && addr != limit) {
+      addr = limit;
+      continue;
     }
-    previous = next;
+
+    HeapObject* obj = HeapObject::FromAddress(addr);
+    int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
+    Address next_addr = addr + obj_size;
+    if (next_addr > inner_pointer) return GcSafeCastToCode(obj, inner_pointer);
+    addr = next_addr;
   }
 }
 
 
-PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
+InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
+    InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
   isolate_->counters()->pc_to_code()->Increment();
-  ASSERT(IsPowerOf2(kPcToCodeCacheSize));
+  ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
   uint32_t hash = ComputeIntegerHash(
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)),
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)),
       v8::internal::kZeroHashSeed);
-  uint32_t index = hash & (kPcToCodeCacheSize - 1);
-  PcToCodeCacheEntry* entry = cache(index);
-  if (entry->pc == pc) {
+  uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
+  InnerPointerToCodeCacheEntry* entry = cache(index);
+  if (entry->inner_pointer == inner_pointer) {
     isolate_->counters()->pc_to_code_cached()->Increment();
-    ASSERT(entry->code == GcSafeFindCodeForPc(pc));
+    ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
   } else {
     // Because this code may be interrupted by a profiling signal that
-    // also queries the cache, we cannot update pc before the code has
-    // been set. Otherwise, we risk trying to use a cache entry before
+    // also queries the cache, we cannot update inner_pointer before the code
+    // has been set. Otherwise, we risk trying to use a cache entry before
     // the code has been computed.
-    entry->code = GcSafeFindCodeForPc(pc);
+    entry->code = GcSafeFindCodeForInnerPointer(inner_pointer);
     entry->safepoint_entry.Reset();
-    entry->pc = pc;
+    entry->inner_pointer = inner_pointer;
   }
   return entry;
 }
@@ -1232,12 +1381,12 @@
 };
 
 
-static const JSCallerSavedCodeData kCallerSavedCodeData;
-
+static LazyInstance<JSCallerSavedCodeData>::type caller_saved_code_data =
+    LAZY_INSTANCE_INITIALIZER;
 
 int JSCallerSavedCode(int n) {
   ASSERT(0 <= n && n < kNumJSCallerSaved);
-  return kCallerSavedCodeData.reg_code[n];
+  return caller_saved_code_data.Get().reg_code[n];
 }
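
The LazyInstance change above defers construction of caller_saved_code_data to
its first Get() instead of running a static initializer at startup. The same
pattern, reduced to a self-contained sketch with a function-local static (an
analogy only, not V8's actual LazyInstance implementation):

#include <cstdio>

struct RegCodeData {
  int reg_code[4];
  RegCodeData() {
    std::puts("constructed on first use");
    for (int i = 0; i < 4; i++) reg_code[i] = i;
  }
};

const RegCodeData& GetRegCodeData() {
  static RegCodeData data;  // constructed once, on first call
  return data;
}

int main() {
  std::printf("reg 2 -> %d\n", GetRegCodeData().reg_code[2]);
  return 0;
}
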
 
 
diff --git a/src/frames.h b/src/frames.h
index fed11c4..9071555 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,47 +49,54 @@
 class ThreadLocalTop;
 class Isolate;
 
-class PcToCodeCache {
+class InnerPointerToCodeCache {
  public:
-  struct PcToCodeCacheEntry {
-    Address pc;
+  struct InnerPointerToCodeCacheEntry {
+    Address inner_pointer;
     Code* code;
     SafepointEntry safepoint_entry;
   };
 
-  explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) {
+  explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
     Flush();
   }
 
-  Code* GcSafeFindCodeForPc(Address pc);
-  Code* GcSafeCastToCode(HeapObject* object, Address pc);
+  Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
+  Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer);
 
   void Flush() {
     memset(&cache_[0], 0, sizeof(cache_));
   }
 
-  PcToCodeCacheEntry* GetCacheEntry(Address pc);
+  InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
 
  private:
-  PcToCodeCacheEntry* cache(int index) { return &cache_[index]; }
+  InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
 
   Isolate* isolate_;
 
-  static const int kPcToCodeCacheSize = 1024;
-  PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
+  static const int kInnerPointerToCodeCacheSize = 1024;
+  InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
 
-  DISALLOW_COPY_AND_ASSIGN(PcToCodeCache);
+  DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache);
 };
 
 
 class StackHandler BASE_EMBEDDED {
  public:
-  enum State {
-    ENTRY,
-    TRY_CATCH,
-    TRY_FINALLY
+  enum Kind {
+    JS_ENTRY,
+    CATCH,
+    FINALLY,
+    LAST_KIND = FINALLY
   };
 
+  static const int kKindWidth = 2;
+  STATIC_ASSERT(LAST_KIND < (1 << kKindWidth));
+  static const int kIndexWidth = 32 - kKindWidth;
+  class KindField: public BitField<StackHandler::Kind, 0, kKindWidth> {};
+  class IndexField: public BitField<unsigned, kKindWidth, kIndexWidth> {};
+
   // Get the address of this stack handler.
   inline Address address() const;
 
@@ -106,16 +113,16 @@
   static inline StackHandler* FromAddress(Address address);
 
   // Testers
-  bool is_entry() { return state() == ENTRY; }
-  bool is_try_catch() { return state() == TRY_CATCH; }
-  bool is_try_finally() { return state() == TRY_FINALLY; }
+  inline bool is_js_entry() const;
+  inline bool is_catch() const;
+  inline bool is_finally() const;
 
  private:
   // Accessors.
-  inline State state() const;
+  inline Kind kind() const;
 
   inline Object** context_address() const;
-  inline Address* pc_address() const;
+  inline Object** code_address() const;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
 };
@@ -139,7 +146,10 @@
   enum Type {
     NONE = 0,
     STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
-    NUMBER_OF_TYPES
+    NUMBER_OF_TYPES,
+    // Used by FrameScope to indicate that the stack frame is constructed
+    // manually and the FrameScope does not need to emit code.
+    MANUAL
   };
 #undef DECLARE_TYPE
 
@@ -215,9 +225,7 @@
   virtual Code* unchecked_code() const = 0;
 
   // Get the code associated with this frame.
-  Code* LookupCode() const {
-    return GetContainingCode(isolate(), pc());
-  }
+  inline Code* LookupCode() const;
 
   // Get the code object that contains the given pc.
   static inline Code* GetContainingCode(Isolate* isolate, Address pc);
@@ -233,6 +241,11 @@
   virtual void Iterate(ObjectVisitor* v) const = 0;
   static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
 
+  // Sets a callback function for return-address rewriting profilers
+  // to resolve the location of a return address to the location of the
+  // profiler's stashed return address.
+  static void SetReturnAddressLocationResolver(
+      ReturnAddressLocationResolver resolver);
 
   // Printing support.
   enum PrintMode { OVERVIEW, DETAILS };
@@ -299,7 +312,7 @@
   virtual void SetCallerFp(Address caller_fp);
 
  protected:
-  explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
+  inline explicit EntryFrame(StackFrameIterator* iterator);
 
   // The caller stack pointer for entry frames is always zero. The
   // real information about the caller frame is available through the
@@ -326,8 +339,7 @@
   }
 
  protected:
-  explicit EntryConstructFrame(StackFrameIterator* iterator)
-      : EntryFrame(iterator) { }
+  inline explicit EntryConstructFrame(StackFrameIterator* iterator);
 
  private:
   friend class StackFrameIterator;
@@ -361,7 +373,7 @@
   static void FillState(Address fp, Address sp, State* state);
 
  protected:
-  explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
+  inline explicit ExitFrame(StackFrameIterator* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
@@ -394,8 +406,7 @@
   }
 
  protected:
-  explicit StandardFrame(StackFrameIterator* iterator)
-      : StackFrame(iterator) { }
+  inline explicit StandardFrame(StackFrameIterator* iterator);
 
   virtual void ComputeCallerState(State* state) const;
 
@@ -513,9 +524,10 @@
     return static_cast<JavaScriptFrame*>(frame);
   }
 
+  static void PrintTop(FILE* file, bool print_args, bool print_line_number);
+
  protected:
-  explicit JavaScriptFrame(StackFrameIterator* iterator)
-      : StandardFrame(iterator) { }
+  inline explicit JavaScriptFrame(StackFrameIterator* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
@@ -552,8 +564,7 @@
   DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
 
  protected:
-  explicit OptimizedFrame(StackFrameIterator* iterator)
-      : JavaScriptFrame(iterator) { }
+  inline explicit OptimizedFrame(StackFrameIterator* iterator);
 
  private:
   friend class StackFrameIterator;
@@ -581,12 +592,9 @@
                      int index) const;
 
  protected:
-  explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
-      : JavaScriptFrame(iterator) { }
+  inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator);
 
-  virtual int GetNumberOfIncomingArguments() const {
-    return Smi::cast(GetExpression(0))->value();
-  }
+  virtual int GetNumberOfIncomingArguments() const;
 
   virtual Address GetCallerStackPointer() const;
 
@@ -611,8 +619,7 @@
   }
 
  protected:
-  explicit InternalFrame(StackFrameIterator* iterator)
-      : StandardFrame(iterator) { }
+  inline explicit InternalFrame(StackFrameIterator* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
@@ -633,8 +640,7 @@
   }
 
  protected:
-  explicit ConstructFrame(StackFrameIterator* iterator)
-      : InternalFrame(iterator) { }
+  inline explicit ConstructFrame(StackFrameIterator* iterator);
 
  private:
   friend class StackFrameIterator;
@@ -710,20 +716,26 @@
 
   inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
 
+  inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top);
+
   // Skip frames until the frame with the given id is reached.
   explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
 
   inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
 
-  JavaScriptFrameIteratorTemp(Address fp, Address sp,
-                              Address low_bound, Address high_bound) :
+  JavaScriptFrameIteratorTemp(Address fp,
+                              Address sp,
+                              Address low_bound,
+                              Address high_bound) :
       iterator_(fp, sp, low_bound, high_bound) {
     if (!done()) Advance();
   }
 
   JavaScriptFrameIteratorTemp(Isolate* isolate,
-                              Address fp, Address sp,
-                              Address low_bound, Address high_bound) :
+                              Address fp,
+                              Address sp,
+                              Address low_bound,
+                              Address high_bound) :
       iterator_(isolate, fp, sp, low_bound, high_bound) {
     if (!done()) Advance();
   }
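
On the InnerPointerToCodeCache declared above: GetCacheEntry (in the frames.cc
hunk earlier) hashes the inner pointer and indexes with hash & (size - 1),
which only works because kInnerPointerToCodeCacheSize is a power of two. Each
key maps to exactly one slot that is simply overwritten on a miss, i.e. a
direct-mapped cache. A reduced sketch (hash constant and payload invented for
the example):

#include <cassert>
#include <cstdint>

static const int kCacheSize = 1024;  // must be a power of two

struct Entry {
  uintptr_t key;
  int value;
};

static Entry cache[kCacheSize];

int SlowLookup(uintptr_t key) { return static_cast<int>(key % 97); }

int Lookup(uintptr_t key) {
  uint32_t hash = static_cast<uint32_t>(key) * 2654435761u;  // Knuth's hash
  Entry* entry = &cache[hash & (kCacheSize - 1)];
  if (entry->key != key) {  // miss: recompute and overwrite this one slot
    entry->value = SlowLookup(key);
    entry->key = key;
  }
  return entry->value;
}

int main() {
  assert((kCacheSize & (kCacheSize - 1)) == 0);  // IsPowerOf2 analogue
  assert(Lookup(0x12345678) == Lookup(0x12345678));  // second call hits
  return 0;
}
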
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 8073874..d963979 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -51,7 +51,37 @@
 }
 
 
-void BreakableStatementChecker::VisitDeclaration(Declaration* decl) {
+void BreakableStatementChecker::VisitVariableDeclaration(
+    VariableDeclaration* decl) {
+}
+
+void BreakableStatementChecker::VisitFunctionDeclaration(
+    FunctionDeclaration* decl) {
+}
+
+void BreakableStatementChecker::VisitModuleDeclaration(
+    ModuleDeclaration* decl) {
+}
+
+void BreakableStatementChecker::VisitImportDeclaration(
+    ImportDeclaration* decl) {
+}
+
+void BreakableStatementChecker::VisitExportDeclaration(
+    ExportDeclaration* decl) {
+}
+
+
+void BreakableStatementChecker::VisitModuleLiteral(ModuleLiteral* module) {
+}
+
+void BreakableStatementChecker::VisitModuleVariable(ModuleVariable* module) {
+}
+
+void BreakableStatementChecker::VisitModulePath(ModulePath* module) {
+}
+
+void BreakableStatementChecker::VisitModuleUrl(ModuleUrl* module) {
 }
 
 
@@ -244,11 +274,6 @@
 }
 
 
-void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
-  Visit(expr->expression());
-}
-
-
 void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
   Visit(expr->left());
   Visit(expr->right());
@@ -278,8 +303,8 @@
   masm.positions_recorder()->StartGDBJITLineInfoRecording();
 #endif
 
-  FullCodeGenerator cgen(&masm);
-  cgen.Generate(info);
+  FullCodeGenerator cgen(&masm, info);
+  cgen.Generate();
   if (cgen.HasStackOverflow()) {
     ASSERT(!isolate->has_pending_exception());
     return false;
@@ -288,15 +313,26 @@
 
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
   Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
-  code->set_optimizable(info->IsOptimizable());
+  code->set_optimizable(info->IsOptimizable() &&
+                        !info->function()->flags()->Contains(kDontOptimize));
+  code->set_self_optimization_header(cgen.has_self_optimization_header_);
   cgen.PopulateDeoptimizationData(code);
+  cgen.PopulateTypeFeedbackInfo(code);
+  cgen.PopulateTypeFeedbackCells(code);
   code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
+  code->set_handler_table(*cgen.handler_table());
+#ifdef ENABLE_DEBUGGER_SUPPORT
   code->set_has_debug_break_slots(
       info->isolate()->debugger()->IsDebuggerActive());
+  code->set_compiled_optimizable(info->IsOptimizable());
+#endif  // ENABLE_DEBUGGER_SUPPORT
   code->set_allow_osr_at_loop_nesting_level(0);
   code->set_stack_check_table_offset(table_offset);
   CodeGenerator::PrintCode(code, info);
-  info->SetCode(code);  // may be an empty handle.
+  info->SetCode(code);  // May be an empty handle.
+  if (!code.is_null()) {
+    isolate->runtime_profiler()->NotifyCodeGenerated(code->instruction_size());
+  }
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit && !code.is_null()) {
     GDBJITLineInfo* lineinfo =
@@ -330,8 +366,7 @@
   ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
   if (!info_->HasDeoptimizationSupport()) return;
   int length = bailout_entries_.length();
-  Handle<DeoptimizationOutputData> data =
-      isolate()->factory()->
+  Handle<DeoptimizationOutputData> data = isolate()->factory()->
       NewDeoptimizationOutputData(length, TENURED);
   for (int i = 0; i < length; i++) {
     data->SetAstId(i, Smi::FromInt(bailout_entries_[i].id));
@@ -341,6 +376,30 @@
 }
 
 
+void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
+  Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo();
+  info->set_ic_total_count(ic_total_count_);
+  ASSERT(!isolate()->heap()->InNewSpace(*info));
+  code->set_type_feedback_info(*info);
+}
+
+
+void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
+  if (type_feedback_cells_.is_empty()) return;
+  int length = type_feedback_cells_.length();
+  int array_size = TypeFeedbackCells::LengthOfFixedArray(length);
+  Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
+      isolate()->factory()->NewFixedArray(array_size, TENURED));
+  for (int i = 0; i < length; i++) {
+    cache->SetAstId(i, Smi::FromInt(type_feedback_cells_[i].ast_id));
+    cache->SetCell(i, *type_feedback_cells_[i].cell);
+  }
+  TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
+      *cache);
+}
+
+
+
 void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
   PrepareForBailoutForId(node->id(), state);
 }
@@ -363,20 +422,23 @@
 }
 
 
-void FullCodeGenerator::PrepareForBailoutForId(int id, State state) {
+void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
   // There's no need to prepare this code for bailouts from already optimized
   // code or code that can't be optimized.
-  if (!FLAG_deopt || !info_->HasDeoptimizationSupport()) return;
+  if (!info_->HasDeoptimizationSupport()) return;
   unsigned pc_and_state =
       StateField::encode(state) | PcField::encode(masm_->pc_offset());
+  ASSERT(Smi::IsValid(pc_and_state));
   BailoutEntry entry = { id, pc_and_state };
 #ifdef DEBUG
-  // Assert that we don't have multiple bailout entries for the same node.
-  for (int i = 0; i < bailout_entries_.length(); i++) {
-    if (bailout_entries_.at(i).id == entry.id) {
-      AstPrinter printer;
-      PrintF("%s", printer.PrintProgram(info_->function()));
-      UNREACHABLE();
+  if (FLAG_enable_slow_asserts) {
+    // Assert that we don't have multiple bailout entries for the same node.
+    for (int i = 0; i < bailout_entries_.length(); i++) {
+      if (bailout_entries_.at(i).id == entry.id) {
+        AstPrinter printer;
+        PrintF("%s", printer.PrintProgram(info_->function()));
+        UNREACHABLE();
+      }
     }
   }
 #endif  // DEBUG
@@ -384,10 +446,18 @@
 }
 
 
-void FullCodeGenerator::RecordStackCheck(int ast_id) {
+void FullCodeGenerator::RecordTypeFeedbackCell(
+    unsigned id, Handle<JSGlobalPropertyCell> cell) {
+  TypeFeedbackCellEntry entry = { id, cell };
+  type_feedback_cells_.Add(entry);
+}
+
+
+void FullCodeGenerator::RecordStackCheck(unsigned ast_id) {
   // The pc offset does not need to be encoded and packed together with a
   // state.
-  BailoutEntry entry = { ast_id, masm_->pc_offset() };
+  ASSERT(masm_->pc_offset() > 0);
+  BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
   stack_checks_.Add(entry);
 }
 
@@ -412,27 +482,24 @@
 
 void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
   __ push(reg);
-  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Register reg) const {
   // For simplicity we always test the accumulator register.
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
 
 void FullCodeGenerator::EffectContext::PlugTOS() const {
   __ Drop(1);
-  codegen()->decrement_stack_height();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
   __ pop(result_register());
-  codegen()->decrement_stack_height();
 }
 
 
@@ -443,8 +510,7 @@
 void FullCodeGenerator::TestContext::PlugTOS() const {
   // For simplicity we always test the accumulator register.
   __ pop(result_register());
-  codegen()->decrement_stack_height();
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -505,33 +571,33 @@
 
 void FullCodeGenerator::VisitDeclarations(
     ZoneList<Declaration*>* declarations) {
-  int length = declarations->length();
-  int global_count = 0;
-  for (int i = 0; i < length; i++) {
-    Declaration* decl = declarations->at(i);
-    EmitDeclaration(decl->proxy(), decl->mode(), decl->fun(), &global_count);
-  }
+  int save_global_count = global_count_;
+  global_count_ = 0;
+
+  AstVisitor::VisitDeclarations(declarations);
 
   // Batch declare global functions and variables.
-  if (global_count > 0) {
+  if (global_count_ > 0) {
     Handle<FixedArray> array =
-        isolate()->factory()->NewFixedArray(2 * global_count, TENURED);
+       isolate()->factory()->NewFixedArray(2 * global_count_, TENURED);
+    int length = declarations->length();
     for (int j = 0, i = 0; i < length; i++) {
       Declaration* decl = declarations->at(i);
       Variable* var = decl->proxy()->var();
 
       if (var->IsUnallocated()) {
         array->set(j++, *(var->name()));
-        if (decl->fun() == NULL) {
-          if (var->mode() == Variable::CONST) {
-            // In case this is const property use the hole.
+        FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
+        if (fun_decl == NULL) {
+          if (var->binding_needs_init()) {
+            // In case this binding needs initialization use the hole.
             array->set_the_hole(j++);
           } else {
             array->set_undefined(j++);
           }
         } else {
           Handle<SharedFunctionInfo> function =
-              Compiler::BuildFunctionInfo(decl->fun(), script());
+              Compiler::BuildFunctionInfo(fun_decl->fun(), script());
           // Check for stack-overflow exception.
           if (function.is_null()) {
             SetStackOverflow();
@@ -545,15 +611,61 @@
     // declaration the global functions and variables.
     DeclareGlobals(array);
   }
+
+  global_count_ = save_global_count;
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
+  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
+}
+
+
+void FullCodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
+  EmitDeclaration(decl->proxy(), decl->mode(), decl->fun());
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* decl) {
+  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
+  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
+  // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
+  // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
+  // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::VisitModulePath(ModulePath* module) {
+  // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::VisitModuleUrl(ModuleUrl* decl) {
+  // TODO(rossberg)
 }
 
 
 int FullCodeGenerator::DeclareGlobalsFlags() {
-  int flags = 0;
-  if (is_eval()) flags |= kDeclareGlobalsEvalFlag;
-  if (is_strict_mode()) flags |= kDeclareGlobalsStrictModeFlag;
-  if (is_native()) flags |= kDeclareGlobalsNativeFlag;
-  return flags;
+  ASSERT(DeclareGlobalsLanguageMode::is_valid(language_mode()));
+  return DeclareGlobalsEvalFlag::encode(is_eval()) |
+      DeclareGlobalsNativeFlag::encode(is_native()) |
+      DeclareGlobalsLanguageMode::encode(language_mode());
 }
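
DeclareGlobalsFlags above now packs three properties into a single integer so
they can cross the runtime-call boundary as one Smi. A minimal sketch with an
invented bit layout (the real encoder classes and field widths live elsewhere
in V8's headers):

#include <cassert>

enum LanguageMode { CLASSIC_MODE, STRICT_MODE, EXTENDED_MODE };

// Bit 0: is_eval, bit 1: is_native, bits 2..3: language mode.
int EncodeDeclareGlobalsFlags(bool is_eval, bool is_native, LanguageMode mode) {
  return (is_eval ? 1 : 0) | ((is_native ? 1 : 0) << 1) |
         (static_cast<int>(mode) << 2);
}

int main() {
  int flags = EncodeDeclareGlobalsFlags(true, false, STRICT_MODE);
  assert((flags & 1) != 0);             // eval
  assert(((flags >> 1) & 1) == 0);      // not native
  assert((flags >> 2) == STRICT_MODE);  // language mode
  return 0;
}
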
 
 
@@ -659,14 +771,13 @@
 }
 
 
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
-  ZoneList<Expression*>* args = node->arguments();
-  const Runtime::Function* function = node->function();
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+  const Runtime::Function* function = expr->function();
   ASSERT(function != NULL);
   ASSERT(function->intrinsic_type == Runtime::INLINE);
   InlineFunctionGenerator generator =
       FindInlineFunctionGenerator(function->function_id);
-  ((*this).*(generator))(args);
+  ((*this).*(generator))(expr);
 }
 
 
@@ -683,11 +794,25 @@
 }
 
 
+void FullCodeGenerator::VisitInDuplicateContext(Expression* expr) {
+  if (context()->IsEffect()) {
+    VisitForEffect(expr);
+  } else if (context()->IsAccumulatorValue()) {
+    VisitForAccumulatorValue(expr);
+  } else if (context()->IsStackValue()) {
+    VisitForStackValue(expr);
+  } else if (context()->IsTest()) {
+    const TestContext* test = TestContext::cast(context());
+    VisitForControl(expr, test->true_label(), test->false_label(),
+                    test->fall_through());
+  }
+}
+
+
 void FullCodeGenerator::VisitComma(BinaryOperation* expr) {
   Comment cmnt(masm_, "[ Comma");
   VisitForEffect(expr->left());
-  if (context()->IsTest()) ForwardBailoutToChild(expr);
-  VisitInCurrentContext(expr->right());
+  VisitInDuplicateContext(expr->right());
 }
 
 
@@ -709,7 +834,6 @@
     }
     PrepareForBailoutForId(right_id, NO_REGISTERS);
     __ bind(&eval_right);
-    ForwardBailoutToChild(expr);
 
   } else if (context()->IsAccumulatorValue()) {
     VisitForAccumulatorValue(left);
@@ -717,7 +841,6 @@
     // case we need it.
     __ push(result_register());
     Label discard, restore;
-    PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     if (is_logical_and) {
       DoTest(left, &discard, &restore, &restore);
     } else {
@@ -736,7 +859,6 @@
     // case we need it.
     __ push(result_register());
     Label discard;
-    PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     if (is_logical_and) {
       DoTest(left, &discard, &done, &discard);
     } else {
@@ -758,7 +880,7 @@
     __ bind(&eval_right);
   }
 
-  VisitInCurrentContext(right);
+  VisitInDuplicateContext(right);
   __ bind(&done);
 }
 
@@ -785,34 +907,6 @@
 }
 
 
-void FullCodeGenerator::ForwardBailoutToChild(Expression* expr) {
-  if (!info_->HasDeoptimizationSupport()) return;
-  ASSERT(context()->IsTest());
-  ASSERT(expr == forward_bailout_stack_->expr());
-  forward_bailout_pending_ = forward_bailout_stack_;
-}
-
-
-void FullCodeGenerator::VisitInCurrentContext(Expression* expr) {
-  if (context()->IsTest()) {
-    ForwardBailoutStack stack(expr, forward_bailout_pending_);
-    ForwardBailoutStack* saved = forward_bailout_stack_;
-    forward_bailout_pending_ = NULL;
-    forward_bailout_stack_ = &stack;
-    Visit(expr);
-    forward_bailout_stack_ = saved;
-  } else {
-    ASSERT(forward_bailout_pending_ == NULL);
-    Visit(expr);
-    State state = context()->IsAccumulatorValue() ? TOS_REG : NO_REGISTERS;
-    PrepareForBailout(expr, state);
-    // Forwarding bailouts to children is a one shot operation. It should have
-    // been processed at this point.
-    ASSERT(forward_bailout_pending_ == NULL);
-  }
-}
-
-
 void FullCodeGenerator::VisitBlock(Block* stmt) {
   Comment cmnt(masm_, "[ Block");
   NestedBlock nested_block(this, stmt);
@@ -823,9 +917,18 @@
   if (stmt->block_scope() != NULL) {
     { Comment cmnt(masm_, "[ Extend block context");
       scope_ = stmt->block_scope();
-      __ Push(scope_->GetSerializedScopeInfo());
+      Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+      int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
+      __ Push(scope_info);
       PushFunctionArgumentForContextAllocation();
-      __ CallRuntime(Runtime::kPushBlockContext, 2);
+      if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
+        FastNewBlockContextStub stub(heap_slots);
+        __ CallStub(&stub);
+      } else {
+        __ CallRuntime(Runtime::kPushBlockContext, 2);
+      }
+
+      // Replace the context stored in the frame.
       StoreToFrameField(StandardFrameConstants::kContextOffset,
                         context_register());
     }
@@ -972,7 +1075,6 @@
   VisitForStackValue(stmt->expression());
   PushFunctionArgumentForContextAllocation();
   __ CallRuntime(Runtime::kPushWithContext, 2);
-  decrement_stack_height();
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
 
   { WithOrCatch body(this);
@@ -1010,7 +1112,7 @@
   // Check stack before looping.
   PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
   __ bind(&stack_check);
-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &body);
   __ jmp(&body);
 
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1039,7 +1141,7 @@
   SetStatementPosition(stmt);
 
   // Check stack before looping.
-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &body);
 
   __ bind(&test);
   VisitForControl(stmt->cond(),
@@ -1058,6 +1160,10 @@
   Label test, body;
 
   Iteration loop_statement(this, stmt);
+
+  // Set statement position for a break slot before entering the for-body.
+  SetStatementPosition(stmt);
+
   if (stmt->init() != NULL) {
     Visit(stmt->init());
   }
@@ -1072,7 +1178,6 @@
 
   PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
   __ bind(loop_statement.continue_label());
-  SetStatementPosition(stmt);
   if (stmt->next() != NULL) {
     Visit(stmt->next());
   }
@@ -1082,7 +1187,7 @@
   SetStatementPosition(stmt);
 
   // Check stack before looping.
-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &body);
 
   __ bind(&test);
   if (stmt->cond() != NULL) {
@@ -1103,20 +1208,17 @@
 void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
   Comment cmnt(masm_, "[ TryCatchStatement");
   SetStatementPosition(stmt);
-  // The try block adds a handler to the exception handler chain
-  // before entering, and removes it again when exiting normally.
-  // If an exception is thrown during execution of the try block,
-  // control is passed to the handler, which also consumes the handler.
-  // At this point, the exception is in a register, and store it in
-  // the temporary local variable (prints as ".catch-var") before
-  // executing the catch block. The catch block has been rewritten
-  // to introduce a new scope to bind the catch variable and to remove
-  // that scope again afterwards.
+  // The try block adds a handler to the exception handler chain before
+  // entering, and removes it again when exiting normally.  If an exception
+  // is thrown during execution of the try block, the handler is consumed
+  // and control is passed to the catch block with the exception in the
+  // result register.
 
-  Label try_handler_setup, done;
-  __ Call(&try_handler_setup);
-  // Try handler code, exception in result register.
-
+  Label try_entry, handler_entry, exit;
+  __ jmp(&try_entry);
+  __ bind(&handler_entry);
+  handler_table()->set(stmt->index(), Smi::FromInt(handler_entry.pos()));
+  // Exception handler code, the exception is in the result register.
   // Extend the context before executing the catch block.
   { Comment cmnt(masm_, "[ Extend catch context");
     __ Push(stmt->variable()->name());
@@ -1130,27 +1232,23 @@
   Scope* saved_scope = scope();
   scope_ = stmt->scope();
   ASSERT(scope_->declarations()->is_empty());
-  { WithOrCatch body(this);
+  { WithOrCatch catch_body(this);
     Visit(stmt->catch_block());
   }
   // Restore the context.
   LoadContextField(context_register(), Context::PREVIOUS_INDEX);
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
   scope_ = saved_scope;
-  __ jmp(&done);
+  __ jmp(&exit);
 
   // Try block code. Sets up the exception handler chain.
-  __ bind(&try_handler_setup);
-  {
-    const int delta = StackHandlerConstants::kSize / kPointerSize;
-    TryCatch try_block(this);
-    __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
-    increment_stack_height(delta);
+  __ bind(&try_entry);
+  __ PushTryHandler(StackHandler::CATCH, stmt->index());
+  { TryCatch try_body(this);
     Visit(stmt->try_block());
-    __ PopTryHandler();
-    decrement_stack_height(delta);
   }
-  __ bind(&done);
+  __ PopTryHandler();
+  __ bind(&exit);
 }
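
Note the mechanism change: instead of a call that pushes a return address,
the code jumps over the handler body and records the handler's pc offset in
a per-function handler table, keyed by the try statement's index. A trivial
stand-in model of that indirection (illustrative only; the real table is a
FixedArray of Smis):

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> handler_table(4, -1);  // one slot per try statement
      const int stmt_index = 0;               // stmt->index()
      const int handler_entry_pos = 0x40;     // handler_entry.pos()
      handler_table[stmt_index] = handler_entry_pos;
      // At throw time the unwinder looks the handler offset up by index,
      // rather than consuming a return address pushed by a call.
      assert(handler_table[stmt_index] == 0x40);
      return 0;
    }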
 
 
@@ -1162,12 +1260,12 @@
   //
   // The try-finally construct can enter the finally block in three ways:
   // 1. By exiting the try-block normally. This removes the try-handler and
-  //      calls the finally block code before continuing.
+  //    calls the finally block code before continuing.
   // 2. By exiting the try-block with a function-local control flow transfer
  //    (break/continue/return). The site of, e.g., a break removes the
   //    try handler and calls the finally block code before continuing
   //    its outward control transfer.
-  // 3. by exiting the try-block with a thrown exception.
+  // 3. By exiting the try-block with a thrown exception.
   //    This can happen in nested function calls. It traverses the try-handler
   //    chain and consumes the try-handler entry before jumping to the
   //    handler code. The handler code then calls the finally-block before
@@ -1178,49 +1276,39 @@
   // exception) in the result register (rax/eax/r0), both of which must
   // be preserved. The return address isn't GC-safe, so it should be
   // cooked before GC.
-  Label finally_entry;
-  Label try_handler_setup;
-  const int original_stack_height = stack_height();
+  Label try_entry, handler_entry, finally_entry;
 
-  // Setup the try-handler chain. Use a call to
-  // Jump to try-handler setup and try-block code. Use call to put try-handler
-  // address on stack.
-  __ Call(&try_handler_setup);
-  // Try handler code. Return address of call is pushed on handler stack.
-  {
-    // This code is only executed during stack-handler traversal when an
-    // exception is thrown. The exception is in the result register, which
-    // is retained by the finally block.
-    // Call the finally block and then rethrow the exception if it returns.
-    __ Call(&finally_entry);
-    __ push(result_register());
-    __ CallRuntime(Runtime::kReThrow, 1);
-  }
+  // Jump to try-handler setup and try-block code.
+  __ jmp(&try_entry);
+  __ bind(&handler_entry);
+  handler_table()->set(stmt->index(), Smi::FromInt(handler_entry.pos()));
+  // Exception handler code.  This code is only executed when an exception
+  // is thrown.  The exception is in the result register, and must be
+  // preserved by the finally block.  Call the finally block and then
+  // rethrow the exception if it returns.
+  __ Call(&finally_entry);
+  __ push(result_register());
+  __ CallRuntime(Runtime::kReThrow, 1);
 
+  // Finally block implementation.
   __ bind(&finally_entry);
-  {
-    // Finally block implementation.
-    Finally finally_block(this);
-    EnterFinallyBlock();
-    set_stack_height(original_stack_height + Finally::kElementCount);
+  EnterFinallyBlock();
+  { Finally finally_body(this);
     Visit(stmt->finally_block());
-    ExitFinallyBlock();  // Return to the calling code.
   }
+  ExitFinallyBlock();  // Return to the calling code.
 
-  __ bind(&try_handler_setup);
-  {
-    // Setup try handler (stack pointer registers).
-    const int delta = StackHandlerConstants::kSize / kPointerSize;
-    TryFinally try_block(this, &finally_entry);
-    __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
-    set_stack_height(original_stack_height + delta);
+  // Set up try handler.
+  __ bind(&try_entry);
+  __ PushTryHandler(StackHandler::FINALLY, stmt->index());
+  { TryFinally try_body(this, &finally_entry);
     Visit(stmt->try_block());
-    __ PopTryHandler();
-    set_stack_height(original_stack_height);
   }
+  __ PopTryHandler();
   // Execute the finally block on the way out.  Clobber the unpredictable
-  // value in the accumulator with one that's safe for GC.  The finally
-  // block will unconditionally preserve the accumulator on the stack.
+  // value in the result register with one that's safe for GC because the
+  // finally block will unconditionally preserve the result register on the
+  // stack.
   ClearAccumulator();
   __ Call(&finally_entry);
 }
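
The generated code treats the finally block as a subroutine that both the
normal exit and the exception path call. A host-language model of the same
control flow, using C++ exceptions purely as an analogy for the V8
stack-handler chain (a simplification, not what the emitted code does):

    #include <cstdio>
    #include <stdexcept>

    static void finally_block() { std::puts("finally"); }

    static void try_finally(void (*try_block)()) {
      try {
        try_block();
      } catch (...) {
        finally_block();  // exception path: run finally first...
        throw;            // ...then rethrow (Runtime::kReThrow above)
      }
      finally_block();    // normal path: "call finally_entry" on the way out
    }

    int main() {
      try_finally([] { std::puts("try body"); });
      try {
        try_finally([] { throw std::runtime_error("boom"); });
      } catch (const std::exception& e) {
        std::printf("rethrown: %s\n", e.what());
      }
      return 0;
    }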
@@ -1246,7 +1334,6 @@
   __ bind(&true_case);
   SetExpressionPosition(expr->then_expression(),
                         expr->then_expression_position());
-  int start_stack_height = stack_height();
   if (context()->IsTest()) {
     const TestContext* for_test = TestContext::cast(context());
     VisitForControl(expr->then_expression(),
@@ -1254,17 +1341,15 @@
                     for_test->false_label(),
                     NULL);
   } else {
-    VisitInCurrentContext(expr->then_expression());
+    VisitInDuplicateContext(expr->then_expression());
     __ jmp(&done);
   }
 
   PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
   __ bind(&false_case);
-  set_stack_height(start_stack_height);
-  if (context()->IsTest()) ForwardBailoutToChild(expr);
   SetExpressionPosition(expr->else_expression(),
                         expr->else_expression_position());
-  VisitInCurrentContext(expr->else_expression());
+  VisitInDuplicateContext(expr->else_expression());
   // If control flow falls through Visit, merge it with true case here.
   if (!context()->IsTest()) {
     __ bind(&done);
@@ -1301,11 +1386,8 @@
 
 void FullCodeGenerator::VisitThrow(Throw* expr) {
   Comment cmnt(masm_, "[ Throw");
-  // Throw has no effect on the stack height or the current expression context.
-  // Usually the expression context is null, because throw is a statement.
   VisitForStackValue(expr->exception());
   __ CallRuntime(Runtime::kThrow, 1);
-  decrement_stack_height();
   // Never returns here.
 }
 
@@ -1321,19 +1403,21 @@
 }
 
 
-bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare,
-                                          Label* if_true,
-                                          Label* if_false,
-                                          Label* fall_through) {
-  Expression *expr;
+bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
+  Expression* sub_expr;
   Handle<String> check;
-  if (compare->IsLiteralCompareTypeof(&expr, &check)) {
-    EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through);
+  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+    EmitLiteralCompareTypeof(expr, sub_expr, check);
     return true;
   }
 
-  if (compare->IsLiteralCompareUndefined(&expr)) {
-    EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through);
+  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+    EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
+    return true;
+  }
+
+  if (expr->IsLiteralCompareNull(&sub_expr)) {
+    EmitLiteralCompareNil(expr, sub_expr, kNullValue);
     return true;
   }
 
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 803c618..58d5986 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -77,27 +77,32 @@
     TOS_REG
   };
 
-  explicit FullCodeGenerator(MacroAssembler* masm)
+  FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
       : masm_(masm),
-        info_(NULL),
-        scope_(NULL),
+        info_(info),
+        scope_(info->scope()),
         nesting_stack_(NULL),
         loop_depth_(0),
-        stack_height_(0),
+        global_count_(0),
         context_(NULL),
-        bailout_entries_(0),
+        bailout_entries_(info->HasDeoptimizationSupport()
+                         ? info->function()->ast_node_count() : 0),
         stack_checks_(2),  // There's always at least one.
-        forward_bailout_stack_(NULL),
-        forward_bailout_pending_(NULL) {
-  }
+        type_feedback_cells_(info->HasDeoptimizationSupport()
+                             ? info->function()->ast_node_count() : 0),
+        ic_total_count_(0),
+        has_self_optimization_header_(false) { }
 
   static bool MakeCode(CompilationInfo* info);
 
-  void Generate(CompilationInfo* info);
-  void PopulateDeoptimizationData(Handle<Code> code);
+  // Returns the platform-specific size in bytes of the self-optimization
+  // header.
+  static int self_optimization_header_size();
 
-  class StateField : public BitField<State, 0, 8> { };
-  class PcField    : public BitField<unsigned, 8, 32-8> { };
+  // Encode state and pc-offset as a BitField<type, start, size>.
+  // Only use 30 bits because we encode the result as a smi.
+  class StateField : public BitField<State, 0, 1> { };
+  class PcField    : public BitField<unsigned, 1, 30-1> { };
 
   static const char* State2String(State state) {
     switch (state) {
@@ -143,11 +148,13 @@
       return previous_;
     }
 
- protected:
+   protected:
     MacroAssembler* masm() { return codegen_->masm(); }
 
     FullCodeGenerator* codegen_;
     NestedStatement* previous_;
+
+   private:
     DISALLOW_COPY_AND_ASSIGN(NestedStatement);
   };
 
@@ -276,27 +283,8 @@
     }
   };
 
-  // The forward bailout stack keeps track of the expressions that can
-  // bail out to just before the control flow is split in a child
-  // node. The stack elements are linked together through the parent
-  // link when visiting expressions in test contexts after requesting
-  // bailout in child forwarding.
-  class ForwardBailoutStack BASE_EMBEDDED {
-   public:
-    ForwardBailoutStack(Expression* expr, ForwardBailoutStack* parent)
-        : expr_(expr), parent_(parent) { }
-
-    Expression* expr() const { return expr_; }
-    ForwardBailoutStack* parent() const { return parent_; }
-
-   private:
-    Expression* const expr_;
-    ForwardBailoutStack* const parent_;
-  };
-
   // Type of a member function that generates inline code for a native function.
-  typedef void (FullCodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
+  typedef void (FullCodeGenerator::*InlineFunctionGenerator)(CallRuntime* expr);
 
   static const InlineFunctionGenerator kInlineFunctionGenerators[];
 
@@ -357,23 +345,22 @@
   // need the write barrier if location is CONTEXT.
   MemOperand VarOperand(Variable* var, Register scratch);
 
-  // Forward the bailout responsibility for the given expression to
-  // the next child visited (which must be in a test context).
-  void ForwardBailoutToChild(Expression* expr);
-
   void VisitForEffect(Expression* expr) {
     EffectContext context(this);
-    VisitInCurrentContext(expr);
+    Visit(expr);
+    PrepareForBailout(expr, NO_REGISTERS);
   }
 
   void VisitForAccumulatorValue(Expression* expr) {
     AccumulatorValueContext context(this);
-    VisitInCurrentContext(expr);
+    Visit(expr);
+    PrepareForBailout(expr, TOS_REG);
   }
 
   void VisitForStackValue(Expression* expr) {
     StackValueContext context(this);
-    VisitInCurrentContext(expr);
+    Visit(expr);
+    PrepareForBailout(expr, NO_REGISTERS);
   }
 
   void VisitForControl(Expression* expr,
@@ -381,9 +368,14 @@
                        Label* if_false,
                        Label* fall_through) {
     TestContext context(this, expr, if_true, if_false, fall_through);
-    VisitInCurrentContext(expr);
+    Visit(expr);
+    // For test contexts, we prepare for bailout before branching, not at
+    // the end of the entire expression.  This happens as part of visiting
+    // the expression.
   }
 
+  void VisitInDuplicateContext(Expression* expr);
+
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void DeclareGlobals(Handle<FixedArray> pairs);
   int DeclareGlobalsFlags();
@@ -391,29 +383,26 @@
   // Try to perform a comparison as a fast inlined literal compare if
   // the operands allow it.  Returns true if the compare operation
   // has been matched and all code generated; false otherwise.
-  bool TryLiteralCompare(CompareOperation* compare,
-                         Label* if_true,
-                         Label* if_false,
-                         Label* fall_through);
+  bool TryLiteralCompare(CompareOperation* compare);
 
   // Platform-specific code for comparing the type of a value with
   // a given literal string.
   void EmitLiteralCompareTypeof(Expression* expr,
-                                Handle<String> check,
-                                Label* if_true,
-                                Label* if_false,
-                                Label* fall_through);
+                                Expression* sub_expr,
+                                Handle<String> check);
 
-  // Platform-specific code for strict equality comparison with
-  // the undefined value.
-  void EmitLiteralCompareUndefined(Expression* expr,
-                                   Label* if_true,
-                                   Label* if_false,
-                                   Label* fall_through);
+  // Platform-specific code for equality comparison with a nil-like value.
+  void EmitLiteralCompareNil(CompareOperation* expr,
+                             Expression* sub_expr,
+                             NilValue nil);
 
   // Bailout support.
   void PrepareForBailout(Expression* node, State state);
-  void PrepareForBailoutForId(int id, State state);
+  void PrepareForBailoutForId(unsigned id, State state);
+
+  // Cache cell support.  This associates AST ids with global property cells
+  // that will be cleared during GC and collected by the type-feedback oracle.
+  void RecordTypeFeedbackCell(unsigned id, Handle<JSGlobalPropertyCell> cell);
 
   // Record a call's return site offset, used to rebuild the frame if the
   // called function was inlined at the site.
@@ -424,27 +413,33 @@
   // canonical JS true value so we will insert a (dead) test against true at
   // the actual bailout target from the optimized code. If not
   // should_normalize, the true and false labels are ignored.
-  void PrepareForBailoutBeforeSplit(State state,
+  void PrepareForBailoutBeforeSplit(Expression* expr,
                                     bool should_normalize,
                                     Label* if_true,
                                     Label* if_false);
 
   // Platform-specific code for a variable, constant, or function
   // declaration.  Functions have an initial value.
+  // Increments global_count_ for unallocated variables.
   void EmitDeclaration(VariableProxy* proxy,
-                       Variable::Mode mode,
-                       FunctionLiteral* function,
-                       int* global_count);
+                       VariableMode mode,
+                       FunctionLiteral* function);
 
   // Platform-specific code for checking the stack limit at the back edge of
   // a loop.
-  void EmitStackCheck(IterationStatement* stmt);
+  // This is meant to be called at loop back edges; |back_edge_target| is
+  // the jump target of the back edge and is used to approximate the amount
+  // of code inside the loop.
+  void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
   // Record the OSR AST id corresponding to a stack check in the code.
-  void RecordStackCheck(int osr_ast_id);
+  void RecordStackCheck(unsigned osr_ast_id);
   // Emit a table of stack check ids and pcs into the code stream.  Return
   // the offset of the start of the table.
   unsigned EmitStackCheckTable();
 
+  void EmitProfilingCounterDecrement(int delta);
+  void EmitProfilingCounterReset();
+
   // Platform-specific return sequence
   void EmitReturnSequence();
 
@@ -459,7 +454,7 @@
   void EmitInlineRuntimeCall(CallRuntime* expr);
 
 #define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
-  void Emit##name(ZoneList<Expression*>* arguments);
+  void Emit##name(CallRuntime* expr);
   INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
   INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
 #undef EMIT_INLINE_RUNTIME_CALL
@@ -475,13 +470,10 @@
                                  Label* done);
   void EmitVariableLoad(VariableProxy* proxy);
 
-  enum ResolveEvalFlag {
-    SKIP_CONTEXT_LOOKUP,
-    PERFORM_CONTEXT_LOOKUP
-  };
+  void EmitAccessor(Expression* expression);
 
   // Expects the arguments and the function already pushed.
-  void EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, int arg_count);
+  void EmitResolvePossiblyDirectEval(int arg_count);
 
   // Platform-specific support for allocating a new closure based on
   // the given function info.
@@ -513,7 +505,7 @@
 
   // Assign to the given expression as if via '='. The right-hand-side value
   // is expected in the accumulator.
-  void EmitAssignment(Expression* expr, int bailout_ast_id);
+  void EmitAssignment(Expression* expr);
 
   // Complete a variable assignment.  The right-hand-side value is expected
   // in the accumulator.
@@ -529,6 +521,10 @@
   // accumulator.
   void EmitKeyedPropertyAssignment(Assignment* expr);
 
+  void CallIC(Handle<Code> code,
+              RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+              unsigned ast_id = kNoASTId);
+
   void SetFunctionPosition(FunctionLiteral* fun);
   void SetReturnPosition(FunctionLiteral* fun);
   void SetStatementPosition(Statement* stmt);
@@ -548,35 +544,6 @@
     loop_depth_--;
   }
 
-#if defined(V8_TARGET_ARCH_IA32)
-  int stack_height() { return stack_height_; }
-  void set_stack_height(int depth) { stack_height_ = depth; }
-  void increment_stack_height() { stack_height_++; }
-  void increment_stack_height(int delta) { stack_height_ += delta; }
-  void decrement_stack_height() {
-    if (FLAG_verify_stack_height) {
-      ASSERT(stack_height_ > 0);
-    }
-    stack_height_--;
-  }
-  void decrement_stack_height(int delta) {
-    stack_height_-= delta;
-    if (FLAG_verify_stack_height) {
-      ASSERT(stack_height_ >= 0);
-    }
-  }
-  // Call this function only if FLAG_verify_stack_height is true.
-  void verify_stack_height();  // Generates a runtime check of esp - ebp.
-#else
-  int stack_height() { return 0; }
-  void set_stack_height(int depth) {}
-  void increment_stack_height() {}
-  void increment_stack_height(int delta) {}
-  void decrement_stack_height() {}
-  void decrement_stack_height(int delta) {}
-  void verify_stack_height() {}
-#endif  // V8_TARGET_ARCH_IA32
-
   MacroAssembler* masm() { return masm_; }
 
   class ExpressionContext;
@@ -586,9 +553,11 @@
   Handle<Script> script() { return info_->script(); }
   bool is_eval() { return info_->is_eval(); }
   bool is_native() { return info_->is_native(); }
-  bool is_strict_mode() { return function()->strict_mode(); }
-  StrictModeFlag strict_mode_flag() {
-    return is_strict_mode() ? kStrictMode : kNonStrictMode;
+  bool is_classic_mode() {
+    return language_mode() == CLASSIC_MODE;
+  }
+  LanguageMode language_mode() {
+    return function()->language_mode();
   }
   FunctionLiteral* function() { return info_->function(); }
   Scope* scope() { return scope_; }
@@ -618,15 +587,26 @@
   void VisitComma(BinaryOperation* expr);
   void VisitLogicalExpression(BinaryOperation* expr);
   void VisitArithmeticExpression(BinaryOperation* expr);
-  void VisitInCurrentContext(Expression* expr);
 
   void VisitForTypeofValue(Expression* expr);
 
+  void Generate();
+  void PopulateDeoptimizationData(Handle<Code> code);
+  void PopulateTypeFeedbackInfo(Handle<Code> code);
+  void PopulateTypeFeedbackCells(Handle<Code> code);
+
+  Handle<FixedArray> handler_table() { return handler_table_; }
+
   struct BailoutEntry {
     unsigned id;
     unsigned pc_and_state;
   };
 
+  struct TypeFeedbackCellEntry {
+    unsigned ast_id;
+    Handle<JSGlobalPropertyCell> cell;
+  };
+
 
   class ExpressionContext BASE_EMBEDDED {
    public:
@@ -637,10 +617,6 @@
 
     virtual ~ExpressionContext() {
       codegen_->set_new_context(old_);
-      if (FLAG_verify_stack_height) {
-        ASSERT_EQ(expected_stack_height_, codegen()->stack_height());
-        codegen()->verify_stack_height();
-      }
     }
 
     Isolate* isolate() const { return codegen_->isolate(); }
@@ -678,8 +654,8 @@
                              Label** if_false,
                              Label** fall_through) const = 0;
 
-    // Returns true if we are evaluating only for side effects (ie if the result
-    // will be discarded).
+    // Returns true if we are evaluating only for side effects (i.e. if the
+    // result will be discarded).
     virtual bool IsEffect() const { return false; }
 
     // Returns true if we are evaluating for the value (in accu/on stack).
@@ -694,7 +670,6 @@
     FullCodeGenerator* codegen() const { return codegen_; }
     MacroAssembler* masm() const { return masm_; }
     MacroAssembler* masm_;
-    int expected_stack_height_;  // The expected stack height esp - ebp on exit.
 
    private:
     const ExpressionContext* old_;
@@ -704,9 +679,7 @@
   class AccumulatorValueContext : public ExpressionContext {
    public:
     explicit AccumulatorValueContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) {
-      expected_stack_height_ = codegen->stack_height();
-    }
+        : ExpressionContext(codegen) { }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
@@ -727,9 +700,7 @@
   class StackValueContext : public ExpressionContext {
    public:
     explicit StackValueContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) {
-      expected_stack_height_ = codegen->stack_height() + 1;
-    }
+        : ExpressionContext(codegen) { }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
@@ -758,9 +729,7 @@
           condition_(condition),
           true_label_(true_label),
           false_label_(false_label),
-          fall_through_(fall_through) {
-      expected_stack_height_ = codegen->stack_height();
-    }
+          fall_through_(fall_through) { }
 
     static const TestContext* cast(const ExpressionContext* context) {
       ASSERT(context->IsTest());
@@ -797,10 +766,7 @@
   class EffectContext : public ExpressionContext {
    public:
     explicit EffectContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) {
-      expected_stack_height_ = codegen->stack_height();
-    }
-
+        : ExpressionContext(codegen) { }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
@@ -824,12 +790,15 @@
   Label return_label_;
   NestedStatement* nesting_stack_;
   int loop_depth_;
-  int stack_height_;
+  int global_count_;
   const ExpressionContext* context_;
   ZoneList<BailoutEntry> bailout_entries_;
   ZoneList<BailoutEntry> stack_checks_;
-  ForwardBailoutStack* forward_bailout_stack_;
-  ForwardBailoutStack* forward_bailout_pending_;
+  ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
+  int ic_total_count_;
+  bool has_self_optimization_header_;
+  Handle<FixedArray> handler_table_;
+  Handle<JSGlobalPropertyCell> profiling_counter_;
 
   friend class NestedStatement;
 
@@ -837,6 +806,28 @@
 };
 
 
+// A map from property names to getter/setter pairs allocated in the zone.
+class AccessorTable: public TemplateHashMap<Literal,
+                                            ObjectLiteral::Accessors,
+                                            ZoneListAllocationPolicy> {
+ public:
+  explicit AccessorTable(Zone* zone) :
+      TemplateHashMap<Literal,
+                      ObjectLiteral::Accessors,
+                      ZoneListAllocationPolicy>(Literal::Match),
+      zone_(zone) { }
+
+  Iterator lookup(Literal* literal) {
+    Iterator it = find(literal, true);
+    if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
+    return it;
+  }
+
+ private:
+  Zone* zone_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_FULL_CODEGEN_H_
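
AccessorTable::lookup() is find-or-create: the first request for a property
name allocates an empty Accessors pair, and later requests return the same
pair, so a getter and setter for the same name end up grouped. A
self-contained analogue with std::map standing in for the zone hash map
(illustrative; not the class above):

    #include <cassert>
    #include <map>
    #include <string>

    struct Accessors {
      const char* getter;
      const char* setter;
      Accessors() : getter(0), setter(0) {}
    };

    int main() {
      std::map<std::string, Accessors> table;
      table["x"].getter = "get x() { ... }";   // first lookup creates the pair
      table["x"].setter = "set x(v) { ... }";  // second lookup reuses it
      assert(table.size() == 1);
      assert(table["x"].getter != 0 && table["x"].setter != 0);
      return 0;
    }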
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index 68cb053..d3cd447 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -33,6 +33,7 @@
 #include "compiler.h"
 #include "global-handles.h"
 #include "messages.h"
+#include "platform.h"
 #include "natives.h"
 #include "scopeinfo.h"
 
@@ -1115,13 +1116,13 @@
       int context_slots = scope_info.number_of_context_slots();
       // The real slot ID is internal_slots + context_slot_id.
       int internal_slots = Context::MIN_CONTEXT_SLOTS;
-      int locals = scope_info.NumberOfLocals();
+      int locals = scope_info.LocalCount();
       int current_abbreviation = 4;
 
       for (int param = 0; param < params; ++param) {
         w->WriteULEB128(current_abbreviation++);
         w->WriteString(
-            *scope_info.parameter_name(param)->ToCString(DISALLOW_NULLS));
+            *scope_info.ParameterName(param)->ToCString(DISALLOW_NULLS));
         w->Write<uint32_t>(ty_offset);
         Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
         uintptr_t block_start = w->position();
@@ -1312,7 +1313,7 @@
       int context_slots = scope_info.number_of_context_slots();
       // The real slot ID is internal_slots + context_slot_id.
       int internal_slots = Context::MIN_CONTEXT_SLOTS;
-      int locals = scope_info.NumberOfLocals();
+      int locals = scope_info.LocalCount();
       int total_children =
           params + slots + context_slots + internal_slots + locals + 2;
 
@@ -1556,23 +1557,23 @@
 
 class UnwindInfoSection : public DebugSection {
  public:
-  explicit UnwindInfoSection(CodeDescription *desc);
-  virtual bool WriteBody(Writer *w);
+  explicit UnwindInfoSection(CodeDescription* desc);
+  virtual bool WriteBody(Writer* w);
 
-  int WriteCIE(Writer *w);
-  void WriteFDE(Writer *w, int);
+  int WriteCIE(Writer* w);
+  void WriteFDE(Writer* w, int);
 
-  void WriteFDEStateOnEntry(Writer *w);
-  void WriteFDEStateAfterRBPPush(Writer *w);
-  void WriteFDEStateAfterRBPSet(Writer *w);
-  void WriteFDEStateAfterRBPPop(Writer *w);
+  void WriteFDEStateOnEntry(Writer* w);
+  void WriteFDEStateAfterRBPPush(Writer* w);
+  void WriteFDEStateAfterRBPSet(Writer* w);
+  void WriteFDEStateAfterRBPPop(Writer* w);
 
-  void WriteLength(Writer *w,
+  void WriteLength(Writer* w,
                    Writer::Slot<uint32_t>* length_slot,
                    int initial_position);
 
  private:
-  CodeDescription *desc_;
+  CodeDescription* desc_;
 
   // DWARF3 Specification, Table 7.23
   enum CFIInstructions {
@@ -1623,7 +1624,7 @@
 };
 
 
-void UnwindInfoSection::WriteLength(Writer *w,
+void UnwindInfoSection::WriteLength(Writer* w,
                                     Writer::Slot<uint32_t>* length_slot,
                                     int initial_position) {
   uint32_t align = (w->position() - initial_position) % kPointerSize;
@@ -1639,7 +1640,7 @@
 }
 
 
-UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
+UnwindInfoSection::UnwindInfoSection(CodeDescription* desc)
 #ifdef __ELF
     : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1),
 #else
@@ -1648,7 +1649,7 @@
 #endif
       desc_(desc) { }
 
-int UnwindInfoSection::WriteCIE(Writer *w) {
+int UnwindInfoSection::WriteCIE(Writer* w) {
   Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
   uint32_t cie_position = w->position();
 
@@ -1668,7 +1669,7 @@
 }
 
 
-void UnwindInfoSection::WriteFDE(Writer *w, int cie_position) {
+void UnwindInfoSection::WriteFDE(Writer* w, int cie_position) {
   // The only FDE for this function. The CFA is the current RBP.
   Writer::Slot<uint32_t> fde_length_slot = w->CreateSlotHere<uint32_t>();
   int fde_position = w->position();
@@ -1686,7 +1687,7 @@
 }
 
 
-void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) {
+void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) {
   // The first state, just after control has been transferred to the
   // function.
 
@@ -1713,7 +1714,7 @@
 }
 
 
-void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) {
+void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer* w) {
   // The second state, just after RBP has been pushed.
 
   // RBP / CFA for this function is now the current RSP, so just set the
@@ -1734,7 +1735,7 @@
 }
 
 
-void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) {
+void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer* w) {
   // The third state, after the RBP has been set.
 
   // The CFA can now directly be set to RBP.
@@ -1749,7 +1750,7 @@
 }
 
 
-void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) {
+void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) {
   // The fourth (final) state. The RBP has been popped (just before issuing a
   // return).
 
@@ -1769,7 +1770,7 @@
 }
 
 
-bool UnwindInfoSection::WriteBody(Writer *w) {
+bool UnwindInfoSection::WriteBody(Writer* w) {
   uint32_t cie_position = WriteCIE(w);
   WriteFDE(w, cie_position);
   return true;
@@ -1810,8 +1811,8 @@
   struct JITDescriptor {
     uint32_t version_;
     uint32_t action_flag_;
-    JITCodeEntry *relevant_entry_;
-    JITCodeEntry *first_entry_;
+    JITCodeEntry* relevant_entry_;
+    JITCodeEntry* first_entry_;
   };
 
   // GDB will place breakpoint into this function.
@@ -1998,7 +1999,7 @@
   }
 }
 
-static void AddUnwindInfo(CodeDescription *desc) {
+static void AddUnwindInfo(CodeDescription* desc) {
 #ifdef V8_TARGET_ARCH_X64
   if (desc->tag() == GDBJITInterface::FUNCTION) {
     // To avoid propagating unwinding information through
@@ -2035,7 +2036,7 @@
 }
 
 
-Mutex* GDBJITInterface::mutex_ = OS::CreateMutex();
+static LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
 
 
 void GDBJITInterface::AddCode(const char* name,
@@ -2045,7 +2046,7 @@
                               CompilationInfo* info) {
   if (!FLAG_gdbjit) return;
 
-  ScopedLock lock(mutex_);
+  ScopedLock lock(mutex.Pointer());
   AssertNoAllocation no_gc;
 
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
@@ -2126,7 +2127,7 @@
 void GDBJITInterface::RemoveCode(Code* code) {
   if (!FLAG_gdbjit) return;
 
-  ScopedLock lock(mutex_);
+  ScopedLock lock(mutex.Pointer());
   HashMap::Entry* e = GetEntries()->Lookup(code,
                                            HashForCodeObject(code),
                                            false);
@@ -2146,7 +2147,7 @@
 
 void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
                                                GDBJITLineInfo* line_info) {
-  ScopedLock lock(mutex_);
+  ScopedLock lock(mutex.Pointer());
   ASSERT(!IsLineInfoTagged(line_info));
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
   ASSERT(e->value == NULL);
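
The mutex change in gdb-jit.cc above replaces a dynamically initialized
static (OS::CreateMutex() runs before main) with a lazily initialized one.
A C++11 model of the same idea, assuming a function-local static instead of
V8's LazyInstance machinery:

    #include <mutex>

    // Initialized on first use, not during static initialization.
    static std::mutex& GdbJitMutex() {
      static std::mutex m;
      return m;
    }

    void WithLock() {
      std::lock_guard<std::mutex> lock(GdbJitMutex());
      // ... touch the shared code-entry hash map here ...
    }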
diff --git a/src/gdb-jit.h b/src/gdb-jit.h
index 2cf15bc..0eca938 100644
--- a/src/gdb-jit.h
+++ b/src/gdb-jit.h
@@ -132,9 +132,6 @@
   static void RemoveCode(Code* code);
 
   static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
-
- private:
-  static Mutex* mutex_;
 };
 
 #define GDBJIT(action) GDBJITInterface::action
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 87066fa..9c0ad45 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -232,7 +232,7 @@
       VMState state(isolate, EXTERNAL);
       func(object, par);
     }
-    // Absense of explicit cleanup or revival of weak handle
+    // Absence of explicit cleanup or revival of weak handle
     // in most of the cases would lead to memory leak.
     ASSERT(state_ != NEAR_DEATH);
     return true;
@@ -384,6 +384,7 @@
     : isolate_(isolate),
       number_of_weak_handles_(0),
       number_of_global_object_weak_handles_(0),
+      number_of_global_handles_(0),
       first_block_(NULL),
       first_used_block_(NULL),
       first_free_(NULL),
@@ -403,6 +404,7 @@
 
 Handle<Object> GlobalHandles::Create(Object* value) {
   isolate_->counters()->global_handles()->Increment();
+  number_of_global_handles_++;
   if (first_free_ == NULL) {
     first_block_ = new NodeBlock(first_block_);
     first_block_->PutNodesOnFreeList(&first_free_);
@@ -423,6 +425,7 @@
 
 void GlobalHandles::Destroy(Object** location) {
   isolate_->counters()->global_handles()->Decrement();
+  number_of_global_handles_--;
   if (location == NULL) return;
   Node::FromLocation(location)->Release(this);
 }
diff --git a/src/global-handles.h b/src/global-handles.h
index 153d4da..ddf5fe2 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -143,6 +143,11 @@
     return number_of_global_object_weak_handles_;
   }
 
+  // Returns the current number of handles to global objects.
+  int NumberOfGlobalHandles() {
+    return number_of_global_handles_;
+  }
+
   // Clear the weakness of a global handle.
   void ClearWeakness(Object** location);
 
@@ -248,6 +253,9 @@
   // number_of_weak_handles_.
   int number_of_global_object_weak_handles_;
 
+  // Field always containing the number of handles to global objects.
+  int number_of_global_handles_;
+
   // List of all allocated node blocks.
   NodeBlock* first_block_;
 
diff --git a/src/globals.h b/src/globals.h
index 6c6966a..25d4ffe 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -175,17 +175,22 @@
 // than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
 // works on compilers that don't have it (like MSVC).
 #if V8_HOST_ARCH_64_BIT
-#ifdef _MSC_VER
+#if defined(_MSC_VER)
 #define V8_UINT64_C(x)  (x ## UI64)
 #define V8_INT64_C(x)   (x ## I64)
 #define V8_INTPTR_C(x)  (x ## I64)
 #define V8_PTR_PREFIX "ll"
-#else  // _MSC_VER
+#elif defined(__MINGW64__)
+#define V8_UINT64_C(x)  (x ## ULL)
+#define V8_INT64_C(x)   (x ## LL)
+#define V8_INTPTR_C(x)  (x ## LL)
+#define V8_PTR_PREFIX "I64"
+#else
 #define V8_UINT64_C(x)  (x ## UL)
 #define V8_INT64_C(x)   (x ## L)
 #define V8_INTPTR_C(x)  (x ## L)
 #define V8_PTR_PREFIX "l"
-#endif  // _MSC_VER
+#endif
 #else  // V8_HOST_ARCH_64_BIT
 #define V8_INTPTR_C(x)  (x)
 #define V8_PTR_PREFIX ""
@@ -230,6 +235,9 @@
 
 const int kDoubleSizeLog2 = 3;
 
+// Size of the state of the random number generator.
+const int kRandomStateSize = 2 * kIntSize;
+
 #if V8_HOST_ARCH_64_BIT
 const int kPointerSizeLog2 = 3;
 const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
@@ -255,8 +263,13 @@
 const int kBinary32MantissaBits = 23;
 const int kBinary32ExponentShift = 23;
 
-// ASCII/UC16 constants
+// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
+// other bits set.
+const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
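
A quick sanity check of the mask (a sketch; memcpy stands in for V8's
bit_cast helper, and the NaN bit pattern assumes a common IEEE-754
platform): bits 51..62 are the quiet bit plus the 11 exponent bits, so the
mask equals 0x7FF8000000000000 and is fully set in a default quiet NaN.

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    int main() {
      const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
      assert(kQuietNaNMask == 0x7FF8000000000000ULL);
      double nan = std::numeric_limits<double>::quiet_NaN();
      uint64_t bits;
      std::memcpy(&bits, &nan, sizeof(bits));
      assert((bits & kQuietNaNMask) == kQuietNaNMask);  // sign bit may vary
      return 0;
    }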
+
+// ASCII/UTF-16 constants
 // Code-point values in Unicode 4.0 are 21 bits wide.
+// Code units in UTF-16 are 16 bits wide.
 typedef uint16_t uc16;
 typedef int32_t uc32;
 const int kASCIISize    = kCharSize;
@@ -287,7 +300,7 @@
 // The USE(x) template is used to silence C++ compiler warnings
 // issued for (yet) unused variables (typically parameters).
 template <typename T>
-static inline void USE(T) { }
+inline void USE(T) { }
 
 
 // FUNCTION_ADDR(f) gets the address of a C function f.
@@ -351,6 +364,39 @@
 class FreeStoreAllocationPolicy;
 template <typename T, class P = FreeStoreAllocationPolicy> class List;
 
+// -----------------------------------------------------------------------------
+// Declarations for use in both the preparser and the rest of V8.
+
+// The different language modes that V8 implements. ES5 defines two language
+// modes: an unrestricted mode and a strict mode, indicated by CLASSIC_MODE
+// and STRICT_MODE respectively in the enum. The harmony spec drafts for the
+// next ES standard specify a third mode, called 'extended mode', which is
+// only available if the harmony flag is set. It is based on strict mode and
+// adds new functionality to it, so most of the semantics of the two modes
+// coincide.
+//
+// In the current draft the term 'base code' is used to refer to code that is
+// neither in strict nor extended mode. However, the more distinguishing term
+// 'classic mode' is used in V8 instead to avoid mix-ups.
+
+enum LanguageMode {
+  CLASSIC_MODE,
+  STRICT_MODE,
+  EXTENDED_MODE
+};
+
+
+// The Strict Mode (ECMA-262 5th edition, 4.2.2).
+//
+// This flag is used in the backend to represent the language mode. So far
+// there is no semantic difference between the strict and the extended mode in
+// the backend, so both modes are represented by the kStrictMode value.
+enum StrictModeFlag {
+  kNonStrictMode,
+  kStrictMode
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_GLOBALS_H_
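
Since strict and extended mode share backend semantics, mapping LanguageMode
onto StrictModeFlag collapses two values onto one. A hypothetical helper
(not part of this patch) that makes the mapping explicit, assuming the two
enums declared above:

    static StrictModeFlag StrictModeFlagFor(LanguageMode mode) {
      return mode == CLASSIC_MODE ? kNonStrictMode : kStrictMode;
    }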
diff --git a/src/handles.cc b/src/handles.cc
index c482fa6..416ecbd 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -190,7 +190,11 @@
 
   // Inobject slack tracking will reclaim redundant inobject space later,
   // so we can afford to adjust the estimate generously.
-  return estimate + 8;
+  if (FLAG_clever_optimizations) {
+    return estimate + 8;
+  } else {
+    return estimate + 3;
+  }
 }
 
 
@@ -204,42 +208,6 @@
 }
 
 
-void NormalizeProperties(Handle<JSObject> object,
-                         PropertyNormalizationMode mode,
-                         int expected_additional_properties) {
-  CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
-                          object->NormalizeProperties(
-                              mode,
-                              expected_additional_properties));
-}
-
-
-Handle<SeededNumberDictionary> NormalizeElements(Handle<JSObject> object) {
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->NormalizeElements(),
-                     SeededNumberDictionary);
-}
-
-
-void TransformToFastProperties(Handle<JSObject> object,
-                               int unused_property_fields) {
-  CALL_HEAP_FUNCTION_VOID(
-      object->GetIsolate(),
-      object->TransformToFastProperties(unused_property_fields));
-}
-
-
-Handle<SeededNumberDictionary> SeededNumberDictionarySet(
-    Handle<SeededNumberDictionary> dictionary,
-    uint32_t index,
-    Handle<Object> value,
-    PropertyDetails details) {
-  CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
-                     dictionary->Set(index, *value, details),
-                     SeededNumberDictionary);
-}
-
-
 void FlattenString(Handle<String> string) {
   CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
 }
@@ -261,17 +229,6 @@
 }
 
 
-Handle<Object> SetProperty(Handle<JSReceiver> object,
-                           Handle<String> key,
-                           Handle<Object> value,
-                           PropertyAttributes attributes,
-                           StrictModeFlag strict_mode) {
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->SetProperty(*key, *value, attributes, strict_mode),
-                     Object);
-}
-
-
 Handle<Object> SetProperty(Handle<Object> object,
                            Handle<Object> key,
                            Handle<Object> value,
@@ -299,16 +256,6 @@
 }
 
 
-Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
-                                     Handle<String> key,
-                                     Handle<Object> value,
-                                     PropertyDetails details) {
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->SetNormalizedProperty(*key, *value, details),
-                     Object);
-}
-
-
 Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
                                    Handle<Object> key) {
   Isolate* isolate = object->GetIsolate();
@@ -318,30 +265,6 @@
 }
 
 
-Handle<Object> SetLocalPropertyIgnoreAttributes(
-    Handle<JSObject> object,
-    Handle<String> key,
-    Handle<Object> value,
-    PropertyAttributes attributes) {
-  CALL_HEAP_FUNCTION(
-    object->GetIsolate(),
-    object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
-    Object);
-}
-
-
-void SetLocalPropertyNoThrow(Handle<JSObject> object,
-                             Handle<String> key,
-                             Handle<Object> value,
-                             PropertyAttributes attributes) {
-  Isolate* isolate = object->GetIsolate();
-  ASSERT(!isolate->has_pending_exception());
-  CHECK(!SetLocalPropertyIgnoreAttributes(
-        object, key, value, attributes).is_null());
-  CHECK(!isolate->has_pending_exception());
-}
-
-
 Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
                                           Handle<String> key,
                                           Handle<Object> value,
@@ -372,24 +295,6 @@
 }
 
 
-Handle<Object> GetProperty(Handle<JSReceiver> obj,
-                           Handle<String> name,
-                           LookupResult* result) {
-  PropertyAttributes attributes;
-  Isolate* isolate = Isolate::Current();
-  CALL_HEAP_FUNCTION(isolate,
-                     obj->GetProperty(*obj, result, *name, &attributes),
-                     Object);
-}
-
-
-Handle<Object> GetElement(Handle<Object> obj,
-                          uint32_t index) {
-  Isolate* isolate = Isolate::Current();
-  CALL_HEAP_FUNCTION(isolate, Runtime::GetElement(obj, index), Object);
-}
-
-
 Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
                                           Handle<JSObject> holder,
                                           Handle<String> name,
@@ -403,12 +308,6 @@
 }
 
 
-Handle<Object> GetPrototype(Handle<Object> obj) {
-  Handle<Object> result(obj->GetPrototype());
-  return result;
-}
-
-
 Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
   const bool skip_hidden_prototypes = false;
   CALL_HEAP_FUNCTION(obj->GetIsolate(),
@@ -416,43 +315,6 @@
 }
 
 
-Handle<Object> PreventExtensions(Handle<JSObject> object) {
-  CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
-}
-
-
-Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
-                                   JSObject::HiddenPropertiesFlag flag) {
-  CALL_HEAP_FUNCTION(obj->GetIsolate(),
-                     obj->GetHiddenProperties(flag),
-                     Object);
-}
-
-
-int GetIdentityHash(Handle<JSObject> obj) {
-  CALL_AND_RETRY(obj->GetIsolate(),
-                 obj->GetIdentityHash(JSObject::ALLOW_CREATION),
-                 return Smi::cast(__object__)->value(),
-                 return 0);
-}
-
-
-Handle<Object> DeleteElement(Handle<JSObject> obj,
-                             uint32_t index) {
-  CALL_HEAP_FUNCTION(obj->GetIsolate(),
-                     obj->DeleteElement(index, JSObject::NORMAL_DELETION),
-                     Object);
-}
-
-
-Handle<Object> DeleteProperty(Handle<JSObject> obj,
-                              Handle<String> prop) {
-  CALL_HEAP_FUNCTION(obj->GetIsolate(),
-                     obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
-                     Object);
-}
-
-
 Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
   Isolate* isolate = Isolate::Current();
   CALL_HEAP_FUNCTION(
@@ -470,35 +332,6 @@
 }
 
 
-Handle<Object> SetElement(Handle<JSObject> object,
-                          uint32_t index,
-                          Handle<Object> value,
-                          StrictModeFlag strict_mode) {
-  if (object->HasExternalArrayElements()) {
-    if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
-      bool has_exception;
-      Handle<Object> number = Execution::ToNumber(value, &has_exception);
-      if (has_exception) return Handle<Object>();
-      value = number;
-    }
-  }
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->SetElement(index, *value, strict_mode, true),
-                     Object);
-}
-
-
-Handle<Object> SetOwnElement(Handle<JSObject> object,
-                             uint32_t index,
-                             Handle<Object> value,
-                             StrictModeFlag strict_mode) {
-  ASSERT(!object->HasExternalArrayElements());
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->SetElement(index, *value, strict_mode, false),
-                     Object);
-}
-
-
 Handle<JSObject> Copy(Handle<JSObject> obj) {
   Isolate* isolate = obj->GetIsolate();
   CALL_HEAP_FUNCTION(isolate,
@@ -521,8 +354,9 @@
   Handle<Object> cache = Utils::OpenHandle(*handle);
   JSValue* wrapper = JSValue::cast(*cache);
   Foreign* foreign = Script::cast(wrapper->value())->wrapper();
-  ASSERT(foreign->address() == reinterpret_cast<Address>(cache.location()));
-  foreign->set_address(0);
+  ASSERT(foreign->foreign_address() ==
+         reinterpret_cast<Address>(cache.location()));
+  foreign->set_foreign_address(0);
   Isolate* isolate = Isolate::Current();
   isolate->global_handles()->Destroy(cache.location());
   isolate->counters()->script_wrappers()->Decrement();
@@ -530,10 +364,10 @@
 
 
 Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
-  if (script->wrapper()->address() != NULL) {
+  if (script->wrapper()->foreign_address() != NULL) {
     // Return the script wrapper directly from the cache.
     return Handle<JSValue>(
-        reinterpret_cast<JSValue**>(script->wrapper()->address()));
+        reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
   }
   Isolate* isolate = Isolate::Current();
   // Construct a new script wrapper.
@@ -549,7 +383,8 @@
   Handle<Object> handle = isolate->global_handles()->Create(*result);
   isolate->global_handles()->MakeWeak(handle.location(), NULL,
                                       &ClearWrapperCache);
-  script->wrapper()->set_address(reinterpret_cast<Address>(handle.location()));
+  script->wrapper()->set_foreign_address(
+      reinterpret_cast<Address>(handle.location()));
   return result;
 }
 
@@ -665,6 +500,19 @@
   return right + script->line_offset()->value();
 }
 
+// Convert code position into column number.
+int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
+  int line_number = GetScriptLineNumber(script, code_pos);
+  if (line_number == -1) return -1;
+
+  AssertNoAllocation no_allocation;
+  FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
+  line_number = line_number - script->line_offset()->value();
+  if (line_number == 0) return code_pos + script->column_offset()->value();
+  int prev_line_end_pos =
+      Smi::cast(line_ends_array->get(line_number - 1))->value();
+  return code_pos - (prev_line_end_pos + 1);
+}
 
 int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
   AssertNoAllocation no_allocation;
@@ -696,7 +544,7 @@
 
 
 // Compute the property keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
+v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
                                                  Handle<JSObject> object) {
   Isolate* isolate = receiver->GetIsolate();
   Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
@@ -718,7 +566,7 @@
 
 
 // Compute the element keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
+v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
                                                    Handle<JSObject> object) {
   Isolate* isolate = receiver->GetIsolate();
   Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
@@ -749,8 +597,9 @@
 }
 
 
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
-                                          KeyCollectionType type) {
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
+                                          KeyCollectionType type,
+                                          bool* threw) {
   USE(ContainsOnlyValidKeys);
   Isolate* isolate = object->GetIsolate();
   Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
@@ -765,6 +614,16 @@
   for (Handle<Object> p = object;
        *p != isolate->heap()->null_value();
        p = Handle<Object>(p->GetPrototype(), isolate)) {
+    if (p->IsJSProxy()) {
+      Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
+      Handle<Object> args[] = { proxy };
+      Handle<Object> names = Execution::Call(
+          isolate->proxy_enumerate(), object, ARRAY_SIZE(args), args, threw);
+      if (*threw) return content;
+      content = AddKeysFromJSArray(content, Handle<JSArray>::cast(names));
+      break;
+    }
+
     Handle<JSObject> current(JSObject::cast(*p), isolate);
 
     // Check access rights if required.
@@ -831,11 +690,11 @@
 }
 
 
-Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
+Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw) {
   Isolate* isolate = object->GetIsolate();
   isolate->counters()->for_in()->Increment();
-  Handle<FixedArray> elements = GetKeysInFixedArrayFor(object,
-                                                       INCLUDE_PROTOS);
+  Handle<FixedArray> elements =
+      GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, threw);
   return isolate->factory()->NewJSArrayWithElements(elements);
 }
 
@@ -852,31 +711,62 @@
                                 isolate);
     }
     isolate->counters()->enum_cache_misses()->Increment();
-    int num_enum = object->NumberOfEnumProperties();
+    Handle<Map> map(object->map());
+    int num_enum = object->NumberOfLocalProperties(DONT_ENUM);
+
     Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
     Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
+
+    Handle<FixedArray> indices;
+    Handle<FixedArray> sort_array2;
+
+    if (cache_result) {
+      indices = isolate->factory()->NewFixedArray(num_enum);
+      sort_array2 = isolate->factory()->NewFixedArray(num_enum);
+    }
+
     Handle<DescriptorArray> descs =
         Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
+
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
       if (descs->IsProperty(i) && !descs->IsDontEnum(i)) {
-        (*storage)->set(index, descs->GetKey(i));
+        storage->set(index, descs->GetKey(i));
         PropertyDetails details(descs->GetDetails(i));
-        (*sort_array)->set(index, Smi::FromInt(details.index()));
+        sort_array->set(index, Smi::FromInt(details.index()));
+        if (!indices.is_null()) {
+          if (details.type() != FIELD) {
+            indices = Handle<FixedArray>();
+            sort_array2 = Handle<FixedArray>();
+          } else {
+            int field_index = Descriptor::IndexFromValue(descs->GetValue(i));
+            if (field_index >= map->inobject_properties()) {
+              field_index = -(field_index - map->inobject_properties() + 1);
+            }
+            indices->set(index, Smi::FromInt(field_index));
+            sort_array2->set(index, Smi::FromInt(details.index()));
+          }
+        }
         index++;
       }
     }
-    (*storage)->SortPairs(*sort_array, sort_array->length());
+    storage->SortPairs(*sort_array, sort_array->length());
+    if (!indices.is_null()) {
+      indices->SortPairs(*sort_array2, sort_array2->length());
+    }
     if (cache_result) {
       Handle<FixedArray> bridge_storage =
           isolate->factory()->NewFixedArray(
               DescriptorArray::kEnumCacheBridgeLength);
       DescriptorArray* desc = object->map()->instance_descriptors();
-      desc->SetEnumCache(*bridge_storage, *storage);
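+      // A Smi zero instead of a FixedArray of field indices marks the enum
+      // cache as having no usable indices (the indices handle is cleared
+      // above whenever an enumerable property is not a plain FIELD).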
+      desc->SetEnumCache(*bridge_storage,
+                         *storage,
+                         indices.is_null() ? Object::cast(Smi::FromInt(0))
+                                           : Object::cast(*indices));
     }
     ASSERT(storage->length() == index);
     return storage;
   } else {
-    int num_enum = object->NumberOfEnumProperties();
+    int num_enum = object->NumberOfLocalProperties(DONT_ENUM);
     Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
     Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
     object->property_dictionary()->CopyEnumKeysTo(*storage, *sort_array);
@@ -885,8 +775,24 @@
 }
 
 
+Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
+                                       Handle<Object> key) {
+  CALL_HEAP_FUNCTION(table->GetIsolate(),
+                     table->Add(*key),
+                     ObjectHashSet);
+}
+
+
+Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
+                                          Handle<Object> key) {
+  CALL_HEAP_FUNCTION(table->GetIsolate(),
+                     table->Remove(*key),
+                     ObjectHashSet);
+}
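+
+// A hypothetical usage sketch (illustration only; the factory method name
+// is assumed): both helpers may reallocate the table, so the handle must
+// be reassigned:
+//
+//   Handle<ObjectHashSet> set = isolate->factory()->NewObjectHashSet(4);
+//   set = ObjectHashSetAdd(set, key);       // key is any Handle<Object>
+//   set = ObjectHashSetRemove(set, key);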
+
+
 Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
-                                               Handle<JSObject> key,
+                                               Handle<Object> key,
                                                Handle<Object> value) {
   CALL_HEAP_FUNCTION(table->GetIsolate(),
                      table->Put(*key, *value),
@@ -894,53 +800,162 @@
 }
 
 
-bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
-                    ClearExceptionFlag flag) {
-  return shared->is_compiled() || CompileLazyShared(shared, flag);
-}
+// This function determines the type of string involved and then gets the UTF-8
+// length of the string.  It doesn't flatten the string and has log(n) recursion
+// for a string of length n.  If the failure flag gets set, then we have to
+// flatten the string and retry.  Failures are caused by surrogate pairs in deep
+// cons strings.
 
+// Single surrogate characters that are encountered in the UTF-16 character
+// sequence of the input string get counted as 3 UTF-8 bytes, because that
+// is the way that WriteUtf8 will encode them.  Surrogate pairs are counted and
+// encoded as one 4-byte UTF-8 sequence.
 
-static bool CompileLazyHelper(CompilationInfo* info,
-                              ClearExceptionFlag flag) {
-  // Compile the source information to a code object.
-  ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
-  ASSERT(!info->isolate()->has_pending_exception());
-  bool result = Compiler::CompileLazy(info);
-  ASSERT(result != Isolate::Current()->has_pending_exception());
-  if (!result && flag == CLEAR_EXCEPTION) {
-    info->isolate()->clear_pending_exception();
+// This function conceptually uses recursion on the two halves of cons strings.
+// However, in order to avoid the recursion going too deep, it recurses on the
+// second string of the cons but iterates on the first substring (by manually
+// eliminating it as a tail recursion).  This means it counts the UTF-8 length
+// from the end to the start, which makes no difference to the total.
+
+// Surrogate pairs are recognized even if they are split across the two sides
+// of a cons, which complicates the implementation somewhat.  Because of this,
+// too deep recursion cannot always be avoided.  That case is detected, and
+// the failure flag is set as a signal to the caller that the string should be
+// flattened and the operation retried.
+int Utf8LengthHelper(String* input,
+                     int from,
+                     int to,
+                     bool followed_by_surrogate,
+                     int max_recursion,
+                     bool* failure,
+                     bool* starts_with_surrogate) {
+  if (from == to) return 0;
+  int total = 0;
+  bool dummy;
+  while (true) {
+    if (input->IsAsciiRepresentation()) {
+      *starts_with_surrogate = false;
+      return total + to - from;
+    }
+    switch (StringShape(input).representation_tag()) {
+      case kConsStringTag: {
+        ConsString* str = ConsString::cast(input);
+        String* first = str->first();
+        String* second = str->second();
+        int first_length = first->length();
+        if (first_length - from > to - first_length) {
+          if (first_length < to) {
+            // Right hand side is shorter.  No need to check the recursion depth
+            // since this can only happen log(n) times.
+            bool right_starts_with_surrogate = false;
+            total += Utf8LengthHelper(second,
+                                      0,
+                                      to - first_length,
+                                      followed_by_surrogate,
+                                      max_recursion - 1,
+                                      failure,
+                                      &right_starts_with_surrogate);
+            if (*failure) return 0;
+            followed_by_surrogate = right_starts_with_surrogate;
+            input = first;
+            to = first_length;
+          } else {
+            // We only need the left hand side.
+            input = first;
+          }
+        } else {
+          if (first_length > from) {
+            // Left hand side is shorter.
+            if (first->IsAsciiRepresentation()) {
+              total += first_length - from;
+              *starts_with_surrogate = false;
+              starts_with_surrogate = &dummy;
+              input = second;
+              from = 0;
+              to -= first_length;
+            } else if (second->IsAsciiRepresentation()) {
+              followed_by_surrogate = false;
+              total += to - first_length;
+              input = first;
+              to = first_length;
+            } else if (max_recursion > 0) {
+              bool right_starts_with_surrogate = false;
+              // Recursing on the long one.  This may fail.
+              total += Utf8LengthHelper(second,
+                                        0,
+                                        to - first_length,
+                                        followed_by_surrogate,
+                                        max_recursion - 1,
+                                        failure,
+                                        &right_starts_with_surrogate);
+              if (*failure) return 0;
+              input = first;
+              to = first_length;
+              followed_by_surrogate = right_starts_with_surrogate;
+            } else {
+              *failure = true;
+              return 0;
+            }
+          } else {
+            // We only need the right hand side.
+            input = second;
+            from = 0;
+            to -= first_length;
+          }
+        }
+        continue;
+      }
+      case kExternalStringTag:
+      case kSeqStringTag: {
+        Vector<const uc16> vector = input->GetFlatContent().ToUC16Vector();
+        const uc16* p = vector.start();
+        int previous = unibrow::Utf16::kNoPreviousCharacter;
+        for (int i = from; i < to; i++) {
+          uc16 c = p[i];
+          total += unibrow::Utf8::Length(c, previous);
+          previous = c;
+        }
+        if (to - from > 0) {
+          if (unibrow::Utf16::IsLeadSurrogate(previous) &&
+              followed_by_surrogate) {
+            total -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
+          }
+          if (unibrow::Utf16::IsTrailSurrogate(p[from])) {
+            *starts_with_surrogate = true;
+          }
+        }
+        return total;
+      }
+      case kSlicedStringTag: {
+        SlicedString* str = SlicedString::cast(input);
+        int offset = str->offset();
+        input = str->parent();
+        from += offset;
+        to += offset;
+        continue;
+      }
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return 0;
   }
-  return result;
+  return 0;
 }
 
 
-bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
-                       ClearExceptionFlag flag) {
-  CompilationInfo info(shared);
-  return CompileLazyHelper(&info, flag);
-}
-
-
-bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
-  bool result = true;
-  if (function->shared()->is_compiled()) {
-    function->ReplaceCode(function->shared()->code());
-    function->shared()->set_code_age(0);
-  } else {
-    CompilationInfo info(function);
-    result = CompileLazyHelper(&info, flag);
-    ASSERT(!result || function->is_compiled());
-  }
-  return result;
-}
-
-
-bool CompileOptimized(Handle<JSFunction> function,
-                      int osr_ast_id,
-                      ClearExceptionFlag flag) {
-  CompilationInfo info(function);
-  info.SetOptimizing(osr_ast_id);
-  return CompileLazyHelper(&info, flag);
+int Utf8Length(Handle<String> str) {
+  bool dummy;
+  bool failure;
+  int len;
+  const int kRecursionBudget = 100;
+  do {
+    failure = false;
+    len = Utf8LengthHelper(
+        *str, 0, str->length(), false, kRecursionBudget, &failure, &dummy);
+    if (failure) FlattenString(str);
+  } while (failure);
+  return len;
 }
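+
+// A minimal usage sketch (illustration only): the retry loop above makes
+// flattening transparent to callers, so any string handle may be passed,
+// including deep cons strings with surrogate pairs split across nodes:
+//
+//   Handle<String> str = ...;
+//   int bytes = Utf8Length(str);  // the length WriteUtf8 would produce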
 
 } }  // namespace v8::internal
diff --git a/src/handles.h b/src/handles.h
index 5674120..960696b 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -167,18 +167,6 @@
 // an object of expected type, or the handle is an error if running out
 // of space or encountering an internal error.
 
-void NormalizeProperties(Handle<JSObject> object,
-                         PropertyNormalizationMode mode,
-                         int expected_additional_properties);
-Handle<SeededNumberDictionary> NormalizeElements(Handle<JSObject> object);
-void TransformToFastProperties(Handle<JSObject> object,
-                               int unused_property_fields);
-MUST_USE_RESULT Handle<SeededNumberDictionary> SeededNumberDictionarySet(
-    Handle<SeededNumberDictionary> dictionary,
-    uint32_t index,
-    Handle<Object> value,
-    PropertyDetails details);
-
 // Flattens a string.
 void FlattenString(Handle<String> str);
 
@@ -186,11 +174,7 @@
 // string.
 Handle<String> FlattenGetString(Handle<String> str);
 
-Handle<Object> SetProperty(Handle<JSReceiver> object,
-                           Handle<String> key,
-                           Handle<Object> value,
-                           PropertyAttributes attributes,
-                           StrictModeFlag strict_mode);
+int Utf8Length(Handle<String> str);
 
 Handle<Object> SetProperty(Handle<Object> object,
                            Handle<Object> key,
@@ -203,78 +187,22 @@
                                 Handle<Object> value,
                                 PropertyAttributes attributes);
 
-Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
-                                     Handle<String> key,
-                                     Handle<Object> value,
-                                     PropertyDetails details);
-
 Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
                                    Handle<Object> key);
 
-Handle<Object> SetLocalPropertyIgnoreAttributes(
-    Handle<JSObject> object,
-    Handle<String> key,
-    Handle<Object> value,
-    PropertyAttributes attributes);
-
-// Used to set local properties on the object we totally control
-// and which therefore has no accessors and alikes.
-void SetLocalPropertyNoThrow(Handle<JSObject> object,
-                             Handle<String> key,
-                             Handle<Object> value,
-                             PropertyAttributes attributes = NONE);
-
-Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
-                                          Handle<String> key,
-                                          Handle<Object> value,
-                                          PropertyAttributes attributes,
-                                          StrictModeFlag strict_mode);
-
-MUST_USE_RESULT Handle<Object> SetElement(Handle<JSObject> object,
-                                          uint32_t index,
-                                          Handle<Object> value,
-                                          StrictModeFlag strict_mode);
-
-Handle<Object> SetOwnElement(Handle<JSObject> object,
-                             uint32_t index,
-                             Handle<Object> value,
-                             StrictModeFlag strict_mode);
-
 Handle<Object> GetProperty(Handle<JSReceiver> obj,
                            const char* name);
 
 Handle<Object> GetProperty(Handle<Object> obj,
                            Handle<Object> key);
 
-Handle<Object> GetProperty(Handle<JSReceiver> obj,
-                           Handle<String> name,
-                           LookupResult* result);
-
-
-Handle<Object> GetElement(Handle<Object> obj,
-                          uint32_t index);
-
 Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
                                           Handle<JSObject> holder,
                                           Handle<String> name,
                                           PropertyAttributes* attributes);
 
-Handle<Object> GetPrototype(Handle<Object> obj);
-
 Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
 
-// Return the object's hidden properties object. If the object has no hidden
-// properties and HiddenPropertiesFlag::ALLOW_CREATION is passed, then a new
-// hidden property object will be allocated. Otherwise Heap::undefined_value
-// is returned.
-Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
-                                   JSObject::HiddenPropertiesFlag flag);
-
-int GetIdentityHash(Handle<JSObject> obj);
-
-Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
-Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
-
 Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index);
 
 Handle<JSObject> Copy(Handle<JSObject> obj);
@@ -298,21 +226,23 @@
 int GetScriptLineNumber(Handle<Script> script, int code_position);
 // The safe version does not make heap allocations but may work much slower.
 int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
+int GetScriptColumnNumber(Handle<Script> script, int code_position);
 
 // Computes the enumerable keys from interceptors. Used for debug mirrors and
 // by GetKeysInFixedArrayFor below.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
+v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
                                                  Handle<JSObject> object);
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
+v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
                                                    Handle<JSObject> object);
 
 enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS };
 
 // Computes the enumerable keys for a JSObject. Used for implementing
 // "for (n in object) { }".
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
-                                          KeyCollectionType type);
-Handle<JSArray> GetKeysFor(Handle<JSObject> object);
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
+                                          KeyCollectionType type,
+                                          bool* threw);
+Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw);
 Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
                                        bool cache_result);
 
@@ -326,7 +256,6 @@
                          int end,
                          PretenureFlag pretenure = NOT_TENURED);
 
-
 // Sets the expected number of properties for the function's instances.
 void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
 
@@ -345,28 +274,16 @@
 Handle<Object> SetPrototype(Handle<JSFunction> function,
                             Handle<Object> prototype);
 
-Handle<Object> PreventExtensions(Handle<JSObject> object);
+Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
+                                       Handle<Object> key);
+
+Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
+                                          Handle<Object> key);
 
 Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
-                                               Handle<JSObject> key,
+                                               Handle<Object> key,
                                                Handle<Object> value);
 
-// Does lazy compilation of the given function. Returns true on success and
-// false if the compilation resulted in a stack overflow.
-enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
-
-bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
-                    ClearExceptionFlag flag);
-
-bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
-                       ClearExceptionFlag flag);
-
-bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
-
-bool CompileOptimized(Handle<JSFunction> function,
-                      int osr_ast_id,
-                      ClearExceptionFlag flag);
-
 class NoHandleAllocation BASE_EMBEDDED {
  public:
 #ifndef DEBUG
diff --git a/src/hashmap.cc b/src/hashmap.cc
deleted file mode 100644
index 1422afd..0000000
--- a/src/hashmap.cc
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
-#include "utils.h"
-#include "allocation.h"
-
-#include "hashmap.h"
-
-namespace v8 {
-namespace internal {
-
-Allocator HashMap::DefaultAllocator;
-
-
-HashMap::HashMap() {
-  allocator_ = NULL;
-  match_ = NULL;
-}
-
-
-HashMap::HashMap(MatchFun match,
-                 Allocator* allocator,
-                 uint32_t initial_capacity) {
-  allocator_ = allocator;
-  match_ = match;
-  Initialize(initial_capacity);
-}
-
-
-HashMap::~HashMap() {
-  if (allocator_) {
-    allocator_->Delete(map_);
-  }
-}
-
-
-HashMap::Entry* HashMap::Lookup(void* key, uint32_t hash, bool insert) {
-  // Find a matching entry.
-  Entry* p = Probe(key, hash);
-  if (p->key != NULL) {
-    return p;
-  }
-
-  // No entry found; insert one if necessary.
-  if (insert) {
-    p->key = key;
-    p->value = NULL;
-    p->hash = hash;
-    occupancy_++;
-
-    // Grow the map if we reached >= 80% occupancy.
-    if (occupancy_ + occupancy_/4 >= capacity_) {
-      Resize();
-      p = Probe(key, hash);
-    }
-
-    return p;
-  }
-
-  // No entry found and none inserted.
-  return NULL;
-}
-
-
-void HashMap::Remove(void* key, uint32_t hash) {
-  // Lookup the entry for the key to remove.
-  Entry* p = Probe(key, hash);
-  if (p->key == NULL) {
-    // Key not found nothing to remove.
-    return;
-  }
-
-  // To remove an entry we need to ensure that it does not create an empty
-  // entry that will cause the search for another entry to stop too soon. If all
-  // the entries between the entry to remove and the next empty slot have their
-  // initial position inside this interval, clearing the entry to remove will
-  // not break the search. If, while searching for the next empty entry, an
-  // entry is encountered which does not have its initial position between the
-  // entry to remove and the position looked at, then this entry can be moved to
-  // the place of the entry to remove without breaking the search for it. The
-  // entry made vacant by this move is now the entry to remove and the process
-  // starts over.
-  // Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
-
-  // This guarantees loop termination as there is at least one empty entry so
-  // eventually the removed entry will have an empty entry after it.
-  ASSERT(occupancy_ < capacity_);
-
-  // p is the candidate entry to clear. q is used to scan forwards.
-  Entry* q = p;  // Start at the entry to remove.
-  while (true) {
-    // Move q to the next entry.
-    q = q + 1;
-    if (q == map_end()) {
-      q = map_;
-    }
-
-    // All entries between p and q have their initial position between p and q
-    // and the entry p can be cleared without breaking the search for these
-    // entries.
-    if (q->key == NULL) {
-      break;
-    }
-
-    // Find the initial position for the entry at position q.
-    Entry* r = map_ + (q->hash & (capacity_ - 1));
-
-    // If the entry at position q has its initial position outside the range
-    // between p and q it can be moved forward to position p and will still be
-    // found. There is now a new candidate entry for clearing.
-    if ((q > p && (r <= p || r > q)) ||
-        (q < p && (r <= p && r > q))) {
-      *p = *q;
-      p = q;
-    }
-  }
-
-  // Clear the entry which is allowed to en emptied.
-  p->key = NULL;
-  occupancy_--;
-}
-
-
-void HashMap::Clear() {
-  // Mark all entries as empty.
-  const Entry* end = map_end();
-  for (Entry* p = map_; p < end; p++) {
-    p->key = NULL;
-  }
-  occupancy_ = 0;
-}
-
-
-HashMap::Entry* HashMap::Start() const {
-  return Next(map_ - 1);
-}
-
-
-HashMap::Entry* HashMap::Next(Entry* p) const {
-  const Entry* end = map_end();
-  ASSERT(map_ - 1 <= p && p < end);
-  for (p++; p < end; p++) {
-    if (p->key != NULL) {
-      return p;
-    }
-  }
-  return NULL;
-}
-
-
-HashMap::Entry* HashMap::Probe(void* key, uint32_t hash) {
-  ASSERT(key != NULL);
-
-  ASSERT(IsPowerOf2(capacity_));
-  Entry* p = map_ + (hash & (capacity_ - 1));
-  const Entry* end = map_end();
-  ASSERT(map_ <= p && p < end);
-
-  ASSERT(occupancy_ < capacity_);  // Guarantees loop termination.
-  while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
-    p++;
-    if (p >= end) {
-      p = map_;
-    }
-  }
-
-  return p;
-}
-
-
-void HashMap::Initialize(uint32_t capacity) {
-  ASSERT(IsPowerOf2(capacity));
-  map_ = reinterpret_cast<Entry*>(allocator_->New(capacity * sizeof(Entry)));
-  if (map_ == NULL) {
-    v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
-    return;
-  }
-  capacity_ = capacity;
-  Clear();
-}
-
-
-void HashMap::Resize() {
-  Entry* map = map_;
-  uint32_t n = occupancy_;
-
-  // Allocate larger map.
-  Initialize(capacity_ * 2);
-
-  // Rehash all current entries.
-  for (Entry* p = map; n > 0; p++) {
-    if (p->key != NULL) {
-      Lookup(p->key, p->hash, true)->value = p->value;
-      n--;
-    }
-  }
-
-  // Delete old map.
-  allocator_->Delete(map);
-}
-
-
-} }  // namespace v8::internal
diff --git a/src/hashmap.h b/src/hashmap.h
index 5c13212..5aeb895 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,39 +29,22 @@
 #define V8_HASHMAP_H_
 
 #include "allocation.h"
+#include "checks.h"
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
 
-
-// Allocator defines the memory allocator interface
-// used by HashMap and implements a default allocator.
-class Allocator BASE_EMBEDDED {
+template<class AllocationPolicy>
+class TemplateHashMapImpl {
  public:
-  virtual ~Allocator()  {}
-  virtual void* New(size_t size)  { return Malloced::New(size); }
-  virtual void Delete(void* p)  { Malloced::Delete(p); }
-};
-
-
-class HashMap {
- public:
-  static Allocator DefaultAllocator;
-
   typedef bool (*MatchFun) (void* key1, void* key2);
 
-  // Dummy constructor.  This constructor doesn't set up the hash
-  // map properly so don't use it unless you have good reason (e.g.,
-  // you know that the HashMap will never be used).
-  HashMap();
-
   // initial_capacity is the size of the initial hash map;
   // it must be a power of 2 (and thus must not be 0).
-  explicit HashMap(MatchFun match,
-                   Allocator* allocator = &DefaultAllocator,
-                   uint32_t initial_capacity = 8);
+  TemplateHashMapImpl(MatchFun match, uint32_t initial_capacity = 8);
 
-  ~HashMap();
+  ~TemplateHashMapImpl();
 
   // HashMap entries are (key, value, hash) triplets.
   // Some clients may not need to use the value slot
@@ -105,7 +88,6 @@
   Entry* Next(Entry* p) const;
 
  private:
-  Allocator* allocator_;
   MatchFun match_;
   Entry* map_;
   uint32_t capacity_;
@@ -117,6 +99,241 @@
   void Resize();
 };
 
+typedef TemplateHashMapImpl<FreeStoreAllocationPolicy> HashMap;
+
+template<class P>
+TemplateHashMapImpl<P>::TemplateHashMapImpl(MatchFun match,
+                                            uint32_t initial_capacity) {
+  match_ = match;
+  Initialize(initial_capacity);
+}
+
+
+template<class P>
+TemplateHashMapImpl<P>::~TemplateHashMapImpl() {
+  P::Delete(map_);
+}
+
+
+template<class P>
+typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
+    void* key, uint32_t hash, bool insert) {
+  // Find a matching entry.
+  Entry* p = Probe(key, hash);
+  if (p->key != NULL) {
+    return p;
+  }
+
+  // No entry found; insert one if necessary.
+  if (insert) {
+    p->key = key;
+    p->value = NULL;
+    p->hash = hash;
+    occupancy_++;
+
+    // Grow the map if we reached >= 80% occupancy.
+    if (occupancy_ + occupancy_/4 >= capacity_) {
+      Resize();
+      p = Probe(key, hash);
+    }
+
+    return p;
+  }
+
+  // No entry found and none inserted.
+  return NULL;
+}
+
+
+template<class P>
+void TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
+  // Lookup the entry for the key to remove.
+  Entry* p = Probe(key, hash);
+  if (p->key == NULL) {
+    // Key not found; nothing to remove.
+    return;
+  }
+
+  // To remove an entry we need to ensure that it does not create an empty
+  // entry that will cause the search for another entry to stop too soon. If all
+  // the entries between the entry to remove and the next empty slot have their
+  // initial position inside this interval, clearing the entry to remove will
+  // not break the search. If, while searching for the next empty entry, an
+  // entry is encountered which does not have its initial position between the
+  // entry to remove and the position looked at, then this entry can be moved to
+  // the place of the entry to remove without breaking the search for it. The
+  // entry made vacant by this move is now the entry to remove and the process
+  // starts over.
+  // Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
+
+  // This guarantees loop termination, as there is at least one empty entry,
+  // so eventually the removed entry will have an empty entry after it.
+  ASSERT(occupancy_ < capacity_);
+
+  // p is the candidate entry to clear. q is used to scan forwards.
+  Entry* q = p;  // Start at the entry to remove.
+  while (true) {
+    // Move q to the next entry.
+    q = q + 1;
+    if (q == map_end()) {
+      q = map_;
+    }
+
+    // All entries between p and q have their initial position between p and q
+    // and the entry p can be cleared without breaking the search for these
+    // entries.
+    if (q->key == NULL) {
+      break;
+    }
+
+    // Find the initial position for the entry at position q.
+    Entry* r = map_ + (q->hash & (capacity_ - 1));
+
+    // If the entry at position q has its initial position outside the range
+    // between p and q it can be moved forward to position p and will still be
+    // found. There is now a new candidate entry for clearing.
+    if ((q > p && (r <= p || r > q)) ||
+        (q < p && (r <= p && r > q))) {
+      *p = *q;
+      p = q;
+    }
+  }
+
+  // Clear the entry, which is now allowed to be emptied.
+  p->key = NULL;
+  occupancy_--;
+}
+
+
+template<class P>
+void TemplateHashMapImpl<P>::Clear() {
+  // Mark all entries as empty.
+  const Entry* end = map_end();
+  for (Entry* p = map_; p < end; p++) {
+    p->key = NULL;
+  }
+  occupancy_ = 0;
+}
+
+
+template<class P>
+typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Start() const {
+  return Next(map_ - 1);
+}
+
+
+template<class P>
+typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Next(Entry* p)
+    const {
+  const Entry* end = map_end();
+  ASSERT(map_ - 1 <= p && p < end);
+  for (p++; p < end; p++) {
+    if (p->key != NULL) {
+      return p;
+    }
+  }
+  return NULL;
+}
+
+
+template<class P>
+typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Probe(void* key,
+                                                            uint32_t hash) {
+  ASSERT(key != NULL);
+
+  ASSERT(IsPowerOf2(capacity_));
+  Entry* p = map_ + (hash & (capacity_ - 1));
+  const Entry* end = map_end();
+  ASSERT(map_ <= p && p < end);
+
+  ASSERT(occupancy_ < capacity_);  // Guarantees loop termination.
+  while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
+    p++;
+    if (p >= end) {
+      p = map_;
+    }
+  }
+
+  return p;
+}
+
+
+template<class P>
+void TemplateHashMapImpl<P>::Initialize(uint32_t capacity) {
+  ASSERT(IsPowerOf2(capacity));
+  map_ = reinterpret_cast<Entry*>(P::New(capacity * sizeof(Entry)));
+  if (map_ == NULL) {
+    v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
+    return;
+  }
+  capacity_ = capacity;
+  Clear();
+}
+
+
+template<class P>
+void TemplateHashMapImpl<P>::Resize() {
+  Entry* map = map_;
+  uint32_t n = occupancy_;
+
+  // Allocate larger map.
+  Initialize(capacity_ * 2);
+
+  // Rehash all current entries.
+  for (Entry* p = map; n > 0; p++) {
+    if (p->key != NULL) {
+      Lookup(p->key, p->hash, true)->value = p->value;
+      n--;
+    }
+  }
+
+  // Delete old map.
+  P::Delete(map);
+}
+
+
+// A hash map for pointer keys and values with an STL-like interface.
+template<class Key, class Value, class AllocationPolicy>
+class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
+ public:
+  STATIC_ASSERT(sizeof(Key*) == sizeof(void*));  // NOLINT
+  STATIC_ASSERT(sizeof(Value*) == sizeof(void*));  // NOLINT
+  struct value_type {
+    Key* first;
+    Value* second;
+  };
+
+  class Iterator {
+   public:
+    Iterator& operator++() {
+      entry_ = map_->Next(entry_);
+      return *this;
+    }
+
+    value_type* operator->() { return reinterpret_cast<value_type*>(entry_); }
+    bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
+
+   private:
+    Iterator(const TemplateHashMapImpl<AllocationPolicy>* map,
+             typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry) :
+        map_(map), entry_(entry) { }
+
+    const TemplateHashMapImpl<AllocationPolicy>* map_;
+    typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry_;
+
+    friend class TemplateHashMap;
+  };
+
+  TemplateHashMap(
+      typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match)
+    : TemplateHashMapImpl<AllocationPolicy>(match) { }
+
+  Iterator begin() const { return Iterator(this, this->Start()); }
+  Iterator end() const { return Iterator(this, NULL); }
+  Iterator find(Key* key, bool insert = false) {
+    return Iterator(this, this->Lookup(key, key->Hash(), insert));
+  }
+};
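+
+// A hypothetical usage sketch (illustration only; MyKey, MyValue and
+// Process are placeholders): any key type exposing a uint32_t Hash()
+// method works with find().
+//
+//   static bool PointerMatch(void* a, void* b) { return a == b; }
+//   TemplateHashMap<MyKey, MyValue, FreeStoreAllocationPolicy> map(
+//       PointerMatch);
+//   map.find(key, true)->second = value;    // lookup, inserting if absent
+//   for (TemplateHashMap<MyKey, MyValue, FreeStoreAllocationPolicy>::
+//            Iterator it = map.begin(); it != map.end(); ++it) {
+//     Process(it->first, it->second);       // Key* and Value* slots
+//   }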
 
 } }  // namespace v8::internal
 
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 7b666af..706d288 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,21 +32,53 @@
 #include "isolate.h"
 #include "list-inl.h"
 #include "objects.h"
+#include "platform.h"
 #include "v8-counters.h"
+#include "store-buffer.h"
+#include "store-buffer-inl.h"
 
 namespace v8 {
 namespace internal {
 
 void PromotionQueue::insert(HeapObject* target, int size) {
+  if (emergency_stack_ != NULL) {
+    emergency_stack_->Add(Entry(target, size));
+    return;
+  }
+
+  if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
+    NewSpacePage* rear_page =
+        NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
+    ASSERT(!rear_page->prev_page()->is_anchor());
+    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
+    ActivateGuardIfOnTheSamePage();
+  }
+
+  if (guard_) {
+    ASSERT(GetHeadPage() ==
+           Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));
+
+    if ((rear_ - 2) < limit_) {
+      RelocateQueueHead();
+      emergency_stack_->Add(Entry(target, size));
+      return;
+    }
+  }
+
   *(--rear_) = reinterpret_cast<intptr_t>(target);
   *(--rear_) = size;
   // Assert no overflow into live objects.
-  ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top());
+#ifdef DEBUG
+  SemiSpace::AssertValidRange(HEAP->new_space()->top(),
+                              reinterpret_cast<Address>(rear_));
+#endif
 }
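+
+// Note on the queue layout: each entry occupies two words growing downward
+// from rear_.  The target is pushed first and then its size, so the size
+// ends up at the lower address and is popped first on removal (inferred
+// from insert() above; remove() is not part of this hunk).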
 
 
-int Heap::MaxObjectSizeInPagedSpace() {
-  return Page::kMaxHeapObjectSize;
+void PromotionQueue::ActivateGuardIfOnTheSamePage() {
+  guard_ = guard_ ||
+      heap_->new_space()->active_space()->current_page()->address() ==
+      GetHeadPage()->address();
 }
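+
+// The guard is sticky: once the promotion queue's head page coincides with
+// the current to-space allocation page, every later insert() checks for a
+// collision with limit_ and diverts entries to the emergency stack.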
 
 
@@ -83,13 +115,14 @@
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
-                   ? lo_space_->AllocateRaw(size)
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
+                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<HeapObject*>(result)->set_map(map);
+  // String maps are all immortal immovable objects.
+  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
   // Set length and hash fields of the allocated string.
   String* answer = String::cast(result);
   answer->set_length(str.length());
@@ -116,8 +149,8 @@
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
-                   ? lo_space_->AllocateRaw(size)
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
+                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
@@ -181,7 +214,7 @@
   } else if (CODE_SPACE == space) {
     result = code_space_->AllocateRaw(size_in_bytes);
   } else if (LO_SPACE == space) {
-    result = lo_space_->AllocateRaw(size_in_bytes);
+    result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
   } else if (CELL_SPACE == space) {
     result = cell_space_->AllocateRaw(size_in_bytes);
   } else {
@@ -193,19 +226,21 @@
 }
 
 
-MaybeObject* Heap::NumberFromInt32(int32_t value) {
+MaybeObject* Heap::NumberFromInt32(
+    int32_t value, PretenureFlag pretenure) {
   if (Smi::IsValid(value)) return Smi::FromInt(value);
   // Bypass NumberFromDouble to avoid various redundant checks.
-  return AllocateHeapNumber(FastI2D(value));
+  return AllocateHeapNumber(FastI2D(value), pretenure);
 }
 
 
-MaybeObject* Heap::NumberFromUint32(uint32_t value) {
+MaybeObject* Heap::NumberFromUint32(
+    uint32_t value, PretenureFlag pretenure) {
   if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
     return Smi::FromInt((int32_t)value);
   }
   // Bypass NumberFromDouble to avoid various redundant checks.
-  return AllocateHeapNumber(FastUI2D(value));
+  return AllocateHeapNumber(FastUI2D(value), pretenure);
 }
 
 
@@ -220,10 +255,8 @@
   // Dispose of the C++ object if it has not already been disposed.
   if (*resource_addr != NULL) {
     (*resource_addr)->Dispose();
+    *resource_addr = NULL;
   }
-
-  // Clear the resource pointer in the string.
-  *resource_addr = NULL;
 }
 
 
@@ -265,6 +298,11 @@
 }
 
 
+bool Heap::InNewSpace(Address addr) {
+  return new_space_.Contains(addr);
+}
+
+
 bool Heap::InFromSpace(Object* object) {
   return new_space_.FromSpaceContains(object);
 }
@@ -275,29 +313,36 @@
 }
 
 
+bool Heap::OldGenerationAllocationLimitReached() {
+  if (!incremental_marking()->IsStopped()) return false;
+  return OldGenerationSpaceAvailable() < 0;
+}
+
+
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
   // An object should be promoted if:
   // - the object has survived a scavenge operation or
   // - to space is already 25% full.
-  return old_address < new_space_.age_mark()
-      || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
+  NewSpacePage* page = NewSpacePage::FromAddress(old_address);
+  Address age_mark = new_space_.age_mark();
+  bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
+      (!page->ContainsLimit(age_mark) || old_address < age_mark);
+  return below_mark || (new_space_.Size() + object_size) >=
+                        (new_space_.EffectiveCapacity() >> 2);
 }
 
 
 void Heap::RecordWrite(Address address, int offset) {
-  if (new_space_.Contains(address)) return;
-  ASSERT(!new_space_.FromSpaceContains(address));
-  SLOW_ASSERT(Contains(address + offset));
-  Page::FromAddress(address)->MarkRegionDirty(address + offset);
+  if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
 }
 
 
 void Heap::RecordWrites(Address address, int start, int len) {
-  if (new_space_.Contains(address)) return;
-  ASSERT(!new_space_.FromSpaceContains(address));
-  Page* page = Page::FromAddress(address);
-  page->SetRegionMarks(page->GetRegionMarks() |
-      page->GetRegionMaskForSpan(address + start, len * kPointerSize));
+  if (!InNewSpace(address)) {
+    for (int i = 0; i < len; i++) {
+      store_buffer_.Mark(address + start + i * kPointerSize);
+    }
+  }
 }
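+
+// With the store buffer each old-to-new slot is now recorded individually
+// at write-barrier time, replacing the per-page region marks that the
+// deleted SetRegionMarks-based code maintained.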
 
 
@@ -336,38 +381,12 @@
 
 
 void Heap::CopyBlock(Address dst, Address src, int byte_size) {
-  ASSERT(IsAligned(byte_size, kPointerSize));
   CopyWords(reinterpret_cast<Object**>(dst),
             reinterpret_cast<Object**>(src),
             byte_size / kPointerSize);
 }
 
 
-void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                   Address src,
-                                                   int byte_size) {
-  ASSERT(IsAligned(byte_size, kPointerSize));
-
-  Page* page = Page::FromAddress(dst);
-  uint32_t marks = page->GetRegionMarks();
-
-  for (int remaining = byte_size / kPointerSize;
-       remaining > 0;
-       remaining--) {
-    Memory::Object_at(dst) = Memory::Object_at(src);
-
-    if (InNewSpace(Memory::Object_at(dst))) {
-      marks |= page->GetRegionMaskForAddress(dst);
-    }
-
-    dst += kPointerSize;
-    src += kPointerSize;
-  }
-
-  page->SetRegionMarks(marks);
-}
-
-
 void Heap::MoveBlock(Address dst, Address src, int byte_size) {
   ASSERT(IsAligned(byte_size, kPointerSize));
 
@@ -387,16 +406,6 @@
 }
 
 
-void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                   Address src,
-                                                   int byte_size) {
-  ASSERT(IsAligned(byte_size, kPointerSize));
-  ASSERT((dst < src) || (dst >= (src + byte_size)));
-
-  CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
-}
-
-
 void Heap::ScavengePointer(HeapObject** p) {
   ScavengeObject(p, *p);
 }
@@ -414,7 +423,9 @@
   // If the first word is a forwarding address, the object has already been
   // copied.
   if (first_word.IsForwardingAddress()) {
-    *p = first_word.ToForwardingAddress();
+    HeapObject* dest = first_word.ToForwardingAddress();
+    ASSERT(HEAP->InFromSpace(*p));
+    *p = dest;
     return;
   }
 
@@ -423,8 +434,10 @@
 }
 
 
-bool Heap::CollectGarbage(AllocationSpace space) {
-  return CollectGarbage(space, SelectGarbageCollector(space));
+bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
+  const char* collector_reason = NULL;
+  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+  return CollectGarbage(space, collector, gc_reason, collector_reason);
 }
 
 
@@ -448,7 +461,7 @@
 
 
 int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
-  ASSERT(HasBeenSetup());
+  ASSERT(HasBeenSetUp());
   int amount = amount_of_external_allocated_memory_ + change_in_bytes;
   if (change_in_bytes >= 0) {
     // Avoid overflow.
@@ -459,7 +472,7 @@
         amount_of_external_allocated_memory_ -
         amount_of_external_allocated_memory_at_last_global_gc_;
     if (amount_since_last_global_gc > external_allocation_limit_) {
-      CollectAllGarbage(false);
+      CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
     }
   } else {
     // Avoid underflow.
@@ -476,6 +489,7 @@
   roots_[kLastScriptIdRootIndex] = last_script_id;
 }
 
+
 Isolate* Heap::isolate() {
   return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
       reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
@@ -489,7 +503,6 @@
 #define GC_GREEDY_CHECK() { }
 #endif
 
-
 // Calls the FUNCTION_CALL function and retries it up to three times
 // to guarantee that any allocations performed during the call will
 // succeed if there's enough memory.
@@ -508,7 +521,8 @@
     }                                                                     \
     if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY;                \
     ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)->     \
-                                    allocation_space());                  \
+                                    allocation_space(),                   \
+                                    "allocation failure");                \
     __maybe_object__ = FUNCTION_CALL;                                     \
     if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE;            \
     if (__maybe_object__->IsOutOfMemory()) {                              \
@@ -516,7 +530,7 @@
     }                                                                     \
     if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY;                \
     ISOLATE->counters()->gc_last_resort_from_handles()->Increment();      \
-    ISOLATE->heap()->CollectAllAvailableGarbage();                        \
+    ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc");        \
     {                                                                     \
       AlwaysAllocateScope __scope__;                                      \
       __maybe_object__ = FUNCTION_CALL;                                   \
@@ -581,11 +595,11 @@
 #ifdef DEBUG
   for (int i = 0; i < new_space_strings_.length(); ++i) {
     ASSERT(heap_->InNewSpace(new_space_strings_[i]));
-    ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_null_value());
+    ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
   }
   for (int i = 0; i < old_space_strings_.length(); ++i) {
     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
-    ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_null_value());
+    ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
   }
 #endif
 }
@@ -600,7 +614,9 @@
 
 void ExternalStringTable::ShrinkNewStrings(int position) {
   new_space_strings_.Rewind(position);
-  Verify();
+  if (FLAG_verify_heap) {
+    Verify();
+  }
 }
 
 
@@ -643,15 +659,15 @@
     case ATAN:
       return atan(input);
     case COS:
-      return cos(input);
+      return fast_cos(input);
     case EXP:
       return exp(input);
     case LOG:
-      return log(input);
+      return fast_log(input);
     case SIN:
-      return sin(input);
+      return fast_sin(input);
     case TAN:
-      return tan(input);
+      return fast_tan(input);
     default:
       return 0.0;  // Never happens.
   }
@@ -683,18 +699,92 @@
 }
 
 
-Heap* _inline_get_heap_() {
-  return HEAP;
+AlwaysAllocateScope::AlwaysAllocateScope() {
+  // We shouldn't hit any nested scopes, because that requires
+  // non-handle code to call handle code. The code still works but
+  // performance will degrade, so we want to catch this situation
+  // in debug mode.
+  ASSERT(HEAP->always_allocate_scope_depth_ == 0);
+  HEAP->always_allocate_scope_depth_++;
 }
 
 
-void MarkCompactCollector::SetMark(HeapObject* obj) {
-  tracer_->increment_marked_count();
+AlwaysAllocateScope::~AlwaysAllocateScope() {
+  HEAP->always_allocate_scope_depth_--;
+  ASSERT(HEAP->always_allocate_scope_depth_ == 0);
+}
+
+
+LinearAllocationScope::LinearAllocationScope() {
+  HEAP->linear_allocation_scope_depth_++;
+}
+
+
+LinearAllocationScope::~LinearAllocationScope() {
+  HEAP->linear_allocation_scope_depth_--;
+  ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
+}
+
+
 #ifdef DEBUG
-  UpdateLiveObjectCount(obj);
-#endif
-  obj->SetMark();
+void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
+  for (Object** current = start; current < end; current++) {
+    if ((*current)->IsHeapObject()) {
+      HeapObject* object = HeapObject::cast(*current);
+      ASSERT(HEAP->Contains(object));
+      ASSERT(object->map()->IsMap());
+    }
+  }
 }
+#endif
+
+
+double GCTracer::SizeOfHeapObjects() {
+  return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
+}
+
+
+#ifdef DEBUG
+DisallowAllocationFailure::DisallowAllocationFailure() {
+  old_state_ = HEAP->disallow_allocation_failure_;
+  HEAP->disallow_allocation_failure_ = true;
+}
+
+
+DisallowAllocationFailure::~DisallowAllocationFailure() {
+  HEAP->disallow_allocation_failure_ = old_state_;
+}
+#endif
+
+
+#ifdef DEBUG
+AssertNoAllocation::AssertNoAllocation() {
+  old_state_ = HEAP->allow_allocation(false);
+}
+
+
+AssertNoAllocation::~AssertNoAllocation() {
+  HEAP->allow_allocation(old_state_);
+}
+
+
+DisableAssertNoAllocation::DisableAssertNoAllocation() {
+  old_state_ = HEAP->allow_allocation(true);
+}
+
+
+DisableAssertNoAllocation::~DisableAssertNoAllocation() {
+  HEAP->allow_allocation(old_state_);
+}
+
+#else
+
+AssertNoAllocation::AssertNoAllocation() { }
+AssertNoAllocation::~AssertNoAllocation() { }
+DisableAssertNoAllocation::DisableAssertNoAllocation() { }
+DisableAssertNoAllocation::~DisableAssertNoAllocation() { }
+
+#endif
 
 
 } }  // namespace v8::internal
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 7e613e9..8be6f27 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -51,7 +51,7 @@
 }
 
 
-void HeapProfiler::Setup() {
+void HeapProfiler::SetUp() {
   Isolate* isolate = Isolate::Current();
   if (isolate->heap_profiler() == NULL) {
     isolate->set_heap_profiler(new HeapProfiler());
@@ -114,7 +114,6 @@
   bool generation_completed = true;
   switch (s_type) {
     case HeapSnapshot::kFull: {
-      HEAP->CollectAllGarbage(true);
       HeapSnapshotGenerator generator(result, control);
       generation_completed = generator.GenerateSnapshot();
       break;
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index b1bc91c..ef5c4f4 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -48,7 +48,7 @@
 // to generate .hp files for use by the GHC/Valgrind tool hp2ps.
 class HeapProfiler {
  public:
-  static void Setup();
+  static void SetUp();
   static void TearDown();
 
   static HeapSnapshot* TakeSnapshot(const char* name,
diff --git a/src/heap.cc b/src/heap.cc
index c91f769..a1cccf6 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,13 +36,16 @@
 #include "deoptimizer.h"
 #include "global-handles.h"
 #include "heap-profiler.h"
+#include "incremental-marking.h"
 #include "liveobjectlist-inl.h"
 #include "mark-compact.h"
 #include "natives.h"
 #include "objects-visiting.h"
+#include "objects-visiting-inl.h"
 #include "runtime-profiler.h"
 #include "scopeinfo.h"
 #include "snapshot.h"
+#include "store-buffer.h"
 #include "v8threads.h"
 #include "vm-state-inl.h"
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
@@ -57,12 +60,7 @@
 namespace v8 {
 namespace internal {
 
-
-static const intptr_t kMinimumPromotionLimit = 2 * MB;
-static const intptr_t kMinimumAllocationLimit = 8 * MB;
-
-
-static Mutex* gc_initializer_mutex = OS::CreateMutex();
+static LazyMutex gc_initializer_mutex = LAZY_MUTEX_INITIALIZER;
 
 
 Heap::Heap()
@@ -70,27 +68,21 @@
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
 #if defined(ANDROID)
-      reserved_semispace_size_(2*MB),
-      max_semispace_size_(2*MB),
-      initial_semispace_size_(128*KB),
-      max_old_generation_size_(192*MB),
-      max_executable_size_(max_old_generation_size_),
+#define LUMP_OF_MEMORY (128 * KB)
       code_range_size_(0),
 #elif defined(V8_TARGET_ARCH_X64)
-      reserved_semispace_size_(16*MB),
-      max_semispace_size_(16*MB),
-      initial_semispace_size_(1*MB),
-      max_old_generation_size_(1400*MB),
-      max_executable_size_(256*MB),
+#define LUMP_OF_MEMORY (2 * MB)
       code_range_size_(512*MB),
 #else
-      reserved_semispace_size_(8*MB),
-      max_semispace_size_(8*MB),
-      initial_semispace_size_(512*KB),
-      max_old_generation_size_(700*MB),
-      max_executable_size_(128*MB),
+#define LUMP_OF_MEMORY MB
       code_range_size_(0),
 #endif
+      reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      initial_semispace_size_(Page::kPageSize),
+      max_old_generation_size_(700ul * LUMP_OF_MEMORY),
+      max_executable_size_(256l * LUMP_OF_MEMORY),
+
 // Variables set based on semispace_size_ and old_generation_size_ in
 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
 // Will be 4 * reserved_semispace_size_ to ensure that young
@@ -100,6 +92,8 @@
       always_allocate_scope_depth_(0),
       linear_allocation_scope_depth_(0),
       contexts_disposed_(0),
+      global_ic_age_(0),
+      scan_on_scavenge_pages_(0),
       new_space_(this),
       old_pointer_space_(NULL),
       old_data_space_(NULL),
@@ -109,9 +103,9 @@
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
       gc_post_processing_depth_(0),
-      mc_count_(0),
       ms_count_(0),
       gc_count_(0),
+      remembered_unmapped_pages_index_(0),
       unflattened_strings_length_(0),
 #ifdef DEBUG
       allocation_allowed_(true),
@@ -119,12 +113,16 @@
       disallow_allocation_failure_(false),
       debug_utils_(NULL),
 #endif  // DEBUG
+      new_space_high_promotion_mode_active_(false),
       old_gen_promotion_limit_(kMinimumPromotionLimit),
       old_gen_allocation_limit_(kMinimumAllocationLimit),
+      old_gen_limit_factor_(1),
+      size_of_old_gen_at_last_old_space_gc_(0),
       external_allocation_limit_(0),
       amount_of_external_allocated_memory_(0),
       amount_of_external_allocated_memory_at_last_global_gc_(0),
       old_gen_exhausted_(false),
+      store_buffer_rebuilder_(store_buffer()),
       hidden_symbol_(NULL),
       global_gc_prologue_callback_(NULL),
       global_gc_epilogue_callback_(NULL),
@@ -141,12 +139,20 @@
       min_in_mutator_(kMaxInt),
       alive_after_last_gc_(0),
       last_gc_end_timestamp_(0.0),
-      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
+      store_buffer_(this),
+      marking_(this),
+      incremental_marking_(this),
       number_idle_notifications_(0),
       last_idle_notification_gc_count_(0),
       last_idle_notification_gc_count_init_(false),
+      idle_notification_will_schedule_next_gc_(false),
+      mark_sweeps_since_idle_round_started_(0),
+      ms_count_at_last_idle_notification_(0),
+      gc_count_at_last_idle_gc_(0),
+      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
+      promotion_queue_(this),
       configured_(false),
-      is_safe_to_read_maps_(true) {
+      chunks_queued_for_free_(NULL) {
   // Allow build-time customization of the max semispace size. Building
   // V8 with snapshots and a non-default max semispace size is much
   // easier if you can define it as part of the build environment.
@@ -171,7 +177,7 @@
 
 
 intptr_t Heap::Capacity() {
-  if (!HasBeenSetup()) return 0;
+  if (!HasBeenSetUp()) return 0;
 
   return new_space_.Capacity() +
       old_pointer_space_->Capacity() +
@@ -183,7 +189,7 @@
 
 
 intptr_t Heap::CommittedMemory() {
-  if (!HasBeenSetup()) return 0;
+  if (!HasBeenSetUp()) return 0;
 
   return new_space_.CommittedMemory() +
       old_pointer_space_->CommittedMemory() +
@@ -195,14 +201,14 @@
 }
 
 intptr_t Heap::CommittedMemoryExecutable() {
-  if (!HasBeenSetup()) return 0;
+  if (!HasBeenSetUp()) return 0;
 
   return isolate()->memory_allocator()->SizeExecutable();
 }
 
 
 intptr_t Heap::Available() {
-  if (!HasBeenSetup()) return 0;
+  if (!HasBeenSetUp()) return 0;
 
   return new_space_.Available() +
       old_pointer_space_->Available() +
@@ -213,7 +219,7 @@
 }
 
 
-bool Heap::HasBeenSetup() {
+bool Heap::HasBeenSetUp() {
   return old_pointer_space_ != NULL &&
          old_data_space_ != NULL &&
          code_space_ != NULL &&
@@ -224,42 +230,26 @@
 
 
 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
-  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
-  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
-  MapWord map_word = object->map_word();
-  map_word.ClearMark();
-  map_word.ClearOverflow();
-  return object->SizeFromMap(map_word.ToMap());
-}
-
-
-int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
-  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
-  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
-  uint32_t marker = Memory::uint32_at(object->address());
-  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
-    return kIntSize;
-  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
-    return Memory::int_at(object->address() + kIntSize);
-  } else {
-    MapWord map_word = object->map_word();
-    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
-    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
-    return object->SizeFromMap(map);
+  if (IntrusiveMarking::IsMarked(object)) {
+    return IntrusiveMarking::SizeOfMarkedObject(object);
   }
+  return object->SizeFromMap(object->map());
 }
 
 
-GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
+GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
+                                              const char** reason) {
   // Is global GC requested?
   if (space != NEW_SPACE || FLAG_gc_global) {
     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
+    *reason = "GC in old space requested";
     return MARK_COMPACTOR;
   }
 
   // Is enough data promoted to justify a global GC?
   if (OldGenerationPromotionLimitReached()) {
     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
+    *reason = "promotion limit reached";
     return MARK_COMPACTOR;
   }
 
@@ -267,6 +257,7 @@
   if (old_gen_exhausted_) {
     isolate_->counters()->
         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
+    *reason = "old generations exhausted";
     return MARK_COMPACTOR;
   }
 
@@ -282,10 +273,12 @@
   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
     isolate_->counters()->
         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
+    *reason = "scavenge might not succeed";
     return MARK_COMPACTOR;
   }
 
   // Default
+  *reason = NULL;
   return SCAVENGER;
 }
 
@@ -400,6 +393,7 @@
 #endif  // DEBUG
 
   LiveObjectList::GCPrologue();
+  store_buffer()->GCPrologue();
 }
 
 intptr_t Heap::SizeOfObjects() {
@@ -412,6 +406,7 @@
 }
 
 void Heap::GarbageCollectionEpilogue() {
+  store_buffer()->GCEpilogue();
   LiveObjectList::GCEpilogue();
 #ifdef DEBUG
   allow_allocation(true);
@@ -443,22 +438,20 @@
 }
 
 
-void Heap::CollectAllGarbage(bool force_compaction) {
+void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
-  mark_compact_collector_.SetForceCompaction(force_compaction);
-  CollectGarbage(OLD_POINTER_SPACE);
-  mark_compact_collector_.SetForceCompaction(false);
+  mark_compact_collector_.SetFlags(flags);
+  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
+  mark_compact_collector_.SetFlags(kNoGCFlags);
 }
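+// A hypothetical call site, for illustration: a full collection that also
+// tries to reduce the memory footprint could be requested as
+//   heap->CollectAllGarbage(Heap::kReduceMemoryFootprintMask, "low memory");
+// where the reason string shows up in --trace-gc output.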
 
 
-void Heap::CollectAllAvailableGarbage() {
+void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
-  mark_compact_collector()->SetForceCompaction(true);
-
   // Major GC would invoke weak handle callbacks on weakly reachable
   // handles, but won't collect weakly reachable objects until next
   // major GC.  Therefore if we collect aggressively and weak handle callback
@@ -467,17 +460,27 @@
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callbacks invocations.
   // Therefore stop recollecting after several attempts.
+  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
+                                     kReduceMemoryFootprintMask);
+  isolate_->compilation_cache()->Clear();
   const int kMaxNumberOfAttempts = 7;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
-    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
+    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
       break;
     }
   }
-  mark_compact_collector()->SetForceCompaction(false);
+  mark_compact_collector()->SetFlags(kNoGCFlags);
+  new_space_.Shrink();
+  UncommitFromSpace();
+  Shrink();
+  incremental_marking()->UncommitMarkingDeque();
 }
 
 
-bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
+bool Heap::CollectGarbage(AllocationSpace space,
+                          GarbageCollector collector,
+                          const char* gc_reason,
+                          const char* collector_reason) {
   // The VM is in the GC state until exiting this function.
   VMState state(isolate_, GC);
 
@@ -490,9 +493,27 @@
   allocation_timeout_ = Max(6, FLAG_gc_interval);
 #endif
 
+  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Scavenge during marking.\n");
+    }
+  }
+
+  if (collector == MARK_COMPACTOR &&
+      !mark_compact_collector()->abort_incremental_marking_ &&
+      !incremental_marking()->IsStopped() &&
+      !incremental_marking()->should_hurry() &&
+      FLAG_incremental_marking_steps) {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+    }
+    collector = SCAVENGER;
+    collector_reason = "incremental marking delaying mark-sweep";
+  }
+
   bool next_gc_likely_to_collect_more = false;
 
-  { GCTracer tracer(this);
+  { GCTracer tracer(this, gc_reason, collector_reason);
     GarbageCollectionPrologue();
     // The GC count was incremented in the prologue.  Tell the tracer about
     // it.
@@ -512,13 +533,24 @@
     GarbageCollectionEpilogue();
   }
 
+  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
+  if (incremental_marking()->IsStopped()) {
+    if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
+      incremental_marking()->Start();
+    }
+  }
+
   return next_gc_likely_to_collect_more;
 }
 
 
 void Heap::PerformScavenge() {
-  GCTracer tracer(this);
-  PerformGarbageCollection(SCAVENGER, &tracer);
+  GCTracer tracer(this, NULL, NULL);
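+  // A scavenge is only performed while incremental marking is stopped;
+  // otherwise this falls back to a full mark-compact collection.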
+  if (incremental_marking()->IsStopped()) {
+    PerformGarbageCollection(SCAVENGER, &tracer);
+  } else {
+    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
+  }
 }
 
 
@@ -531,7 +563,7 @@
     for (Object** p = start; p < end; p++) {
       if ((*p)->IsHeapObject()) {
         // Check that the symbol is actually a symbol.
-        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
+        ASSERT((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
       }
     }
   }
@@ -547,6 +579,17 @@
 }
 
 
+static bool AbortIncrementalMarkingAndCollectGarbage(
+    Heap* heap,
+    AllocationSpace space,
+    const char* gc_reason = NULL) {
+  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
+  bool result = heap->CollectGarbage(space, gc_reason);
+  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
+  return result;
+}
+
+
 void Heap::ReserveSpace(
     int new_space_size,
     int pointer_space_size,
@@ -563,30 +606,38 @@
   PagedSpace* cell_space = Heap::cell_space();
   LargeObjectSpace* lo_space = Heap::lo_space();
   bool gc_performed = true;
-  while (gc_performed) {
+  int counter = 0;
+  static const int kThreshold = 20;
+  while (gc_performed && counter++ < kThreshold) {
     gc_performed = false;
     if (!new_space->ReserveSpace(new_space_size)) {
-      Heap::CollectGarbage(NEW_SPACE);
+      Heap::CollectGarbage(NEW_SPACE,
+                           "failed to reserve space in the new space");
       gc_performed = true;
     }
     if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
-      Heap::CollectGarbage(OLD_POINTER_SPACE);
+      AbortIncrementalMarkingAndCollectGarbage(this, OLD_POINTER_SPACE,
+          "failed to reserve space in the old pointer space");
       gc_performed = true;
     }
     if (!(old_data_space->ReserveSpace(data_space_size))) {
-      Heap::CollectGarbage(OLD_DATA_SPACE);
+      AbortIncrementalMarkingAndCollectGarbage(this, OLD_DATA_SPACE,
+          "failed to reserve space in the old data space");
       gc_performed = true;
     }
     if (!(code_space->ReserveSpace(code_space_size))) {
-      Heap::CollectGarbage(CODE_SPACE);
+      AbortIncrementalMarkingAndCollectGarbage(this, CODE_SPACE,
+          "failed to reserve space in the code space");
       gc_performed = true;
     }
     if (!(map_space->ReserveSpace(map_space_size))) {
-      Heap::CollectGarbage(MAP_SPACE);
+      AbortIncrementalMarkingAndCollectGarbage(this, MAP_SPACE,
+          "failed to reserve space in the map space");
       gc_performed = true;
     }
     if (!(cell_space->ReserveSpace(cell_space_size))) {
-      Heap::CollectGarbage(CELL_SPACE);
+      AbortIncrementalMarkingAndCollectGarbage(this, CELL_SPACE,
+          "failed to reserve space in the cell space");
       gc_performed = true;
     }
     // We add a slack-factor of 2 in order to have space for a series of
@@ -598,10 +649,16 @@
     large_object_size += cell_space_size + map_space_size + code_space_size +
         data_space_size + pointer_space_size;
     if (!(lo_space->ReserveSpace(large_object_size))) {
-      Heap::CollectGarbage(LO_SPACE);
+      AbortIncrementalMarkingAndCollectGarbage(this, LO_SPACE,
+          "failed to reserve space in the large object space");
       gc_performed = true;
     }
   }
+
+  if (gc_performed) {
+    // Failed to reserve the space after several attempts.
+    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
+  }
 }
 
 
@@ -610,13 +667,6 @@
 
   // Committing memory to from space failed.
   // Try shrinking and try again.
-  PagedSpaces spaces;
-  for (PagedSpace* space = spaces.next();
-       space != NULL;
-       space = spaces.next()) {
-    space->RelinkPageListInChunkOrder(true);
-  }
-
   Shrink();
   if (new_space_.CommitFromSpaceIfNeeded()) return;
 
@@ -631,13 +681,17 @@
 
   Object* context = global_contexts_list_;
   while (!context->IsUndefined()) {
-    // Get the caches for this context:
-    FixedArray* caches =
-      Context::cast(context)->jsfunction_result_caches();
-    // Clear the caches:
-    int length = caches->length();
-    for (int i = 0; i < length; i++) {
-      JSFunctionResultCache::cast(caches->get(i))->Clear();
+    // Get the caches for this context. GC can happen when the context
+    // is not fully initialized, so the caches can be undefined.
+    Object* caches_or_undefined =
+        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
+    if (!caches_or_undefined->IsUndefined()) {
+      FixedArray* caches = FixedArray::cast(caches_or_undefined);
+      // Clear the caches:
+      int length = caches->length();
+      for (int i = 0; i < length; i++) {
+        JSFunctionResultCache::cast(caches->get(i))->Clear();
+      }
     }
     // Get the next context:
     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
@@ -647,45 +701,42 @@
 
 
 void Heap::ClearNormalizedMapCaches() {
-  if (isolate_->bootstrapper()->IsActive()) return;
+  if (isolate_->bootstrapper()->IsActive() &&
+      !incremental_marking()->IsMarking()) {
+    return;
+  }
 
   Object* context = global_contexts_list_;
   while (!context->IsUndefined()) {
-    Context::cast(context)->normalized_map_cache()->Clear();
+    // GC can happen when the context is not fully initialized,
+    // so the cache can be undefined.
+    Object* cache =
+        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
+    if (!cache->IsUndefined()) {
+      NormalizedMapCache::cast(cache)->Clear();
+    }
     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
   }
 }
 
 
-#ifdef DEBUG
-
-enum PageWatermarkValidity {
-  ALL_VALID,
-  ALL_INVALID
-};
-
-static void VerifyPageWatermarkValidity(PagedSpace* space,
-                                        PageWatermarkValidity validity) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-  bool expected_value = (validity == ALL_VALID);
-  while (it.has_next()) {
-    Page* page = it.next();
-    ASSERT(page->IsWatermarkValid() == expected_value);
-  }
-}
-#endif
-
 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
   double survival_rate =
       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
       start_new_space_size;
 
-  if (survival_rate > kYoungSurvivalRateThreshold) {
+  if (survival_rate > kYoungSurvivalRateHighThreshold) {
     high_survival_rate_period_length_++;
   } else {
     high_survival_rate_period_length_ = 0;
   }
 
+  if (survival_rate < kYoungSurvivalRateLowThreshold) {
+    low_survival_rate_period_length_++;
+  } else {
+    low_survival_rate_period_length_ = 0;
+  }
+
   double survival_rate_diff = survival_rate_ - survival_rate;
 
   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
@@ -707,7 +758,9 @@
     PROFILE(isolate_, CodeMovingGCEvent());
   }
 
-  VerifySymbolTable();
+  if (FLAG_verify_heap) {
+    VerifySymbolTable();
+  }
   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
     ASSERT(!allocation_allowed_);
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
@@ -727,6 +780,13 @@
 
   int start_new_space_size = Heap::new_space()->SizeAsInt();
 
+  if (IsHighSurvivalRate()) {
+    // We speed up the incremental marker if it is running so that it
+    // does not fall behind the rate of promotion, which would cause a
+    // constantly growing old space.
+    incremental_marking()->NotifyOfHighPromotionRate();
+  }
+
   if (collector == MARK_COMPACTOR) {
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
@@ -736,11 +796,7 @@
 
     UpdateSurvivalRateTrend(start_new_space_size);
 
-    intptr_t old_gen_size = PromotedSpaceSize();
-    old_gen_promotion_limit_ =
-        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
-    old_gen_allocation_limit_ =
-        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
 
     if (high_survival_rate_during_scavenges &&
         IsStableOrIncreasingSurvivalTrend()) {
@@ -750,10 +806,16 @@
       // In this case we aggressively raise old generation memory limits to
       // postpone subsequent mark-sweep collection and thus trade memory
       // space for the mutation speed.
-      old_gen_promotion_limit_ *= 2;
-      old_gen_allocation_limit_ *= 2;
+      old_gen_limit_factor_ = 2;
+    } else {
+      old_gen_limit_factor_ = 1;
     }
 
+    old_gen_promotion_limit_ =
+        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+    old_gen_allocation_limit_ =
+        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
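+    // E.g. with old_gen_limit_factor_ == 2 the limits derived from the
+    // post-GC old generation size are doubled, postponing the next
+    // mark-sweep in favor of mutator speed.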
+
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
@@ -763,6 +825,37 @@
     UpdateSurvivalRateTrend(start_new_space_size);
   }
 
+  if (!new_space_high_promotion_mode_active_ &&
+      new_space_.Capacity() == new_space_.MaximumCapacity() &&
+      IsStableOrIncreasingSurvivalTrend() &&
+      IsHighSurvivalRate()) {
+    // Stable high survival rates even though the young generation is at
+    // maximum capacity indicate that most objects will be promoted.
+    // To decrease scavenger pauses and final mark-sweep pauses, we
+    // have to limit the maximal capacity of the young generation.
+    new_space_high_promotion_mode_active_ = true;
+    if (FLAG_trace_gc) {
+      PrintF("Limited new space size due to high promotion rate: %d MB\n",
+             new_space_.InitialCapacity() / MB);
+    }
+  } else if (new_space_high_promotion_mode_active_ &&
+      IsStableOrDecreasingSurvivalTrend() &&
+      IsLowSurvivalRate()) {
+    // Decreasing low survival rates might indicate that the above high
+    // promotion mode is over and we should allow the young generation
+    // to grow again.
+    new_space_high_promotion_mode_active_ = false;
+    if (FLAG_trace_gc) {
+      PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
+             new_space_.MaximumCapacity() / MB);
+    }
+  }
+
+  if (new_space_high_promotion_mode_active_ &&
+      new_space_.Capacity() > new_space_.InitialCapacity()) {
+    new_space_.Shrink();
+  }
+
   isolate_->counters()->objs_since_last_young()->Set(0);
 
   gc_post_processing_depth_++;
@@ -782,9 +875,7 @@
         amount_of_external_allocated_memory_;
   }
 
-  GCCallbackFlags callback_flags = tracer->is_compacting()
-      ? kGCCallbackFlagCompacted
-      : kNoGCCallbackFlags;
+  GCCallbackFlags callback_flags = kNoGCCallbackFlags;
   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
       gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
@@ -796,7 +887,9 @@
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
     global_gc_epilogue_callback_();
   }
-  VerifySymbolTable();
+  if (FLAG_verify_heap) {
+    VerifySymbolTable();
+  }
 
   return next_gc_likely_to_collect_more;
 }
@@ -808,34 +901,26 @@
 
   mark_compact_collector_.Prepare(tracer);
 
-  bool is_compacting = mark_compact_collector_.IsCompacting();
+  ms_count_++;
+  tracer->set_full_gc_count(ms_count_);
 
-  if (is_compacting) {
-    mc_count_++;
-  } else {
-    ms_count_++;
-  }
-  tracer->set_full_gc_count(mc_count_ + ms_count_);
+  MarkCompactPrologue();
 
-  MarkCompactPrologue(is_compacting);
-
-  is_safe_to_read_maps_ = false;
   mark_compact_collector_.CollectGarbage();
-  is_safe_to_read_maps_ = true;
 
   LOG(isolate_, ResourceEvent("markcompact", "end"));
 
   gc_state_ = NOT_IN_GC;
 
-  Shrink();
-
   isolate_->counters()->objs_since_last_full()->Set(0);
 
   contexts_disposed_ = 0;
+
+  isolate_->set_context_exit_happened(false);
 }
 
 
-void Heap::MarkCompactPrologue(bool is_compacting) {
+void Heap::MarkCompactPrologue() {
   // At any old GC clear the keyed lookup cache to enable collection of unused
   // maps.
   isolate_->keyed_lookup_cache()->Clear();
@@ -847,7 +932,7 @@
 
   CompletelyClearInstanceofCache();
 
-  if (is_compacting) FlushNumberStringCache();
+  FlushNumberStringCache();
   if (FLAG_cleanup_code_caches_at_gc) {
     polymorphic_code_cache()->set_cache(undefined_value());
   }
@@ -857,13 +942,8 @@
 
 
 Object* Heap::FindCodeObject(Address a) {
-  Object* obj = NULL;  // Initialization to please compiler.
-  { MaybeObject* maybe_obj = code_space_->FindObject(a);
-    if (!maybe_obj->ToObject(&obj)) {
-      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
-    }
-  }
-  return obj;
+  return isolate()->inner_pointer_to_code_cache()->
+      GcSafeFindCodeForInnerPointer(a);
 }
 
 
@@ -911,23 +991,29 @@
   // do not expect them.
   VerifyNonPointerSpacePointersVisitor v;
   HeapObjectIterator code_it(HEAP->code_space());
-  for (HeapObject* object = code_it.next();
-       object != NULL; object = code_it.next())
+  for (HeapObject* object = code_it.Next();
+       object != NULL; object = code_it.Next())
     object->Iterate(&v);
 
-  HeapObjectIterator data_it(HEAP->old_data_space());
-  for (HeapObject* object = data_it.next();
-       object != NULL; object = data_it.next())
-    object->Iterate(&v);
+  // The old data space is normally swept conservatively, which makes the
+  // iterator unusable, so we skip the verification for it in that case.
+  if (!HEAP->old_data_space()->was_swept_conservatively()) {
+    HeapObjectIterator data_it(HEAP->old_data_space());
+    for (HeapObject* object = data_it.Next();
+         object != NULL; object = data_it.Next())
+      object->Iterate(&v);
+  }
 }
 #endif
 
 
 void Heap::CheckNewSpaceExpansionCriteria() {
   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
-      survived_since_last_expansion_ > new_space_.Capacity()) {
-    // Grow the size of new space if there is room to grow and enough
-    // data has survived scavenge since the last expansion.
+      survived_since_last_expansion_ > new_space_.Capacity() &&
+      !new_space_high_promotion_mode_active_) {
+    // Grow the size of new space if there is room to grow, enough data
+    // has survived scavenge since the last expansion and we are not in
+    // high promotion mode.
     new_space_.Grow();
     survived_since_last_expansion_ = 0;
   }
@@ -940,29 +1026,107 @@
 }
 
 
+void Heap::ScavengeStoreBufferCallback(
+    Heap* heap,
+    MemoryChunk* page,
+    StoreBufferEvent event) {
+  heap->store_buffer_rebuilder_.Callback(page, event);
+}
+
+
+void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
+  if (event == kStoreBufferStartScanningPagesEvent) {
+    start_of_current_page_ = NULL;
+    current_page_ = NULL;
+  } else if (event == kStoreBufferScanningPageEvent) {
+    if (current_page_ != NULL) {
+      // Has this page already overflowed the store buffer during this
+      // iteration?
+      if (current_page_->scan_on_scavenge()) {
+        // If so, wipe out the entries that have been added for it.
+        store_buffer_->SetTop(start_of_current_page_);
+      } else if (store_buffer_->Top() - start_of_current_page_ >=
+                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
+        // Did we find too many pointers in the previous page?  The heuristic is
+        // that no page can take more than 1/5 the remaining slots in the store
+        // buffer.
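+        // For illustration: if 1000 slots remain (Limit() - Top()) and this
+        // page has contributed 300 entries (Top() - start_of_current_page_),
+        // then 300 >= 1000 >> 2, so the page is demoted to scan-on-scavenge.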
+        current_page_->set_scan_on_scavenge(true);
+        store_buffer_->SetTop(start_of_current_page_);
+      } else {
+        // In this case the page we scanned took a reasonable number of slots in
+        // the store buffer.  It has now been rehabilitated and is no longer
+        // marked scan_on_scavenge.
+        ASSERT(!current_page_->scan_on_scavenge());
+      }
+    }
+    start_of_current_page_ = store_buffer_->Top();
+    current_page_ = page;
+  } else if (event == kStoreBufferFullEvent) {
+    // The current page overflowed the store buffer again.  Wipe out its entries
+    // in the store buffer and mark it scan-on-scavenge again.  This may happen
+    // several times while scanning.
+    if (current_page_ == NULL) {
+      // Store buffer overflowed while scanning promoted objects.  These are not
+      // in any particular page, though they are likely to be clustered by the
+      // allocation routines.
+      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
+    } else {
+      // Store buffer overflowed while scanning a particular old space page for
+      // pointers to new space.
+      ASSERT(current_page_ == page);
+      ASSERT(page != NULL);
+      current_page_->set_scan_on_scavenge(true);
+      ASSERT(start_of_current_page_ != store_buffer_->Top());
+      store_buffer_->SetTop(start_of_current_page_);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void PromotionQueue::Initialize() {
+  // Assumes that a NewSpacePage exactly fits a number of promotion queue
+  // entries (where each is a pair of intptr_t). This allows us to simplify
+  // the test for when to switch pages.
+  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
+         == 0);
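+  // For example, with 4-byte pointers each queue entry occupies 8 bytes,
+  // so the usable page body must be a multiple of 8 for this to hold.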
+  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
+  front_ = rear_ =
+      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+  emergency_stack_ = NULL;
+  guard_ = false;
+}
+
+
+void PromotionQueue::RelocateQueueHead() {
+  ASSERT(emergency_stack_ == NULL);
+
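+  // The queue grows downwards from the end of to-space (see Initialize),
+  // so the entries on the page containing rear_ form the head of the
+  // queue; only those entries are moved to the emergency stack.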
+  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  intptr_t* head_start = rear_;
+  intptr_t* head_end =
+      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
+
+  int entries_count =
+      static_cast<int>(head_end - head_start) / kEntrySizeInWords;
+
+  emergency_stack_ = new List<Entry>(2 * entries_count);
+
+  while (head_start != head_end) {
+    int size = static_cast<int>(*(head_start++));
+    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+    emergency_stack_->Add(Entry(obj, size));
+  }
+  rear_ = head_end;
+}
+
+
 void Heap::Scavenge() {
 #ifdef DEBUG
-  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
+  if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
 #endif
 
   gc_state_ = SCAVENGE;
 
-  SwitchScavengingVisitorsTableIfProfilingWasEnabled();
-
-  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
-#ifdef DEBUG
-  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
-  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
-#endif
-
-  // We do not update an allocation watermark of the top page during linear
-  // allocation to avoid overhead. So to maintain the watermark invariant
-  // we have to manually cache the watermark and mark the top page as having an
-  // invalid watermark. This guarantees that dirty regions iteration will use a
-  // correct watermark even if a linear allocation happens.
-  old_pointer_space_->FlushTopPageWatermark();
-  map_space_->FlushTopPageWatermark();
-
   // Implements Cheney's copying algorithm
   LOG(isolate_, ResourceEvent("scavenge", "begin"));
 
@@ -970,10 +1134,16 @@
   isolate_->descriptor_lookup_cache()->Clear();
 
   // Used for updating survived_since_last_expansion_ at function end.
-  intptr_t survived_watermark = PromotedSpaceSize();
+  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
 
   CheckNewSpaceExpansionCriteria();
 
+  SelectScavengingVisitorsTable();
+
+  incremental_marking()->PrepareForScavenge();
+
+  AdvanceSweepers(static_cast<int>(new_space_.Size()));
+
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
   new_space_.Flip();
@@ -996,32 +1166,29 @@
   // for the addresses of promoted objects: every object promoted
   // frees up its size in bytes from the top of the new space, and
   // objects are at least one pointer in size.
-  Address new_space_front = new_space_.ToSpaceLow();
-  promotion_queue_.Initialize(new_space_.ToSpaceHigh());
+  Address new_space_front = new_space_.ToSpaceStart();
+  promotion_queue_.Initialize();
 
-  is_safe_to_read_maps_ = false;
+#ifdef DEBUG
+  store_buffer()->Clean();
+#endif
+
   ScavengeVisitor scavenge_visitor(this);
   // Copy roots.
   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
 
-  // Copy objects reachable from the old generation.  By definition,
-  // there are no intergenerational pointers in code or data spaces.
-  IterateDirtyRegions(old_pointer_space_,
-                      &Heap::IteratePointersInDirtyRegion,
-                      &ScavengePointer,
-                      WATERMARK_CAN_BE_INVALID);
-
-  IterateDirtyRegions(map_space_,
-                      &IteratePointersInDirtyMapsRegion,
-                      &ScavengePointer,
-                      WATERMARK_CAN_BE_INVALID);
-
-  lo_space_->IterateDirtyRegions(&ScavengePointer);
+  // Copy objects reachable from the old generation.
+  {
+    StoreBufferRebuildScope scope(this,
+                                  store_buffer(),
+                                  &ScavengeStoreBufferCallback);
+    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
+  }
 
   // Copy objects reachable from cells by scavenging cell values directly.
   HeapObjectIterator cell_iterator(cell_space_);
-  for (HeapObject* cell = cell_iterator.next();
-       cell != NULL; cell = cell_iterator.next()) {
+  for (HeapObject* cell = cell_iterator.Next();
+       cell != NULL; cell = cell_iterator.Next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
           reinterpret_cast<Address>(cell) +
@@ -1040,27 +1207,34 @@
       &scavenge_visitor);
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
 
-
   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
 
+  promotion_queue_.Destroy();
+
   LiveObjectList::UpdateReferencesForScavengeGC();
-  isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+  if (!FLAG_watch_ic_patching) {
+    isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+  }
+  incremental_marking()->UpdateMarkingDequeAfterScavenge();
 
   ASSERT(new_space_front == new_space_.top());
 
-  is_safe_to_read_maps_ = true;
-
   // Set age mark.
   new_space_.set_age_mark(new_space_.top());
 
+  new_space_.LowerInlineAllocationLimit(
+      new_space_.inline_allocation_limit_step());
+
   // Update how much has survived scavenge.
   IncrementYoungSurvivorsCounter(static_cast<int>(
-      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
+      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
 
   LOG(isolate_, ResourceEvent("scavenge", "end"));
 
   gc_state_ = NOT_IN_GC;
+
+  scavenges_since_last_idle_round_++;
 }
 
 
@@ -1081,7 +1255,9 @@
 
 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
     ExternalStringTableUpdaterCallback updater_func) {
-  external_string_table_.Verify();
+  if (FLAG_verify_heap) {
+    external_string_table_.Verify();
+  }
 
   if (external_string_table_.new_space_strings_.is_empty()) return;
 
@@ -1112,35 +1288,56 @@
 }
 
 
+void Heap::UpdateReferencesInExternalStringTable(
+    ExternalStringTableUpdaterCallback updater_func) {
+
+  // Update old space string references.
+  if (external_string_table_.old_space_strings_.length() > 0) {
+    Object** start = &external_string_table_.old_space_strings_[0];
+    Object** end = start + external_string_table_.old_space_strings_.length();
+    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
+  }
+
+  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
+}
+
+
 static Object* ProcessFunctionWeakReferences(Heap* heap,
                                              Object* function,
                                              WeakObjectRetainer* retainer) {
-  Object* head = heap->undefined_value();
+  Object* undefined = heap->undefined_value();
+  Object* head = undefined;
   JSFunction* tail = NULL;
   Object* candidate = function;
-  while (candidate != heap->undefined_value()) {
+  while (candidate != undefined) {
     // Check whether to keep the candidate in the list.
     JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
     Object* retain = retainer->RetainAs(candidate);
     if (retain != NULL) {
-      if (head == heap->undefined_value()) {
+      if (head == undefined) {
         // First element in the list.
-        head = candidate_function;
+        head = retain;
       } else {
         // Subsequent elements in the list.
         ASSERT(tail != NULL);
-        tail->set_next_function_link(candidate_function);
+        tail->set_next_function_link(retain);
       }
       // Retained function is new tail.
+      candidate_function = reinterpret_cast<JSFunction*>(retain);
       tail = candidate_function;
+
+      ASSERT(retain->IsUndefined() || retain->IsJSFunction());
+
+      if (retain == undefined) break;
     }
+
     // Move to next element in the list.
     candidate = candidate_function->next_function_link();
   }
 
   // Terminate the list if there is one or more elements.
   if (tail != NULL) {
-    tail->set_next_function_link(heap->undefined_value());
+    tail->set_next_function_link(undefined);
   }
 
   return head;
@@ -1148,28 +1345,32 @@
 
 
 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
-  Object* head = undefined_value();
+  Object* undefined = undefined_value();
+  Object* head = undefined;
   Context* tail = NULL;
   Object* candidate = global_contexts_list_;
-  while (candidate != undefined_value()) {
+  while (candidate != undefined) {
     // Check whether to keep the candidate in the list.
     Context* candidate_context = reinterpret_cast<Context*>(candidate);
     Object* retain = retainer->RetainAs(candidate);
     if (retain != NULL) {
-      if (head == undefined_value()) {
+      if (head == undefined) {
         // First element in the list.
-        head = candidate_context;
+        head = retain;
       } else {
         // Subsequent elements in the list.
         ASSERT(tail != NULL);
         tail->set_unchecked(this,
                             Context::NEXT_CONTEXT_LINK,
-                            candidate_context,
+                            retain,
                             UPDATE_WRITE_BARRIER);
       }
       // Retained context is new tail.
+      candidate_context = reinterpret_cast<Context*>(retain);
       tail = candidate_context;
 
+      if (retain == undefined) break;
+
       // Process the weak list of optimized functions for the context.
       Object* function_list_head =
           ProcessFunctionWeakReferences(
@@ -1181,6 +1382,7 @@
                                        function_list_head,
                                        UPDATE_WRITE_BARRIER);
     }
+
     // Move to next element in the list.
     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
   }
@@ -1198,6 +1400,28 @@
 }
 
 
+void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
+  AssertNoAllocation no_allocation;
+
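+  // Adapts the embedder-facing v8::ExternalResourceVisitor to the internal
+  // ObjectVisitor interface; only external strings are forwarded.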
+  class VisitorAdapter : public ObjectVisitor {
+   public:
+    explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
+        : visitor_(visitor) {}
+    virtual void VisitPointers(Object** start, Object** end) {
+      for (Object** p = start; p < end; p++) {
+        if ((*p)->IsExternalString()) {
+          visitor_->VisitExternalString(Utils::ToLocal(
+              Handle<String>(String::cast(*p))));
+        }
+      }
+    }
+   private:
+    v8::ExternalResourceVisitor* visitor_;
+  } visitor_adapter(visitor);
+  external_string_table_.Iterate(&visitor_adapter);
+}
+
+
 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
  public:
   static inline void VisitPointer(Heap* heap, Object** p) {
@@ -1212,35 +1436,45 @@
 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                          Address new_space_front) {
   do {
-    ASSERT(new_space_front <= new_space_.top());
-
+    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
     // The addresses new_space_front and new_space_.top() define a
     // queue of unprocessed copied objects.  Process them until the
     // queue is empty.
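+    // To-space now consists of multiple pages, so when the front reaches a
+    // page boundary it is advanced to the body of the next page (see below).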
-    while (new_space_front < new_space_.top()) {
-      HeapObject* object = HeapObject::FromAddress(new_space_front);
-      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
+    while (new_space_front != new_space_.top()) {
+      if (!NewSpacePage::IsAtEnd(new_space_front)) {
+        HeapObject* object = HeapObject::FromAddress(new_space_front);
+        new_space_front +=
+          NewSpaceScavenger::IterateBody(object->map(), object);
+      } else {
+        new_space_front =
+            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
+      }
     }
 
     // Promote and process all the to-be-promoted objects.
-    while (!promotion_queue_.is_empty()) {
-      HeapObject* target;
-      int size;
-      promotion_queue_.remove(&target, &size);
+    {
+      StoreBufferRebuildScope scope(this,
+                                    store_buffer(),
+                                    &ScavengeStoreBufferCallback);
+      while (!promotion_queue()->is_empty()) {
+        HeapObject* target;
+        int size;
+        promotion_queue()->remove(&target, &size);
 
-      // Promoted object might be already partially visited
-      // during dirty regions iteration. Thus we search specificly
-      // for pointers to from semispace instead of looking for pointers
-      // to new space.
-      ASSERT(!target->IsMap());
-      IterateAndMarkPointersToFromSpace(target->address(),
-                                        target->address() + size,
-                                        &ScavengePointer);
+        // Promoted object might be already partially visited
+        // during old space pointer iteration. Thus we search specifically
+        // for pointers to from semispace instead of looking for pointers
+        // to new space.
+        ASSERT(!target->IsMap());
+        IterateAndMarkPointersToFromSpace(target->address(),
+                                          target->address() + size,
+                                          &ScavengeObject);
+      }
     }
 
     // Take another spin if there are now unswept objects in new space
     // (there are currently no more unswept promoted objects).
-  } while (new_space_front < new_space_.top());
+  } while (new_space_front != new_space_.top());
 
   return new_space_front;
 }
@@ -1252,26 +1486,11 @@
 };
 
 
-typedef void (*ScavengingCallback)(Map* map,
-                                   HeapObject** slot,
-                                   HeapObject* object);
+enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
 
 
-static Atomic32 scavenging_visitors_table_mode_;
-static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
-
-
-INLINE(static void DoScavengeObject(Map* map,
-                                    HeapObject** slot,
-                                    HeapObject* obj));
-
-
-void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
-  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
-}
-
-
-template<LoggingAndProfiling logging_and_profiling_mode>
+template<MarksHandling marks_handling,
+         LoggingAndProfiling logging_and_profiling_mode>
 class ScavengingVisitor : public StaticVisitorBase {
  public:
   static void Initialize() {
@@ -1306,9 +1525,13 @@
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                     Visit);
 
-    table_.Register(kVisitJSFunction,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<JSFunction::kSize>);
+    if (marks_handling == IGNORE_MARKS) {
+      table_.Register(kVisitJSFunction,
+                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                          template VisitSpecialized<JSFunction::kSize>);
+    } else {
+      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
+    }
 
     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                    kVisitDataObject,
@@ -1349,10 +1572,10 @@
   // Helper function used by CopyObject to copy a source object to an
   // allocated target object and update the forwarding pointer in the source
   // object.  Returns the target object.
-  INLINE(static HeapObject* MigrateObject(Heap* heap,
-                                          HeapObject* source,
-                                          HeapObject* target,
-                                          int size)) {
+  INLINE(static void MigrateObject(Heap* heap,
+                                   HeapObject* source,
+                                   HeapObject* target,
+                                   int size)) {
     // Copy the content of source to target.
     heap->CopyBlock(target->address(), source->address(), size);
 
@@ -1373,26 +1596,30 @@
       }
     }
 
-    return target;
+    if (marks_handling == TRANSFER_MARKS) {
+      if (Marking::TransferColor(source, target)) {
+        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
+      }
+    }
   }
 
-
   template<ObjectContents object_contents, SizeRestriction size_restriction>
   static inline void EvacuateObject(Map* map,
                                     HeapObject** slot,
                                     HeapObject* object,
                                     int object_size) {
-    ASSERT((size_restriction != SMALL) ||
-           (object_size <= Page::kMaxHeapObjectSize));
-    ASSERT(object->Size() == object_size);
+    SLOW_ASSERT((size_restriction != SMALL) ||
+                (object_size <= Page::kMaxNonCodeHeapObjectSize));
+    SLOW_ASSERT(object->Size() == object_size);
 
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     if (heap->ShouldBePromoted(object->address(), object_size)) {
       MaybeObject* maybe_result;
 
       if ((size_restriction != SMALL) &&
-          (object_size > Page::kMaxHeapObjectSize)) {
-        maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
+          (object_size > Page::kMaxNonCodeHeapObjectSize)) {
+        maybe_result = heap->lo_space()->AllocateRaw(object_size,
+                                                     NOT_EXECUTABLE);
       } else {
         if (object_contents == DATA_OBJECT) {
           maybe_result = heap->old_data_space()->AllocateRaw(object_size);
@@ -1404,7 +1631,12 @@
       Object* result = NULL;  // Initialization to please compiler.
       if (maybe_result->ToObject(&result)) {
         HeapObject* target = HeapObject::cast(result);
-        *slot = MigrateObject(heap, object , target, object_size);
+
+        // Order is important: slot might be inside the target if target
+        // was allocated over a dead object and slot comes from the store
+        // buffer.
+        *slot = target;
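+        // Writing the slot before the copy means that, if the slot does lie
+        // inside the target, the copy below simply overwrites it with the
+        // correct field value from the source object.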
+        MigrateObject(heap, object, target, object_size);
 
         if (object_contents == POINTER_OBJECT) {
           heap->promotion_queue()->insert(target, object_size);
@@ -1414,13 +1646,42 @@
         return;
       }
     }
-    Object* result =
-        heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
-    *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
+    MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
+    heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+    Object* result = allocation->ToObjectUnchecked();
+    HeapObject* target = HeapObject::cast(result);
+
+    // Order is important: slot might be inside the target if target
+    // was allocated over a dead object and slot comes from the store
+    // buffer.
+    *slot = target;
+    MigrateObject(heap, object, target, object_size);
     return;
   }
 
 
+  static inline void EvacuateJSFunction(Map* map,
+                                        HeapObject** slot,
+                                        HeapObject* object) {
+    ObjectEvacuationStrategy<POINTER_OBJECT>::
+        template VisitSpecialized<JSFunction::kSize>(map, slot, object);
+
+    HeapObject* target = *slot;
+    MarkBit mark_bit = Marking::MarkBitFrom(target);
+    if (Marking::IsBlack(mark_bit)) {
+      // This object is black and it might not be rescanned by marker.
+      // We should explicitly record code entry slot for compaction because
+      // promotion queue processing (IterateAndMarkPointersToFromSpace) will
+      // miss it as it is not HeapObject-tagged.
+      Address code_entry_slot =
+          target->address() + JSFunction::kCodeEntryOffset;
+      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+      map->GetHeap()->mark_compact_collector()->
+          RecordCodeEntrySlot(code_entry_slot, code);
+    }
+  }
+
+
   static inline void EvacuateFixedArray(Map* map,
                                         HeapObject** slot,
                                         HeapObject* object) {
@@ -1479,14 +1740,17 @@
                                                HeapObject* object) {
     ASSERT(IsShortcutCandidate(map->instance_type()));
 
-    if (ConsString::cast(object)->unchecked_second() ==
-        map->heap()->empty_string()) {
+    Heap* heap = map->GetHeap();
+
+    if (marks_handling == IGNORE_MARKS &&
+        ConsString::cast(object)->unchecked_second() ==
+        heap->empty_string()) {
       HeapObject* first =
           HeapObject::cast(ConsString::cast(object)->unchecked_first());
 
       *slot = first;
 
-      if (!map->heap()->InNewSpace(first)) {
+      if (!heap->InNewSpace(first)) {
         object->set_map_word(MapWord::FromForwardingAddress(first));
         return;
       }
@@ -1500,7 +1764,7 @@
         return;
       }
 
-      DoScavengeObject(first->map(), slot, first);
+      heap->DoScavengeObject(first->map(), slot, first);
       object->set_map_word(MapWord::FromForwardingAddress(*slot));
       return;
     }
@@ -1531,55 +1795,70 @@
 };
 
 
-template<LoggingAndProfiling logging_and_profiling_mode>
+template<MarksHandling marks_handling,
+         LoggingAndProfiling logging_and_profiling_mode>
 VisitorDispatchTable<ScavengingCallback>
-    ScavengingVisitor<logging_and_profiling_mode>::table_;
+    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
 
 
 static void InitializeScavengingVisitorsTables() {
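+  // Instantiate all four (marks-handling x logging/profiling) variants up
+  // front; Heap::SelectScavengingVisitorsTable() later copies the table
+  // that matches the current scavenge.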
-  ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
-  scavenging_visitors_table_.CopyFrom(
-      ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
-  scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
 }
 
 
-void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
-  if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
-    // Table was already updated by some isolate.
-    return;
-  }
-
-  if (isolate()->logger()->is_logging() |
+void Heap::SelectScavengingVisitorsTable() {
+  bool logging_and_profiling =
+      isolate()->logger()->is_logging() ||
       CpuProfiler::is_profiling(isolate()) ||
       (isolate()->heap_profiler() != NULL &&
-       isolate()->heap_profiler()->is_profiling())) {
-    // If one of the isolates is doing scavenge at this moment of time
-    // it might see this table in an inconsitent state when
-    // some of the callbacks point to
-    // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
-    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
-    // However this does not lead to any bugs as such isolate does not have
-    // profiling enabled and any isolate with enabled profiling is guaranteed
-    // to see the table in the consistent state.
-    scavenging_visitors_table_.CopyFrom(
-        ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
+       isolate()->heap_profiler()->is_profiling());
 
-    // We use Release_Store to prevent reordering of this write before writes
-    // to the table.
-    Release_Store(&scavenging_visitors_table_mode_,
-                  LOGGING_AND_PROFILING_ENABLED);
+  if (!incremental_marking()->IsMarking()) {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<IGNORE_MARKS,
+                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<IGNORE_MARKS,
+                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
+  } else {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<TRANSFER_MARKS,
+                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<TRANSFER_MARKS,
+                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
+
+    if (incremental_marking()->IsCompacting()) {
+      // When compacting forbid short-circuiting of cons-strings.
+      // Scavenging code relies on the fact that new space object
+      // can't be evacuated into evacuation candidate but
+      // short-circuiting violates this assumption.
+      scavenging_visitors_table_.Register(
+          StaticVisitorBase::kVisitShortcutCandidate,
+          scavenging_visitors_table_.GetVisitorById(
+              StaticVisitorBase::kVisitConsString));
+    }
   }
 }
 
 
 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
-  ASSERT(HEAP->InFromSpace(object));
+  SLOW_ASSERT(HEAP->InFromSpace(object));
   MapWord first_word = object->map_word();
-  ASSERT(!first_word.IsForwardingAddress());
+  SLOW_ASSERT(!first_word.IsForwardingAddress());
   Map* map = first_word.ToMap();
-  DoScavengeObject(map, p, object);
+  map->GetHeap()->DoScavengeObject(map, p, object);
 }
 
 
@@ -1605,29 +1884,31 @@
 }
 
 
-MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
+MaybeObject* Heap::AllocateMap(InstanceType instance_type,
+                               int instance_size,
+                               ElementsKind elements_kind) {
   Object* result;
   { MaybeObject* maybe_result = AllocateRawMap();
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
   Map* map = reinterpret_cast<Map*>(result);
-  map->set_map(meta_map());
+  map->set_map_no_write_barrier(meta_map());
   map->set_instance_type(instance_type);
   map->set_visitor_id(
       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
-  map->set_prototype(null_value());
-  map->set_constructor(null_value());
+  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
+  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
   map->set_instance_size(instance_size);
   map->set_inobject_properties(0);
   map->set_pre_allocated_property_fields(0);
   map->init_instance_descriptors();
-  map->set_code_cache(empty_fixed_array());
-  map->set_prototype_transitions(empty_fixed_array());
+  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
-  map->set_elements_kind(FAST_ELEMENTS);
+  map->set_elements_kind(elements_kind);
 
   // If the map object is aligned fill the padding area with Smi 0 objects.
   if (Map::kPadStart < Map::kSize) {
@@ -1640,13 +1921,12 @@
 
 
 MaybeObject* Heap::AllocateCodeCache() {
-  Object* result;
-  { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
+  CodeCache* code_cache;
+  { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
+    if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
   }
-  CodeCache* code_cache = CodeCache::cast(result);
-  code_cache->set_default_cache(empty_fixed_array());
-  code_cache->set_normal_type_cache(undefined_value());
+  code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
   return code_cache;
 }
 
@@ -1656,6 +1936,40 @@
 }
 
 
+MaybeObject* Heap::AllocateAccessorPair() {
+  AccessorPair* accessors;
+  { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
+    if (!maybe_accessors->To(&accessors)) return maybe_accessors;
+  }
+  accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
+  accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
+  return accessors;
+}
+
+
+MaybeObject* Heap::AllocateTypeFeedbackInfo() {
+  TypeFeedbackInfo* info;
+  { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
+    if (!maybe_info->To(&info)) return maybe_info;
+  }
+  info->set_ic_total_count(0);
+  info->set_ic_with_typeinfo_count(0);
+  info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
+                                SKIP_WRITE_BARRIER);
+  return info;
+}
+
+
+MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
+  AliasedArgumentsEntry* entry;
+  { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
+    if (!maybe_entry->To(&entry)) return maybe_entry;
+  }
+  entry->set_aliased_context_slot(aliased_context_slot);
+  return entry;
+}
+
+
 const Heap::StringTypeTable Heap::string_type_table[] = {
 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
   {type, size, k##camel_name##MapRootIndex},
@@ -1707,12 +2021,19 @@
   }
   set_empty_fixed_array(FixedArray::cast(obj));
 
-  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
+  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_null_value(obj);
+  set_null_value(Oddball::cast(obj));
   Oddball::cast(obj)->set_kind(Oddball::kNull);
 
+  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_undefined_value(Oddball::cast(obj));
+  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
+  ASSERT(!InNewSpace(undefined_value()));
+
   // Allocate the empty descriptor array.
   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -1753,7 +2074,7 @@
         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_serialized_scope_info_map(Map::cast(obj));
+  set_scope_info_map(Map::cast(obj));
 
   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -1798,6 +2119,12 @@
   }
   set_byte_array_map(Map::cast(obj));
 
+  { MaybeObject* maybe_obj =
+        AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_free_space_map(Map::cast(obj));
+
   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
@@ -1926,6 +2253,12 @@
         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
+  set_module_context_map(Map::cast(obj));
+
+  { MaybeObject* maybe_obj =
+        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
   Map* global_context_map = Map::cast(obj);
   global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
   set_global_context_map(global_context_map);
@@ -1950,7 +2283,7 @@
 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate heap numbers in paged
   // spaces.
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
 
   Object* result;
@@ -1959,7 +2292,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  HeapObject::cast(result)->set_map(heap_number_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
   HeapNumber::cast(result)->set_value(value);
   return result;
 }
@@ -1971,13 +2304,13 @@
 
   // This version of AllocateHeapNumber is optimized for
   // allocation in new space.
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   Object* result;
   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  HeapObject::cast(result)->set_map(heap_number_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
   HeapNumber::cast(result)->set_value(value);
   return result;
 }
@@ -1988,7 +2321,8 @@
   { MaybeObject* maybe_result = AllocateRawCell();
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  HeapObject::cast(result)->set_map(global_property_cell_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(
+      global_property_cell_map());
   JSGlobalPropertyCell::cast(result)->set_value(value);
   return result;
 }
@@ -1998,7 +2332,7 @@
                                  Object* to_number,
                                  byte kind) {
   Object* result;
-  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
+  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
@@ -2011,7 +2345,13 @@
   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_neander_map(Map::cast(obj));
+  // Don't use Smi-only elements optimizations for objects with the neander
+  // map. There are too many cases where element values are set directly,
+  // without a bottleneck that could trap the Smi-only -> fast elements
+  // transition, and there appears to be no benefit to optimizing this case.
+  Map* new_neander_map = Map::cast(obj);
+  new_neander_map->set_elements_kind(FAST_ELEMENTS);
+  set_neander_map(new_neander_map);
 
   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2056,6 +2396,12 @@
   // To workaround the problem, make separate functions without inlining.
   Heap::CreateJSEntryStub();
   Heap::CreateJSConstructEntryStub();
+
+  // Create stubs that should be there ahead of time, so we don't have to
+  // create them unexpectedly while generating another stub.
+  // Stub creation mixes raw pointers and handles in an unsafe manner, so
+  // we cannot create stubs while we are creating stubs.
+  CodeStub::GenerateStubsAheadOfTime();
 }
 
 
@@ -2066,20 +2412,22 @@
   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_minus_zero_value(obj);
+  set_minus_zero_value(HeapNumber::cast(obj));
   ASSERT(signbit(minus_zero_value()->Number()) != 0);
 
   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_nan_value(obj);
+  set_nan_value(HeapNumber::cast(obj));
 
-  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
+  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_undefined_value(obj);
-  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
-  ASSERT(!InNewSpace(undefined_value()));
+  set_infinity_value(HeapNumber::cast(obj));
+
+  // The hole has not been created yet, but we want to put something
+  // predictable in the gaps in the symbol table, so let's make that Smi zero.
+  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
 
   // Allocate initial symbol table.
   { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
@@ -2088,19 +2436,17 @@
   // Don't use set_symbol_table() due to asserts.
   roots_[kSymbolTableRootIndex] = obj;
 
-  // Assign the print strings for oddballs after creating symboltable.
-  Object* symbol;
-  { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
-    if (!maybe_symbol->ToObject(&symbol)) return false;
-  }
-  Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
-  Oddball::cast(undefined_value())->set_to_number(nan_value());
-
-  // Allocate the null_value
+  // Finish initializing oddballs after creating the symbol table.
   { MaybeObject* maybe_obj =
-        Oddball::cast(null_value())->Initialize("null",
-                                                Smi::FromInt(0),
-                                                Oddball::kNull);
+        undefined_value()->Initialize("undefined",
+                                      nan_value(),
+                                      Oddball::kUndefined);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+
+  // Initialize the null_value.
+  { MaybeObject* maybe_obj =
+        null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
 
@@ -2109,28 +2455,28 @@
                                            Oddball::kTrue);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_true_value(obj);
+  set_true_value(Oddball::cast(obj));
 
   { MaybeObject* maybe_obj = CreateOddball("false",
                                            Smi::FromInt(0),
                                            Oddball::kFalse);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_false_value(obj);
+  set_false_value(Oddball::cast(obj));
 
   { MaybeObject* maybe_obj = CreateOddball("hole",
                                            Smi::FromInt(-1),
                                            Oddball::kTheHole);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_the_hole_value(obj);
+  set_the_hole_value(Oddball::cast(obj));
 
   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
                                            Smi::FromInt(-4),
                                            Oddball::kArgumentMarker);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_arguments_marker(obj);
+  set_arguments_marker(Oddball::cast(obj));
 
   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
                                            Smi::FromInt(-2),
@@ -2186,6 +2532,7 @@
   }
   set_code_stubs(UnseededNumberDictionary::cast(obj));
 
+
   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
   // is set to avoid expanding the dictionary during bootstrapping.
   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
@@ -2214,7 +2561,10 @@
   }
   set_intrinsic_function_names(StringDictionary::cast(obj));
 
-  if (InitializeNumberStringCache()->IsFailure()) return false;
+  { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_number_string_cache(FixedArray::cast(obj));
 
   // Allocate cache for single character ASCII strings.
   { MaybeObject* maybe_obj =
@@ -2313,7 +2663,7 @@
       }
     }
   }
-  array->set_map(heap->fixed_cow_array_map());
+  array->set_map_no_write_barrier(heap->fixed_cow_array_map());
 }
 
 
@@ -2324,20 +2674,44 @@
 }
 
 
-MaybeObject* Heap::InitializeNumberStringCache() {
-  // Compute the size of the number string cache based on the max heap size.
-  // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
-  // max_semispace_size_ ==   8 MB => number_string_cache_size = 16KB.
-  int number_string_cache_size = max_semispace_size_ / 512;
-  number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
-  Object* obj;
+MaybeObject* Heap::AllocateInitialNumberStringCache() {
   MaybeObject* maybe_obj =
-      AllocateFixedArray(number_string_cache_size * 2, TENURED);
-  if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
+      AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
   return maybe_obj;
 }
 
 
+int Heap::FullSizeNumberStringCacheLength() {
+  // Compute the size of the number string cache based on the max new-space
+  // size.  The cache has a minimum size of twice the initial cache size to
+  // ensure that it is bigger after being made 'full size'.
+  int number_string_cache_size = max_semispace_size_ / 512;
+  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
+                                 Min(0x4000, number_string_cache_size));
+  // There is a string and a number per entry so the length is twice the number
+  // of entries.
+  return number_string_cache_size * 2;
+}
+
+
+void Heap::AllocateFullSizeNumberStringCache() {
+  // The idea is to have a small number string cache in the snapshot to keep
+  // boot-time memory usage down.  Expanding the cache while creating the
+  // snapshot would defeat that purpose, hence the assert below.
+  ASSERT(!Serializer::enabled());
+  MaybeObject* maybe_obj =
+      AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
+  Object* new_cache;
+  if (maybe_obj->ToObject(&new_cache)) {
+    // We don't bother to repopulate the cache with entries from the old cache.
+    // It will be repopulated soon enough with new strings.
+    set_number_string_cache(FixedArray::cast(new_cache));
+  }
+  // If allocation fails then we just return without doing anything.  It is only
+  // a cache, so best effort is OK here.
+}
+
+
 void Heap::FlushNumberStringCache() {
   // Flush the number to string cache.
   int len = number_string_cache()->length();
@@ -2383,11 +2757,17 @@
   int mask = (number_string_cache()->length() >> 1) - 1;
   if (number->IsSmi()) {
     hash = smi_get_hash(Smi::cast(number)) & mask;
-    number_string_cache()->set(hash * 2, Smi::cast(number));
   } else {
     hash = double_get_hash(number->Number()) & mask;
-    number_string_cache()->set(hash * 2, number);
   }
+  if (number_string_cache()->get(hash * 2) != undefined_value() &&
+      number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
+    // The first time we hit a hash collision, we move to the full-sized
+    // number string cache.
+    AllocateFullSizeNumberStringCache();
+    return;
+  }
+  number_string_cache()->set(hash * 2, number);
   number_string_cache()->set(hash * 2 + 1, string);
 }
 
@@ -2422,6 +2802,15 @@
 }
 
 
+MaybeObject* Heap::Uint32ToString(uint32_t value,
+                                  bool check_number_string_cache) {
+  Object* number;
+  MaybeObject* maybe = NumberFromUint32(value);
+  if (!maybe->To<Object>(&number)) return maybe;
+  return NumberToString(number, check_number_string_cache);
+}
+
+
 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
 }
@@ -2478,14 +2867,12 @@
 
 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate foreigns in paged spaces.
-  STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-  Object* result;
-  { MaybeObject* maybe_result = Allocate(foreign_map(), space);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-
-  Foreign::cast(result)->set_address(address);
+  Foreign* result;
+  MaybeObject* maybe_result = Allocate(foreign_map(), space);
+  if (!maybe_result->To(&result)) return maybe_result;
+  result->set_foreign_address(address);
   return result;
 }
 
@@ -2499,18 +2886,20 @@
   share->set_name(name);
   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
   share->set_code(illegal);
-  share->set_scope_info(SerializedScopeInfo::Empty());
+  share->set_scope_info(ScopeInfo::Empty());
   Code* construct_stub =
       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
   share->set_construct_stub(construct_stub);
   share->set_instance_class_name(Object_symbol());
-  share->set_function_data(undefined_value());
-  share->set_script(undefined_value());
-  share->set_debug_info(undefined_value());
-  share->set_inferred_name(empty_string());
-  share->set_initial_map(undefined_value());
-  share->set_this_property_assignments(undefined_value());
-  share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
+  share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
+  share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
+  share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
+  share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
+  share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
+  share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
+  share->set_deopt_counter(FLAG_deopt_every_n_times);
+  share->set_profiler_ticks(0);
+  share->set_ast_node_count(0);
 
   // Set integer fields (smi or int, depending on the architecture).
   share->set_length(0);
@@ -2541,8 +2930,8 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   JSMessageObject* message = JSMessageObject::cast(result);
-  message->set_properties(Heap::empty_fixed_array());
-  message->set_elements(Heap::empty_fixed_array());
+  message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
+  message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
   message->set_type(type);
   message->set_arguments(arguments);
   message->set_start_position(start_position);
@@ -2633,8 +3022,8 @@
   bool is_ascii_data_in_two_byte_string = false;
   if (!is_ascii) {
     // At least one of the strings uses two-byte representation so we
-    // can't use the fast case code for short ascii strings below, but
-    // we can try to save memory if all chars actually fit in ascii.
+    // can't use the fast case code for short ASCII strings below, but
+    // we can try to save memory if all chars actually fit in ASCII.
     is_ascii_data_in_two_byte_string =
         first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
     if (is_ascii_data_in_two_byte_string) {
@@ -2643,9 +3032,9 @@
   }
 
   // If the resulting string is small make a flat string.
-  if (length < String::kMinNonFlatLength) {
+  if (length < ConsString::kMinLength) {
     // Note that neither of the two inputs can be a slice because:
-    STATIC_ASSERT(String::kMinNonFlatLength <= SlicedString::kMinLength);
+    STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
     ASSERT(first->IsFlat());
     ASSERT(second->IsFlat());
     if (is_ascii) {
@@ -2658,14 +3047,14 @@
       // Copy first part.
       const char* src;
       if (first->IsExternalString()) {
-        src = ExternalAsciiString::cast(first)->resource()->data();
+        src = ExternalAsciiString::cast(first)->GetChars();
       } else {
         src = SeqAsciiString::cast(first)->GetChars();
       }
       for (int i = 0; i < first_length; i++) *dest++ = src[i];
       // Copy second part.
       if (second->IsExternalString()) {
-        src = ExternalAsciiString::cast(second)->resource()->data();
+        src = ExternalAsciiString::cast(second)->GetChars();
       } else {
         src = SeqAsciiString::cast(second)->GetChars();
       }
@@ -2721,7 +3110,7 @@
                                      int end,
                                      PretenureFlag pretenure) {
   int length = end - start;
-  if (length == 0) {
+  if (length <= 0) {
     return empty_string();
   } else if (length == 1) {
     return LookupSingleCharacterStringFromCode(buffer->Get(start));
@@ -2737,25 +3126,23 @@
   // Make an attempt to flatten the buffer to reduce access time.
   buffer = buffer->TryFlattenGetString();
 
-  // TODO(1626): For now slicing external strings is not supported.  However,
-  // a flat cons string can have an external string as first part in some cases.
-  // Therefore we have to single out this case as well.
   if (!FLAG_string_slices ||
-      (buffer->IsConsString() &&
-        (!buffer->IsFlat() ||
-         !ConsString::cast(buffer)->first()->IsSeqString())) ||
-      buffer->IsExternalString() ||
+      !buffer->IsFlat() ||
       length < SlicedString::kMinLength ||
       pretenure == TENURED) {
     Object* result;
-    { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
-                     ? AllocateRawAsciiString(length, pretenure)
-                     : AllocateRawTwoByteString(length, pretenure);
+    // WriteToFlat takes care of the case when an indirect string has a
+    // different encoding from its underlying string.  These encodings may
+    // differ because of externalization.
+    bool is_ascii = buffer->IsAsciiRepresentation();
+    { MaybeObject* maybe_result = is_ascii
+                                  ? AllocateRawAsciiString(length, pretenure)
+                                  : AllocateRawTwoByteString(length, pretenure);
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     String* string_result = String::cast(result);
     // Copy the characters into the new object.
-    if (buffer->IsAsciiRepresentation()) {
+    if (is_ascii) {
       ASSERT(string_result->IsAsciiRepresentation());
       char* dest = SeqAsciiString::cast(string_result)->GetChars();
       String::WriteToFlat(buffer, dest, start, end);
@@ -2768,12 +3155,19 @@
   }
 
   ASSERT(buffer->IsFlat());
-  ASSERT(!buffer->IsExternalString());
 #if DEBUG
-  buffer->StringVerify();
+  if (FLAG_verify_heap) {
+    buffer->StringVerify();
+  }
 #endif
 
   Object* result;
+  // When slicing an indirect string we use its encoding for a newly created
+  // slice and don't check the encoding of the underlying string.  This is safe
+  // even if the encodings are different because of externalization.  If an
+  // indirect ASCII string is pointing to a two-byte string, the two-byte char
+  // codes of the underlying string must still fit into ASCII (because
+  // externalization must not change char codes).
   { Map* map = buffer->IsAsciiRepresentation()
                  ? sliced_ascii_string_map()
                  : sliced_string_map();
@@ -2799,13 +3193,14 @@
     sliced_string->set_parent(buffer);
     sliced_string->set_offset(start);
   }
-  ASSERT(sliced_string->parent()->IsSeqString());
+  ASSERT(sliced_string->parent()->IsSeqString() ||
+         sliced_string->parent()->IsExternalString());
   return result;
 }
 
 
 MaybeObject* Heap::AllocateExternalStringFromAscii(
-    ExternalAsciiString::Resource* resource) {
+    const ExternalAsciiString::Resource* resource) {
   size_t length = resource->length();
   if (length > static_cast<size_t>(String::kMaxLength)) {
     isolate()->context()->mark_out_of_memory();
@@ -2828,7 +3223,7 @@
 
 
 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
-    ExternalTwoByteString::Resource* resource) {
+    const ExternalTwoByteString::Resource* resource) {
   size_t length = resource->length();
   if (length > static_cast<size_t>(String::kMaxLength)) {
     isolate()->context()->mark_out_of_memory();
@@ -2890,13 +3285,14 @@
   }
   int size = ByteArray::SizeFor(length);
   Object* result;
-  { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
+  { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
                    ? old_data_space_->AllocateRaw(size)
-                   : lo_space_->AllocateRaw(size);
+                   : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
+  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
+      byte_array_map());
   reinterpret_cast<ByteArray*>(result)->set_length(length);
   return result;
 }
@@ -2908,13 +3304,14 @@
   }
   int size = ByteArray::SizeFor(length);
   AllocationSpace space =
-      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
+      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
   Object* result;
   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
+  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
+      byte_array_map());
   reinterpret_cast<ByteArray*>(result)->set_length(length);
   return result;
 }
@@ -2924,12 +3321,12 @@
   if (size == 0) return;
   HeapObject* filler = HeapObject::FromAddress(addr);
   if (size == kPointerSize) {
-    filler->set_map(one_pointer_filler_map());
+    filler->set_map_no_write_barrier(one_pointer_filler_map());
   } else if (size == 2 * kPointerSize) {
-    filler->set_map(two_pointer_filler_map());
+    filler->set_map_no_write_barrier(two_pointer_filler_map());
   } else {
-    filler->set_map(byte_array_map());
-    ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
+    filler->set_map_no_write_barrier(free_space_map());
+    FreeSpace::cast(filler)->set_size(size);
   }
 }
 
@@ -2946,7 +3343,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<ExternalArray*>(result)->set_map(
+  reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
       MapForExternalArrayType(array_type));
   reinterpret_cast<ExternalArray*>(result)->set_length(length);
   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
@@ -2962,10 +3359,9 @@
                               bool immovable) {
   // Allocate ByteArray before the Code object, so that we do not risk
   // leaving uninitialized Code object (and breaking the heap).
-  Object* reloc_info;
-  { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
-    if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
-  }
+  ByteArray* reloc_info;
+  MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
+  if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
 
   // Compute size.
   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
@@ -2974,8 +3370,8 @@
   MaybeObject* maybe_result;
   // Large code objects and code objects which should stay at a fixed address
   // are allocated in large object space.
-  if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
-    maybe_result = lo_space_->AllocateRawCode(obj_size);
+  if (obj_size > code_space()->AreaSize() || immovable) {
+    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
@@ -2984,18 +3380,21 @@
   if (!maybe_result->ToObject(&result)) return maybe_result;
 
   // Initialize the object
-  HeapObject::cast(result)->set_map(code_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(code_map());
   Code* code = Code::cast(result);
   ASSERT(!isolate_->code_range()->exists() ||
       isolate_->code_range()->contains(code->address()));
   code->set_instruction_size(desc.instr_size);
-  code->set_relocation_info(ByteArray::cast(reloc_info));
+  code->set_relocation_info(reloc_info);
   code->set_flags(flags);
   if (code->is_call_stub() || code->is_keyed_call_stub()) {
     code->set_check_type(RECEIVER_MAP_CHECK);
   }
-  code->set_deoptimization_data(empty_fixed_array());
-  code->set_next_code_flushing_candidate(undefined_value());
+  code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
+  code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  code->set_gc_metadata(Smi::FromInt(0));
+  code->set_ic_age(global_ic_age_);
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
   if (!self_reference.is_null()) {
@@ -3009,7 +3408,9 @@
   code->CopyFrom(desc);
 
 #ifdef DEBUG
-  code->Verify();
+  if (FLAG_verify_heap) {
+    code->Verify();
+  }
 #endif
   return code;
 }
@@ -3019,8 +3420,8 @@
   // Allocate an object the same size as the code object.
   int obj_size = code->Size();
   MaybeObject* maybe_result;
-  if (obj_size > MaxObjectSizeInPagedSpace()) {
-    maybe_result = lo_space_->AllocateRawCode(obj_size);
+  if (obj_size > code_space()->AreaSize()) {
+    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
@@ -3062,8 +3463,8 @@
       static_cast<size_t>(code->instruction_end() - old_addr);
 
   MaybeObject* maybe_result;
-  if (new_obj_size > MaxObjectSizeInPagedSpace()) {
-    maybe_result = lo_space_->AllocateRawCode(new_obj_size);
+  if (new_obj_size > code_space()->AreaSize()) {
+    maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(new_obj_size);
   }
@@ -3089,7 +3490,9 @@
   new_code->Relocate(new_addr - old_addr);
 
 #ifdef DEBUG
-  code->Verify();
+  if (FLAG_verify_heap) {
+    code->Verify();
+  }
 #endif
   return new_code;
 }
@@ -3107,14 +3510,15 @@
         AllocateRaw(map->instance_size(), space, retry_space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  HeapObject::cast(result)->set_map(map);
+  // No need for write barrier since object is white and map is in old space.
+  HeapObject::cast(result)->set_map_no_write_barrier(map);
   return result;
 }
 
 
-MaybeObject* Heap::InitializeFunction(JSFunction* function,
-                                      SharedFunctionInfo* shared,
-                                      Object* prototype) {
+void Heap::InitializeFunction(JSFunction* function,
+                              SharedFunctionInfo* shared,
+                              Object* prototype) {
   ASSERT(!prototype->IsMap());
   function->initialize_properties();
   function->initialize_elements();
@@ -3122,9 +3526,8 @@
   function->set_code(shared->code());
   function->set_prototype_or_initial_map(prototype);
   function->set_context(undefined_value());
-  function->set_literals(empty_fixed_array());
+  function->set_literals_or_bindings(empty_fixed_array());
   function->set_next_function_link(undefined_value());
-  return function;
 }
 
 
@@ -3134,8 +3537,18 @@
   // different context.
   JSFunction* object_function =
       function->context()->global_context()->object_function();
+
+  // Each function prototype gets a copy of the object function map.
+  // This avoids unwanted sharing of maps between prototypes of different
+  // constructors.
+  Map* new_map;
+  ASSERT(object_function->has_initial_map());
+  { MaybeObject* maybe_map =
+        object_function->initial_map()->CopyDropTransitions();
+    if (!maybe_map->To<Map>(&new_map)) return maybe_map;
+  }
   Object* prototype;
-  { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
+  { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
     if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
   }
   // When creating the prototype for the function we must set its
@@ -3160,7 +3573,8 @@
   { MaybeObject* maybe_result = Allocate(function_map, space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  return InitializeFunction(JSFunction::cast(result), shared, prototype);
+  InitializeFunction(JSFunction::cast(result), shared, prototype);
+  return result;
 }
 
 
@@ -3171,7 +3585,7 @@
   JSObject* boilerplate;
   int arguments_object_size;
   bool strict_mode_callee = callee->IsJSFunction() &&
-                            JSFunction::cast(callee)->shared()->strict_mode();
+      !JSFunction::cast(callee)->shared()->is_classic_mode();
   if (strict_mode_callee) {
     boilerplate =
         isolate()->context()->global_context()->
@@ -3277,22 +3691,22 @@
       // Inline constructor can only handle inobject properties.
       fun->shared()->ForbidInlineConstructor();
     } else {
-      Object* descriptors_obj;
+      DescriptorArray* descriptors;
       { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
-        if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
+        if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
           return maybe_descriptors_obj;
         }
       }
-      DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
+      DescriptorArray::WhitenessWitness witness(descriptors);
       for (int i = 0; i < count; i++) {
         String* name = fun->shared()->GetThisPropertyAssignmentName(i);
         ASSERT(name->IsSymbol());
         FieldDescriptor field(name, i, NONE);
         field.SetEnumerationIndex(i);
-        descriptors->Set(i, &field);
+        descriptors->Set(i, &field, witness);
       }
       descriptors->SetNextEnumerationIndex(count);
-      descriptors->SortUnchecked();
+      descriptors->SortUnchecked(witness);
 
       // The descriptors may contain duplicates because the compiler does not
       // guarantee the uniqueness of property names (it would have required
@@ -3322,14 +3736,17 @@
   // TODO(1240798): Initialize the object's body using valid initial values
   // according to the object's initial map.  For example, if the map's
   // instance type is JS_ARRAY_TYPE, the length field should be initialized
-  // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
-  // fixed array (eg, Heap::empty_fixed_array()).  Currently, the object
+  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
+  // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
   // verification code has to cope with (temporarily) invalid objects.  See,
   // for example, JSArray::JSArrayVerify.
   Object* filler;
   // We cannot always fill with one_pointer_filler_map because objects
   // created from API functions expect their internal fields to be initialized
   // with undefined_value.
+  // Pre-allocated fields need to be initialized with undefined_value as well
+  // so that object accesses before the constructor completes (e.g. in the
+  // debugger) will not cause a crash.
   if (map->constructor()->IsJSFunction() &&
       JSFunction::cast(map->constructor())->shared()->
           IsInobjectSlackTrackingInProgress()) {
@@ -3339,7 +3756,7 @@
   } else {
     filler = Heap::undefined_value();
   }
-  obj->InitializeBody(map->instance_size(), filler);
+  obj->InitializeBody(map, Heap::undefined_value(), filler);
 }
 
 
@@ -3367,7 +3784,7 @@
   // Allocate the JSObject.
   AllocationSpace space =
       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
-  if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
   Object* obj;
   { MaybeObject* maybe_obj = Allocate(map, space);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -3377,7 +3794,8 @@
   InitializeJSObjectFromMap(JSObject::cast(obj),
                             FixedArray::cast(properties),
                             map);
-  ASSERT(JSObject::cast(obj)->HasFastElements());
+  ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
+         JSObject::cast(obj)->HasFastElements());
   return obj;
 }
 
@@ -3394,8 +3812,8 @@
     Map::cast(initial_map)->set_constructor(constructor);
   }
   // Allocate the object based on the constructor's initial map.
-  MaybeObject* result =
-      AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
+  MaybeObject* result = AllocateJSObjectFromMap(
+      constructor->initial_map(), pretenure);
 #ifdef DEBUG
   // Make sure result is NOT a global object if valid.
   Object* non_failure;
@@ -3405,6 +3823,64 @@
 }
 
 
+MaybeObject* Heap::AllocateJSArrayAndStorage(
+    ElementsKind elements_kind,
+    int length,
+    int capacity,
+    ArrayStorageAllocationMode mode,
+    PretenureFlag pretenure) {
+  ASSERT(capacity >= length);
+  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
+  JSArray* array;
+  if (!maybe_array->To(&array)) return maybe_array;
+
+  if (capacity == 0) {
+    array->set_length(Smi::FromInt(0));
+    array->set_elements(empty_fixed_array());
+    return array;
+  }
+
+  FixedArrayBase* elms;
+  MaybeObject* maybe_elms = NULL;
+  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+      maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
+    } else {
+      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+      maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
+    }
+  } else {
+    ASSERT(elements_kind == FAST_ELEMENTS ||
+           elements_kind == FAST_SMI_ONLY_ELEMENTS);
+    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+      maybe_elms = AllocateUninitializedFixedArray(capacity);
+    } else {
+      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+      maybe_elms = AllocateFixedArrayWithHoles(capacity);
+    }
+  }
+  if (!maybe_elms->To(&elms)) return maybe_elms;
+
+  array->set_elements(elms);
+  array->set_length(Smi::FromInt(length));
+  return array;
+}
+
+
+MaybeObject* Heap::AllocateJSArrayWithElements(
+    FixedArrayBase* elements,
+    ElementsKind elements_kind,
+    PretenureFlag pretenure) {
+  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
+  JSArray* array;
+  if (!maybe_array->To(&array)) return maybe_array;
+
+  array->set_elements(elements);
+  array->set_length(Smi::FromInt(elements->length()));
+  return array;
+}
+
+
 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
   // Allocate map.
   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
@@ -3420,6 +3896,7 @@
   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
   result->set_handler(handler);
+  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
   return result;
 }
 
@@ -3443,6 +3920,7 @@
   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
   result->set_handler(handler);
+  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
   result->set_call_trap(call_trap);
   result->set_construct_trap(construct_trap);
   return result;
@@ -3510,7 +3988,7 @@
   }
   Map* new_map = Map::cast(obj);
 
-  // Setup the global object as a normalized object.
+  // Set up the global object as a normalized object.
   global->set_map(new_map);
   global->map()->clear_instance_descriptors();
   global->set_properties(dictionary);
@@ -3525,13 +4003,15 @@
 MaybeObject* Heap::CopyJSObject(JSObject* source) {
   // Never used to copy functions.  If functions need to be copied we
   // have to be careful to clear the literals array.
-  ASSERT(!source->IsJSFunction());
+  SLOW_ASSERT(!source->IsJSFunction());
 
   // Make the clone.
   Map* map = source->map();
   int object_size = map->instance_size();
   Object* clone;
 
+  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+
   // If we're forced to always allocate, we use the general allocation
   // functions which may leave us with an object in old space.
   if (always_allocate()) {
@@ -3548,10 +4028,11 @@
                  JSObject::kHeaderSize,
                  (object_size - JSObject::kHeaderSize) / kPointerSize);
   } else {
+    wb_mode = SKIP_WRITE_BARRIER;
     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
-    ASSERT(InNewSpace(clone));
+    SLOW_ASSERT(InNewSpace(clone));
     // Since we know the clone is allocated in new space, we can copy
     // the contents without worrying about updating the write barrier.
     CopyBlock(HeapObject::cast(clone)->address(),
@@ -3559,6 +4040,8 @@
               object_size);
   }
 
+  SLOW_ASSERT(
+      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
@@ -3574,7 +4057,7 @@
       }
       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
     }
-    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem));
+    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
   }
   // Update properties if necessary.
   if (properties->length() > 0) {
@@ -3582,7 +4065,7 @@
     { MaybeObject* maybe_prop = CopyFixedArray(properties);
       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
     }
-    JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
+    JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
   }
   // Return the new clone.
   return clone;
@@ -3591,13 +4074,13 @@
 
 MaybeObject* Heap::ReinitializeJSReceiver(
     JSReceiver* object, InstanceType type, int size) {
-  ASSERT(type >= FIRST_JS_RECEIVER_TYPE);
+  ASSERT(type >= FIRST_JS_OBJECT_TYPE);
 
   // Allocate fresh map.
   // TODO(rossberg): Once we optimize proxies, cache these maps.
   Map* map;
-  MaybeObject* maybe_map_obj = AllocateMap(type, size);
-  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
+  MaybeObject* maybe = AllocateMap(type, size);
+  if (!maybe->To<Map>(&map)) return maybe;
 
   // Check that the receiver has at least the size of the fresh object.
   int size_difference = object->map()->instance_size() - map->instance_size();
@@ -3608,30 +4091,35 @@
   // Allocate the backing storage for the properties.
   int prop_size = map->unused_property_fields() - map->inobject_properties();
   Object* properties;
-  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
-    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
+  maybe = AllocateFixedArray(prop_size, TENURED);
+  if (!maybe->ToObject(&properties)) return maybe;
+
+  // Functions require some allocation, which might fail here.
+  SharedFunctionInfo* shared = NULL;
+  if (type == JS_FUNCTION_TYPE) {
+    String* name;
+    maybe = LookupAsciiSymbol("<freezing call trap>");
+    if (!maybe->To<String>(&name)) return maybe;
+    maybe = AllocateSharedFunctionInfo(name);
+    if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
   }
 
+  // Because of possible retries of this function after failure,
+  // we must NOT fail after this point, where we have changed the type!
+
   // Reset the map for the object.
   object->set_map(map);
+  JSObject* jsobj = JSObject::cast(object);
 
   // Reinitialize the object from the constructor map.
-  InitializeJSObjectFromMap(JSObject::cast(object),
-                            FixedArray::cast(properties), map);
+  InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
 
   // Functions require some minimal initialization.
   if (type == JS_FUNCTION_TYPE) {
-    String* name;
-    MaybeObject* maybe_name = LookupAsciiSymbol("<freezing call trap>");
-    if (!maybe_name->To<String>(&name)) return maybe_name;
-    SharedFunctionInfo* shared;
-    MaybeObject* maybe_shared = AllocateSharedFunctionInfo(name);
-    if (!maybe_shared->To<SharedFunctionInfo>(&shared)) return maybe_shared;
-    JSFunction* func;
-    MaybeObject* maybe_func =
-        InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
-    if (!maybe_func->To<JSFunction>(&func)) return maybe_func;
-    func->set_context(isolate()->context()->global_context());
+    map->set_function_with_prototype(true);
+    InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
+    JSFunction::cast(object)->set_context(
+        isolate()->context()->global_context());
   }
 
   // Put in filler if the new object is smaller than the old.
@@ -3692,8 +4180,6 @@
 
 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
                                               PretenureFlag pretenure) {
-  // V8 only supports characters in the Basic Multilingual Plane.
-  const uc32 kMaxSupportedChar = 0xFFFF;
   // Count the number of characters in the UTF-8 string and check if
   // it is an ASCII string.
   Access<UnicodeCache::Utf8Decoder>
@@ -3701,8 +4187,12 @@
   decoder->Reset(string.start(), string.length());
   int chars = 0;
   while (decoder->has_more()) {
-    decoder->GetNext();
-    chars++;
+    uint32_t r = decoder->GetNext();
+    if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+      chars++;
+    } else {
+      chars += 2;
+    }
   }
 
   Object* result;
@@ -3713,10 +4203,15 @@
   // Convert and copy the characters into the new object.
   String* string_result = String::cast(result);
   decoder->Reset(string.start(), string.length());
-  for (int i = 0; i < chars; i++) {
-    uc32 r = decoder->GetNext();
-    if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
-    string_result->Set(i, r);
+  int i = 0;
+  while (i < chars) {
+    uint32_t r = decoder->GetNext();
+    if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+      string_result->Set(i++, unibrow::Utf16::LeadSurrogate(r));
+      string_result->Set(i++, unibrow::Utf16::TrailSurrogate(r));
+    } else {
+      string_result->Set(i++, r);
+    }
   }
   return result;
 }
@@ -3749,31 +4244,22 @@
   if (InNewSpace(string)) return NULL;
 
   // Find the corresponding symbol map for strings.
-  Map* map = string->map();
-  if (map == ascii_string_map()) {
-    return ascii_symbol_map();
+  switch (string->map()->instance_type()) {
+    case STRING_TYPE: return symbol_map();
+    case ASCII_STRING_TYPE: return ascii_symbol_map();
+    case CONS_STRING_TYPE: return cons_symbol_map();
+    case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
+    case EXTERNAL_STRING_TYPE: return external_symbol_map();
+    case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
+    case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
+      return external_symbol_with_ascii_data_map();
+    case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
+    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+      return short_external_ascii_symbol_map();
+    case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
+      return short_external_symbol_with_ascii_data_map();
+    default: return NULL;  // No match found.
   }
-  if (map == string_map()) {
-    return symbol_map();
-  }
-  if (map == cons_string_map()) {
-    return cons_symbol_map();
-  }
-  if (map == cons_ascii_string_map()) {
-    return cons_ascii_symbol_map();
-  }
-  if (map == external_string_map()) {
-    return external_symbol_map();
-  }
-  if (map == external_ascii_string_map()) {
-    return external_ascii_symbol_map();
-  }
-  if (map == external_string_with_ascii_data_map()) {
-    return external_symbol_with_ascii_data_map();
-  }
-
-  // No match found.
-  return NULL;
 }
 
 
@@ -3782,8 +4268,8 @@
                                           uint32_t hash_field) {
   ASSERT(chars >= 0);
   // Ensure the chars matches the number of characters in the buffer.
-  ASSERT(static_cast<unsigned>(chars) == buffer->Length());
-  // Determine whether the string is ascii.
+  ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
+  // Determine whether the string is ASCII.
   bool is_ascii = true;
   while (buffer->has_more()) {
     if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
@@ -3813,13 +4299,13 @@
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
-                   ? lo_space_->AllocateRaw(size)
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
+                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<HeapObject*>(result)->set_map(map);
+  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
   // Set length and hash fields of the allocated string.
   String* answer = String::cast(result);
   answer->set_length(chars);
@@ -3828,8 +4314,15 @@
   ASSERT_EQ(size, answer->Size());
 
   // Fill in the characters.
-  for (int i = 0; i < chars; i++) {
-    answer->Set(i, buffer->GetNext());
+  int i = 0;
+  while (i < chars) {
+    uint32_t character = buffer->GetNext();
+    if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+      answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
+      answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
+    } else {
+      answer->Set(i++, character);
+    }
   }
   return answer;
 }
@@ -3850,11 +4343,12 @@
     if (size > kMaxObjectSizeInNewSpace) {
       // Allocate in large object space, retry space will be ignored.
       space = LO_SPACE;
-    } else if (size > MaxObjectSizeInPagedSpace()) {
+    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
       // Allocate in new space, retry in large object space.
       retry_space = LO_SPACE;
     }
-  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+  } else if (space == OLD_DATA_SPACE &&
+             size > Page::kMaxNonCodeHeapObjectSize) {
     space = LO_SPACE;
   }
   Object* result;
@@ -3863,7 +4357,7 @@
   }
 
   // Partially initialize the object.
-  HeapObject::cast(result)->set_map(ascii_string_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -3885,11 +4379,12 @@
     if (size > kMaxObjectSizeInNewSpace) {
       // Allocate in large object space, retry space will be ignored.
       space = LO_SPACE;
-    } else if (size > MaxObjectSizeInPagedSpace()) {
+    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
       // Allocate in new space, retry in large object space.
       retry_space = LO_SPACE;
     }
-  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+  } else if (space == OLD_DATA_SPACE &&
+             size > Page::kMaxNonCodeHeapObjectSize) {
     space = LO_SPACE;
   }
   Object* result;
@@ -3898,7 +4393,7 @@
   }
 
   // Partially initialize the object.
-  HeapObject::cast(result)->set_map(string_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(string_map());
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -3906,6 +4401,25 @@
 }
 
 
+MaybeObject* Heap::AllocateJSArray(
+    ElementsKind elements_kind,
+    PretenureFlag pretenure) {
+  Context* global_context = isolate()->context()->global_context();
+  JSFunction* array_function = global_context->array_function();
+  Map* map = array_function->initial_map();
+  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+    map = Map::cast(global_context->double_js_array_map());
+  } else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
+    map = Map::cast(global_context->object_js_array_map());
+  } else {
+    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
+    ASSERT(map == global_context->smi_js_array_map());
+  }
+
+  return AllocateJSObjectFromMap(map, pretenure);
+}
+
+
 MaybeObject* Heap::AllocateEmptyFixedArray() {
   int size = FixedArray::SizeFor(0);
   Object* result;
@@ -3914,7 +4428,8 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   // Initialize the object.
-  reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
+  reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
+      fixed_array_map());
   reinterpret_cast<FixedArray*>(result)->set_length(0);
   return result;
 }
@@ -3931,7 +4446,7 @@
   int size = FixedArray::SizeFor(length);
   return size <= kMaxObjectSizeInNewSpace
       ? new_space_.AllocateRaw(size)
-      : lo_space_->AllocateRawFixedArray(size);
+      : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
 }
 
 
@@ -3943,13 +4458,13 @@
   }
   if (InNewSpace(obj)) {
     HeapObject* dst = HeapObject::cast(obj);
-    dst->set_map(map);
+    dst->set_map_no_write_barrier(map);
     CopyBlock(dst->address() + kPointerSize,
               src->address() + kPointerSize,
               FixedArray::SizeFor(len) - kPointerSize);
     return obj;
   }
-  HeapObject::cast(obj)->set_map(map);
+  HeapObject::cast(obj)->set_map_no_write_barrier(map);
   FixedArray* result = FixedArray::cast(obj);
   result->set_length(len);
 
@@ -3969,7 +4484,7 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   HeapObject* dst = HeapObject::cast(obj);
-  dst->set_map(map);
+  dst->set_map_no_write_barrier(map);
   CopyBlock(
       dst->address() + FixedDoubleArray::kLengthOffset,
       src->address() + FixedDoubleArray::kLengthOffset,
@@ -3987,7 +4502,7 @@
   }
   // Initialize header.
   FixedArray* array = reinterpret_cast<FixedArray*>(result);
-  array->set_map(fixed_array_map());
+  array->set_map_no_write_barrier(fixed_array_map());
   array->set_length(length);
   // Initialize body.
   ASSERT(!InNewSpace(undefined_value()));
@@ -4008,13 +4523,13 @@
     // Too big for new space.
     space = LO_SPACE;
   } else if (space == OLD_POINTER_SPACE &&
-             size > MaxObjectSizeInPagedSpace()) {
+             size > Page::kMaxNonCodeHeapObjectSize) {
     // Too big for old pointer space.
     space = LO_SPACE;
   }
 
   AllocationSpace retry_space =
-      (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
+      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
 
   return AllocateRaw(size, space, retry_space);
 }
@@ -4035,7 +4550,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  HeapObject::cast(result)->set_map(heap->fixed_array_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
   FixedArray* array = FixedArray::cast(result);
   array->set_length(length);
   MemsetPointer(array->data_start(), filler, length);
@@ -4068,7 +4583,8 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
-  reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
+  reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
+      fixed_array_map());
   FixedArray::cast(obj)->set_length(length);
   return obj;
 }
@@ -4082,7 +4598,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   // Initialize the object.
-  reinterpret_cast<FixedDoubleArray*>(result)->set_map(
+  reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
       fixed_double_array_map());
   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
   return result;
@@ -4092,16 +4608,38 @@
 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
     int length,
     PretenureFlag pretenure) {
-  if (length == 0) return empty_fixed_double_array();
+  if (length == 0) return empty_fixed_array();
 
-  Object* obj;
-  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  Object* elements_object;
+  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
+  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
+  FixedDoubleArray* elements =
+      reinterpret_cast<FixedDoubleArray*>(elements_object);
+
+  elements->set_map_no_write_barrier(fixed_double_array_map());
+  elements->set_length(length);
+  return elements;
+}
+
+
+MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
+    int length,
+    PretenureFlag pretenure) {
+  if (length == 0) return empty_fixed_array();
+
+  Object* elements_object;
+  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
+  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
+  FixedDoubleArray* elements =
+      reinterpret_cast<FixedDoubleArray*>(elements_object);
+
+  for (int i = 0; i < length; ++i) {
+    elements->set_the_hole(i);
   }
 
-  reinterpret_cast<FixedDoubleArray*>(obj)->set_map(fixed_double_array_map());
-  FixedDoubleArray::cast(obj)->set_length(length);
-  return obj;
+  elements->set_map_no_write_barrier(fixed_double_array_map());
+  elements->set_length(length);
+  return elements;
 }
 
 
@@ -4118,13 +4656,13 @@
     // Too big for new space.
     space = LO_SPACE;
   } else if (space == OLD_DATA_SPACE &&
-             size > MaxObjectSizeInPagedSpace()) {
+             size > Page::kMaxNonCodeHeapObjectSize) {
     // Too big for old data space.
     space = LO_SPACE;
   }
 
   AllocationSpace retry_space =
-      (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
+      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
 
   return AllocateRaw(size, space, retry_space);
 }
@@ -4135,7 +4673,8 @@
   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
+  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
+      hash_table_map());
   ASSERT(result->IsHashTable());
   return result;
 }
@@ -4148,7 +4687,10 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map(global_context_map());
+  context->set_map_no_write_barrier(global_context_map());
+  context->set_smi_js_array_map(undefined_value());
+  context->set_double_js_array_map(undefined_value());
+  context->set_object_js_array_map(undefined_value());
   ASSERT(context->IsGlobalContext());
   ASSERT(result->IsContext());
   return result;
@@ -4162,7 +4704,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map(function_context_map());
+  context->set_map_no_write_barrier(function_context_map());
   context->set_closure(function);
   context->set_previous(function->context());
   context->set_extension(NULL);
@@ -4182,7 +4724,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map(catch_context_map());
+  context->set_map_no_write_barrier(catch_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(name);
@@ -4200,7 +4742,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map(with_context_map());
+  context->set_map_no_write_barrier(with_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(extension);
@@ -4211,14 +4753,14 @@
 
 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
                                         Context* previous,
-                                        SerializedScopeInfo* scope_info) {
+                                        ScopeInfo* scope_info) {
   Object* result;
   { MaybeObject* maybe_result =
-        AllocateFixedArrayWithHoles(scope_info->NumberOfContextSlots());
+        AllocateFixedArrayWithHoles(scope_info->ContextLength());
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map(block_context_map());
+  context->set_map_no_write_barrier(block_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(scope_info);
@@ -4227,14 +4769,11 @@
 }
 
 
-MaybeObject* Heap::AllocateSerializedScopeInfo(int length) {
-  Object* result;
-  { MaybeObject* maybe_result = AllocateFixedArray(length, TENURED);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  SerializedScopeInfo* scope_info =
-      reinterpret_cast<SerializedScopeInfo*>(result);
-  scope_info->set_map(serialized_scope_info_map());
+MaybeObject* Heap::AllocateScopeInfo(int length) {
+  FixedArray* scope_info;
+  MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
+  if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
+  scope_info->set_map_no_write_barrier(scope_info_map());
   return scope_info;
 }
 
@@ -4252,7 +4791,7 @@
   }
   int size = map->instance_size();
   AllocationSpace space =
-      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
+      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
   Object* result;
   { MaybeObject* maybe_result = Allocate(map, space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4262,7 +4801,127 @@
 }
 
 
-bool Heap::IdleNotification() {
+bool Heap::IsHeapIterable() {
+  return (!old_pointer_space()->was_swept_conservatively() &&
+          !old_data_space()->was_swept_conservatively());
+}
+
+
+void Heap::EnsureHeapIsIterable() {
+  ASSERT(IsAllocationAllowed());
+  if (!IsHeapIterable()) {
+    CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
+  }
+  ASSERT(IsHeapIterable());
+}
+
+
+void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
+  // This flag prevents incremental marking from requesting GC via stack guard
+  idle_notification_will_schedule_next_gc_ = true;
+  incremental_marking()->Step(step_size);
+  idle_notification_will_schedule_next_gc_ = false;
+
+  if (incremental_marking()->IsComplete()) {
+    bool uncommit = false;
+    if (gc_count_at_last_idle_gc_ == gc_count_) {
+      // No GC since the last full GC, the mutator is probably not active.
+      isolate_->compilation_cache()->Clear();
+      uncommit = true;
+    }
+    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
+    gc_count_at_last_idle_gc_ = gc_count_;
+    if (uncommit) {
+      new_space_.Shrink();
+      UncommitFromSpace();
+    }
+  }
+}
+
+
+bool Heap::IdleNotification(int hint) {
+  const int kMaxHint = 1000;
+  intptr_t size_factor = Min(Max(hint, 30), kMaxHint) / 10;
+  // The size factor is in range [3..100].
+  intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
+
+  if (contexts_disposed_ > 0) {
+    if (hint >= kMaxHint) {
+      // The embedder is requesting a lot of GC work after context disposal,
+      // so we age inline caches to keep them from holding objects of the
+      // old context alive.
+      AgeInlineCaches();
+    }
+    int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
+    if (hint >= mark_sweep_time && !FLAG_expose_gc &&
+        incremental_marking()->IsStopped()) {
+      HistogramTimerScope scope(isolate_->counters()->gc_context());
+      CollectAllGarbage(kReduceMemoryFootprintMask,
+                        "idle notification: contexts disposed");
+    } else {
+      AdvanceIdleIncrementalMarking(step_size);
+      contexts_disposed_ = 0;
+    }
+    // Make sure that we have no pending context disposals.
+    // Take into account that we might have decided to delay full collection
+    // because incremental marking is in progress.
+    ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
+    return false;
+  }
+
+  if (hint >= kMaxHint || !FLAG_incremental_marking ||
+      FLAG_expose_gc || Serializer::enabled()) {
+    return IdleGlobalGC();
+  }
+
+  // Each IdleNotification does a small chunk of GC work; together the
+  // chunks complete a round of incremental GCs, after which we wait until
+  // the mutator creates enough garbage to justify a new round.
+  // An incremental GC progresses as follows:
+  // 1. many incremental marking steps,
+  // 2. one old space mark-sweep-compact,
+  // 3. many lazy sweep steps.
+  // Use mark-sweep-compact events to count incremental GCs in a round.
+
+  if (incremental_marking()->IsStopped()) {
+    if (!IsSweepingComplete() &&
+        !AdvanceSweepers(static_cast<int>(step_size))) {
+      return false;
+    }
+  }
+
+  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+    if (EnoughGarbageSinceLastIdleRound()) {
+      StartIdleRound();
+    } else {
+      return true;
+    }
+  }
+
+  int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
+  mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
+  ms_count_at_last_idle_notification_ = ms_count_;
+
+  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+    FinishIdleRound();
+    return true;
+  }
+
+  if (incremental_marking()->IsStopped()) {
+    if (!WorthStartingGCWhenIdle()) {
+      FinishIdleRound();
+      return true;
+    }
+    incremental_marking()->Start();
+  }
+
+  AdvanceIdleIncrementalMarking(step_size);
+  return false;
+}
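
The hint-to-step-size mapping above clamps the embedder's hint into a
bounded amount of marking work. A minimal standalone sketch of the
arithmetic (plain C++; the kAllocatedThreshold value is an assumption made
for illustration, the real constant belongs to IncrementalMarking):

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int kMaxHint = 1000;
      const long kAllocatedThreshold = 65536;  // assumed, not V8's value
      for (int hint : {0, 30, 100, 500, 1000, 5000}) {
        // Clamp the hint to [30..1000] and divide by 10: the size factor
        // lands in [3..100], matching the comment in IdleNotification.
        long size_factor = std::min(std::max(hint, 30), kMaxHint) / 10;
        long step_size = size_factor * kAllocatedThreshold;
        std::printf("hint=%4d -> size_factor=%3ld step_size=%ld\n",
                    hint, size_factor, step_size);
      }
    }
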
+
+
+bool Heap::IdleGlobalGC() {
   static const int kIdlesBeforeScavenge = 4;
   static const int kIdlesBeforeMarkSweep = 7;
   static const int kIdlesBeforeMarkCompact = 8;
@@ -4290,12 +4949,7 @@
   }
 
   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
-    if (contexts_disposed_ > 0) {
-      HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(false);
-    } else {
-      CollectGarbage(NEW_SPACE);
-    }
+    CollectGarbage(NEW_SPACE, "idle notification");
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
@@ -4304,32 +4958,16 @@
     // generated code for cached functions.
     isolate_->compilation_cache()->Clear();
 
-    CollectAllGarbage(false);
+    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
 
   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
-    CollectAllGarbage(true);
+    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
     number_idle_notifications_ = 0;
     finished = true;
-  } else if (contexts_disposed_ > 0) {
-    if (FLAG_expose_gc) {
-      contexts_disposed_ = 0;
-    } else {
-      HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(false);
-      last_idle_notification_gc_count_ = gc_count_;
-    }
-    // If this is the first idle notification, we reset the
-    // notification count to avoid letting idle notifications for
-    // context disposal garbage collections start a potentially too
-    // aggressive idle GC cycle.
-    if (number_idle_notifications_ <= 1) {
-      number_idle_notifications_ = 0;
-      uncommit = false;
-    }
   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
     // If we have received more than kIdlesBeforeMarkCompact idle
     // notifications we do not perform any cleanup because we don't
@@ -4337,10 +4975,8 @@
     finished = true;
   }
 
-  // Make sure that we have no pending context disposals and
-  // conditionally uncommit from space.
-  ASSERT(contexts_disposed_ == 0);
   if (uncommit) UncommitFromSpace();
+
   return finished;
 }
 
@@ -4348,7 +4984,7 @@
 #ifdef DEBUG
 
 void Heap::Print() {
-  if (!HasBeenSetup()) return;
+  if (!HasBeenSetUp()) return;
   isolate()->PrintStack();
   AllSpaces spaces;
   for (Space* space = spaces.next(); space != NULL; space = spaces.next())
@@ -4374,11 +5010,11 @@
   USE(title);
   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
          title, gc_count_);
-  PrintF("mark-compact GC : %d\n", mc_count_);
   PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
          old_gen_promotion_limit_);
   PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
          old_gen_allocation_limit_);
+  PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
 
   PrintF("\n");
   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
@@ -4413,7 +5049,7 @@
 
 bool Heap::Contains(Address addr) {
   if (OS::IsOutsideAllocatedSpace(addr)) return false;
-  return HasBeenSetup() &&
+  return HasBeenSetUp() &&
     (new_space_.ToSpaceContains(addr) ||
      old_pointer_space_->Contains(addr) ||
      old_data_space_->Contains(addr) ||
@@ -4431,7 +5067,7 @@
 
 bool Heap::InSpace(Address addr, AllocationSpace space) {
   if (OS::IsOutsideAllocatedSpace(addr)) return false;
-  if (!HasBeenSetup()) return false;
+  if (!HasBeenSetUp()) return false;
 
   switch (space) {
     case NEW_SPACE:
@@ -4455,69 +5091,18 @@
 
 
 #ifdef DEBUG
-static void DummyScavengePointer(HeapObject** p) {
-}
-
-
-static void VerifyPointersUnderWatermark(
-    PagedSpace* space,
-    DirtyRegionCallback visit_dirty_region) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-
-  while (it.has_next()) {
-    Page* page = it.next();
-    Address start = page->ObjectAreaStart();
-    Address end = page->AllocationWatermark();
-
-    HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
-                              start,
-                              end,
-                              visit_dirty_region,
-                              &DummyScavengePointer);
-  }
-}
-
-
-static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
-  LargeObjectIterator it(space);
-  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
-    if (object->IsFixedArray()) {
-      Address slot_address = object->address();
-      Address end = object->address() + object->Size();
-
-      while (slot_address < end) {
-        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
-        // When we are not in GC the Heap::InNewSpace() predicate
-        // checks that pointers which satisfy predicate point into
-        // the active semispace.
-        HEAP->InNewSpace(*slot);
-        slot_address += kPointerSize;
-      }
-    }
-  }
-}
-
-
 void Heap::Verify() {
-  ASSERT(HasBeenSetup());
+  ASSERT(HasBeenSetUp());
+
+  store_buffer()->Verify();
 
   VerifyPointersVisitor visitor;
   IterateRoots(&visitor, VISIT_ONLY_STRONG);
 
   new_space_.Verify();
 
-  VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
-  old_pointer_space_->Verify(&dirty_regions_visitor);
-  map_space_->Verify(&dirty_regions_visitor);
-
-  VerifyPointersUnderWatermark(old_pointer_space_,
-                               &IteratePointersInDirtyRegion);
-  VerifyPointersUnderWatermark(map_space_,
-                               &IteratePointersInDirtyMapsRegion);
-  VerifyPointersUnderWatermark(lo_space_);
-
-  VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
-  VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
+  old_pointer_space_->Verify(&visitor);
+  map_space_->Verify(&visitor);
 
   VerifyPointersVisitor no_dirty_regions_visitor;
   old_data_space_->Verify(&no_dirty_regions_visitor);
@@ -4525,6 +5110,36 @@
   cell_space_->Verify(&no_dirty_regions_visitor);
 
   lo_space_->Verify();
+
+  VerifyNoAccessorPairSharing();
+}
+
+
+void Heap::VerifyNoAccessorPairSharing() {
+  // Verification is done in 2 phases: First we mark all AccessorPairs, checking
+  // that we mark only unmarked pairs, then we clear all marks, restoring the
+  // initial state. We use the Smi tag of the AccessorPair's getter as the
+  // marking bit, because we can never see a Smi as the getter.
+  for (int phase = 0; phase < 2; phase++) {
+    HeapObjectIterator iter(map_space());
+    for (HeapObject* obj = iter.Next(); obj != NULL; obj = iter.Next()) {
+      if (obj->IsMap()) {
+        DescriptorArray* descs = Map::cast(obj)->instance_descriptors();
+        for (int i = 0; i < descs->number_of_descriptors(); i++) {
+          if (descs->GetType(i) == CALLBACKS &&
+              descs->GetValue(i)->IsAccessorPair()) {
+            AccessorPair* accessors = AccessorPair::cast(descs->GetValue(i));
+            uintptr_t before =
+                reinterpret_cast<uintptr_t>(accessors->getter());
+            uintptr_t after = (phase == 0) ?
+                ((before & ~kSmiTagMask) | kSmiTag) :
+                ((before & ~kHeapObjectTag) | kHeapObjectTag);
+            CHECK(before != after);
+            accessors->set_getter(reinterpret_cast<Object*>(after));
+          }
+        }
+      }
+    }
+  }
 }
 #endif  // DEBUG
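
VerifyNoAccessorPairSharing marks an AccessorPair by rewriting the low tag
bits of its getter word and later restores them. A minimal sketch of that
tag flip, assuming V8's conventional kSmiTag == 0 and kHeapObjectTag == 1
and treating the word as a plain integer:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kSmiTag = 0;
      const uintptr_t kSmiTagMask = 1;
      const uintptr_t kHeapObjectTag = 1;

      uintptr_t getter = 0x1000 | kHeapObjectTag;  // a tagged heap pointer

      // Phase 0: force the Smi tag onto the word ("mark" the pair).
      uintptr_t marked = (getter & ~kSmiTagMask) | kSmiTag;
      assert(marked != getter);  // a marked pair is distinguishable

      // Phase 1: restore the heap-object tag ("unmark").
      uintptr_t restored = (marked & ~kHeapObjectTag) | kHeapObjectTag;
      assert(restored == getter);  // the round trip is lossless
      return 0;
    }
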
 
@@ -4621,277 +5236,223 @@
 
 #ifdef DEBUG
 void Heap::ZapFromSpace() {
-  ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
-  for (Address a = new_space_.FromSpaceLow();
-       a < new_space_.FromSpaceHigh();
-       a += kPointerSize) {
-    Memory::Address_at(a) = kFromSpaceZapValue;
+  NewSpacePageIterator it(new_space_.FromSpaceStart(),
+                          new_space_.FromSpaceEnd());
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    for (Address cursor = page->area_start(), limit = page->area_end();
+         cursor < limit;
+         cursor += kPointerSize) {
+      Memory::Address_at(cursor) = kFromSpaceZapValue;
+    }
   }
 }
 #endif  // DEBUG
 
 
-bool Heap::IteratePointersInDirtyRegion(Heap* heap,
-                                        Address start,
-                                        Address end,
-                                        ObjectSlotCallback copy_object_func) {
-  Address slot_address = start;
-  bool pointers_to_new_space_found = false;
-
-  while (slot_address < end) {
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    if (heap->InNewSpace(*slot)) {
-      ASSERT((*slot)->IsHeapObject());
-      copy_object_func(reinterpret_cast<HeapObject**>(slot));
-      if (heap->InNewSpace(*slot)) {
-        ASSERT((*slot)->IsHeapObject());
-        pointers_to_new_space_found = true;
-      }
-    }
-    slot_address += kPointerSize;
-  }
-  return pointers_to_new_space_found;
-}
-
-
-// Compute start address of the first map following given addr.
-static inline Address MapStartAlign(Address addr) {
-  Address page = Page::FromAddress(addr)->ObjectAreaStart();
-  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
-}
-
-
-// Compute end address of the first map preceding given addr.
-static inline Address MapEndAlign(Address addr) {
-  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
-  return page + ((addr - page) / Map::kSize * Map::kSize);
-}
-
-
-static bool IteratePointersInDirtyMaps(Address start,
-                                       Address end,
-                                       ObjectSlotCallback copy_object_func) {
-  ASSERT(MapStartAlign(start) == start);
-  ASSERT(MapEndAlign(end) == end);
-
-  Address map_address = start;
-  bool pointers_to_new_space_found = false;
-
-  Heap* heap = HEAP;
-  while (map_address < end) {
-    ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
-    ASSERT(Memory::Object_at(map_address)->IsMap());
-
-    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
-    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
-
-    if (Heap::IteratePointersInDirtyRegion(heap,
-                                           pointer_fields_start,
-                                           pointer_fields_end,
-                                           copy_object_func)) {
-      pointers_to_new_space_found = true;
-    }
-
-    map_address += Map::kSize;
-  }
-
-  return pointers_to_new_space_found;
-}
-
-
-bool Heap::IteratePointersInDirtyMapsRegion(
-    Heap* heap,
-    Address start,
-    Address end,
-    ObjectSlotCallback copy_object_func) {
-  Address map_aligned_start = MapStartAlign(start);
-  Address map_aligned_end   = MapEndAlign(end);
-
-  bool contains_pointers_to_new_space = false;
-
-  if (map_aligned_start != start) {
-    Address prev_map = map_aligned_start - Map::kSize;
-    ASSERT(Memory::Object_at(prev_map)->IsMap());
-
-    Address pointer_fields_start =
-        Max(start, prev_map + Map::kPointerFieldsBeginOffset);
-
-    Address pointer_fields_end =
-        Min(prev_map + Map::kPointerFieldsEndOffset, end);
-
-    contains_pointers_to_new_space =
-      IteratePointersInDirtyRegion(heap,
-                                   pointer_fields_start,
-                                   pointer_fields_end,
-                                   copy_object_func)
-        || contains_pointers_to_new_space;
-  }
-
-  contains_pointers_to_new_space =
-    IteratePointersInDirtyMaps(map_aligned_start,
-                               map_aligned_end,
-                               copy_object_func)
-      || contains_pointers_to_new_space;
-
-  if (map_aligned_end != end) {
-    ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
-
-    Address pointer_fields_start =
-        map_aligned_end + Map::kPointerFieldsBeginOffset;
-
-    Address pointer_fields_end =
-        Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
-
-    contains_pointers_to_new_space =
-      IteratePointersInDirtyRegion(heap,
-                                   pointer_fields_start,
-                                   pointer_fields_end,
-                                   copy_object_func)
-        || contains_pointers_to_new_space;
-  }
-
-  return contains_pointers_to_new_space;
-}
-
-
 void Heap::IterateAndMarkPointersToFromSpace(Address start,
                                              Address end,
                                              ObjectSlotCallback callback) {
   Address slot_address = start;
-  Page* page = Page::FromAddress(start);
 
-  uint32_t marks = page->GetRegionMarks();
+  // We are not collecting slots on new space objects during mutation,
+  // thus we have to scan for pointers to evacuation candidates when we
+  // promote objects. But we should not record any slots in non-black
+  // objects. A grey object's slots will be rescanned anyway.
+  // A white object might not survive until the end of the collection,
+  // so recording its slots would violate the invariant.
+  bool record_slots = false;
+  if (incremental_marking()->IsCompacting()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
+    record_slots = Marking::IsBlack(mark_bit);
+  }
 
   while (slot_address < end) {
     Object** slot = reinterpret_cast<Object**>(slot_address);
-    if (InFromSpace(*slot)) {
-      ASSERT((*slot)->IsHeapObject());
-      callback(reinterpret_cast<HeapObject**>(slot));
-      if (InNewSpace(*slot)) {
-        ASSERT((*slot)->IsHeapObject());
-        marks |= page->GetRegionMaskForAddress(slot_address);
+    Object* object = *slot;
+    // If the store buffer becomes overfull we mark pages as being exempt from
+    // the store buffer.  These pages are scanned to find pointers that point
+    // to the new space.  In that case we may hit newly promoted objects and
+    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
+    if (object->IsHeapObject()) {
+      if (Heap::InFromSpace(object)) {
+        callback(reinterpret_cast<HeapObject**>(slot),
+                 HeapObject::cast(object));
+        Object* new_object = *slot;
+        if (InNewSpace(new_object)) {
+          SLOW_ASSERT(Heap::InToSpace(new_object));
+          SLOW_ASSERT(new_object->IsHeapObject());
+          store_buffer_.EnterDirectlyIntoStoreBuffer(
+              reinterpret_cast<Address>(slot));
+        }
+        SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+      } else if (record_slots &&
+                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
+        mark_compact_collector()->RecordSlot(slot, slot, object);
       }
     }
     slot_address += kPointerSize;
   }
-
-  page->SetRegionMarks(marks);
 }
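
The record_slots logic above is the tri-color invariant in action: slots
are recorded only for a black promoted object, since grey objects will be
rescanned anyway and white objects may not survive the collection. A
minimal sketch of that decision, with the colors modeled abstractly
instead of with V8's MarkBit machinery:

    #include <cstdio>

    enum Color { WHITE, GREY, BLACK };

    // Record slots only when compacting and the holder is black; grey
    // holders get rescanned, white holders may die before the GC ends.
    bool ShouldRecordSlots(Color holder, bool compacting) {
      return compacting && holder == BLACK;
    }

    int main() {
      const char* names[] = {"white", "grey", "black"};
      for (Color c : {WHITE, GREY, BLACK}) {
        std::printf("holder=%s -> record_slots=%d\n",
                    names[c], ShouldRecordSlots(c, true));
      }
    }
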
 
 
-uint32_t Heap::IterateDirtyRegions(
-    uint32_t marks,
-    Address area_start,
-    Address area_end,
-    DirtyRegionCallback visit_dirty_region,
-    ObjectSlotCallback copy_object_func) {
-  uint32_t newmarks = 0;
-  uint32_t mask = 1;
+#ifdef DEBUG
+typedef bool (*CheckStoreBufferFilter)(Object** addr);
 
-  if (area_start >= area_end) {
-    return newmarks;
-  }
 
-  Address region_start = area_start;
-
-  // area_start does not necessarily coincide with start of the first region.
-  // Thus to calculate the beginning of the next region we have to align
-  // area_start by Page::kRegionSize.
-  Address second_region =
-      reinterpret_cast<Address>(
-          reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
-          ~Page::kRegionAlignmentMask);
-
-  // Next region might be beyond area_end.
-  Address region_end = Min(second_region, area_end);
-
-  if (marks & mask) {
-    if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
-      newmarks |= mask;
-    }
-  }
-  mask <<= 1;
-
-  // Iterate subsequent regions which fully lay inside [area_start, area_end[.
-  region_start = region_end;
-  region_end = region_start + Page::kRegionSize;
-
-  while (region_end <= area_end) {
-    if (marks & mask) {
-      if (visit_dirty_region(this,
-                             region_start,
-                             region_end,
-                             copy_object_func)) {
-        newmarks |= mask;
-      }
-    }
-
-    region_start = region_end;
-    region_end = region_start + Page::kRegionSize;
-
-    mask <<= 1;
-  }
-
-  if (region_start != area_end) {
-    // A small piece of area left uniterated because area_end does not coincide
-    // with region end. Check whether region covering last part of area is
-    // dirty.
-    if (marks & mask) {
-      if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
-        newmarks |= mask;
-      }
-    }
-  }
-
-  return newmarks;
+bool IsAMapPointerAddress(Object** addr) {
+  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
+  int mod = a % Map::kSize;
+  return mod >= Map::kPointerFieldsBeginOffset &&
+         mod < Map::kPointerFieldsEndOffset;
 }
 
 
+bool EverythingsAPointer(Object** addr) {
+  return true;
+}
 
-void Heap::IterateDirtyRegions(
-    PagedSpace* space,
-    DirtyRegionCallback visit_dirty_region,
-    ObjectSlotCallback copy_object_func,
-    ExpectedPageWatermarkState expected_page_watermark_state) {
 
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-
-  while (it.has_next()) {
-    Page* page = it.next();
-    uint32_t marks = page->GetRegionMarks();
-
-    if (marks != Page::kAllRegionsCleanMarks) {
-      Address start = page->ObjectAreaStart();
-
-      // Do not try to visit pointers beyond page allocation watermark.
-      // Page can contain garbage pointers there.
-      Address end;
-
-      if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
-          page->IsWatermarkValid()) {
-        end = page->AllocationWatermark();
-      } else {
-        end = page->CachedAllocationWatermark();
-      }
-
-      ASSERT(space == old_pointer_space_ ||
-             (space == map_space_ &&
-              ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
-
-      page->SetRegionMarks(IterateDirtyRegions(marks,
-                                               start,
-                                               end,
-                                               visit_dirty_region,
-                                               copy_object_func));
+static void CheckStoreBuffer(Heap* heap,
+                             Object** current,
+                             Object** limit,
+                             Object**** store_buffer_position,
+                             Object*** store_buffer_top,
+                             CheckStoreBufferFilter filter,
+                             Address special_garbage_start,
+                             Address special_garbage_end) {
+  Map* free_space_map = heap->free_space_map();
+  for ( ; current < limit; current++) {
+    Object* o = *current;
+    Address current_address = reinterpret_cast<Address>(current);
+    // Skip free space.
+    if (o == free_space_map) {
+      FreeSpace* free_space =
+          FreeSpace::cast(HeapObject::FromAddress(current_address));
+      int skip = free_space->Size();
+      ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
+      ASSERT(skip > 0);
+      current_address += skip - kPointerSize;
+      current = reinterpret_cast<Object**>(current_address);
+      continue;
     }
-
-    // Mark page watermark as invalid to maintain watermark validity invariant.
-    // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
-    page->InvalidateWatermark(true);
+    // Skip the current linear allocation space between top and limit which is
+    // unmarked with the free space map, but can contain junk.
+    if (current_address == special_garbage_start &&
+        special_garbage_end != special_garbage_start) {
+      current_address = special_garbage_end - kPointerSize;
+      current = reinterpret_cast<Object**>(current_address);
+      continue;
+    }
+    if (!(*filter)(current)) continue;
+    ASSERT(current_address < special_garbage_start ||
+           current_address >= special_garbage_end);
+    ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
+    // We have to check that the pointer does not point into new space
+    // without trying to cast it to a heap object since the hash field of
+    // a string can contain values like 1 and 3 which are tagged null
+    // pointers.
+    if (!heap->InNewSpace(o)) continue;
+    while (**store_buffer_position < current &&
+           *store_buffer_position < store_buffer_top) {
+      (*store_buffer_position)++;
+    }
+    if (**store_buffer_position != current ||
+        *store_buffer_position == store_buffer_top) {
+      Object** obj_start = current;
+      while (!(*obj_start)->IsMap()) obj_start--;
+      UNREACHABLE();
+    }
   }
 }
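
CheckStoreBuffer depends on both sides being sorted: slots are visited in
ascending address order and the store buffer has been SortUniq'ed, so one
forward-moving cursor suffices to verify membership. A standalone sketch
of that merge-style check, using ints in place of slot addresses:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Every scanned old-to-new slot must appear in the sorted store
    // buffer; the cursor only ever advances, as in CheckStoreBuffer.
    bool AllSlotsRecorded(const std::vector<int>& slots,
                          const std::vector<int>& buffer) {
      std::size_t pos = 0;
      for (int slot : slots) {
        while (pos < buffer.size() && buffer[pos] < slot) pos++;
        if (pos == buffer.size() || buffer[pos] != slot) return false;
      }
      return true;
    }

    int main() {
      assert(AllSlotsRecorded({8, 24}, {8, 16, 24}));
      assert(!AllSlotsRecorded({8, 32}, {8, 16, 24}));
      return 0;
    }
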
 
 
+// Check that the store buffer contains all intergenerational pointers by
+// scanning a page and ensuring that all pointers to young space are in the
+// store buffer.
+void Heap::OldPointerSpaceCheckStoreBuffer() {
+  OldSpace* space = old_pointer_space();
+  PageIterator pages(space);
+
+  store_buffer()->SortUniq();
+
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    Object** current = reinterpret_cast<Object**>(page->area_start());
+
+    Address end = page->area_end();
+
+    Object*** store_buffer_position = store_buffer()->Start();
+    Object*** store_buffer_top = store_buffer()->Top();
+
+    Object** limit = reinterpret_cast<Object**>(end);
+    CheckStoreBuffer(this,
+                     current,
+                     limit,
+                     &store_buffer_position,
+                     store_buffer_top,
+                     &EverythingsAPointer,
+                     space->top(),
+                     space->limit());
+  }
+}
+
+
+void Heap::MapSpaceCheckStoreBuffer() {
+  MapSpace* space = map_space();
+  PageIterator pages(space);
+
+  store_buffer()->SortUniq();
+
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    Object** current = reinterpret_cast<Object**>(page->area_start());
+
+    Address end = page->area_end();
+
+    Object*** store_buffer_position = store_buffer()->Start();
+    Object*** store_buffer_top = store_buffer()->Top();
+
+    Object** limit = reinterpret_cast<Object**>(end);
+    CheckStoreBuffer(this,
+                     current,
+                     limit,
+                     &store_buffer_position,
+                     store_buffer_top,
+                     &IsAMapPointerAddress,
+                     space->top(),
+                     space->limit());
+  }
+}
+
+
+void Heap::LargeObjectSpaceCheckStoreBuffer() {
+  LargeObjectIterator it(lo_space());
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    // We only have code, sequential strings, or fixed arrays in large
+    // object space, and only fixed arrays can possibly contain pointers to
+    // the young generation.
+    if (object->IsFixedArray()) {
+      Object*** store_buffer_position = store_buffer()->Start();
+      Object*** store_buffer_top = store_buffer()->Top();
+      Object** current = reinterpret_cast<Object**>(object->address());
+      Object** limit =
+          reinterpret_cast<Object**>(object->address() + object->Size());
+      CheckStoreBuffer(this,
+                       current,
+                       limit,
+                       &store_buffer_position,
+                       store_buffer_top,
+                       &EverythingsAPointer,
+                       NULL,
+                       NULL);
+    }
+  }
+}
+#endif
+
+
 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
   IterateStrongRoots(v, mode);
   IterateWeakRoots(v, mode);
@@ -4900,29 +5461,29 @@
 
 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
-  v->Synchronize("symbol_table");
+  v->Synchronize(VisitorSynchronization::kSymbolTable);
   if (mode != VISIT_ALL_IN_SCAVENGE &&
       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     // Scavenge collections have special processing for this.
     external_string_table_.Iterate(v);
   }
-  v->Synchronize("external_string_table");
+  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
 }
 
 
 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
-  v->Synchronize("strong_root_list");
+  v->Synchronize(VisitorSynchronization::kStrongRootList);
 
   v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
-  v->Synchronize("symbol");
+  v->Synchronize(VisitorSynchronization::kSymbol);
 
   isolate_->bootstrapper()->Iterate(v);
-  v->Synchronize("bootstrapper");
+  v->Synchronize(VisitorSynchronization::kBootstrapper);
   isolate_->Iterate(v);
-  v->Synchronize("top");
+  v->Synchronize(VisitorSynchronization::kTop);
   Relocatable::Iterate(v);
-  v->Synchronize("relocatable");
+  v->Synchronize(VisitorSynchronization::kRelocatable);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate_->debug()->Iterate(v);
@@ -4930,22 +5491,21 @@
     isolate_->deoptimizer_data()->Iterate(v);
   }
 #endif
-  v->Synchronize("debug");
+  v->Synchronize(VisitorSynchronization::kDebug);
   isolate_->compilation_cache()->Iterate(v);
-  v->Synchronize("compilationcache");
+  v->Synchronize(VisitorSynchronization::kCompilationCache);
 
   // Iterate over local handles in handle scopes.
   isolate_->handle_scope_implementer()->Iterate(v);
-  v->Synchronize("handlescope");
+  v->Synchronize(VisitorSynchronization::kHandleScope);
 
   // Iterate over the builtin code objects and code stubs in the
   // heap. Note that it is not necessary to iterate over code objects
   // on scavenge collections.
-  if (mode != VISIT_ALL_IN_SCAVENGE &&
-      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+  if (mode != VISIT_ALL_IN_SCAVENGE) {
     isolate_->builtins()->IterateBuiltins(v);
   }
-  v->Synchronize("builtins");
+  v->Synchronize(VisitorSynchronization::kBuiltins);
 
   // Iterate over global handles.
   switch (mode) {
@@ -4960,11 +5520,11 @@
       isolate_->global_handles()->IterateAllRoots(v);
       break;
   }
-  v->Synchronize("globalhandles");
+  v->Synchronize(VisitorSynchronization::kGlobalHandles);
 
   // Iterate over pointers being held by inactive threads.
   isolate_->thread_manager()->Iterate(v);
-  v->Synchronize("threadmanager");
+  v->Synchronize(VisitorSynchronization::kThreadManager);
 
   // Iterate over the pointers the Serialization/Deserialization code is
   // holding.
@@ -4986,11 +5546,20 @@
 // and through the API, we should gracefully handle the case that the heap
 // size is not big enough to fit all the initial objects.
 bool Heap::ConfigureHeap(int max_semispace_size,
-                         int max_old_gen_size,
-                         int max_executable_size) {
-  if (HasBeenSetup()) return false;
+                         intptr_t max_old_gen_size,
+                         intptr_t max_executable_size) {
+  if (HasBeenSetUp()) return false;
 
-  if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
+  if (max_semispace_size > 0) {
+    if (max_semispace_size < Page::kPageSize) {
+      max_semispace_size = Page::kPageSize;
+      if (FLAG_trace_gc) {
+        PrintF("Max semispace size cannot be less than %dkbytes\n",
+               Page::kPageSize >> 10);
+      }
+    }
+    max_semispace_size_ = max_semispace_size;
+  }
 
   if (Snapshot::IsEnabled()) {
     // If we are using a snapshot we always reserve the default amount
@@ -5000,6 +5569,10 @@
     // than the default reserved semispace size.
     if (max_semispace_size_ > reserved_semispace_size_) {
       max_semispace_size_ = reserved_semispace_size_;
+      if (FLAG_trace_gc) {
+        PrintF("Max semispace size cannot be more than %dkbytes\n",
+               reserved_semispace_size_ >> 10);
+      }
     }
   } else {
     // If we are not using snapshots we reserve space for the actual
@@ -5025,8 +5598,12 @@
   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
   external_allocation_limit_ = 10 * max_semispace_size_;
 
-  // The old generation is paged.
-  max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
+  // The old generation is paged and needs at least one page for each space.
+  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+  max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
+                                                       Page::kPageSize),
+                                 RoundUp(max_old_generation_size_,
+                                         Page::kPageSize));
 
   configured_ = true;
   return true;
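
The clamp above rounds the requested old-generation size up to whole pages
while never letting it fall below one page per paged space. A standalone
sketch of the arithmetic, with the page size and space count assumed for
illustration:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t kPageSize = 1 << 20;    // assumed 1 MB pages
      const intptr_t paged_space_count = 5;  // assumed space count

      intptr_t requested = 3 * kPageSize + 12345;  // not page-aligned
      // Round up to a page boundary, then enforce the per-space minimum.
      intptr_t rounded = ((requested + kPageSize - 1) / kPageSize) * kPageSize;
      intptr_t max_old_gen = std::max(paged_space_count * kPageSize, rounded);
      std::printf("requested=%jd -> max_old_gen=%jd (%jd pages)\n",
                  static_cast<intmax_t>(requested),
                  static_cast<intmax_t>(max_old_gen),
                  static_cast<intmax_t>(max_old_gen / kPageSize));
    }
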
@@ -5034,9 +5611,9 @@
 
 
 bool Heap::ConfigureHeapDefault() {
-  return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
-                       FLAG_max_old_space_size * MB,
-                       FLAG_max_executable_size * MB);
+  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
+                       static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
+                       static_cast<intptr_t>(FLAG_max_executable_size) * MB);
 }
 
 
@@ -5045,15 +5622,15 @@
   *stats->end_marker = HeapStats::kEndMarker;
   *stats->new_space_size = new_space_.SizeAsInt();
   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
-  *stats->old_pointer_space_size = old_pointer_space_->Size();
+  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
-  *stats->old_data_space_size = old_data_space_->Size();
+  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
   *stats->old_data_space_capacity = old_data_space_->Capacity();
-  *stats->code_space_size = code_space_->Size();
+  *stats->code_space_size = code_space_->SizeOfObjects();
   *stats->code_space_capacity = code_space_->Capacity();
-  *stats->map_space_size = map_space_->Size();
+  *stats->map_space_size = map_space_->SizeOfObjects();
   *stats->map_space_capacity = map_space_->Capacity();
-  *stats->cell_space_size = cell_space_->Size();
+  *stats->cell_space_size = cell_space_->SizeOfObjects();
   *stats->cell_space_capacity = cell_space_->Capacity();
   *stats->lo_space_size = lo_space_->Size();
   isolate_->global_handles()->RecordStats(stats);
@@ -5064,7 +5641,7 @@
   *stats->os_error = OS::GetLastError();
       isolate()->memory_allocator()->Available();
   if (take_snapshot) {
-    HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+    HeapIterator iterator;
     for (HeapObject* obj = iterator.next();
          obj != NULL;
          obj = iterator.next()) {
@@ -5087,6 +5664,16 @@
 }
 
 
+intptr_t Heap::PromotedSpaceSizeOfObjects() {
+  return old_pointer_space_->SizeOfObjects()
+      + old_data_space_->SizeOfObjects()
+      + code_space_->SizeOfObjects()
+      + map_space_->SizeOfObjects()
+      + cell_space_->SizeOfObjects()
+      + lo_space_->SizeOfObjects();
+}
+
+
 int Heap::PromotedExternalMemorySize() {
   if (amount_of_external_allocated_memory_
       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
@@ -5147,7 +5734,7 @@
 
     Address map_addr = map_p->address();
 
-    obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
+    obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
 
     MarkObjectRecursively(&map);
 
@@ -5194,7 +5781,7 @@
 
     HeapObject* map_p = HeapObject::FromAddress(map_addr);
 
-    obj->set_map(reinterpret_cast<Map*>(map_p));
+    obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
 
     UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
 
@@ -5260,8 +5847,9 @@
 
 #endif
 
-bool Heap::Setup(bool create_heap_objects) {
+bool Heap::SetUp(bool create_heap_objects) {
 #ifdef DEBUG
+  allocation_timeout_ = FLAG_gc_interval;
   debug_utils_ = new HeapDebugUtils(this);
 #endif
 
@@ -5269,7 +5857,7 @@
   // goes wrong, just return false. The caller should check the results and
   // call Heap::TearDown() to release allocated memory.
   //
-  // If the heap is not yet configured (eg, through the API), configure it.
+  // If the heap is not yet configured (e.g. through the API), configure it.
   // Configuration is based on the flags new-space-size (really the semispace
   // size) and old-space-size if set or the initial values of semispace_size_
   // and old_generation_size_ otherwise.
@@ -5277,34 +5865,24 @@
     if (!ConfigureHeapDefault()) return false;
   }
 
-  gc_initializer_mutex->Lock();
+  gc_initializer_mutex.Pointer()->Lock();
   static bool initialized_gc = false;
   if (!initialized_gc) {
-    initialized_gc = true;
-    InitializeScavengingVisitorsTables();
-    NewSpaceScavenger::Initialize();
-    MarkCompactCollector::Initialize();
+    initialized_gc = true;
+    InitializeScavengingVisitorsTables();
+    NewSpaceScavenger::Initialize();
+    MarkCompactCollector::Initialize();
   }
-  gc_initializer_mutex->Unlock();
+  gc_initializer_mutex.Pointer()->Unlock();
 
   MarkMapPointersAsEncoded(false);
 
-  // Setup memory allocator and reserve a chunk of memory for new
-  // space.  The chunk is double the size of the requested reserved
-  // new space size to ensure that we can find a pair of semispaces that
-  // are contiguous and aligned to their size.
-  if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
+  // Set up memory allocator.
+  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
       return false;
-  void* chunk =
-      isolate_->memory_allocator()->ReserveInitialChunk(
-          4 * reserved_semispace_size_);
-  if (chunk == NULL) return false;
 
-  // Align the pair of semispaces to their size, which must be a power
-  // of 2.
-  Address new_space_start =
-      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
-  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
+  // Set up new space.
+  if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
     return false;
   }
 
@@ -5315,7 +5893,7 @@
                    OLD_POINTER_SPACE,
                    NOT_EXECUTABLE);
   if (old_pointer_space_ == NULL) return false;
-  if (!old_pointer_space_->Setup(NULL, 0)) return false;
+  if (!old_pointer_space_->SetUp()) return false;
 
   // Initialize old data space.
   old_data_space_ =
@@ -5324,14 +5902,14 @@
                    OLD_DATA_SPACE,
                    NOT_EXECUTABLE);
   if (old_data_space_ == NULL) return false;
-  if (!old_data_space_->Setup(NULL, 0)) return false;
+  if (!old_data_space_->SetUp()) return false;
 
   // Initialize the code space, set its maximum capacity to the old
   // generation size. It needs executable memory.
   // On 64-bit platform(s), we put all code objects in a 2 GB range of
   // virtual address space, so that they can call each other with near calls.
   if (code_range_size_ > 0) {
-    if (!isolate_->code_range()->Setup(code_range_size_)) {
+    if (!isolate_->code_range()->SetUp(code_range_size_)) {
       return false;
     }
   }
@@ -5339,30 +5917,26 @@
   code_space_ =
       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
   if (code_space_ == NULL) return false;
-  if (!code_space_->Setup(NULL, 0)) return false;
+  if (!code_space_->SetUp()) return false;
 
   // Initialize map space.
-  map_space_ = new MapSpace(this, FLAG_use_big_map_space
-      ? max_old_generation_size_
-      : MapSpace::kMaxMapPageIndex * Page::kPageSize,
-      FLAG_max_map_space_pages,
-      MAP_SPACE);
+  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
   if (map_space_ == NULL) return false;
-  if (!map_space_->Setup(NULL, 0)) return false;
+  if (!map_space_->SetUp()) return false;
 
   // Initialize global property cell space.
   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
   if (cell_space_ == NULL) return false;
-  if (!cell_space_->Setup(NULL, 0)) return false;
+  if (!cell_space_->SetUp()) return false;
 
   // The large object code space may contain code or data.  We set the memory
   // to be non-executable here for safety, but this means we need to enable it
   // explicitly when allocating large code objects.
-  lo_space_ = new LargeObjectSpace(this, LO_SPACE);
+  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
   if (lo_space_ == NULL) return false;
-  if (!lo_space_->Setup()) return false;
+  if (!lo_space_->SetUp()) return false;
 
-  // Setup the seed that is used to randomize the string hash function.
+  // Set up the seed that is used to randomize the string hash function.
   ASSERT(hash_seed() == 0);
   if (FLAG_randomize_hashes) {
     if (FLAG_hash_seed == 0) {
@@ -5387,6 +5961,8 @@
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
 
+  store_buffer()->SetUp();
+
   return true;
 }
 
@@ -5413,7 +5989,6 @@
     PrintF("\n\n");
     PrintF("gc_count=%d ", gc_count_);
     PrintF("mark_sweep_count=%d ", ms_count_);
-    PrintF("mark_compact_count=%d ", mc_count_);
     PrintF("max_gc_pause=%d ", get_max_gc_pause());
     PrintF("min_in_mutator=%d ", get_min_in_mutator());
     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
@@ -5463,6 +6038,9 @@
     lo_space_ = NULL;
   }
 
+  store_buffer()->TearDown();
+  incremental_marking()->TearDown();
+
   isolate_->memory_allocator()->TearDown();
 
 #ifdef DEBUG
@@ -5475,8 +6053,11 @@
 void Heap::Shrink() {
   // Try to shrink all paged spaces.
   PagedSpaces spaces;
-  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
-    space->Shrink();
+  for (PagedSpace* space = spaces.next();
+       space != NULL;
+       space = spaces.next()) {
+    space->ReleaseAllUnusedPages();
+  }
 }
 
 
@@ -5679,98 +6260,54 @@
 };
 
 
-class FreeListNodesFilter : public HeapObjectsFilter {
- public:
-  FreeListNodesFilter() {
-    MarkFreeListNodes();
-  }
-
-  bool SkipObject(HeapObject* object) {
-    if (object->IsMarked()) {
-      object->ClearMark();
-      return true;
-    } else {
-      return false;
-    }
-  }
-
- private:
-  void MarkFreeListNodes() {
-    Heap* heap = HEAP;
-    heap->old_pointer_space()->MarkFreeListNodes();
-    heap->old_data_space()->MarkFreeListNodes();
-    MarkCodeSpaceFreeListNodes(heap);
-    heap->map_space()->MarkFreeListNodes();
-    heap->cell_space()->MarkFreeListNodes();
-  }
-
-  void MarkCodeSpaceFreeListNodes(Heap* heap) {
-    // For code space, using FreeListNode::IsFreeListNode is OK.
-    HeapObjectIterator iter(heap->code_space());
-    for (HeapObject* obj = iter.next_object();
-         obj != NULL;
-         obj = iter.next_object()) {
-      if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
-    }
-  }
-
-  AssertNoAllocation no_alloc;
-};
-
-
 class UnreachableObjectsFilter : public HeapObjectsFilter {
  public:
   UnreachableObjectsFilter() {
-    MarkUnreachableObjects();
+    MarkReachableObjects();
+  }
+
+  ~UnreachableObjectsFilter() {
+    Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
   }
 
   bool SkipObject(HeapObject* object) {
-    if (object->IsMarked()) {
-      object->ClearMark();
-      return true;
-    } else {
-      return false;
-    }
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    return !mark_bit.Get();
   }
 
  private:
-  class UnmarkingVisitor : public ObjectVisitor {
+  class MarkingVisitor : public ObjectVisitor {
    public:
-    UnmarkingVisitor() : list_(10) {}
+    MarkingVisitor() : marking_stack_(10) {}
 
     void VisitPointers(Object** start, Object** end) {
       for (Object** p = start; p < end; p++) {
         if (!(*p)->IsHeapObject()) continue;
         HeapObject* obj = HeapObject::cast(*p);
-        if (obj->IsMarked()) {
-          obj->ClearMark();
-          list_.Add(obj);
+        MarkBit mark_bit = Marking::MarkBitFrom(obj);
+        if (!mark_bit.Get()) {
+          mark_bit.Set();
+          marking_stack_.Add(obj);
         }
       }
     }
 
-    bool can_process() { return !list_.is_empty(); }
-
-    void ProcessNext() {
-      HeapObject* obj = list_.RemoveLast();
-      obj->Iterate(this);
+    void TransitiveClosure() {
+      while (!marking_stack_.is_empty()) {
+        HeapObject* obj = marking_stack_.RemoveLast();
+        obj->Iterate(this);
+      }
     }
 
    private:
-    List<HeapObject*> list_;
+    List<HeapObject*> marking_stack_;
   };
 
-  void MarkUnreachableObjects() {
-    HeapIterator iterator;
-    for (HeapObject* obj = iterator.next();
-         obj != NULL;
-         obj = iterator.next()) {
-      obj->SetMark();
-    }
-    UnmarkingVisitor visitor;
-    HEAP->IterateRoots(&visitor, VISIT_ALL);
-    while (visitor.can_process())
-      visitor.ProcessNext();
+  void MarkReachableObjects() {
+    Heap* heap = Isolate::Current()->heap();
+    MarkingVisitor visitor;
+    heap->IterateRoots(&visitor, VISIT_ALL);
+    visitor.TransitiveClosure();
   }
 
   AssertNoAllocation no_alloc;
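
MarkingVisitor swaps recursive marking for an explicit worklist: an object
is marked the first time it is seen, pushed, and the stack is drained in a
loop, so native stack depth stays bounded no matter how deep the object
graph is. The same pattern in miniature, over a hypothetical
adjacency-list graph rather than heap objects:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // Hypothetical object graph; node 4 is unreachable from the root.
      std::vector<std::vector<int>> edges = {{1, 2}, {3}, {3}, {}, {0}};
      std::vector<bool> marked(edges.size(), false);
      std::vector<int> worklist;

      marked[0] = true;   // mark and push the root (cf. IterateRoots)
      worklist.push_back(0);
      while (!worklist.empty()) {  // cf. TransitiveClosure()
        int n = worklist.back();
        worklist.pop_back();
        for (int m : edges[n]) {
          if (!marked[m]) { marked[m] = true; worklist.push_back(m); }
        }
      }
      for (std::size_t i = 0; i < edges.size(); i++)
        std::printf("node %zu: %s\n", i,
                    marked[i] ? "reachable" : "unreachable");
    }
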
@@ -5798,12 +6335,8 @@
 
 void HeapIterator::Init() {
   // Start the iteration.
-  space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
-      new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
+  space_iterator_ = new SpaceIterator;
   switch (filtering_) {
-    case kFilterFreeListNodes:
-      filter_ = new FreeListNodesFilter;
-      break;
     case kFilterUnreachable:
       filter_ = new UnreachableObjectsFilter;
       break;
@@ -5939,6 +6472,11 @@
 }
 
 
+static bool SafeIsGlobalContext(HeapObject* obj) {
+  return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
+}
+
+
 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
   if (!(*p)->IsHeapObject()) return;
 
@@ -5957,14 +6495,14 @@
     return;
   }
 
-  bool is_global_context = obj->IsGlobalContext();
+  bool is_global_context = SafeIsGlobalContext(obj);
 
   // not visited yet
   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
 
   Address map_addr = map_p->address();
 
-  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
+  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
 
   // Scan the object body.
   if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
@@ -6006,7 +6544,7 @@
 
   HeapObject* map_p = HeapObject::FromAddress(map_addr);
 
-  obj->set_map(reinterpret_cast<Map*>(map_p));
+  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
 
   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
 
@@ -6065,31 +6603,30 @@
   for (OldSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
-    holes_size += space->Waste() + space->AvailableFree();
+    holes_size += space->Waste() + space->Available();
   }
   return holes_size;
 }
 
 
-GCTracer::GCTracer(Heap* heap)
+GCTracer::GCTracer(Heap* heap,
+                   const char* gc_reason,
+                   const char* collector_reason)
     : start_time_(0.0),
-      start_size_(0),
+      start_object_size_(0),
+      start_memory_size_(0),
       gc_count_(0),
       full_gc_count_(0),
-      is_compacting_(false),
-      marked_count_(0),
       allocated_since_last_gc_(0),
       spent_in_mutator_(0),
       promoted_objects_size_(0),
-      heap_(heap) {
-  // These two fields reflect the state of the previous full collection.
-  // Set them before they are changed by the collector.
-  previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
-  previous_marked_count_ =
-      heap_->mark_compact_collector_.previous_marked_count();
+      heap_(heap),
+      gc_reason_(gc_reason),
+      collector_reason_(collector_reason) {
   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
   start_time_ = OS::TimeCurrentMillis();
-  start_size_ = heap_->SizeOfObjects();
+  start_object_size_ = heap_->SizeOfObjects();
+  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
 
   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
     scopes_[i] = 0;
@@ -6103,6 +6640,14 @@
   if (heap_->last_gc_end_timestamp_ > 0) {
     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
   }
+
+  steps_count_ = heap_->incremental_marking()->steps_count();
+  steps_took_ = heap_->incremental_marking()->steps_took();
+  longest_step_ = heap_->incremental_marking()->longest_step();
+  steps_count_since_last_gc_ =
+      heap_->incremental_marking()->steps_count_since_last_gc();
+  steps_took_since_last_gc_ =
+      heap_->incremental_marking()->steps_took_since_last_gc();
 }
 
 
@@ -6128,16 +6673,46 @@
     }
   }
 
+  PrintF("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
+
   if (!FLAG_trace_gc_nvp) {
     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
 
-    PrintF("%s %.1f -> %.1f MB, ",
+    double end_memory_size_mb =
+        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
+
+    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
            CollectorString(),
-           static_cast<double>(start_size_) / MB,
-           SizeOfHeapObjects());
+           static_cast<double>(start_object_size_) / MB,
+           static_cast<double>(start_memory_size_) / MB,
+           SizeOfHeapObjects(),
+           end_memory_size_mb);
 
     if (external_time > 0) PrintF("%d / ", external_time);
-    PrintF("%d ms.\n", time);
+    PrintF("%d ms", time);
+    if (steps_count_ > 0) {
+      if (collector_ == SCAVENGER) {
+        PrintF(" (+ %d ms in %d steps since last GC)",
+               static_cast<int>(steps_took_since_last_gc_),
+               steps_count_since_last_gc_);
+      } else {
+        PrintF(" (+ %d ms in %d steps since start of marking, "
+                   "biggest step %f ms)",
+               static_cast<int>(steps_took_),
+               steps_count_,
+               longest_step_);
+      }
+    }
+
+    if (gc_reason_ != NULL) {
+      PrintF(" [%s]", gc_reason_);
+    }
+
+    if (collector_reason_ != NULL) {
+      PrintF(" [%s]", collector_reason_);
+    }
+
+    PrintF(".\n");
   } else {
     PrintF("pause=%d ", time);
     PrintF("mutator=%d ",
@@ -6149,8 +6724,7 @@
         PrintF("s");
         break;
       case MARK_COMPACTOR:
-        PrintF("%s",
-               heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
+        PrintF("ms");
         break;
       default:
         UNREACHABLE();
@@ -6161,9 +6735,21 @@
     PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
     PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
     PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
-    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
+    PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
+    PrintF("new_new=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
+    PrintF("root_new=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
+    PrintF("old_new=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
+    PrintF("compaction_ptrs=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
+    PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
+        Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
+    PrintF("misc_compaction=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
 
-    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
+    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
            in_free_list_or_wasted_before_gc_);
@@ -6172,6 +6758,14 @@
     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
 
+    if (collector_ == SCAVENGER) {
+      PrintF("stepscount=%d ", steps_count_since_last_gc_);
+      PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
+    } else {
+      PrintF("stepscount=%d ", steps_count_);
+      PrintF("stepstook=%d ", static_cast<int>(steps_took_));
+    }
+
     PrintF("\n");
   }
 
@@ -6184,8 +6778,7 @@
     case SCAVENGER:
       return "Scavenge";
     case MARK_COMPACTOR:
-      return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
-                                                           : "Mark-sweep";
+      return "Mark-sweep";
   }
   return "Unknown GC";
 }
@@ -6200,10 +6793,12 @@
 
 
 int KeyedLookupCache::Lookup(Map* map, String* name) {
-  int index = Hash(map, name);
-  Key& key = keys_[index];
-  if ((key.map == map) && key.name->Equals(name)) {
-    return field_offsets_[index];
+  int index = (Hash(map, name) & kHashMask);
+  for (int i = 0; i < kEntriesPerBucket; i++) {
+    Key& key = keys_[index + i];
+    if ((key.map == map) && key.name->Equals(name)) {
+      return field_offsets_[index + i];
+    }
   }
   return kNotFound;
 }
@@ -6212,7 +6807,29 @@
 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
   String* symbol;
   if (HEAP->LookupSymbolIfExists(name, &symbol)) {
-    int index = Hash(map, symbol);
+    int index = (Hash(map, symbol) & kHashMask);
+    // After a GC there will be free slots, so we use them in order (this may
+    // help to get the most frequently used one in position 0).
+    for (int i = 0; i < kEntriesPerBucket; i++) {
+      Key& key = keys_[index + i];
+      Object* free_entry_indicator = NULL;
+      if (key.map == free_entry_indicator) {
+        key.map = map;
+        key.name = symbol;
+        field_offsets_[index + i] = field_offset;
+        return;
+      }
+    }
+    // No free entry found in this bucket, so we move them all down one and
+    // put the new entry at position zero.
+    for (int i = kEntriesPerBucket - 1; i > 0; i--) {
+      Key& key = keys_[index + i];
+      Key& key2 = keys_[index + i - 1];
+      key = key2;
+      field_offsets_[index + i] = field_offsets_[index + i - 1];
+    }
+
+    // Write the new first entry.
     Key& key = keys_[index];
     key.map = map;
     key.name = symbol;
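
Update implements a small set-associative cache: use a free slot in the
bucket if one exists, otherwise shift every entry down one place and
install the new entry at position zero. The same policy in miniature with
string keys; a bucket width of 2 is assumed here, since the real
kEntriesPerBucket value is not visible in this hunk:

    #include <cstdio>
    #include <string>

    const int kEntriesPerBucket = 2;        // assumed bucket width
    std::string keys[kEntriesPerBucket];    // "" marks a free entry
    int offsets[kEntriesPerBucket];

    void Update(const std::string& name, int offset) {
      for (int i = 0; i < kEntriesPerBucket; i++) {  // free slot first
        if (keys[i].empty()) { keys[i] = name; offsets[i] = offset; return; }
      }
      for (int i = kEntriesPerBucket - 1; i > 0; i--) {  // shift down
        keys[i] = keys[i - 1];
        offsets[i] = offsets[i - 1];
      }
      keys[0] = name;  // the new entry goes first
      offsets[0] = offset;
    }

    int main() {
      Update("a", 1); Update("b", 2); Update("c", 3);  // "c" displaces "b"
      for (int i = 0; i < kEntriesPerBucket; i++)
        std::printf("slot %d: %s -> %d\n", i, keys[i].c_str(), offsets[i]);
    }
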
@@ -6267,7 +6884,9 @@
 void ExternalStringTable::CleanUp() {
   int last = 0;
   for (int i = 0; i < new_space_strings_.length(); ++i) {
-    if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
+    if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+      continue;
+    }
     if (heap_->InNewSpace(new_space_strings_[i])) {
       new_space_strings_[last++] = new_space_strings_[i];
     } else {
@@ -6277,12 +6896,16 @@
   new_space_strings_.Rewind(last);
   last = 0;
   for (int i = 0; i < old_space_strings_.length(); ++i) {
-    if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
+    if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+      continue;
+    }
     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
     old_space_strings_[last++] = old_space_strings_[i];
   }
   old_space_strings_.Rewind(last);
-  Verify();
+  if (FLAG_verify_heap) {
+    Verify();
+  }
 }
 
 
@@ -6292,4 +6915,72 @@
 }
 
 
+void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
+  chunk->set_next_chunk(chunks_queued_for_free_);
+  chunks_queued_for_free_ = chunk;
+}
+
+
+void Heap::FreeQueuedChunks() {
+  if (chunks_queued_for_free_ == NULL) return;
+  MemoryChunk* next;
+  MemoryChunk* chunk;
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+
+    if (chunk->owner()->identity() == LO_SPACE) {
+      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
+      // If FromAnyPointerAddress encounters a slot that belongs to a large
+      // chunk queued for deletion, it will fail to find the chunk because
+      // it tries to search the list of pages owned by the large object
+      // space, and queued chunks were detached from that list.
+      // To work around this we split the large chunk into normal
+      // kPageSize-aligned pieces and initialize the size, owner and flags
+      // fields of every piece.
+      // If FromAnyPointerAddress encounters a slot that belongs to one of
+      // these smaller pieces it will treat it as a slot on a normal Page.
+      Address chunk_end = chunk->address() + chunk->size();
+      MemoryChunk* inner = MemoryChunk::FromAddress(
+          chunk->address() + Page::kPageSize);
+      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
+      while (inner <= inner_last) {
+        // Size of a large chunk is always a multiple of
+        // OS::AllocateAlignment() so there is always
+        // enough space for a fake MemoryChunk header.
+        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
+        // Guard against overflow.
+        if (area_end < inner->address()) area_end = chunk_end;
+        inner->SetArea(inner->address(), area_end);
+        inner->set_size(Page::kPageSize);
+        inner->set_owner(lo_space());
+        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+        inner = MemoryChunk::FromAddress(
+            inner->address() + Page::kPageSize);
+      }
+    }
+  }
+  isolate_->heap()->store_buffer()->Compact();
+  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    isolate_->memory_allocator()->Free(chunk);
+  }
+  chunks_queued_for_free_ = NULL;
+}
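
The splitting loop covers the queued large chunk with page-sized pieces
and clamps the final piece's area end to the chunk end. A standalone
sketch of the same page walk, with a small page size assumed in place of
Page::kPageSize:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kPageSize = 4096;  // assumed page size
      uintptr_t chunk_start = 0x10000;
      uintptr_t chunk_end = chunk_start + 3 * kPageSize + 100;  // odd size

      // Walk the pieces after the first page, clamping the last one.
      for (uintptr_t inner = chunk_start + kPageSize; inner < chunk_end;
           inner += kPageSize) {
        uintptr_t area_end =
            inner + kPageSize < chunk_end ? inner + kPageSize : chunk_end;
        std::printf("piece [%#jx, %#jx)\n",
                    static_cast<uintmax_t>(inner),
                    static_cast<uintmax_t>(area_end));
      }
    }
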
+
+
+void Heap::RememberUnmappedPage(Address page, bool compacted) {
+  uintptr_t p = reinterpret_cast<uintptr_t>(page);
+  // Tag the page pointer to make it findable in the dump file.
+  if (compacted) {
+    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
+  } else {
+    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
+  }
+  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
+      reinterpret_cast<Address>(p);
+  remembered_unmapped_pages_index_++;
+  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
+}
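
The tags 0xc1ead ("cleared") and 0x1d1ed ("I died") are XOR-ed into the
page-offset bits only, so the page's base address stays recoverable from a
crash dump by XOR-ing the tag back out. A quick sketch of the round trip,
with the page size assumed:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kPageSize = 1 << 20;        // assumed 1 MB pages
      uintptr_t page = 0x2a00000;                 // a page-aligned address
      uintptr_t tag = 0xc1ead & (kPageSize - 1);  // "cleared" marker

      uintptr_t tagged = page ^ tag;
      std::printf("page=%#jx tagged=%#jx\n",
                  static_cast<uintmax_t>(page),
                  static_cast<uintmax_t>(tagged));
      assert((tagged ^ tag) == page);  // the tag is reversible
    }
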
+
 } }  // namespace v8::internal
diff --git a/src/heap.h b/src/heap.h
index b1948a9..2bd037f 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,46 +32,55 @@
 
 #include "allocation.h"
 #include "globals.h"
+#include "incremental-marking.h"
 #include "list.h"
 #include "mark-compact.h"
+#include "objects-visiting.h"
 #include "spaces.h"
 #include "splay-tree-inl.h"
+#include "store-buffer.h"
 #include "v8-counters.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
 
-// TODO(isolates): remove HEAP here
-#define HEAP (_inline_get_heap_())
-class Heap;
-inline Heap* _inline_get_heap_();
-
-
 // Defines all the roots in Heap.
-#define STRONG_ROOT_LIST(V)                                      \
-  /* Put the byte array map early.  We need it to be in place by the time   */ \
-  /* the deserializer hits the next page, since it wants to put a byte      */ \
-  /* array in the unused space at the end of the page.                      */ \
+#define STRONG_ROOT_LIST(V)                                                    \
   V(Map, byte_array_map, ByteArrayMap)                                         \
+  V(Map, free_space_map, FreeSpaceMap)                                         \
   V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
   V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
   /* Cluster the most popular ones in a few cache lines here at the top.    */ \
-  V(Object, undefined_value, UndefinedValue)                                   \
-  V(Object, the_hole_value, TheHoleValue)                                      \
-  V(Object, null_value, NullValue)                                             \
-  V(Object, true_value, TrueValue)                                             \
-  V(Object, false_value, FalseValue)                                           \
-  V(Object, arguments_marker, ArgumentsMarker)                                 \
+  V(Smi, store_buffer_top, StoreBufferTop)                                     \
+  V(Oddball, undefined_value, UndefinedValue)                                  \
+  V(Oddball, the_hole_value, TheHoleValue)                                     \
+  V(Oddball, null_value, NullValue)                                            \
+  V(Oddball, true_value, TrueValue)                                            \
+  V(Oddball, false_value, FalseValue)                                          \
+  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
+  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
+  V(Map, meta_map, MetaMap)                                                    \
+  V(Map, ascii_symbol_map, AsciiSymbolMap)                                     \
+  V(Map, ascii_string_map, AsciiStringMap)                                     \
   V(Map, heap_number_map, HeapNumberMap)                                       \
   V(Map, global_context_map, GlobalContextMap)                                 \
   V(Map, fixed_array_map, FixedArrayMap)                                       \
-  V(Map, serialized_scope_info_map, SerializedScopeInfoMap)                    \
+  V(Map, code_map, CodeMap)                                                    \
+  V(Map, scope_info_map, ScopeInfoMap)                                         \
   V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
   V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
   V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel)       \
-  V(Map, meta_map, MetaMap)                                                    \
   V(Map, hash_table_map, HashTableMap)                                         \
+  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
+  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
+  V(String, empty_string, EmptyString)                                         \
+  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
   V(Smi, stack_limit, StackLimit)                                              \
+  V(Oddball, arguments_marker, ArgumentsMarker)                                \
+  /* The first 32 roots above this line should be boring from a GC point of */ \
+  /* view.  This means they are never in new space and never on a page that */ \
+  /* is being compacted.                                                    */ \
   V(FixedArray, number_string_cache, NumberStringCache)                        \
   V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
   V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
@@ -80,19 +89,12 @@
   V(FixedArray, string_split_cache, StringSplitCache)                          \
   V(Object, termination_exception, TerminationException)                       \
   V(Smi, hash_seed, HashSeed)                                                  \
-  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
-  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
-  V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray)         \
-  V(String, empty_string, EmptyString)                                         \
-  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
   V(Map, string_map, StringMap)                                                \
-  V(Map, ascii_string_map, AsciiStringMap)                                     \
   V(Map, symbol_map, SymbolMap)                                                \
   V(Map, cons_string_map, ConsStringMap)                                       \
   V(Map, cons_ascii_string_map, ConsAsciiStringMap)                            \
   V(Map, sliced_string_map, SlicedStringMap)                                   \
   V(Map, sliced_ascii_string_map, SlicedAsciiStringMap)                        \
-  V(Map, ascii_symbol_map, AsciiSymbolMap)                                     \
   V(Map, cons_symbol_map, ConsSymbolMap)                                       \
   V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap)                            \
   V(Map, external_symbol_map, ExternalSymbolMap)                               \
@@ -101,6 +103,16 @@
   V(Map, external_string_map, ExternalStringMap)                               \
   V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap)  \
   V(Map, external_ascii_string_map, ExternalAsciiStringMap)                    \
+  V(Map, short_external_symbol_map, ShortExternalSymbolMap)                    \
+  V(Map,                                                                       \
+    short_external_symbol_with_ascii_data_map,                                 \
+    ShortExternalSymbolWithAsciiDataMap)                                       \
+  V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap)         \
+  V(Map, short_external_string_map, ShortExternalStringMap)                    \
+  V(Map,                                                                       \
+    short_external_string_with_ascii_data_map,                                 \
+    ShortExternalStringWithAsciiDataMap)                                       \
+  V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap)         \
   V(Map, undetectable_string_map, UndetectableStringMap)                       \
   V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap)            \
   V(Map, external_pixel_array_map, ExternalPixelArrayMap)                      \
@@ -117,14 +129,13 @@
   V(Map, catch_context_map, CatchContextMap)                                   \
   V(Map, with_context_map, WithContextMap)                                     \
   V(Map, block_context_map, BlockContextMap)                                   \
-  V(Map, code_map, CodeMap)                                                    \
+  V(Map, module_context_map, ModuleContextMap)                                 \
   V(Map, oddball_map, OddballMap)                                              \
-  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
-  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, message_object_map, JSMessageObjectMap)                               \
   V(Map, foreign_map, ForeignMap)                                              \
-  V(Object, nan_value, NanValue)                                               \
-  V(Object, minus_zero_value, MinusZeroValue)                                  \
+  V(HeapNumber, nan_value, NanValue)                                           \
+  V(HeapNumber, infinity_value, InfinityValue)                                 \
+  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
   V(Map, neander_map, NeanderMap)                                              \
   V(JSObject, message_listeners, MessageListeners)                             \
   V(Foreign, prototype_accessors, PrototypeAccessors)                          \
@@ -138,6 +149,8 @@
   V(Script, empty_script, EmptyScript)                                         \
   V(Smi, real_stack_limit, RealStackLimit)                                     \
   V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames)        \
+  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)     \
+  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)
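For orientation, each V(type, name, camel_name) entry above is consumed by X-macro clients; for example, the ROOT_ACCESSOR definition later in this header turns a single list entry into a typed getter:

  // V(Map, byte_array_map, ByteArrayMap) expands, via ROOT_ACCESSOR, to:
  Map* byte_array_map() {
    return Map::cast(roots_[kByteArrayMapRootIndex]);
  }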
 
 #define ROOT_LIST(V)                                  \
   STRONG_ROOT_LIST(V)                                 \
@@ -164,6 +177,7 @@
   V(eval_symbol, "eval")                                                 \
   V(function_symbol, "function")                                         \
   V(length_symbol, "length")                                             \
+  V(module_symbol, "module")                                             \
   V(name_symbol, "name")                                                 \
   V(native_symbol, "native")                                             \
   V(null_symbol, "null")                                                 \
@@ -192,12 +206,10 @@
   V(InitializeConstGlobal_symbol, "InitializeConstGlobal")               \
   V(KeyedLoadElementMonomorphic_symbol,                                  \
     "KeyedLoadElementMonomorphic")                                       \
-  V(KeyedLoadElementPolymorphic_symbol,                                  \
-    "KeyedLoadElementPolymorphic")                                       \
   V(KeyedStoreElementMonomorphic_symbol,                                 \
     "KeyedStoreElementMonomorphic")                                      \
-  V(KeyedStoreElementPolymorphic_symbol,                                 \
-    "KeyedStoreElementPolymorphic")                                      \
+  V(KeyedStoreAndGrowElementMonomorphic_symbol,                          \
+    "KeyedStoreAndGrowElementMonomorphic")                               \
   V(stack_overflow_symbol, "kStackOverflowBoilerplate")                  \
   V(illegal_access_symbol, "illegal access")                             \
   V(out_of_memory_symbol, "out-of-memory")                               \
@@ -227,7 +239,11 @@
   V(closure_symbol, "(closure)")                                         \
   V(use_strict, "use strict")                                            \
   V(dot_symbol, ".")                                                     \
-  V(anonymous_function_symbol, "(anonymous function)")
+  V(anonymous_function_symbol, "(anonymous function)")                   \
+  V(compare_ic_symbol, ".compare_ic")                                    \
+  V(infinity_symbol, "Infinity")                                         \
+  V(minus_infinity_symbol, "-Infinity")                                  \
+  V(hidden_stack_trace_symbol, "v8::hidden_stack_trace")
 
 // Forward declarations.
 class GCTracer;
@@ -239,10 +255,26 @@
 typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                       Object** pointer);
 
-typedef bool (*DirtyRegionCallback)(Heap* heap,
-                                    Address start,
-                                    Address end,
-                                    ObjectSlotCallback copy_object_func);
+class StoreBufferRebuilder {
+ public:
+  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer) {
+  }
+
+  void Callback(MemoryChunk* page, StoreBufferEvent event);
+
+ private:
+  StoreBuffer* store_buffer_;
+
+  // We record in this variable how full the store buffer was when we started
+  // iterating over the current page, finding pointers to new space.  If the
+  // store buffer overflows again we can exempt the page from the store buffer
+  // by rewinding to this point instead of having to search the store buffer.
+  Object*** start_of_current_page_;
+  // The current page we are scanning in the store buffer iterator.
+  MemoryChunk* current_page_;
+};
+
 
 
 // The all static Heap captures the interface to the global object heap.
@@ -257,32 +289,103 @@
 // by its size to avoid dereferencing a map pointer for scanning.
 class PromotionQueue {
  public:
-  PromotionQueue() : front_(NULL), rear_(NULL) { }
+  explicit PromotionQueue(Heap* heap)
+      : front_(NULL),
+        rear_(NULL),
+        limit_(NULL),
+        emergency_stack_(0),
+        heap_(heap) { }
 
-  void Initialize(Address start_address) {
-    front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
+  void Initialize();
+
+  void Destroy() {
+    ASSERT(is_empty());
+    delete emergency_stack_;
+    emergency_stack_ = NULL;
   }
 
-  bool is_empty() { return front_ <= rear_; }
+  inline void ActivateGuardIfOnTheSamePage();
+
+  Page* GetHeadPage() {
+    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  }
+
+  void SetNewLimit(Address limit) {
+    if (!guard_) {
+      return;
+    }
+
+    ASSERT(GetHeadPage() == Page::FromAllocationTop(limit));
+    limit_ = reinterpret_cast<intptr_t*>(limit);
+
+    if (limit_ <= rear_) {
+      return;
+    }
+
+    RelocateQueueHead();
+  }
+
+  bool is_empty() {
+    return (front_ == rear_) &&
+        (emergency_stack_ == NULL || emergency_stack_->length() == 0);
+  }
 
   inline void insert(HeapObject* target, int size);
 
   void remove(HeapObject** target, int* size) {
+    ASSERT(!is_empty());
+    if (front_ == rear_) {
+      Entry e = emergency_stack_->RemoveLast();
+      *target = e.obj_;
+      *size = e.size_;
+      return;
+    }
+
+    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
+      NewSpacePage* front_page =
+          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
+      ASSERT(!front_page->prev_page()->is_anchor());
+      front_ =
+          reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
+    }
     *target = reinterpret_cast<HeapObject*>(*(--front_));
     *size = static_cast<int>(*(--front_));
     // Assert no underflow.
-    ASSERT(front_ >= rear_);
+    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
+                                reinterpret_cast<Address>(front_));
   }
 
  private:
-  // The front of the queue is higher in memory than the rear.
+  // The front of the queue is higher in the memory page chain than the rear.
   intptr_t* front_;
   intptr_t* rear_;
+  intptr_t* limit_;
+
+  bool guard_;
+
+  static const int kEntrySizeInWords = 2;
+
+  struct Entry {
+    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }
+
+    HeapObject* obj_;
+    int size_;
+  };
+  List<Entry>* emergency_stack_;
+
+  Heap* heap_;
+
+  void RelocateQueueHead();
 
   DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
 };
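The matching insert() is only declared inline above; a minimal sketch of the push order it must use so that remove() pops the object word before the size word (the real definition lives elsewhere):

  // Sketch only: each entry is two words, object above size, filled
  // downwards from rear_.
  *(--rear_) = reinterpret_cast<intptr_t>(target);
  *(--rear_) = size;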
 
 
+typedef void (*ScavengingCallback)(Map* map,
+                                   HeapObject** slot,
+                                   HeapObject* object);
+
+
 // External strings table is a place where all external strings are
 // registered.  We need to keep track of such strings to properly
 // finalize them.
@@ -323,19 +426,24 @@
 };
 
 
+enum ArrayStorageAllocationMode {
+  DONT_INITIALIZE_ARRAY_ELEMENTS,
+  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+};
+
 class Heap {
  public:
   // Configure heap size before setup. Return false if the heap has been
-  // setup already.
+  // set up already.
   bool ConfigureHeap(int max_semispace_size,
-                     int max_old_gen_size,
-                     int max_executable_size);
+                     intptr_t max_old_gen_size,
+                     intptr_t max_executable_size);
   bool ConfigureHeapDefault();
 
   // Initializes the global object heap. If create_heap_objects is true,
   // also creates the basic non-mutable objects.
   // Returns whether it succeeded.
-  bool Setup(bool create_heap_objects);
+  bool SetUp(bool create_heap_objects);
 
   // Destroys all memory allocated by the heap.
   void TearDown();
@@ -345,8 +453,8 @@
   // jslimit_/real_jslimit_ variable in the StackGuard.
   void SetStackLimits();
 
-  // Returns whether Setup has been called.
-  bool HasBeenSetup();
+  // Returns whether SetUp has been called.
+  bool HasBeenSetUp();
 
   // Returns the maximum amount of memory reserved for the heap.  For
   // the young generation, we reserve 4 times the amount needed for a
@@ -377,9 +485,6 @@
   // all available bytes. Check MaxHeapObjectSize() instead.
   intptr_t Available();
 
-  // Returns the maximum object size in paged space.
-  inline int MaxObjectSizeInPagedSpace();
-
   // Returns the size of all objects residing in the heap.
   intptr_t SizeOfObjects();
 
@@ -424,6 +529,30 @@
   MUST_USE_RESULT MaybeObject* AllocateJSObject(
       JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
 
+  // Allocate a JSArray with no elements.
+  MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
+      ElementsKind elements_kind,
+      PretenureFlag pretenure = NOT_TENURED) {
+    return AllocateJSArrayAndStorage(elements_kind, 0, 0,
+                                     DONT_INITIALIZE_ARRAY_ELEMENTS,
+                                     pretenure);
+  }
+
+  // Allocate a JSArray with a specified length but elements that are left
+  // uninitialized.
+  MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
+      ElementsKind elements_kind,
+      int length,
+      int capacity,
+      ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
+      PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocate a JSArray with the given elements.
+  MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
+      FixedArrayBase* array_base,
+      ElementsKind elements_kind,
+      PretenureFlag pretenure = NOT_TENURED);
+
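A hypothetical call site for the new allocator, using the usual MaybeObject unwrapping idiom (the surrounding heap pointer and failure propagation are assumed context):

  Object* result;
  { MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(
        FAST_ELEMENTS, 0, 16, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
    if (!maybe_array->ToObject(&result)) return maybe_array;
  }
  JSArray* array = JSArray::cast(result);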
   // Allocates and initializes a new global object based on a constructor.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
@@ -457,6 +586,7 @@
   // size, but keeping the original prototype.  The receiver must have at least
   // the size of the new object.  The object is reinitialized and behaves as an
   // object that has been freshly allocated.
+  // Returns failure if an error occurred, otherwise the object.
   MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
                                                       InstanceType type,
                                                       int size);
@@ -485,8 +615,10 @@
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this function does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
-                                           int instance_size);
+  MUST_USE_RESULT MaybeObject* AllocateMap(
+      InstanceType instance_type,
+      int instance_size,
+      ElementsKind elements_kind = FAST_ELEMENTS);
 
   // Allocates a partial map for bootstrapping.
   MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@@ -499,11 +631,20 @@
   MUST_USE_RESULT MaybeObject* AllocateCodeCache();
 
   // Allocates a serialized scope info.
-  MUST_USE_RESULT MaybeObject* AllocateSerializedScopeInfo(int length);
+  MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
 
   // Allocates an empty PolymorphicCodeCache.
   MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
 
+  // Allocates a pre-tenured empty AccessorPair.
+  MUST_USE_RESULT MaybeObject* AllocateAccessorPair();
+
+  // Allocates an empty TypeFeedbackInfo.
+  MUST_USE_RESULT MaybeObject* AllocateTypeFeedbackInfo();
+
+  // Allocates an AliasedArgumentsEntry.
+  MUST_USE_RESULT MaybeObject* AllocateAliasedArgumentsEntry(int slot);
+
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();
 
@@ -576,7 +717,7 @@
       PretenureFlag pretenure = NOT_TENURED);
 
   // Computes a single character string where the character has code.
-  // A cache is used for ascii codes.
+  // A cache is used for ASCII codes.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed. Please note this does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* LookupSingleCharacterStringFromCode(
@@ -664,6 +805,13 @@
       int length,
       PretenureFlag pretenure = NOT_TENURED);
 
+  // Allocates a fixed double array with hole values. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  // Please note this does not perform a garbage collection.
+  MUST_USE_RESULT MaybeObject* AllocateFixedDoubleArrayWithHoles(
+      int length,
+      PretenureFlag pretenure = NOT_TENURED);
+
   // AllocateHashTable is identical to AllocateFixedArray except
   // that the resulting object has hash_table_map as map.
   MUST_USE_RESULT MaybeObject* AllocateHashTable(
@@ -689,7 +837,7 @@
   // Allocate a block context.
   MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function,
                                                     Context* previous,
-                                                    SerializedScopeInfo* info);
+                                                    ScopeInfo* info);
 
   // Allocates a new utility object in the old generation.
   MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
@@ -738,13 +886,15 @@
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* NumberFromInt32(int32_t value);
+  MUST_USE_RESULT inline MaybeObject* NumberFromInt32(
+      int32_t value, PretenureFlag pretenure = NOT_TENURED);
 
   // Converts an int into either a Smi or a HeapNumber object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* NumberFromUint32(uint32_t value);
+  MUST_USE_RESULT inline MaybeObject* NumberFromUint32(
+      uint32_t value, PretenureFlag pretenure = NOT_TENURED);
 
   // Allocates a new foreign object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -797,9 +947,9 @@
   // failed.
   // Please note this does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
-      ExternalAsciiString::Resource* resource);
+      const ExternalAsciiString::Resource* resource);
   MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
-      ExternalTwoByteString::Resource* resource);
+      const ExternalTwoByteString::Resource* resource);
 
   // Finalizes an external string by deleting the associated external
   // data and clearing the resource pointer.
@@ -878,19 +1028,41 @@
   // Performs garbage collection operation.
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
-  bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
+  bool CollectGarbage(AllocationSpace space,
+                      GarbageCollector collector,
+                      const char* gc_reason,
+                      const char* collector_reason);
 
   // Performs garbage collection operation.
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
-  inline bool CollectGarbage(AllocationSpace space);
+  inline bool CollectGarbage(AllocationSpace space,
+                             const char* gc_reason = NULL);
 
-  // Performs a full garbage collection. Force compaction if the
-  // parameter is true.
-  void CollectAllGarbage(bool force_compaction);
+  static const int kNoGCFlags = 0;
+  static const int kSweepPreciselyMask = 1;
+  static const int kReduceMemoryFootprintMask = 2;
+  static const int kAbortIncrementalMarkingMask = 4;
+
+  // Making the heap iterable requires us to sweep precisely and abort any
+  // incremental marking as well.
+  static const int kMakeHeapIterableMask =
+      kSweepPreciselyMask | kAbortIncrementalMarkingMask;
+
+  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
+  // non-zero, then the slower precise sweeper is used, which leaves the heap
+  // in a state where we can iterate over the heap visiting all objects.
+  void CollectAllGarbage(int flags, const char* gc_reason = NULL);
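Hypothetical call sites showing how the masks combine; the reason strings are illustrative:

  heap->CollectAllGarbage(Heap::kNoGCFlags, "context disposal");
  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "heap iteration");
  heap->CollectAllGarbage(Heap::kReduceMemoryFootprintMask |
                              Heap::kAbortIncrementalMarkingMask,
                          "idle notification");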
 
   // Last hope GC, should try to squeeze as much as possible.
-  void CollectAllAvailableGarbage();
+  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
+
+  // Check whether the heap is currently iterable.
+  bool IsHeapIterable();
+
+  // Ensure that we have swept all spaces in such a way that we can iterate
+  // over all objects.  May cause a GC.
+  void EnsureHeapIsIterable();
 
   // Notify the heap that a context has been disposed.
   int NotifyContextDisposed() { return ++contexts_disposed_; }
@@ -899,6 +1071,20 @@
   // ensure correct callback for weak global handles.
   void PerformScavenge();
 
+  inline void increment_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_++;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
+  inline void decrement_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_--;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
   PromotionQueue* promotion_queue() { return &promotion_queue_; }
 
 #ifdef DEBUG
@@ -925,6 +1111,8 @@
 
   // Heap root getters.  We have versions with and without type::cast() here.
   // You can't use type::cast during GC because the assert fails.
+  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
+  // not corrupt the map.
 #define ROOT_ACCESSOR(type, name, camel_name)                                  \
   type* name() {                                                               \
     return type::cast(roots_[k##camel_name##RootIndex]);                       \
@@ -958,6 +1146,9 @@
   }
   Object* global_contexts_list() { return global_contexts_list_; }
 
+  // Number of mark-sweeps.
+  int ms_count() { return ms_count_; }
+
   // Iterates over all roots in the heap.
   void IterateRoots(ObjectVisitor* v, VisitMode mode);
   // Iterates over all strong roots in the heap.
@@ -965,60 +1156,16 @@
   // Iterates over all the other roots in the heap.
   void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
 
-  enum ExpectedPageWatermarkState {
-    WATERMARK_SHOULD_BE_VALID,
-    WATERMARK_CAN_BE_INVALID
-  };
-
-  // For each dirty region on a page in use from an old space call
-  // visit_dirty_region callback.
-  // If either visit_dirty_region or callback can cause an allocation
-  // in old space and changes in allocation watermark then
-  // can_preallocate_during_iteration should be set to true.
-  // All pages will be marked as having invalid watermark upon
-  // iteration completion.
-  void IterateDirtyRegions(
-      PagedSpace* space,
-      DirtyRegionCallback visit_dirty_region,
-      ObjectSlotCallback callback,
-      ExpectedPageWatermarkState expected_page_watermark_state);
-
-  // Interpret marks as a bitvector of dirty marks for regions of size
-  // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
-  // memory interval from start to top. For each dirty region call a
-  // visit_dirty_region callback. Return updated bitvector of dirty marks.
-  uint32_t IterateDirtyRegions(uint32_t marks,
-                               Address start,
-                               Address end,
-                               DirtyRegionCallback visit_dirty_region,
-                               ObjectSlotCallback callback);
-
   // Iterate over pointers into the from-semispace of new space found in
   // the memory interval from start to end.
-  // Update dirty marks for page containing start address.
   void IterateAndMarkPointersToFromSpace(Address start,
                                          Address end,
                                          ObjectSlotCallback callback);
 
-  // Iterate pointers to new space found in memory interval from start to end.
-  // Return true if pointers to new space was found.
-  static bool IteratePointersInDirtyRegion(Heap* heap,
-                                           Address start,
-                                           Address end,
-                                           ObjectSlotCallback callback);
-
-
-  // Iterate pointers to new space found in memory interval from start to end.
-  // This interval is considered to belong to the map space.
-  // Return true if pointers to new space was found.
-  static bool IteratePointersInDirtyMapsRegion(Heap* heap,
-                                               Address start,
-                                               Address end,
-                                               ObjectSlotCallback callback);
-
-
   // Returns whether the object resides in new space.
   inline bool InNewSpace(Object* object);
+  inline bool InNewSpace(Address addr);
+  inline bool InNewSpacePage(Address addr);
   inline bool InFromSpace(Object* object);
   inline bool InToSpace(Object* object);
 
@@ -1057,11 +1204,19 @@
     roots_[kEmptyScriptRootIndex] = script;
   }
 
+  void public_set_store_buffer_top(Address* top) {
+    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
+  }
+
   // Update the next script id.
   inline void SetLastScriptId(Object* last_script_id);
 
   // Generated code can embed this address to get access to the roots.
-  Object** roots_address() { return roots_; }
+  Object** roots_array_start() { return roots_; }
+
+  Address* store_buffer_top_address() {
+    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
+  }
 
   // Get address of global contexts list for serialization support.
   Object** global_contexts_list_address() {
@@ -1075,6 +1230,14 @@
   // Verify the heap is in its normal state before or after a GC.
   void Verify();
 
+  // Verify that AccessorPairs are not shared, i.e. make sure that they have
+  // exactly one pointer to them.
+  void VerifyNoAccessorPairSharing();
+
+  void OldPointerSpaceCheckStoreBuffer();
+  void MapSpaceCheckStoreBuffer();
+  void LargeObjectSpaceCheckStoreBuffer();
+
   // Report heap statistics.
   void ReportHeapStatistics(const char* title);
   void ReportCodeStatistics(const char* title);
@@ -1170,26 +1333,63 @@
   MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
                                                      PretenureFlag pretenure);
 
+  inline intptr_t PromotedTotalSize() {
+    return PromotedSpaceSize() + PromotedExternalMemorySize();
+  }
+
   // True if we have reached the allocation limit in the old generation that
   // should force the next GC (caused normally) to be a full one.
-  bool OldGenerationPromotionLimitReached() {
-    return (PromotedSpaceSize() + PromotedExternalMemorySize())
-           > old_gen_promotion_limit_;
+  inline bool OldGenerationPromotionLimitReached() {
+    return PromotedTotalSize() > old_gen_promotion_limit_;
   }
 
-  intptr_t OldGenerationSpaceAvailable() {
-    return old_gen_allocation_limit_ -
-           (PromotedSpaceSize() + PromotedExternalMemorySize());
+  inline intptr_t OldGenerationSpaceAvailable() {
+    return old_gen_allocation_limit_ - PromotedTotalSize();
   }
 
-  // True if we have reached the allocation limit in the old generation that
-  // should artificially cause a GC right now.
-  bool OldGenerationAllocationLimitReached() {
-    return OldGenerationSpaceAvailable() < 0;
+  inline intptr_t OldGenerationCapacityAvailable() {
+    return max_old_generation_size_ - PromotedTotalSize();
   }
 
-  // Can be called when the embedding application is idle.
-  bool IdleNotification();
+  static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
+  static const intptr_t kMinimumAllocationLimit =
+      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
+
+  // When we sweep lazily we initially guess that there is no garbage on the
+  // heap and set the limits for the next GC accordingly.  As we sweep we find
+  // out that some of the pages contained garbage and we have to adjust the
+  // size of the heap downwards.  This means the limits that control the
+  // timing of the next GC also need to be adjusted downwards.
+  void LowerOldGenLimits(intptr_t adjustment) {
+    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
+    old_gen_promotion_limit_ =
+        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+    old_gen_allocation_limit_ =
+        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+  }
+
+  intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
+    const int divisor = FLAG_stress_compaction ? 10 : 3;
+    intptr_t limit =
+        Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
+    limit += new_space_.Capacity();
+    limit *= old_gen_limit_factor_;
+    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+    return Min(limit, halfway_to_the_max);
+  }
+
+  intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
+    const int divisor = FLAG_stress_compaction ? 8 : 2;
+    intptr_t limit =
+        Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
+    limit += new_space_.Capacity();
+    limit *= old_gen_limit_factor_;
+    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+    return Min(limit, halfway_to_the_max);
+  }
+
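A worked reading of OldGenPromotionLimit under assumed inputs: a 30 MB old generation, 2 MB new-space capacity, old_gen_limit_factor_ of 1, a 700 MB max_old_generation_size_, and no --stress-compaction (so divisor 3):

  limit   = Max(30 MB + 30 MB / 3, kMinimumPromotionLimit)  // = 40 MB
  limit  += 2 MB                                            // = 42 MB
  limit  *= 1                                               // = 42 MB
  halfway = (30 MB + 700 MB) / 2                            // = 365 MB
  result  = Min(42 MB, 365 MB)                              // = 42 MB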
+  // Implements the corresponding V8 API function.
+  bool IdleNotification(int hint);
 
   // Declare all the root indices.
   enum RootListIndex {
@@ -1213,6 +1413,8 @@
 
   MUST_USE_RESULT MaybeObject* NumberToString(
       Object* number, bool check_number_string_cache = true);
+  MUST_USE_RESULT MaybeObject* Uint32ToString(
+      uint32_t value, bool check_number_string_cache = true);
 
   Map* MapForExternalArrayType(ExternalArrayType array_type);
   RootListIndex RootIndexForExternalArrayType(
@@ -1224,31 +1426,48 @@
   // by pointer size.
   static inline void CopyBlock(Address dst, Address src, int byte_size);
 
-  inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                      Address src,
-                                                      int byte_size);
-
   // Optimized version of memmove for blocks with pointer size aligned sizes and
   // pointer size aligned addresses.
   static inline void MoveBlock(Address dst, Address src, int byte_size);
 
-  inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                      Address src,
-                                                      int byte_size);
-
   // Check new space expansion criteria and expand semispaces if it was hit.
   void CheckNewSpaceExpansionCriteria();
 
   inline void IncrementYoungSurvivorsCounter(int survived) {
+    ASSERT(survived >= 0);
     young_survivors_after_last_gc_ = survived;
     survived_since_last_expansion_ += survived;
   }
 
+  inline bool NextGCIsLikelyToBeFull() {
+    if (FLAG_gc_global) return true;
+
+    intptr_t total_promoted = PromotedTotalSize();
+
+    intptr_t adjusted_promotion_limit =
+        old_gen_promotion_limit_ - new_space_.Capacity();
+
+    if (total_promoted >= adjusted_promotion_limit) return true;
+
+    intptr_t adjusted_allocation_limit =
+        old_gen_allocation_limit_ - new_space_.Capacity() / 5;
+
+    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+
+    return false;
+  }
+
+
   void UpdateNewSpaceReferencesInExternalStringTable(
       ExternalStringTableUpdaterCallback updater_func);
 
+  void UpdateReferencesInExternalStringTable(
+      ExternalStringTableUpdaterCallback updater_func);
+
   void ProcessWeakReferences(WeakObjectRetainer* retainer);
 
+  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
+
   // Helper function that governs the promotion policy from new space to
   // old.  If the object's old address lies below the new space's age
   // mark or if we've already filled the bottom 1/16th of the to space,
@@ -1261,8 +1480,19 @@
 
   void ClearNormalizedMapCaches();
 
+  // Clears the cache of ICs related to this map.
+  void ClearCacheOnMap(Map* map) {
+    if (FLAG_cleanup_code_caches_at_gc) {
+      map->ClearCodeCache(this);
+    }
+  }
+
   GCTracer* tracer() { return tracer_; }
 
+  // Returns the size of objects residing in non-new spaces.
+  intptr_t PromotedSpaceSize();
+  intptr_t PromotedSpaceSizeOfObjects();
+
   double total_regexp_code_generated() { return total_regexp_code_generated_; }
   void IncreaseTotalRegexpCodeGenerated(int size) {
     total_regexp_code_generated_ += size;
@@ -1281,6 +1511,29 @@
     return &mark_compact_collector_;
   }
 
+  StoreBuffer* store_buffer() {
+    return &store_buffer_;
+  }
+
+  Marking* marking() {
+    return &marking_;
+  }
+
+  IncrementalMarking* incremental_marking() {
+    return &incremental_marking_;
+  }
+
+  bool IsSweepingComplete() {
+    return old_data_space()->IsSweepingComplete() &&
+           old_pointer_space()->IsSweepingComplete();
+  }
+
+  bool AdvanceSweepers(int step_size) {
+    bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
+    sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
+    return sweeping_complete;
+  }
+
   ExternalStringTable* external_string_table() {
     return &external_string_table_;
   }
@@ -1291,22 +1544,64 @@
   }
 
   inline Isolate* isolate();
-  bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
 
-  void CallGlobalGCPrologueCallback() {
+  inline void CallGlobalGCPrologueCallback() {
     if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
   }
 
-  void CallGlobalGCEpilogueCallback() {
+  inline void CallGlobalGCEpilogueCallback() {
     if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
   }
 
+  inline bool OldGenerationAllocationLimitReached();
+
+  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+  }
+
+  void QueueMemoryChunkForFree(MemoryChunk* chunk);
+  void FreeQueuedChunks();
+
+  // Completely clear the Instanceof cache (to stop it keeping objects alive
+  // around a GC).
+  inline void CompletelyClearInstanceofCache();
+
+  // The roots that have an index less than this are always in old space.
+  static const int kOldSpaceRoots = 0x20;
+
+  bool idle_notification_will_schedule_next_gc() {
+    return idle_notification_will_schedule_next_gc_;
+  }
+
   uint32_t HashSeed() {
     uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
     ASSERT(FLAG_randomize_hashes || seed == 0);
     return seed;
   }
 
+  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
+    ASSERT(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
+    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
+  }
+
+  void SetConstructStubDeoptPCOffset(int pc_offset) {
+    ASSERT(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
+    set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+  }
+
+  // For post mortem debugging.
+  void RememberUnmappedPage(Address page, bool compacted);
+
+  // Global inline caching age: it is incremented on some GCs after context
+  // disposal. We use it to flush inline caches.
+  int global_ic_age() {
+    return global_ic_age_;
+  }
+
+  void AgeInlineCaches() {
+    ++global_ic_age_;
+  }
+
  private:
   Heap();
 
@@ -1314,12 +1609,12 @@
   // more expedient to get at the isolate directly from within Heap methods.
   Isolate* isolate_;
 
+  intptr_t code_range_size_;
   int reserved_semispace_size_;
   int max_semispace_size_;
   int initial_semispace_size_;
   intptr_t max_old_generation_size_;
   intptr_t max_executable_size_;
-  intptr_t code_range_size_;
 
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
@@ -1334,6 +1629,10 @@
   // For keeping track of context disposals.
   int contexts_disposed_;
 
+  int global_ic_age_;
+
+  int scan_on_scavenge_pages_;
+
 #if defined(V8_TARGET_ARCH_X64)
   static const int kMaxObjectSizeInNewSpace = 1024*KB;
 #else
@@ -1350,21 +1649,25 @@
   HeapState gc_state_;
   int gc_post_processing_depth_;
 
-  // Returns the size of object residing in non new spaces.
-  intptr_t PromotedSpaceSize();
-
   // Returns the amount of external memory registered since last global gc.
   int PromotedExternalMemorySize();
 
-  int mc_count_;  // how many mark-compact collections happened
   int ms_count_;  // how many mark-sweep collections happened
   unsigned int gc_count_;  // how many gc happened
 
+  // For post mortem debugging.
+  static const int kRememberedUnmappedPages = 128;
+  int remembered_unmapped_pages_index_;
+  Address remembered_unmapped_pages_[kRememberedUnmappedPages];
+
   // Total length of the strings we failed to flatten since the last GC.
   int unflattened_strings_length_;
 
 #define ROOT_ACCESSOR(type, name, camel_name)                                  \
-  inline void set_##name(type* value) {                                 \
+  inline void set_##name(type* value) {                                        \
+    /* The deserializer makes use of the fact that these common roots are */   \
+    /* never in new space and never on a page that is being compacted.    */   \
+    ASSERT(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value));  \
     roots_[k##camel_name##RootIndex] = value;                                  \
   }
   ROOT_LIST(ROOT_ACCESSOR)
@@ -1385,6 +1688,10 @@
   HeapDebugUtils* debug_utils_;
 #endif  // DEBUG
 
+  // Indicates that the new space should be kept small due to high promotion
+  // rates caused by the mutator allocating a lot of long-lived objects.
+  bool new_space_high_promotion_mode_active_;
+
   // Limit that triggers a global GC on the next (normally caused) GC.  This
   // is checked when we have already decided to do a GC to help determine
   // which collector to invoke.
@@ -1395,6 +1702,13 @@
   // every allocation in large object space.
   intptr_t old_gen_allocation_limit_;
 
+  // Sometimes the heuristics dictate that those limits are increased.  This
+  // variable records that fact.
+  int old_gen_limit_factor_;
+
+  // Used to adjust the limits that control the timing of the next GC.
+  intptr_t size_of_old_gen_at_last_old_space_gc_;
+
   // Limit on the amount of externally allocated memory allowed
   // between global GCs. If reached a global GC is forced.
   intptr_t external_allocation_limit_;
@@ -1414,6 +1728,8 @@
 
   Object* global_contexts_list_;
 
+  StoreBufferRebuilder store_buffer_rebuilder_;
+
   struct StringTypeTable {
     InstanceType type;
     int size;
@@ -1471,17 +1787,16 @@
   // Support for computing object sizes during GC.
   HeapObjectCallback gc_safe_size_of_old_object_;
   static int GcSafeSizeOfOldObject(HeapObject* object);
-  static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
 
   // Update the GC state. Called from the mark-compact collector.
   void MarkMapPointersAsEncoded(bool encoded) {
-    gc_safe_size_of_old_object_ = encoded
-        ? &GcSafeSizeOfOldObjectWithEncodedMap
-        : &GcSafeSizeOfOldObject;
+    ASSERT(!encoded);
+    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
   }
 
   // Checks whether a global GC is necessary
-  GarbageCollector SelectGarbageCollector(AllocationSpace space);
+  GarbageCollector SelectGarbageCollector(AllocationSpace space,
+                                          const char** reason);
 
   // Performs garbage collection
   // Returns whether there is a chance another major GC could
@@ -1489,8 +1804,6 @@
   bool PerformGarbageCollection(GarbageCollector collector,
                                 GCTracer* tracer);
 
-  static const intptr_t kMinimumPromotionLimit = 2 * MB;
-  static const intptr_t kMinimumAllocationLimit = 8 * MB;
 
   inline void UpdateOldSpaceLimits();
 
@@ -1522,14 +1835,17 @@
                              Object* to_number,
                              byte kind);
 
+  // Allocate a JSArray with no elements.
+  MUST_USE_RESULT MaybeObject* AllocateJSArray(
+      ElementsKind elements_kind,
+      PretenureFlag pretenure = NOT_TENURED);
+
   // Allocate empty fixed array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
 
   // Allocate empty fixed double array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
 
-  void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
-
   // Performs a minor collection in new generation.
   void Scavenge();
 
@@ -1538,16 +1854,15 @@
       Object** pointer);
 
   Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+  static void ScavengeStoreBufferCallback(Heap* heap,
+                                          MemoryChunk* page,
+                                          StoreBufferEvent event);
 
   // Performs a major collection in the whole heap.
   void MarkCompact(GCTracer* tracer);
 
   // Code to be run before and after mark-compact.
-  void MarkCompactPrologue(bool is_compacting);
-
-  // Completely clear the Instanceof cache (to stop it keeping objects alive
-  // around a GC).
-  inline void CompletelyClearInstanceofCache();
+  void MarkCompactPrologue();
 
   // Record statistics before and after garbage collection.
   void ReportStatisticsBeforeGC();
@@ -1557,12 +1872,11 @@
   static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
 
   // Initializes a function with a shared part and prototype.
-  // Returns the function.
   // Note: this code was factored out of AllocateFunction such that
   // other parts of the VM could use it. Specifically, a function that creates
   // instances of type JS_FUNCTION_TYPE benefits from the use of this function.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* InitializeFunction(
+  inline void InitializeFunction(
       JSFunction* function,
       SharedFunctionInfo* shared,
       Object* prototype);
@@ -1573,8 +1887,13 @@
   GCTracer* tracer_;
 
 
-  // Initializes the number to string cache based on the max semispace size.
-  MUST_USE_RESULT MaybeObject* InitializeNumberStringCache();
+  // Allocates a small number to string cache.
+  MUST_USE_RESULT MaybeObject* AllocateInitialNumberStringCache();
+  // Creates and installs the full-sized number string cache.
+  void AllocateFullSizeNumberStringCache();
+  // Get the length of the number to string cache based on the max semispace
+  // size.
+  int FullSizeNumberStringCacheLength();
   // Flush the number to string cache.
   void FlushNumberStringCache();
 
@@ -1582,11 +1901,13 @@
 
   enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
 
-  static const int kYoungSurvivalRateThreshold = 90;
+  static const int kYoungSurvivalRateHighThreshold = 90;
+  static const int kYoungSurvivalRateLowThreshold = 10;
   static const int kYoungSurvivalRateAllowedDeviation = 15;
 
   int young_survivors_after_last_gc_;
   int high_survival_rate_period_length_;
+  int low_survival_rate_period_length_;
   double survival_rate_;
   SurvivalRateTrend previous_survival_rate_trend_;
   SurvivalRateTrend survival_rate_trend_;
@@ -1619,6 +1940,16 @@
     }
   }
 
+  bool IsStableOrDecreasingSurvivalTrend() {
+    switch (survival_rate_trend()) {
+      case STABLE:
+      case DECREASING:
+        return true;
+      default:
+        return false;
+    }
+  }
+
   bool IsIncreasingSurvivalTrend() {
     return survival_rate_trend() == INCREASING;
   }
@@ -1627,8 +1958,54 @@
     return high_survival_rate_period_length_ > 0;
   }
 
+  bool IsLowSurvivalRate() {
+    return low_survival_rate_period_length_ > 0;
+  }
+
+  void SelectScavengingVisitorsTable();
+
+  void StartIdleRound() {
+    mark_sweeps_since_idle_round_started_ = 0;
+    ms_count_at_last_idle_notification_ = ms_count_;
+  }
+
+  void FinishIdleRound() {
+    mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
+    scavenges_since_last_idle_round_ = 0;
+  }
+
+  bool EnoughGarbageSinceLastIdleRound() {
+    return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
+  }
+
+  bool WorthStartingGCWhenIdle() {
+    if (contexts_disposed_ > 0) {
+      return true;
+    }
+    return incremental_marking()->WorthActivating();
+  }
+
+  // Estimates how many milliseconds a Mark-Sweep would take to complete.
+  // In the idle notification handler we assume that this function will
+  // return:
+  // - a number less than 10 for small heaps, which are less than 8 MB.
+  // - a number greater than 10 for large heaps, which are greater than 32 MB.
+  int TimeMarkSweepWouldTakeInMs() {
+    // Rough estimate of how many megabytes of heap can be processed in 1 ms.
+    static const int kMbPerMs = 2;
+
+    int heap_size_mb = static_cast<int>(SizeOfObjects() / MB);
+    return heap_size_mb / kMbPerMs;
+  }
+
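Worked numbers for the comment above: an 8 MB heap yields 8 / 2 = 4 ms (below 10), while a 32 MB heap yields 32 / 2 = 16 ms (above 10), which is how the idle notification handler distinguishes small heaps from large ones.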
+  // Returns true if no more GC work is left.
+  bool IdleGlobalGC();
+
+  void AdvanceIdleIncrementalMarking(intptr_t step_size);
+
+
   static const int kInitialSymbolTableSize = 2048;
   static const int kInitialEvalCacheSize = 64;
+  static const int kInitialNumberStringCacheSize = 256;
 
   // Maximum GC pause.
   int max_gc_pause_;
@@ -1646,25 +2023,37 @@
 
   MarkCompactCollector mark_compact_collector_;
 
-  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
-  // Instead of clearing this flag from all pages we just flip
-  // its meaning at the beginning of a scavenge.
-  intptr_t page_watermark_invalidated_mark_;
+  StoreBuffer store_buffer_;
+
+  Marking marking_;
+
+  IncrementalMarking incremental_marking_;
 
   int number_idle_notifications_;
   unsigned int last_idle_notification_gc_count_;
   bool last_idle_notification_gc_count_init_;
 
+  bool idle_notification_will_schedule_next_gc_;
+  int mark_sweeps_since_idle_round_started_;
+  int ms_count_at_last_idle_notification_;
+  unsigned int gc_count_at_last_idle_gc_;
+  int scavenges_since_last_idle_round_;
+
+  static const int kMaxMarkSweepsInIdleRound = 7;
+  static const int kIdleScavengeThreshold = 5;
+
   // Shared state read by the scavenge collector and set by ScavengeObject.
   PromotionQueue promotion_queue_;
 
   // Flag is set when the heap has been configured.  The heap can be repeatedly
-  // configured through the API until it is setup.
+  // configured through the API until it is set up.
   bool configured_;
 
   ExternalStringTable external_string_table_;
 
-  bool is_safe_to_read_maps_;
+  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+  MemoryChunk* chunks_queued_for_free_;
 
   friend class Factory;
   friend class GCTracer;
@@ -1716,32 +2105,15 @@
 
 class AlwaysAllocateScope {
  public:
-  AlwaysAllocateScope() {
-    // We shouldn't hit any nested scopes, because that requires
-    // non-handle code to call handle code. The code still works but
-    // performance will degrade, so we want to catch this situation
-    // in debug mode.
-    ASSERT(HEAP->always_allocate_scope_depth_ == 0);
-    HEAP->always_allocate_scope_depth_++;
-  }
-
-  ~AlwaysAllocateScope() {
-    HEAP->always_allocate_scope_depth_--;
-    ASSERT(HEAP->always_allocate_scope_depth_ == 0);
-  }
+  inline AlwaysAllocateScope();
+  inline ~AlwaysAllocateScope();
 };
 
 
 class LinearAllocationScope {
  public:
-  LinearAllocationScope() {
-    HEAP->linear_allocation_scope_depth_++;
-  }
-
-  ~LinearAllocationScope() {
-    HEAP->linear_allocation_scope_depth_--;
-    ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
-  }
+  inline LinearAllocationScope();
+  inline ~LinearAllocationScope();
 };
 
 
@@ -1753,38 +2125,7 @@
 // objects in a heap space but above the allocation pointer.
 class VerifyPointersVisitor: public ObjectVisitor {
  public:
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        ASSERT(HEAP->Contains(object));
-        ASSERT(object->map()->IsMap());
-      }
-    }
-  }
-};
-
-
-// Visitor class to verify interior pointers in spaces that use region marks
-// to keep track of intergenerational references.
-// As VerifyPointersVisitor but also checks that dirty marks are set
-// for regions covering intergenerational references.
-class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        ASSERT(HEAP->Contains(object));
-        ASSERT(object->map()->IsMap());
-        if (HEAP->InNewSpace(object)) {
-          ASSERT(HEAP->InToSpace(object));
-          Address addr = reinterpret_cast<Address>(current);
-          ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
-        }
-      }
-    }
-  }
+  inline void VisitPointers(Object** start, Object** end);
 };
 #endif
 
@@ -1860,7 +2201,6 @@
  public:
   enum HeapObjectsFiltering {
     kNoFiltering,
-    kFilterFreeListNodes,
     kFilterUnreachable
   };
 
@@ -1900,11 +2240,17 @@
   // Clear the cache.
   void Clear();
 
-  static const int kLength = 64;
+  static const int kLength = 256;
   static const int kCapacityMask = kLength - 1;
-  static const int kMapHashShift = 2;
+  static const int kMapHashShift = 5;
+  static const int kHashMask = -4;  // Zero the last two bits.
+  static const int kEntriesPerBucket = 4;
   static const int kNotFound = -1;
 
+  // kEntriesPerBucket should be a power of 2.
+  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
+  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
+
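A worked example of the bucket arithmetic (how the raw hash is computed is not shown here): with kLength = 256 and kEntriesPerBucket = 4 there are 64 buckets. For a hash value h = 0x1a7, h & kCapacityMask = 0xa7 and 0xa7 & kHashMask = 0xa4, so a probe starts at entry 0xa4 and may inspect entries 0xa4 through 0xa7.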
  private:
   KeyedLookupCache() {
     for (int i = 0; i < kLength; ++i) {
@@ -2005,6 +2351,18 @@
 };
 
 
+#ifdef DEBUG
+class DisallowAllocationFailure {
+ public:
+  inline DisallowAllocationFailure();
+  inline ~DisallowAllocationFailure();
+
+ private:
+  bool old_state_;
+};
+#endif
+
+
 // A helper class to document/test C++ scopes where we do not
 // expect a GC. Usage:
 //
@@ -2012,65 +2370,28 @@
 // { AssertNoAllocation nogc;
 //   ...
 // }
+class AssertNoAllocation {
+ public:
+  inline AssertNoAllocation();
+  inline ~AssertNoAllocation();
 
 #ifdef DEBUG
-
-class DisallowAllocationFailure {
- public:
-  DisallowAllocationFailure() {
-    old_state_ = HEAP->disallow_allocation_failure_;
-    HEAP->disallow_allocation_failure_ = true;
-  }
-  ~DisallowAllocationFailure() {
-    HEAP->disallow_allocation_failure_ = old_state_;
-  }
  private:
   bool old_state_;
-};
-
-class AssertNoAllocation {
- public:
-  AssertNoAllocation() {
-    old_state_ = HEAP->allow_allocation(false);
-  }
-
-  ~AssertNoAllocation() {
-    HEAP->allow_allocation(old_state_);
-  }
-
- private:
-  bool old_state_;
-};
-
-class DisableAssertNoAllocation {
- public:
-  DisableAssertNoAllocation() {
-    old_state_ = HEAP->allow_allocation(true);
-  }
-
-  ~DisableAssertNoAllocation() {
-    HEAP->allow_allocation(old_state_);
-  }
-
- private:
-  bool old_state_;
-};
-
-#else  // ndef DEBUG
-
-class AssertNoAllocation {
- public:
-  AssertNoAllocation() { }
-  ~AssertNoAllocation() { }
-};
-
-class DisableAssertNoAllocation {
- public:
-  DisableAssertNoAllocation() { }
-  ~DisableAssertNoAllocation() { }
-};
-
 #endif
+};
+
+
+class DisableAssertNoAllocation {
+ public:
+  inline DisableAssertNoAllocation();
+  inline ~DisableAssertNoAllocation();
+
+#ifdef DEBUG
+ private:
+  bool old_state_;
+#endif
+};
 
 // GCTracer collects and prints ONE line after each garbage collector
 // invocation IFF --trace_gc is used.
@@ -2084,7 +2405,13 @@
       MC_MARK,
       MC_SWEEP,
       MC_SWEEP_NEWSPACE,
-      MC_COMPACT,
+      MC_EVACUATE_PAGES,
+      MC_UPDATE_NEW_TO_NEW_POINTERS,
+      MC_UPDATE_ROOT_TO_NEW_POINTERS,
+      MC_UPDATE_OLD_TO_NEW_POINTERS,
+      MC_UPDATE_POINTERS_TO_EVACUATED,
+      MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
+      MC_UPDATE_MISC_POINTERS,
       MC_FLUSH_CODE,
       kNumberOfScopes
     };
@@ -2106,7 +2433,9 @@
     double start_time_;
   };
 
-  explicit GCTracer(Heap* heap);
+  explicit GCTracer(Heap* heap,
+                    const char* gc_reason,
+                    const char* collector_reason);
   ~GCTracer();
 
   // Sets the collector.
@@ -2118,16 +2447,6 @@
   // Sets the full GC count.
   void set_full_gc_count(int count) { full_gc_count_ = count; }
 
-  // Sets the flag that this is a compacting full GC.
-  void set_is_compacting() { is_compacting_ = true; }
-  bool is_compacting() const { return is_compacting_; }
-
-  // Increment and decrement the count of marked objects.
-  void increment_marked_count() { ++marked_count_; }
-  void decrement_marked_count() { --marked_count_; }
-
-  int marked_count() { return marked_count_; }
-
   void increment_promoted_objects_size(int object_size) {
     promoted_objects_size_ += object_size;
   }
@@ -2137,38 +2456,27 @@
   const char* CollectorString();
 
   // Returns size of object in heap (in MB).
-  double SizeOfHeapObjects() {
-    return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
-  }
+  inline double SizeOfHeapObjects();
 
-  double start_time_;  // Timestamp set in the constructor.
-  intptr_t start_size_;  // Size of objects in heap set in constructor.
-  GarbageCollector collector_;  // Type of collector.
+  // Timestamp set in the constructor.
+  double start_time_;
 
-  // A count (including this one, eg, the first collection is 1) of the
+  // Size of objects in heap set in constructor.
+  intptr_t start_object_size_;
+
+  // Size of memory allocated from OS set in constructor.
+  intptr_t start_memory_size_;
+
+  // Type of collector.
+  GarbageCollector collector_;
+
+  // A count (including this one, e.g. the first collection is 1) of the
   // number of garbage collections.
   unsigned int gc_count_;
 
   // A count (including this one) of the number of full garbage collections.
   int full_gc_count_;
 
-  // True if the current GC is a compacting full collection, false
-  // otherwise.
-  bool is_compacting_;
-
-  // True if the *previous* full GC was a compacting collection (will be
-  // false if there has not been a previous full GC).
-  bool previous_has_compacted_;
-
-  // On a full GC, a count of the number of marked objects.  Incremented
-  // when an object is marked and decremented when an object's mark bit is
-  // cleared.  Will be zero on a scavenge collection.
-  int marked_count_;
-
-  // The count from the end of the previous full GC.  Will be zero if there
-  // was no previous full GC.
-  int previous_marked_count_;
-
   // Amounts of time spent in different scopes during GC.
   double scopes_[Scope::kNumberOfScopes];
 
@@ -2187,7 +2495,17 @@
   // Size of objects promoted during the current collection.
   intptr_t promoted_objects_size_;
 
+  // Incremental marking step counters.
+  int steps_count_;
+  double steps_took_;
+  double longest_step_;
+  int steps_count_since_last_gc_;
+  double steps_took_since_last_gc_;
+
   Heap* heap_;
+
+  const char* gc_reason_;
+  const char* collector_reason_;
 };
 
 
@@ -2298,6 +2616,46 @@
 };
 
 
+// Intrusive object marking uses the least significant bit of a heap
+// object's map word to mark objects. Normally all map words have the
+// least significant bit set because they contain a tagged map pointer.
+// If the bit is not set, the object is marked. All objects must be
+// unmarked before JavaScript execution is resumed.
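+// For illustration (assuming V8's usual kHeapObjectTag == 1): an unmarked
+// object's map word ends in binary ...01 (a tagged Map pointer); SetMark()
+// clears that low bit to give ...00, and ClearMark() restores it.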
+class IntrusiveMarking {
+ public:
+  static bool IsMarked(HeapObject* object) {
+    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
+  }
+
+  static void ClearMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
+    ASSERT(!IsMarked(object));
+  }
+
+  static void SetMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
+    ASSERT(IsMarked(object));
+  }
+
+  static Map* MapOfMarkedObject(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
+  }
+
+  static int SizeOfMarkedObject(HeapObject* object) {
+    return object->SizeFromMap(MapOfMarkedObject(object));
+  }
+
+ private:
+  static const uintptr_t kNotMarkedBit = 0x1;
+  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
+};
+
+
 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
 // Helper class for tracing paths to a search target Object from all roots.
 // The TracePathFrom() method can be used to trace paths from a specific
@@ -2352,13 +2710,11 @@
 
   AssertNoAllocation no_alloc;  // i.e. no gc allowed.
 
+ private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
 };
 #endif  // DEBUG || LIVE_OBJECT_LIST
 
-
 } }  // namespace v8::internal
 
-#undef HEAP
-
 #endif  // V8_HEAP_H_
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 5630ce3..f698da4 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -67,6 +67,14 @@
 }
 
 
+int HValue::LoopWeight() const {
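+  // Informal note: with loop weight w (FLAG_loop_weight), a use at loop
+  // nesting depth d counts as w^d, capped at w^4, so uses inside inner
+  // loops weigh exponentially more when representation uses are tallied.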
+  const int w = FLAG_loop_weight;
+  static const int weights[] = { 1, w, w*w, w*w*w, w*w*w*w };
+  return weights[Min(block()->LoopNestingDepth(),
+                     static_cast<int>(ARRAY_SIZE(weights)-1))];
+}
+
+
 void HValue::AssumeRepresentation(Representation r) {
   if (CheckFlag(kFlexibleRepresentation)) {
     ChangeRepresentation(r);
@@ -126,7 +134,9 @@
   bool may_overflow = false;  // Overflow is ignored here.
   lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
   upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
+#ifdef DEBUG
   Verify();
+#endif
 }
 
 
@@ -173,7 +183,9 @@
   lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
   upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
   KeepOrder();
+#ifdef DEBUG
   Verify();
+#endif
   return may_overflow;
 }
 
@@ -183,7 +195,9 @@
   lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
   upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
   KeepOrder();
+#ifdef DEBUG
   Verify();
+#endif
   return may_overflow;
 }
 
@@ -197,9 +211,11 @@
 }
 
 
+#ifdef DEBUG
 void Range::Verify() const {
   ASSERT(lower_ <= upper_);
 }
+#endif
 
 
 bool Range::MulAndCheckOverflow(Range* other) {
@@ -210,7 +226,9 @@
   int v4 = MulWithoutOverflow(upper_, other->upper(), &may_overflow);
   lower_ = Min(Min(v1, v2), Min(v3, v4));
   upper_ = Max(Max(v1, v2), Max(v3, v4));
+#ifdef DEBUG
   Verify();
+#endif
   return may_overflow;
 }
 
@@ -234,25 +252,6 @@
 }
 
 
-const char* HType::ToShortString() {
-  switch (type_) {
-    case kTagged: return "t";
-    case kTaggedPrimitive: return "p";
-    case kTaggedNumber: return "n";
-    case kSmi: return "m";
-    case kHeapNumber: return "h";
-    case kString: return "s";
-    case kBoolean: return "b";
-    case kNonPrimitive: return "r";
-    case kJSArray: return "a";
-    case kJSObject: return "o";
-    case kUninitialized: return "z";
-  }
-  UNREACHABLE();
-  return "Unreachable code";
-}
-
-
 HType HType::TypeFromValue(Handle<Object> value) {
   HType result = HType::Tagged();
   if (value->IsSmi()) {
@@ -277,6 +276,23 @@
 }
 
 
+HUseListNode* HUseListNode::tail() {
+  // Skip and remove dead items in the use list.
+  while (tail_ != NULL && tail_->value()->CheckFlag(HValue::kIsDead)) {
+    tail_ = tail_->tail_;
+  }
+  return tail_;
+}
+
+
+bool HValue::CheckUsesForFlag(Flag f) {
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    if (!it.value()->CheckFlag(f)) return false;
+  }
+  return true;
+}
+
+
 HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
   Advance();
 }
@@ -375,7 +391,7 @@
   // We replace all uses first, so Delete can assert that there are none.
   if (other != NULL) ReplaceAllUsesWith(other);
   ASSERT(HasNoUses());
-  ClearOperands();
+  Kill();
   DeleteFromGraph();
 }
 
@@ -393,9 +409,17 @@
 }
 
 
-void HValue::ClearOperands() {
+void HValue::Kill() {
+  // Instead of going through the entire use list of each operand, we only
+  // check the first item in each use list and rely on the tail() method to
+  // skip dead items, removing them lazily next time we traverse the list.
+  SetFlag(kIsDead);
   for (int i = 0; i < OperandCount(); ++i) {
-    SetOperandAt(i, NULL);
+    HValue* operand = OperandAt(i);
+    HUseListNode* first = operand->use_list_;
+    if (first != NULL && first->value() == this && first->index() == i) {
+      operand->use_list_ = first->tail();
+    }
   }
 }
 
@@ -425,18 +449,18 @@
 
 
 void HValue::PrintChangesTo(StringStream* stream) {
-  int changes_flags = ChangesFlags();
-  if (changes_flags == 0) return;
+  GVNFlagSet changes_flags = ChangesFlags();
+  if (changes_flags.IsEmpty()) return;
   stream->Add(" changes[");
-  if (changes_flags == AllSideEffects()) {
+  if (changes_flags == AllSideEffectsFlagSet()) {
     stream->Add("*");
   } else {
     bool add_comma = false;
-#define PRINT_DO(type)                         \
-    if (changes_flags & (1 << kChanges##type)) { \
-      if (add_comma) stream->Add(",");           \
-      add_comma = true;                          \
-      stream->Add(#type);                        \
+#define PRINT_DO(type)                            \
+    if (changes_flags.Contains(kChanges##type)) { \
+      if (add_comma) stream->Add(",");            \
+      add_comma = true;                           \
+      stream->Add(#type);                         \
     }
     GVN_FLAG_LIST(PRINT_DO);
 #undef PRINT_DO
@@ -479,9 +503,9 @@
 }
 
 
-void HValue::AddNewRange(Range* r) {
-  if (!HasRange()) ComputeInitialRange();
-  if (!HasRange()) range_ = new Range();
+void HValue::AddNewRange(Range* r, Zone* zone) {
+  if (!HasRange()) ComputeInitialRange(zone);
+  if (!HasRange()) range_ = new(zone) Range();
   ASSERT(HasRange());
   r->StackUpon(range_);
   range_ = r;
@@ -495,9 +519,9 @@
 }
 
 
-void HValue::ComputeInitialRange() {
+void HValue::ComputeInitialRange(Zone* zone) {
   ASSERT(!HasRange());
-  range_ = InferRange();
+  range_ = InferRange(zone);
   ASSERT(HasRange());
 }
 
@@ -564,7 +588,7 @@
   // followed by a simulate instruction, we need to insert after the
   // simulate instruction instead.
   HInstruction* next = previous->next_;
-  if (previous->HasSideEffects() && next != NULL) {
+  if (previous->HasObservableSideEffects() && next != NULL) {
     ASSERT(next->IsSimulate());
     previous = next;
     next = previous->next_;
@@ -587,11 +611,10 @@
     HBasicBlock* other_block = other_operand->block();
     if (cur_block == other_block) {
       if (!other_operand->IsPhi()) {
-        HInstruction* cur = cur_block->first();
+        HInstruction* cur = this->previous();
         while (cur != NULL) {
-          ASSERT(cur != this);  // We should reach other_operand before!
           if (cur == other_operand) break;
-          cur = cur->next();
+          cur = cur->previous();
         }
         // Must reach other operand in the same block!
         ASSERT(cur == other_operand);
@@ -605,7 +628,7 @@
 
   // Verify that instructions that may have side-effects are followed
   // by a simulate instruction.
-  if (HasSideEffects() && !IsOsrEntry()) {
+  if (HasObservableSideEffects() && !IsOsrEntry()) {
     ASSERT(next()->IsSimulate());
   }
 
@@ -707,6 +730,14 @@
 }
 
 
+void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
+  value()->PrintNameTo(stream);
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
 void HReturn::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
 }
@@ -775,17 +806,106 @@
 
 void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
-  stream->Add(" == ");
-  stream->Add(type_literal_->GetFlatContent().ToAsciiVector());
+  stream->Add(" == %o", *type_literal_);
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
+void HCheckMapValue::PrintDataTo(StringStream* stream) {
+  value()->PrintNameTo(stream);
+  stream->Add(" ");
+  map()->PrintNameTo(stream);
+}
+
+
+void HForInPrepareMap::PrintDataTo(StringStream* stream) {
+  enumerable()->PrintNameTo(stream);
+}
+
+
+void HForInCacheArray::PrintDataTo(StringStream* stream) {
+  enumerable()->PrintNameTo(stream);
+  stream->Add(" ");
+  map()->PrintNameTo(stream);
+  stream->Add("[%d]", idx_);
+}
+
+
+void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
+  object()->PrintNameTo(stream);
+  stream->Add(" ");
+  index()->PrintNameTo(stream);
+}
+
+
+HValue* HConstant::Canonicalize() {
+  return HasNoUses() ? NULL : this;
+}
+
+
+HValue* HTypeof::Canonicalize() {
+  return HasNoUses() ? NULL : this;
+}
+
+
+HValue* HBitwise::Canonicalize() {
+  if (!representation().IsInteger32()) return this;
+  // If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
+  int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
+  if (left()->IsConstant() &&
+      HConstant::cast(left())->HasInteger32Value() &&
+      HConstant::cast(left())->Integer32Value() == nop_constant) {
+    return right();
+  }
+  if (right()->IsConstant() &&
+      HConstant::cast(right())->HasInteger32Value() &&
+      HConstant::cast(right())->Integer32Value() == nop_constant) {
+    return left();
+  }
+  return this;
+}
+
+
+HValue* HAdd::Canonicalize() {
+  if (!representation().IsInteger32()) return this;
+  if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
+  return this;
+}
+
+
+HValue* HSub::Canonicalize() {
+  if (!representation().IsInteger32()) return this;
+  if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
+  return this;
+}
+
+
+HValue* HChange::Canonicalize() {
+  return (from().Equals(to())) ? value() : this;
+}
+
+
+HValue* HWrapReceiver::Canonicalize() {
+  if (HasNoUses()) return NULL;
+  if (receiver()->type().IsJSObject()) {
+    return receiver();
+  }
+  return this;
+}
+
+
+void HTypeof::PrintDataTo(StringStream* stream) {
+  value()->PrintNameTo(stream);
 }
 
 
 void HChange::PrintDataTo(StringStream* stream) {
   HUnaryOperation::PrintDataTo(stream);
-  stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic());
+  stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
 
   if (CanTruncateToInt32()) stream->Add(" truncating-int32");
   if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
+  if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
 }
 
 
@@ -848,6 +968,13 @@
 void HCheckMap::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
   stream->Add(" %p", *map());
+  if (mode() == REQUIRE_EXACT_MAP) {
+    stream->Add(" [EXACT]");
+  } else if (!has_element_transitions_) {
+    stream->Add(" [EXACT*]");
+  } else {
+    stream->Add(" [MATCH ELEMENTS]");
+  }
 }
 
 
@@ -857,6 +984,23 @@
 }
 
 
+const char* HCheckInstanceType::GetCheckName() {
+  switch (check_) {
+    case IS_SPEC_OBJECT: return "object";
+    case IS_JS_ARRAY: return "array";
+    case IS_STRING: return "string";
+    case IS_SYMBOL: return "symbol";
+  }
+  UNREACHABLE();
+  return "";
+}
+
+void HCheckInstanceType::PrintDataTo(StringStream* stream) {
+  stream->Add("%s ", GetCheckName());
+  HUnaryOperation::PrintDataTo(stream);
+}
+
+
 void HCallStub::PrintDataTo(StringStream* stream) {
   stream->Add("%s ",
               CodeStub::MajorName(major_key_, false));
@@ -873,15 +1017,15 @@
 }
 
 
-Range* HValue::InferRange() {
+Range* HValue::InferRange(Zone* zone) {
   // Untagged integer32 cannot be -0, all other representations can.
-  Range* result = new Range();
+  Range* result = new(zone) Range();
   result->set_can_be_minus_zero(!representation().IsInteger32());
   return result;
 }
 
 
-Range* HChange::InferRange() {
+Range* HChange::InferRange(Zone* zone) {
   Range* input_range = value()->range();
   if (from().IsInteger32() &&
       to().IsTagged() &&
@@ -889,46 +1033,46 @@
     set_type(HType::Smi());
   }
   Range* result = (input_range != NULL)
-      ? input_range->Copy()
-      : HValue::InferRange();
+      ? input_range->Copy(zone)
+      : HValue::InferRange(zone);
   if (to().IsInteger32()) result->set_can_be_minus_zero(false);
   return result;
 }
 
 
-Range* HConstant::InferRange() {
+Range* HConstant::InferRange(Zone* zone) {
   if (has_int32_value_) {
-    Range* result = new Range(int32_value_, int32_value_);
+    Range* result = new(zone) Range(int32_value_, int32_value_);
     result->set_can_be_minus_zero(false);
     return result;
   }
-  return HValue::InferRange();
+  return HValue::InferRange(zone);
 }
 
 
-Range* HPhi::InferRange() {
+Range* HPhi::InferRange(Zone* zone) {
   if (representation().IsInteger32()) {
     if (block()->IsLoopHeader()) {
-      Range* range = new Range(kMinInt, kMaxInt);
+      Range* range = new(zone) Range(kMinInt, kMaxInt);
       return range;
     } else {
-      Range* range = OperandAt(0)->range()->Copy();
+      Range* range = OperandAt(0)->range()->Copy(zone);
       for (int i = 1; i < OperandCount(); ++i) {
         range->Union(OperandAt(i)->range());
       }
       return range;
     }
   } else {
-    return HValue::InferRange();
+    return HValue::InferRange(zone);
   }
 }
 
 
-Range* HAdd::InferRange() {
+Range* HAdd::InferRange(Zone* zone) {
   if (representation().IsInteger32()) {
     Range* a = left()->range();
     Range* b = right()->range();
-    Range* res = a->Copy();
+    Range* res = a->Copy(zone);
     if (!res->AddAndCheckOverflow(b)) {
       ClearFlag(kCanOverflow);
     }
@@ -936,32 +1080,32 @@
     res->set_can_be_minus_zero(m0);
     return res;
   } else {
-    return HValue::InferRange();
+    return HValue::InferRange(zone);
   }
 }
 
 
-Range* HSub::InferRange() {
+Range* HSub::InferRange(Zone* zone) {
   if (representation().IsInteger32()) {
     Range* a = left()->range();
     Range* b = right()->range();
-    Range* res = a->Copy();
+    Range* res = a->Copy(zone);
     if (!res->SubAndCheckOverflow(b)) {
       ClearFlag(kCanOverflow);
     }
     res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
     return res;
   } else {
-    return HValue::InferRange();
+    return HValue::InferRange(zone);
   }
 }
 
 
-Range* HMul::InferRange() {
+Range* HMul::InferRange(Zone* zone) {
   if (representation().IsInteger32()) {
     Range* a = left()->range();
     Range* b = right()->range();
-    Range* res = a->Copy();
+    Range* res = a->Copy(zone);
     if (!res->MulAndCheckOverflow(b)) {
       ClearFlag(kCanOverflow);
     }
@@ -970,14 +1114,14 @@
     res->set_can_be_minus_zero(m0);
     return res;
   } else {
-    return HValue::InferRange();
+    return HValue::InferRange(zone);
   }
 }
 
 
-Range* HDiv::InferRange() {
+Range* HDiv::InferRange(Zone* zone) {
   if (representation().IsInteger32()) {
-    Range* result = new Range();
+    Range* result = new(zone) Range();
     if (left()->range()->CanBeMinusZero()) {
       result->set_can_be_minus_zero(true);
     }
@@ -995,15 +1139,15 @@
     }
     return result;
   } else {
-    return HValue::InferRange();
+    return HValue::InferRange(zone);
   }
 }
 
 
-Range* HMod::InferRange() {
+Range* HMod::InferRange(Zone* zone) {
   if (representation().IsInteger32()) {
     Range* a = left()->range();
-    Range* result = new Range();
+    Range* result = new(zone) Range();
     if (a->CanBeMinusZero() || a->CanBeNegative()) {
       result->set_can_be_minus_zero(true);
     }
@@ -1012,7 +1156,7 @@
     }
     return result;
   } else {
-    return HValue::InferRange();
+    return HValue::InferRange(zone);
   }
 }
 
@@ -1085,7 +1229,7 @@
     HValue* value = it.value();
     if (!value->IsPhi()) {
       Representation rep = value->RequiredInputRepresentation(it.index());
-      ++non_phi_uses_[rep.kind()];
+      non_phi_uses_[rep.kind()] += value->LoopWeight();
     }
   }
 }
@@ -1106,15 +1250,16 @@
 
 
 void HSimulate::PrintDataTo(StringStream* stream) {
-  stream->Add("id=%d ", ast_id());
-  if (pop_count_ > 0) stream->Add("pop %d", pop_count_);
+  stream->Add("id=%d", ast_id());
+  if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
   if (values_.length() > 0) {
     if (pop_count_ > 0) stream->Add(" /");
     for (int i = 0; i < values_.length(); ++i) {
-      if (!HasAssignedIndexAt(i)) {
-        stream->Add(" push ");
-      } else {
+      if (i > 0) stream->Add(",");
+      if (HasAssignedIndexAt(i)) {
         stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
+      } else {
+        stream->Add(" push ");
       }
       values_[i]->PrintNameTo(stream);
     }
@@ -1195,7 +1340,9 @@
 
 
 bool HArrayLiteral::IsCopyOnWrite() const {
-  return constant_elements()->map() == HEAP->fixed_cow_array_map();
+  if (!boilerplate_object_->IsJSObject()) return false;
+  return Handle<JSObject>::cast(boilerplate_object_)->elements()->map() ==
+      HEAP->fixed_cow_array_map();
 }
 
 
@@ -1208,51 +1355,41 @@
 }
 
 
-Range* HBitAnd::InferRange() {
+Range* HBitwise::InferRange(Zone* zone) {
+  if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
+  const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
   int32_t left_mask = (left()->range() != NULL)
       ? left()->range()->Mask()
-      : 0xffffffff;
+      : kDefaultMask;
   int32_t right_mask = (right()->range() != NULL)
       ? right()->range()->Mask()
-      : 0xffffffff;
-  int32_t result_mask = left_mask & right_mask;
+      : kDefaultMask;
+  int32_t result_mask = (op() == Token::BIT_AND)
+      ? left_mask & right_mask
+      : left_mask | right_mask;
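+  // E.g. for BIT_AND against a constant mask, x & 0xff yields a range
+  // within [0, 255]; a negative result_mask means the sign bit may be
+  // set, so no useful bounds are derived.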
   return (result_mask >= 0)
-      ? new Range(0, result_mask)
-      : HValue::InferRange();
+      ? new(zone) Range(0, result_mask)
+      : HValue::InferRange(zone);
 }
 
 
-Range* HBitOr::InferRange() {
-  int32_t left_mask = (left()->range() != NULL)
-      ? left()->range()->Mask()
-      : 0xffffffff;
-  int32_t right_mask = (right()->range() != NULL)
-      ? right()->range()->Mask()
-      : 0xffffffff;
-  int32_t result_mask = left_mask | right_mask;
-  return (result_mask >= 0)
-      ? new Range(0, result_mask)
-      : HValue::InferRange();
-}
-
-
-Range* HSar::InferRange() {
+Range* HSar::InferRange(Zone* zone) {
   if (right()->IsConstant()) {
     HConstant* c = HConstant::cast(right());
     if (c->HasInteger32Value()) {
       Range* result = (left()->range() != NULL)
-          ? left()->range()->Copy()
-          : new Range();
+          ? left()->range()->Copy(zone)
+          : new(zone) Range();
       result->Sar(c->Integer32Value());
       result->set_can_be_minus_zero(false);
       return result;
     }
   }
-  return HValue::InferRange();
+  return HValue::InferRange(zone);
 }
 
 
-Range* HShr::InferRange() {
+Range* HShr::InferRange(Zone* zone) {
   if (right()->IsConstant()) {
     HConstant* c = HConstant::cast(right());
     if (c->HasInteger32Value()) {
@@ -1260,39 +1397,57 @@
       if (left()->range()->CanBeNegative()) {
         // Only compute bounds if the result always fits into an int32.
         return (shift_count >= 1)
-            ? new Range(0, static_cast<uint32_t>(0xffffffff) >> shift_count)
-            : new Range();
+            ? new(zone) Range(0,
+                              static_cast<uint32_t>(0xffffffff) >> shift_count)
+            : new(zone) Range();
       } else {
         // For positive inputs we can use the >> operator.
         Range* result = (left()->range() != NULL)
-            ? left()->range()->Copy()
-            : new Range();
+            ? left()->range()->Copy(zone)
+            : new(zone) Range();
         result->Sar(c->Integer32Value());
         result->set_can_be_minus_zero(false);
         return result;
       }
     }
   }
-  return HValue::InferRange();
+  return HValue::InferRange(zone);
 }
 
 
-Range* HShl::InferRange() {
+Range* HShl::InferRange(Zone* zone) {
   if (right()->IsConstant()) {
     HConstant* c = HConstant::cast(right());
     if (c->HasInteger32Value()) {
       Range* result = (left()->range() != NULL)
-          ? left()->range()->Copy()
-          : new Range();
+          ? left()->range()->Copy(zone)
+          : new(zone) Range();
       result->Shl(c->Integer32Value());
       result->set_can_be_minus_zero(false);
       return result;
     }
   }
-  return HValue::InferRange();
+  return HValue::InferRange(zone);
 }
 
 
+Range* HLoadKeyedSpecializedArrayElement::InferRange(Zone* zone) {
+  switch (elements_kind()) {
+    case EXTERNAL_PIXEL_ELEMENTS:
+      return new(zone) Range(0, 255);
+    case EXTERNAL_BYTE_ELEMENTS:
+      return new(zone) Range(-128, 127);
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      return new(zone) Range(0, 255);
+    case EXTERNAL_SHORT_ELEMENTS:
+      return new(zone) Range(-32768, 32767);
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      return new(zone) Range(0, 65535);
+    default:
+      return HValue::InferRange(zone);
+  }
+}
+
 
 void HCompareGeneric::PrintDataTo(StringStream* stream) {
   stream->Add(Token::Name(token()));
@@ -1301,6 +1456,13 @@
 }
 
 
+void HStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add(Token::Name(token()));
+  stream->Add(" ");
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
 void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add(Token::Name(token()));
   stream->Add(" ");
@@ -1311,6 +1473,14 @@
 }
 
 
+void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
+  left()->PrintNameTo(stream);
+  stream->Add(" ");
+  right()->PrintNameTo(stream);
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
 void HGoto::PrintDataTo(StringStream* stream) {
   stream->Add("B%d", SuccessorAt(0)->block_id());
 }
@@ -1319,7 +1489,22 @@
 void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
   input_representation_ = r;
   if (r.IsDouble()) {
-    SetFlag(kDeoptimizeOnUndefined);
+    // According to the ES5 spec (11.9.3, 11.8.5), equality comparisons
+    // (==, === and !=) have special handling of undefined, e.g.
+    // undefined == undefined is 'true'. Relational comparisons have
+    // different semantics, first calling ToPrimitive() on their arguments.
+    // The standard Crankshaft tagged-to-double conversion that ensures the
+    // HCompareIDAndBranch's inputs are doubles caused 'undefined' to be
+    // converted to NaN. That is compatible out of the box with ordered
+    // relational comparisons (<, >, <=, >=). However, for equality
+    // comparisons (and for 'in' and 'instanceof'), it is not consistent
+    // with the spec: for example, it would cause undefined == undefined
+    // (should be true) to be evaluated as NaN == NaN (false). Therefore,
+    // any comparison other than an ordered relational comparison must
+    // cause a deopt when one of its arguments is undefined.
+    // See also v8:1434.
+    if (!Token::IsOrderedRelationalCompareOp(token_)) {
+      SetFlag(kDeoptimizeOnUndefined);
+    }
   } else {
     ASSERT(r.IsInteger32());
   }
@@ -1347,21 +1532,21 @@
   SetOperandAt(0, context);
   SetOperandAt(1, object);
   set_representation(Representation::Tagged());
-  SetFlag(kDependsOnMaps);
+  SetGVNFlag(kDependsOnMaps);
   for (int i = 0;
        i < types->length() && types_.length() < kMaxLoadPolymorphism;
        ++i) {
     Handle<Map> map = types->at(i);
-    LookupResult lookup;
+    LookupResult lookup(map->GetIsolate());
     map->LookupInDescriptors(NULL, *name, &lookup);
-    if (lookup.IsProperty()) {
+    if (lookup.IsFound()) {
       switch (lookup.type()) {
         case FIELD: {
           int index = lookup.GetLocalFieldIndexFromMap(*map);
           if (index < 0) {
-            SetFlag(kDependsOnInobjectFields);
+            SetGVNFlag(kDependsOnInobjectFields);
           } else {
-            SetFlag(kDependsOnBackingStoreFields);
+            SetGVNFlag(kDependsOnBackingStoreFields);
           }
           types_.Add(types->at(i));
           break;
@@ -1405,14 +1590,14 @@
 
 void HLoadNamedFieldPolymorphic::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
-  stream->Add(" .");
+  stream->Add(".");
   stream->Add(*String::cast(*name())->ToCString());
 }
 
 
 void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
-  stream->Add(" .");
+  stream->Add(".");
   stream->Add(*String::cast(*name())->ToCString());
 }
 
@@ -1425,11 +1610,16 @@
 }
 
 
-bool HLoadKeyedFastElement::RequiresHoleCheck() const {
+bool HLoadKeyedFastElement::RequiresHoleCheck() {
+  if (hole_check_mode_ == OMIT_HOLE_CHECK) {
+    return false;
+  }
+
   for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
     HValue* use = it.value();
     if (!use->IsChange()) return true;
   }
+
   return false;
 }
 
@@ -1442,11 +1632,6 @@
 }
 
 
-bool HLoadKeyedFastDoubleElement::RequiresHoleCheck() const {
-  return true;
-}
-
-
 void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add("[");
@@ -1455,6 +1640,39 @@
 }
 
 
+HValue* HLoadKeyedGeneric::Canonicalize() {
+  // Recognize generic keyed loads that use the property name generated by
+  // a for-in statement as the key, and rewrite them into a fast property
+  // load by index.
+  if (key()->IsLoadKeyedFastElement()) {
+    HLoadKeyedFastElement* key_load = HLoadKeyedFastElement::cast(key());
+    if (key_load->object()->IsForInCacheArray()) {
+      HForInCacheArray* names_cache =
+          HForInCacheArray::cast(key_load->object());
+
+      if (names_cache->enumerable() == object()) {
+        HForInCacheArray* index_cache =
+            names_cache->index_cache();
+        HCheckMapValue* map_check =
+            new(block()->zone()) HCheckMapValue(object(), names_cache->map());
+        HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
+            index_cache,
+            key_load->key(),
+            HLoadKeyedFastElement::OMIT_HOLE_CHECK);
+        HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
+            object(), index);
+        map_check->InsertBefore(this);
+        index->InsertBefore(this);
+        load->InsertBefore(this);
+        return load;
+      }
+    }
+  }
+
+  return this;
+}
+
+
 void HLoadKeyedSpecializedArrayElement::PrintDataTo(
     StringStream* stream) {
   external_pointer()->PrintNameTo(stream);
@@ -1488,6 +1706,7 @@
       stream->Add("pixel");
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -1513,10 +1732,10 @@
 void HStoreNamedField::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add(".");
-  ASSERT(name()->IsString());
   stream->Add(*String::cast(*name())->ToCString());
   stream->Add(" = ");
   value()->PrintNameTo(stream);
+  stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
   if (!transition().is_null()) {
     stream->Add(" (transition map %p)", *transition());
   }
@@ -1582,6 +1801,7 @@
     case EXTERNAL_PIXEL_ELEMENTS:
       stream->Add("pixel");
       break;
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
@@ -1596,9 +1816,26 @@
 }
 
 
+void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintNameTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
 void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
   stream->Add("[%p]", *cell());
-  if (check_hole_value()) stream->Add(" (deleteable/read-only)");
+  if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
+  if (details_.IsReadOnly()) stream->Add(" (read-only)");
+}
+
+
+bool HLoadGlobalCell::RequiresHoleCheck() {
+  if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    if (!use->IsChange()) return true;
+  }
+  return false;
 }
 
 
@@ -1610,6 +1847,8 @@
 void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
   stream->Add("[%p] = ", *cell());
   value()->PrintNameTo(stream);
+  if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
+  if (details_.IsReadOnly()) stream->Add(" (read-only)");
 }
 
 
@@ -1696,6 +1935,12 @@
 }
 
 
+HType HChange::CalculateInferredType() {
+  if (from().IsDouble() && to().IsTagged()) return HType::HeapNumber();
+  return type();
+}
+
+
 HType HBitwiseBinaryOperation::CalculateInferredType() {
   return HType::TaggedNumber();
 }
@@ -1711,21 +1956,6 @@
 }
 
 
-HType HBitAnd::CalculateInferredType() {
-  return HType::TaggedNumber();
-}
-
-
-HType HBitXor::CalculateInferredType() {
-  return HType::TaggedNumber();
-}
-
-
-HType HBitOr::CalculateInferredType() {
-  return HType::TaggedNumber();
-}
-
-
 HType HBitNot::CalculateInferredType() {
   return HType::TaggedNumber();
 }
@@ -1736,18 +1966,39 @@
 }
 
 
-HType HShl::CalculateInferredType() {
-  return HType::TaggedNumber();
+HType HStringCharFromCode::CalculateInferredType() {
+  return HType::String();
 }
 
 
-HType HShr::CalculateInferredType() {
-  return HType::TaggedNumber();
+HType HAllocateObject::CalculateInferredType() {
+  return HType::JSObject();
 }
 
 
-HType HSar::CalculateInferredType() {
-  return HType::TaggedNumber();
+HType HFastLiteral::CalculateInferredType() {
+  // TODO(mstarzinger): Be smarter, could also be JSArray here.
+  return HType::JSObject();
+}
+
+
+HType HArrayLiteral::CalculateInferredType() {
+  return HType::JSArray();
+}
+
+
+HType HObjectLiteral::CalculateInferredType() {
+  return HType::JSObject();
+}
+
+
+HType HRegExpLiteral::CalculateInferredType() {
+  return HType::JSObject();
+}
+
+
+HType HFunctionLiteral::CalculateInferredType() {
+  return HType::JSObject();
 }
 
 
@@ -1838,6 +2089,167 @@
 }
 
 
+#define H_CONSTANT_INT32(val)                                                  \
+new(zone) HConstant(FACTORY->NewNumberFromInt(val, TENURED),                   \
+                    Representation::Integer32())
+#define H_CONSTANT_DOUBLE(val)                                                 \
+new(zone) HConstant(FACTORY->NewNumber(val, TENURED),                          \
+                    Representation::Double())
+
+#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op)                       \
+HInstruction* HInstr::New##HInstr(Zone* zone,                                  \
+                                  HValue* context,                             \
+                                  HValue* left,                                \
+                                  HValue* right) {                             \
+  if (left->IsConstant() && right->IsConstant()) {                             \
+    HConstant* c_left = HConstant::cast(left);                                 \
+    HConstant* c_right = HConstant::cast(right);                               \
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {             \
+      double double_res = c_left->DoubleValue() op c_right->DoubleValue();     \
+      if (TypeInfo::IsInt32Double(double_res)) {                               \
+        return H_CONSTANT_INT32(static_cast<int32_t>(double_res));             \
+      }                                                                        \
+      return H_CONSTANT_DOUBLE(double_res);                                    \
+    }                                                                          \
+  }                                                                            \
+  return new(zone) HInstr(context, left, right);                               \
+}
+
+
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +)
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HMul, *)
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
+
+#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
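+
+// Informal example of the folding above: given two constant operands,
+// NewHAdd turns HAdd(2, 3) into an Integer32 HConstant 5, produces a
+// double HConstant when the result does not fit an int32, and otherwise
+// emits a fresh HAdd instruction.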
+
+
+HInstruction* HMod::NewHMod(Zone* zone,
+                            HValue* context,
+                            HValue* left,
+                            HValue* right) {
+  if (left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
+      int32_t dividend = c_left->Integer32Value();
+      int32_t divisor = c_right->Integer32Value();
+      if (divisor != 0) {
+        int32_t res = dividend % divisor;
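+        // JavaScript's % takes the sign of the dividend, so e.g. -4 % 2
+        // evaluates to -0; an Integer32 constant cannot represent -0,
+        // hence the double -0.0 below.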
+        if ((res == 0) && (dividend < 0)) {
+          return H_CONSTANT_DOUBLE(-0.0);
+        }
+        return H_CONSTANT_INT32(res);
+      }
+    }
+  }
+  return new(zone) HMod(context, left, right);
+}
+
+
+HInstruction* HDiv::NewHDiv(Zone* zone,
+                            HValue* context,
+                            HValue* left,
+                            HValue* right) {
+  // If left and right are constant values, try to return a constant value.
+  if (left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+      if (c_right->DoubleValue() != 0) {
+        double double_res = c_left->DoubleValue() / c_right->DoubleValue();
+        if (TypeInfo::IsInt32Double(double_res)) {
+          return H_CONSTANT_INT32(static_cast<int32_t>(double_res));
+        }
+        return H_CONSTANT_DOUBLE(double_res);
+      }
+    }
+  }
+  return new(zone) HDiv(context, left, right);
+}
+
+
+HInstruction* HBitwise::NewHBitwise(Zone* zone,
+                                    Token::Value op,
+                                    HValue* context,
+                                    HValue* left,
+                                    HValue* right) {
+  if (left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+      int32_t result;
+      int32_t v_left = c_left->NumberValueAsInteger32();
+      int32_t v_right = c_right->NumberValueAsInteger32();
+      switch (op) {
+        case Token::BIT_XOR:
+          result = v_left ^ v_right;
+          break;
+        case Token::BIT_AND:
+          result = v_left & v_right;
+          break;
+        case Token::BIT_OR:
+          result = v_left | v_right;
+          break;
+        default:
+          result = 0;  // Please the compiler.
+          UNREACHABLE();
+      }
+      return H_CONSTANT_INT32(result);
+    }
+  }
+  return new(zone) HBitwise(op, context, left, right);
+}
+
+
+#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result)                             \
+HInstruction* HInstr::New##HInstr(Zone* zone,                                  \
+                                  HValue* context,                             \
+                                  HValue* left,                                \
+                                  HValue* right) {                             \
+  if (left->IsConstant() && right->IsConstant()) {                             \
+    HConstant* c_left = HConstant::cast(left);                                 \
+    HConstant* c_right = HConstant::cast(right);                               \
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {             \
+      return H_CONSTANT_INT32(result);                                         \
+    }                                                                          \
+  }                                                                            \
+  return new(zone) HInstr(context, left, right);                               \
+}
+
+
+DEFINE_NEW_H_BITWISE_INSTR(HSar,
+c_left->NumberValueAsInteger32() >> (c_right->NumberValueAsInteger32() & 0x1f))
+DEFINE_NEW_H_BITWISE_INSTR(HShl,
+c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
+
+#undef DEFINE_NEW_H_BITWISE_INSTR
+
+
+HInstruction* HShr::NewHShr(Zone* zone,
+                            HValue* context,
+                            HValue* left,
+                            HValue* right) {
+  if (left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+      int32_t left_val = c_left->NumberValueAsInteger32();
+      int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f;
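+      // A zero shift of a negative left operand yields a uint32 that does
+      // not fit in an int32 (in JavaScript, -1 >>> 0 == 4294967295), so
+      // the result must be materialized as a double constant.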
+      if ((right_val == 0) && (left_val < 0)) {
+        return H_CONSTANT_DOUBLE(
+            static_cast<double>(static_cast<uint32_t>(left_val)));
+      }
+      return H_CONSTANT_INT32(static_cast<uint32_t>(left_val) >> right_val);
+    }
+  }
+  return new(zone) HShr(context, left, right);
+}
+
+
+#undef H_CONSTANT_INT32
+#undef H_CONSTANT_DOUBLE
+
+
 void HIn::PrintDataTo(StringStream* stream) {
   key()->PrintNameTo(stream);
   stream->Add(" ");
@@ -1845,6 +2257,46 @@
 }
 
 
+Representation HPhi::InferredRepresentation() {
+  bool double_occurred = false;
+  bool int32_occurred = false;
+  for (int i = 0; i < OperandCount(); ++i) {
+    HValue* value = OperandAt(i);
+    if (value->IsUnknownOSRValue()) {
+      HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
+      if (hint_value != NULL) {
+        Representation hint = hint_value->representation();
+        if (hint.IsDouble()) double_occurred = true;
+        if (hint.IsInteger32()) int32_occurred = true;
+      }
+      continue;
+    }
+    if (value->representation().IsDouble()) double_occurred = true;
+    if (value->representation().IsInteger32()) int32_occurred = true;
+    if (value->representation().IsTagged()) {
+      if (value->IsConstant()) {
+        HConstant* constant = HConstant::cast(value);
+        if (constant->IsConvertibleToInteger()) {
+          int32_occurred = true;
+        } else if (constant->HasNumberValue()) {
+          double_occurred = true;
+        } else {
+          return Representation::Tagged();
+        }
+      } else {
+        return Representation::Tagged();
+      }
+    }
+  }
+
+  if (double_occurred) return Representation::Double();
+
+  if (int32_occurred) return Representation::Integer32();
+
+  return Representation::None();
+}
+
+
 // Node-specific verification code is only included in debug mode.
 #ifdef DEBUG
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 0af5489..b63e647 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -62,15 +62,14 @@
   V(AbnormalExit)                              \
   V(AccessArgumentsAt)                         \
   V(Add)                                       \
+  V(AllocateObject)                            \
   V(ApplyArguments)                            \
   V(ArgumentsElements)                         \
   V(ArgumentsLength)                           \
   V(ArgumentsObject)                           \
   V(ArrayLiteral)                              \
-  V(BitAnd)                                    \
+  V(Bitwise)                                   \
   V(BitNot)                                    \
-  V(BitOr)                                     \
-  V(BitXor)                                    \
   V(BlockEntry)                                \
   V(BoundsCheck)                               \
   V(Branch)                                    \
@@ -99,11 +98,13 @@
   V(CompareConstantEqAndBranch)                \
   V(Constant)                                  \
   V(Context)                                   \
+  V(DeclareGlobals)                            \
   V(DeleteProperty)                            \
   V(Deoptimize)                                \
   V(Div)                                       \
   V(ElementsKind)                              \
   V(EnterInlined)                              \
+  V(FastLiteral)                               \
   V(FixedArrayBaseLength)                      \
   V(ForceRepresentation)                       \
   V(FunctionLiteral)                           \
@@ -118,10 +119,12 @@
   V(InstanceOfKnownGlobal)                     \
   V(InvokeFunction)                            \
   V(IsConstructCallAndBranch)                  \
-  V(IsNullAndBranch)                           \
+  V(IsNilAndBranch)                            \
   V(IsObjectAndBranch)                         \
+  V(IsStringAndBranch)                         \
   V(IsSmiAndBranch)                            \
   V(IsUndetectableAndBranch)                   \
+  V(StringCompareAndBranch)                    \
   V(JSArrayLength)                             \
   V(LeaveInlined)                              \
   V(LoadContextSlot)                           \
@@ -145,6 +148,7 @@
   V(Parameter)                                 \
   V(Power)                                     \
   V(PushArgument)                              \
+  V(Random)                                    \
   V(RegExpLiteral)                             \
   V(Return)                                    \
   V(Sar)                                       \
@@ -170,18 +174,26 @@
   V(ThisFunction)                              \
   V(Throw)                                     \
   V(ToFastProperties)                          \
-  V(ToInt32)                                   \
+  V(TransitionElementsKind)                    \
   V(Typeof)                                    \
   V(TypeofIsAndBranch)                         \
   V(UnaryMathOperation)                        \
   V(UnknownOSRValue)                           \
   V(UseConst)                                  \
-  V(ValueOf)
+  V(ValueOf)                                   \
+  V(ForInPrepareMap)                           \
+  V(ForInCacheArray)                           \
+  V(CheckMapValue)                             \
+  V(LoadFieldByIndex)                          \
+  V(DateField)                                 \
+  V(WrapReceiver)
 
 #define GVN_FLAG_LIST(V)                       \
   V(Calls)                                     \
   V(InobjectFields)                            \
   V(BackingStoreFields)                        \
+  V(ElementsKind)                              \
+  V(ElementsPointer)                           \
   V(ArrayElements)                             \
   V(DoubleArrayElements)                       \
   V(SpecializedArrayElements)                  \
@@ -225,10 +237,14 @@
   int32_t upper() const { return upper_; }
   int32_t lower() const { return lower_; }
   Range* next() const { return next_; }
-  Range* CopyClearLower() const { return new Range(kMinInt, upper_); }
-  Range* CopyClearUpper() const { return new Range(lower_, kMaxInt); }
-  Range* Copy() const {
-    Range* result = new Range(lower_, upper_);
+  Range* CopyClearLower(Zone* zone) const {
+    return new(zone) Range(kMinInt, upper_);
+  }
+  Range* CopyClearUpper(Zone* zone) const {
+    return new(zone) Range(lower_, kMaxInt);
+  }
+  Range* Copy(Zone* zone) const {
+    Range* result = new(zone) Range(lower_, upper_);
     result->set_can_be_minus_zero(CanBeMinusZero());
     return result;
   }
@@ -245,7 +261,9 @@
     return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
   }
   void KeepOrder();
+#ifdef DEBUG
   void Verify() const;
+#endif
 
   void StackUpon(Range* other) {
     Intersect(other);
@@ -397,10 +415,14 @@
     return type_ == kUninitialized;
   }
 
+  bool IsHeapObject() {
+    ASSERT(type_ != kUninitialized);
+    return IsHeapNumber() || IsString() || IsNonPrimitive();
+  }
+
   static HType TypeFromValue(Handle<Object> value);
 
   const char* ToString();
-  const char* ToShortString();
 
  private:
   enum Type {
@@ -432,7 +454,7 @@
       : tail_(tail), value_(value), index_(index) {
   }
 
-  HUseListNode* tail() const { return tail_; }
+  HUseListNode* tail();
   HValue* value() const { return value_; }
   int index() const { return index_; }
 
@@ -482,18 +504,26 @@
 };
 
 
+// There must be one corresponding kDependsOn flag for every kChanges flag,
+// and the order of the kChanges flags must be exactly the same as that of
+// the kDependsOn flags.
+enum GVNFlag {
+  // Declare global value numbering flags.
+#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
+  GVN_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+  kAfterLastFlag,
+  kLastFlag = kAfterLastFlag - 1
+};
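+
+// A worked consequence of the interleaving above: kChanges flags land on
+// even positions (2n) and the matching kDependsOn flags on odd positions
+// (2n + 1), which is what lets ConvertChangesToDependsFlags() turn a
+// changes set into its depends-on set with a single left shift.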
+
+typedef EnumSet<GVNFlag> GVNFlagSet;
+
+
 class HValue: public ZoneObject {
  public:
   static const int kNoNumber = -1;
 
-  // There must be one corresponding kDepends flag for every kChanges flag and
-  // the order of the kChanges flags must be exactly the same as of the kDepends
-  // flags.
   enum Flag {
-    // Declare global value numbering flags.
-  #define DECLARE_DO(type) kChanges##type, kDependsOn##type,
-    GVN_FLAG_LIST(DECLARE_DO)
-  #undef DECLARE_DO
     kFlexibleRepresentation,
     // Participate in Global Value Numbering, i.e. elimination of
     // unnecessary recomputations. If an instruction sets this flag, it must
@@ -506,15 +536,16 @@
     kDeoptimizeOnUndefined,
     kIsArguments,
     kTruncatingToInt32,
-    kLastFlag = kTruncatingToInt32
+    kIsDead,
+    kLastFlag = kIsDead
   };
 
   STATIC_ASSERT(kLastFlag < kBitsPerInt);
 
   static const int kChangesToDependsFlagsLeftShift = 1;
 
-  static int ConvertChangesToDependsFlags(int flags) {
-    return flags << kChangesToDependsFlagsLeftShift;
+  static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
+    return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
   }
 
   static HValue* cast(HValue* value) { return value; }
@@ -551,6 +582,7 @@
 
   HBasicBlock* block() const { return block_; }
   void SetBlock(HBasicBlock* block);
+  int LoopWeight() const;
 
   int id() const { return id_; }
   void set_id(int id) { id_ = id; }
@@ -605,27 +637,66 @@
     return use_list_ != NULL && use_list_->tail() != NULL;
   }
   int UseCount() const;
-  void ClearOperands();
+
+  // Mark this HValue as dead; it is then lazily removed from other
+  // HValues' use lists.
+  void Kill();
 
   int flags() const { return flags_; }
   void SetFlag(Flag f) { flags_ |= (1 << f); }
   void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
   bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
 
-  void SetAllSideEffects() { flags_ |= AllSideEffects(); }
-  void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
-  bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
+  // Returns true if the flag specified is set for all uses, false otherwise.
+  bool CheckUsesForFlag(Flag f);
 
-  int ChangesFlags() const { return flags_ & ChangesFlagsMask(); }
+  GVNFlagSet gvn_flags() const { return gvn_flags_; }
+  void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
+  void ClearGVNFlag(GVNFlag f) { gvn_flags_.Remove(f); }
+  bool CheckGVNFlag(GVNFlag f) const { return gvn_flags_.Contains(f); }
+  void SetAllSideEffects() { gvn_flags_.Add(AllSideEffectsFlagSet()); }
+  void ClearAllSideEffects() {
+    gvn_flags_.Remove(AllSideEffectsFlagSet());
+  }
+  bool HasSideEffects() const {
+    return gvn_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
+  }
+  bool HasObservableSideEffects() const {
+    return gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
+  }
+
+  GVNFlagSet DependsOnFlags() const {
+    GVNFlagSet result = gvn_flags_;
+    result.Intersect(AllDependsOnFlagSet());
+    return result;
+  }
+
+  GVNFlagSet SideEffectFlags() const {
+    GVNFlagSet result = gvn_flags_;
+    result.Intersect(AllSideEffectsFlagSet());
+    return result;
+  }
+
+  GVNFlagSet ChangesFlags() const {
+    GVNFlagSet result = gvn_flags_;
+    result.Intersect(AllChangesFlagSet());
+    return result;
+  }
+
+  GVNFlagSet ObservableChangesFlags() const {
+    GVNFlagSet result = gvn_flags_;
+    result.Intersect(AllChangesFlagSet());
+    result.Intersect(AllObservableSideEffectsFlagSet());
+    return result;
+  }
 
   Range* range() const { return range_; }
   bool HasRange() const { return range_ != NULL; }
-  void AddNewRange(Range* r);
+  void AddNewRange(Range* r, Zone* zone);
   void RemoveLastAddedRange();
-  void ComputeInitialRange();
+  void ComputeInitialRange(Zone* zone);
 
   // Representation helpers.
-  virtual Representation RequiredInputRepresentation(int index) const = 0;
+  virtual Representation RequiredInputRepresentation(int index) = 0;
 
   virtual Representation InferredRepresentation() {
     return representation();
@@ -667,7 +738,7 @@
     return false;
   }
   virtual void RepresentationChanged(Representation to) { }
-  virtual Range* InferRange();
+  virtual Range* InferRange(Zone* zone);
   virtual void DeleteFromGraph() = 0;
   virtual void InternalSetOperandAt(int index, HValue* value) = 0;
   void clear_block() {
@@ -681,19 +752,39 @@
     representation_ = r;
   }
 
- private:
-  static int ChangesFlagsMask() {
-    int result = 0;
+  static GVNFlagSet AllDependsOnFlagSet() {
+    GVNFlagSet result;
     // Create depends mask.
-#define ADD_FLAG(type) result |= (1 << kChanges##type);
+#define ADD_FLAG(type) result.Add(kDependsOn##type);
+  GVN_FLAG_LIST(ADD_FLAG)
+#undef ADD_FLAG
+    return result;
+  }
+
+  static GVNFlagSet AllChangesFlagSet() {
+    GVNFlagSet result;
+    // Create changes mask.
+#define ADD_FLAG(type) result.Add(kChanges##type);
   GVN_FLAG_LIST(ADD_FLAG)
 #undef ADD_FLAG
     return result;
   }
 
   // A flag mask to mark an instruction as having arbitrary side effects.
-  static int AllSideEffects() {
-    return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
+  static GVNFlagSet AllSideEffectsFlagSet() {
+    GVNFlagSet result = AllChangesFlagSet();
+    result.Remove(kChangesOsrEntries);
+    return result;
+  }
+
+  // A flag mask of all side effects that can make observable changes in
+  // an executing program (i.e. are not safe to repeat, move or remove).
+  static GVNFlagSet AllObservableSideEffectsFlagSet() {
+    GVNFlagSet result = AllChangesFlagSet();
+    result.Remove(kChangesElementsKind);
+    result.Remove(kChangesElementsPointer);
+    result.Remove(kChangesMaps);
+    return result;
   }
 
   // Remove the matching use from the use list if present.  Returns the
@@ -713,7 +804,9 @@
   HUseListNode* use_list_;
   Range* range_;
   int flags_;
+  GVNFlagSet gvn_flags_;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(HValue);
 };
 
@@ -735,6 +828,8 @@
   bool has_position() const { return position_ != RelocInfo::kNoPosition; }
   void set_position(int position) { position_ = position; }
 
+  bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
+
   virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
 
 #ifdef DEBUG
@@ -750,7 +845,7 @@
       : next_(NULL),
         previous_(NULL),
         position_(RelocInfo::kNoPosition) {
-    SetFlag(kDependsOnOsrEntries);
+    SetGVNFlag(kDependsOnOsrEntries);
   }
 
   virtual void DeleteFromGraph() { Unlink(); }
@@ -841,7 +936,7 @@
 
 class HBlockEntry: public HTemplateInstruction<0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -854,7 +949,7 @@
 // HSoftDeoptimize does not end a basic block as opposed to HDeoptimize.
 class HSoftDeoptimize: public HTemplateInstruction<0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -866,7 +961,7 @@
  public:
   explicit HDeoptimize(int environment_length) : values_(environment_length) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -908,10 +1003,10 @@
 class HGoto: public HTemplateControlInstruction<1, 0> {
  public:
   explicit HGoto(HBasicBlock* target) {
-        SetSuccessorAt(0, target);
-      }
+    SetSuccessorAt(0, target);
+  }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -951,7 +1046,7 @@
       : HUnaryControlInstruction(value, NULL, NULL) { }
 
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -983,7 +1078,7 @@
 
   Handle<Map> map() const { return map_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1000,7 +1095,7 @@
     SetOperandAt(0, value);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1014,7 +1109,7 @@
 
 class HAbnormalExit: public HTemplateControlInstruction<0, 0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1032,10 +1127,6 @@
     return reinterpret_cast<HUnaryOperation*>(value);
   }
 
-  virtual bool CanTruncateToInt32() const {
-    return CheckFlag(kTruncatingToInt32);
-  }
-
   HValue* value() { return OperandAt(0); }
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1049,7 +1140,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1064,7 +1155,7 @@
  public:
   explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1083,7 +1174,7 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return representation();  // Same as the output representation.
   }
 
@@ -1094,46 +1185,43 @@
 class HChange: public HUnaryOperation {
  public:
   HChange(HValue* value,
-          Representation from,
           Representation to,
           bool is_truncating,
           bool deoptimize_on_undefined)
-      : HUnaryOperation(value),
-        from_(from),
-        deoptimize_on_undefined_(deoptimize_on_undefined) {
-    ASSERT(!from.IsNone() && !to.IsNone());
-    ASSERT(!from.Equals(to));
+      : HUnaryOperation(value) {
+    ASSERT(!value->representation().IsNone() && !to.IsNone());
+    ASSERT(!value->representation().Equals(to));
     set_representation(to);
+    set_type(HType::TaggedNumber());
     SetFlag(kUseGVN);
+    if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
     if (is_truncating) SetFlag(kTruncatingToInt32);
   }
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+  virtual HType CalculateInferredType();
+  virtual HValue* Canonicalize();
 
-  Representation from() const { return from_; }
-  Representation to() const { return representation(); }
-  bool deoptimize_on_undefined() const { return deoptimize_on_undefined_; }
-  virtual Representation RequiredInputRepresentation(int index) const {
-    return from_;
+  Representation from() { return value()->representation(); }
+  Representation to() { return representation(); }
+  bool deoptimize_on_undefined() const {
+    return CheckFlag(kDeoptimizeOnUndefined);
+  }
+  bool deoptimize_on_minus_zero() const {
+    return CheckFlag(kBailoutOnMinusZero);
+  }
+  virtual Representation RequiredInputRepresentation(int index) {
+    return from();
   }
 
-  virtual Range* InferRange();
+  virtual Range* InferRange(Zone* zone);
 
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(Change)
 
  protected:
-  virtual bool DataEquals(HValue* other) {
-    if (!other->IsChange()) return false;
-    HChange* change = HChange::cast(other);
-    return to().Equals(change->to())
-        && deoptimize_on_undefined() == change->deoptimize_on_undefined();
-  }
-
- private:
-  Representation from_;
-  bool deoptimize_on_undefined_;
+  virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
@@ -1145,7 +1233,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1156,37 +1244,6 @@
 };
 
 
-class HToInt32: public HUnaryOperation {
- public:
-  explicit HToInt32(HValue* value)
-      : HUnaryOperation(value) {
-    set_representation(Representation::Integer32());
-    SetFlag(kUseGVN);
-  }
-
-  virtual Representation RequiredInputRepresentation(int index) const {
-    return Representation::None();
-  }
-
-  virtual bool CanTruncateToInt32() const {
-    return true;
-  }
-
-  virtual HValue* Canonicalize() {
-    if (value()->representation().IsInteger32()) {
-      return value();
-    } else {
-      return this;
-    }
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ToInt32)
-
- protected:
-  virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
 class HSimulate: public HInstruction {
  public:
   HSimulate(int ast_id, int pop_count)
@@ -1223,7 +1280,7 @@
   virtual int OperandCount() { return values_.length(); }
   virtual HValue* OperandAt(int index) { return values_[index]; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1268,7 +1325,7 @@
 
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1293,29 +1350,42 @@
 class HEnterInlined: public HTemplateInstruction<0> {
  public:
   HEnterInlined(Handle<JSFunction> closure,
+                int arguments_count,
                 FunctionLiteral* function,
-                CallKind call_kind)
+                CallKind call_kind,
+                bool is_construct,
+                Variable* arguments)
       : closure_(closure),
+        arguments_count_(arguments_count),
         function_(function),
-        call_kind_(call_kind) {
+        call_kind_(call_kind),
+        is_construct_(is_construct),
+        arguments_(arguments) {
   }
 
   virtual void PrintDataTo(StringStream* stream);
 
   Handle<JSFunction> closure() const { return closure_; }
+  int arguments_count() const { return arguments_count_; }
   FunctionLiteral* function() const { return function_; }
   CallKind call_kind() const { return call_kind_; }
+  bool is_construct() const { return is_construct_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
+  Variable* arguments() { return arguments_; }
+
   DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
 
  private:
   Handle<JSFunction> closure_;
+  int arguments_count_;
   FunctionLiteral* function_;
   CallKind call_kind_;
+  bool is_construct_;
+  Variable* arguments_;
 };
 
 
@@ -1323,7 +1393,7 @@
  public:
   HLeaveInlined() {}
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1337,7 +1407,7 @@
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1349,19 +1419,27 @@
 
 class HThisFunction: public HTemplateInstruction<0> {
  public:
-  HThisFunction() {
+  explicit HThisFunction(Handle<JSFunction> closure) : closure_(closure) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
+  Handle<JSFunction> closure() const { return closure_; }
+
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
 
  protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  virtual bool DataEquals(HValue* other) {
+    HThisFunction* b = HThisFunction::cast(other);
+    return *closure() == *b->closure();
+  }
+
+ private:
+  Handle<JSFunction> closure_;
 };
 
 
@@ -1372,7 +1450,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1392,7 +1470,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(OuterContext);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1401,6 +1479,33 @@
 };
 
 
+class HDeclareGlobals: public HUnaryOperation {
+ public:
+  HDeclareGlobals(HValue* context,
+                  Handle<FixedArray> pairs,
+                  int flags)
+      : HUnaryOperation(context),
+        pairs_(pairs),
+        flags_(flags) {
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  HValue* context() { return OperandAt(0); }
+  Handle<FixedArray> pairs() const { return pairs_; }
+  int flags() const { return flags_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+ private:
+  Handle<FixedArray> pairs_;
+  int flags_;
+};
+
+
 class HGlobalObject: public HUnaryOperation {
  public:
   explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
@@ -1410,7 +1515,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1429,7 +1534,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1465,7 +1570,7 @@
     SetOperandAt(0, value);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1485,7 +1590,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1500,7 +1605,7 @@
       : HBinaryCall(context, function, argument_count) {
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1525,7 +1630,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1542,7 +1647,7 @@
       : HBinaryCall(context, key, argument_count) {
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1566,7 +1671,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(CallNamed)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1575,15 +1680,16 @@
 };
 
 
-class HCallFunction: public HUnaryCall {
+class HCallFunction: public HBinaryCall {
  public:
-  HCallFunction(HValue* context, int argument_count)
-      : HUnaryCall(context, argument_count) {
+  HCallFunction(HValue* context, HValue* function, int argument_count)
+      : HBinaryCall(context, function, argument_count) {
   }
 
-  HValue* context() { return value(); }
+  HValue* context() { return first(); }
+  HValue* function() { return second(); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1602,7 +1708,7 @@
   HValue* context() { return value(); }
   Handle<String> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1622,7 +1728,7 @@
 
   Handle<JSFunction> target() const { return target_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1639,7 +1745,7 @@
       : HBinaryCall(context, constructor, argument_count) {
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1666,7 +1772,7 @@
   const Runtime::Function* function() const { return c_function_; }
   Handle<String> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1688,11 +1794,11 @@
     SetOperandAt(1, typecheck);
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnArrayLengths);
-    SetFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnArrayLengths);
+    SetGVNFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1713,10 +1819,10 @@
   explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnArrayLengths);
+    SetGVNFlag(kDependsOnArrayLengths);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1732,10 +1838,10 @@
   explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
     set_representation(Representation::Integer32());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnElementsKind);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1754,7 +1860,7 @@
     SetFlag(kTruncatingToInt32);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Integer32();
   }
   virtual HType CalculateInferredType();
@@ -1787,6 +1893,7 @@
       case kMathLog:
       case kMathSin:
       case kMathCos:
+      case kMathTan:
         set_representation(Representation::Double());
         break;
       default:
@@ -1804,7 +1911,7 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     if (index == 0) {
       return Representation::Tagged();
     } else {
@@ -1817,6 +1924,7 @@
         case kMathLog:
         case kMathSin:
         case kMathCos:
+        case kMathTan:
           return Representation::Double();
         case kMathAbs:
           return representation();
@@ -1858,10 +1966,10 @@
   explicit HLoadElements(HValue* value) : HUnaryOperation(value) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnElementsPointer);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1884,7 +1992,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1897,18 +2005,30 @@
 
 class HCheckMap: public HTemplateInstruction<2> {
  public:
-  HCheckMap(HValue* value, Handle<Map> map, HValue* typecheck = NULL)
-      : map_(map) {
+  HCheckMap(HValue* value,
+            Handle<Map> map,
+            HValue* typecheck = NULL,
+            CompareMapMode mode = REQUIRE_EXACT_MAP)
+      : map_(map),
+        mode_(mode) {
     SetOperandAt(0, value);
     // If callers don't depend on a typecheck, they can pass in NULL. In that
     // case we use a copy of the |value| argument as a dummy value.
     SetOperandAt(1, typecheck != NULL ? typecheck : value);
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnMaps);
+    // If the map to check doesn't have the untransitioned elements, it must
+    // not be hoisted above TransitionElements instructions.
+    if (mode == REQUIRE_EXACT_MAP || !map->has_fast_smi_only_elements()) {
+      SetGVNFlag(kDependsOnElementsKind);
+    }
+    has_element_transitions_ =
+        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL) != NULL ||
+        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL) != NULL;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -1916,17 +2036,24 @@
 
   HValue* value() { return OperandAt(0); }
   Handle<Map> map() const { return map_; }
+  CompareMapMode mode() const { return mode_; }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckMap)
 
  protected:
   virtual bool DataEquals(HValue* other) {
     HCheckMap* b = HCheckMap::cast(other);
-    return map_.is_identical_to(b->map());
+    // Two CheckMaps instructions are DataEqual if their maps are identical and
+    // they have the same mode. The mode comparison can be ignored if the map
+    // has no elements transitions.
+    return map_.is_identical_to(b->map()) &&
+        (b->mode() == mode() || !has_element_transitions_);
   }
 
  private:
+  bool has_element_transitions_;
   Handle<Map> map_;
+  CompareMapMode mode_;
 };
 
 
@@ -1938,7 +2065,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -1978,7 +2105,9 @@
     return new HCheckInstanceType(value, IS_SYMBOL);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2008,6 +2137,8 @@
     LAST_INTERVAL_CHECK = IS_JS_ARRAY
   };
 
+  const char* GetCheckName();
+
   HCheckInstanceType(HValue* value, Check check)
       : HUnaryOperation(value), check_(check) {
     set_representation(Representation::Tagged());
@@ -2025,7 +2156,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2059,7 +2190,7 @@
   HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder)
       : prototype_(prototype), holder_(holder) {
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnMaps);
   }
 
 #ifdef DEBUG
@@ -2071,7 +2202,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2102,7 +2233,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual HType CalculateInferredType();
@@ -2135,23 +2266,10 @@
     SetFlag(kFlexibleRepresentation);
   }
 
-  virtual Representation InferredRepresentation() {
-    bool double_occurred = false;
-    bool int32_occurred = false;
-    for (int i = 0; i < OperandCount(); ++i) {
-      HValue* value = OperandAt(i);
-      if (value->representation().IsDouble()) double_occurred = true;
-      if (value->representation().IsInteger32()) int32_occurred = true;
-      if (value->representation().IsTagged()) return Representation::Tagged();
-    }
+  virtual Representation InferredRepresentation();
 
-    if (double_occurred) return Representation::Double();
-    if (int32_occurred) return Representation::Integer32();
-    return Representation::None();
-  }
-
-  virtual Range* InferRange();
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Range* InferRange(Zone* zone);
+  virtual Representation RequiredInputRepresentation(int index) {
     return representation();
   }
   virtual HType CalculateInferredType();
@@ -2243,7 +2361,7 @@
     SetFlag(kIsArguments);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2259,7 +2377,20 @@
 
   bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  bool ImmortalImmovable() const {
+    Heap* heap = HEAP;
+    if (*handle_ == heap->undefined_value()) return true;
+    if (*handle_ == heap->null_value()) return true;
+    if (*handle_ == heap->true_value()) return true;
+    if (*handle_ == heap->false_value()) return true;
+    if (*handle_ == heap->the_hole_value()) return true;
+    if (*handle_ == heap->minus_zero_value()) return true;
+    if (*handle_ == heap->nan_value()) return true;
+    if (*handle_ == heap->empty_string()) return true;
+    return false;
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2272,6 +2403,7 @@
   }
 
   virtual bool EmitAtUses() { return !representation().IsDouble(); }
+  virtual HValue* Canonicalize();
   virtual void PrintDataTo(StringStream* stream);
   virtual HType CalculateInferredType();
   bool IsInteger() const { return handle_->IsSmi(); }
@@ -2287,13 +2419,24 @@
     ASSERT(HasDoubleValue());
     return double_value_;
   }
+  bool HasNumberValue() const { return has_int32_value_ || has_double_value_; }
+  int32_t NumberValueAsInteger32() const {
+    ASSERT(HasNumberValue());
+    if (has_int32_value_) return int32_value_;
+    return DoubleToInt32(double_value_);
+  }
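
NumberValueAsInteger32 defers to DoubleToInt32, i.e. ECMAScript's ToInt32 truncation. As a reference point, here is a hedged sketch of that conversion, written from the spec (ECMA-262 9.5) rather than copied from V8:

#include <cmath>
#include <cstdint>

// Simplified ToInt32: reduce modulo 2^32, then wrap into the signed range
// (two's-complement wrap, which every V8 target provides). V8's real
// DoubleToInt32 is faster but agrees on finite inputs.
int32_t DoubleToInt32Sketch(double x) {
  if (!std::isfinite(x)) return 0;  // NaN and infinities map to 0
  double m = std::fmod(std::trunc(x), 4294967296.0);  // mod 2^32
  if (m < 0) m += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}

// e.g. DoubleToInt32Sketch(-1.5) == -1, DoubleToInt32Sketch(4294967297.0) == 1
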
   bool HasStringValue() const { return handle_->IsString(); }
 
   bool ToBoolean() const;
 
   virtual intptr_t Hashcode() {
     ASSERT(!HEAP->allow_allocation(false));
-    return reinterpret_cast<intptr_t>(*handle());
+    intptr_t hash = reinterpret_cast<intptr_t>(*handle());
+    // Prevent smis from mapping to only a few hash values when the hash
+    // is truncated to its least significant bits.
+    const int kShiftSize = kSmiShiftSize + kSmiTagSize;
+    STATIC_ASSERT(kShiftSize != 0);
+    return hash ^ (hash >> kShiftSize);
   }
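
The shift-and-xor above matters because of how smis are encoded. A sketch assuming the 64-bit encoding, where the payload sits in the upper 32 bits and kShiftSize == 32:

#include <cstdint>

// Assumed 64-bit smi encoding: the integer payload occupies the high bits.
intptr_t EncodeSmi(int32_t value) {
  return static_cast<intptr_t>(value) << 32;
}

intptr_t MixedHash(intptr_t raw) {
  return raw ^ (raw >> 32);  // fold payload bits into the low word
}

// EncodeSmi(1) and EncodeSmi(2) are identical in their low 32 bits, so a
// hash table that keeps only low-order bits would put every smi in one
// bucket; after MixedHash their low bits differ again.
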
 
 #ifdef DEBUG
@@ -2303,7 +2446,7 @@
   DECLARE_CONCRETE_INSTRUCTION(Constant)
 
  protected:
-  virtual Range* InferRange();
+  virtual Range* InferRange(Zone* zone);
 
   virtual bool DataEquals(HValue* other) {
     HConstant* other_constant = HConstant::cast(other);
@@ -2353,6 +2496,27 @@
 };
 
 
+class HWrapReceiver: public HTemplateInstruction<2> {
+ public:
+  HWrapReceiver(HValue* receiver, HValue* function) {
+    set_representation(Representation::Tagged());
+    SetOperandAt(0, receiver);
+    SetOperandAt(1, function);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  HValue* receiver() { return OperandAt(0); }
+  HValue* function() { return OperandAt(1); }
+
+  virtual HValue* Canonicalize();
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
+};
+
+
 class HApplyArguments: public HTemplateInstruction<4> {
  public:
   HApplyArguments(HValue* function,
@@ -2367,7 +2531,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The length is untagged; all other inputs are tagged.
     return (index == 2)
         ? Representation::Integer32()
@@ -2394,7 +2558,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2410,7 +2574,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2433,7 +2597,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The arguments elements input is considered tagged.
     return index == 0
         ? Representation::Tagged()
@@ -2459,7 +2623,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Integer32();
   }
 
@@ -2484,7 +2648,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return index == 0
         ? Representation::Tagged()
         : representation();
@@ -2522,7 +2686,7 @@
   }
 
   virtual HType CalculateInferredType();
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return index == 0
         ? Representation::Tagged()
         : representation();
@@ -2549,7 +2713,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2587,7 +2751,7 @@
     return input_representation_;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return input_representation_;
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -2610,7 +2774,9 @@
   HValue* left() { return OperandAt(0); }
   HValue* right() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2629,7 +2795,7 @@
   HValue* left() { return value(); }
   int right() const { return right_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Integer32();
   }
 
@@ -2641,21 +2807,25 @@
 };
 
 
-class HIsNullAndBranch: public HUnaryControlInstruction {
+class HIsNilAndBranch: public HUnaryControlInstruction {
  public:
-  HIsNullAndBranch(HValue* value, bool is_strict)
-      : HUnaryControlInstruction(value, NULL, NULL), is_strict_(is_strict) { }
+  HIsNilAndBranch(HValue* value, EqualityKind kind, NilValue nil)
+      : HUnaryControlInstruction(value, NULL, NULL), kind_(kind), nil_(nil) { }
 
-  bool is_strict() const { return is_strict_; }
+  EqualityKind kind() const { return kind_; }
+  NilValue nil() const { return nil_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
 
  private:
-  bool is_strict_;
+  EqualityKind kind_;
+  NilValue nil_;
 };
 
 
@@ -2664,13 +2834,25 @@
   explicit HIsObjectAndBranch(HValue* value)
     : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
 };
 
+
+class HIsStringAndBranch: public HUnaryControlInstruction {
+ public:
+  explicit HIsStringAndBranch(HValue* value)
+    : HUnaryControlInstruction(value, NULL, NULL) { }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
+};
+
 
 class HIsSmiAndBranch: public HUnaryControlInstruction {
  public:
@@ -2679,7 +2861,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2693,7 +2875,7 @@
   explicit HIsUndetectableAndBranch(HValue* value)
       : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2701,9 +2883,45 @@
 };
 
 
+class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
+ public:
+  HStringCompareAndBranch(HValue* context,
+                          HValue* left,
+                          HValue* right,
+                          Token::Value token)
+      : token_(token) {
+    ASSERT(Token::IsCompareOp(token));
+    SetOperandAt(0, context);
+    SetOperandAt(1, left);
+    SetOperandAt(2, right);
+    set_representation(Representation::Tagged());
+  }
+
+  HValue* context() { return OperandAt(0); }
+  HValue* left() { return OperandAt(1); }
+  HValue* right() { return OperandAt(2); }
+  Token::Value token() const { return token_; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  Representation GetInputRepresentation() const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
+
+ private:
+  Token::Value token_;
+};
+
+
 class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2725,7 +2943,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2742,7 +2960,7 @@
   explicit HHasCachedArrayIndexAndBranch(HValue* value)
       : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2757,7 +2975,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2776,7 +2994,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2800,7 +3018,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2817,7 +3035,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2845,7 +3063,7 @@
   HValue* left() { return OperandAt(1); }
   Handle<JSFunction> function() { return function_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2870,7 +3088,7 @@
   HValue* left() { return OperandAt(0); }
   HValue* right() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return index == 0
       ? Representation::Double()
       : Representation::None();
@@ -2883,6 +3101,23 @@
 };
 
 
+class HRandom: public HTemplateInstruction<1> {
+ public:
+  explicit HRandom(HValue* global_object) {
+    SetOperandAt(0, global_object);
+    set_representation(Representation::Double());
+  }
+
+  HValue* global_object() { return OperandAt(0); }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Random)
+};
+
+
 class HAdd: public HArithmeticBinaryOperation {
  public:
   HAdd(HValue* context, HValue* left, HValue* right)
@@ -2898,14 +3133,21 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
+  static HInstruction* NewHAdd(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   virtual HType CalculateInferredType();
 
+  virtual HValue* Canonicalize();
+
   DECLARE_CONCRETE_INSTRUCTION(Add)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange();
+  virtual Range* InferRange(Zone* zone);
 };
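
A recurring change in this diff is InferRange() growing a Zone* parameter: Range objects are bump-allocated in a compilation-scoped arena that is discarded wholesale when compilation ends, so the allocator must be passed in explicitly. A minimal sketch of such an arena; names are illustrative, and the real v8::internal::Zone grows in segments rather than using one fixed buffer:

#include <cstddef>
#include <new>

class DemoZone {
 public:
  void* New(size_t size) {
    size = (size + 7) & ~static_cast<size_t>(7);  // keep 8-byte alignment
    if (pos_ + size > kSize) throw std::bad_alloc();
    void* result = buffer_ + pos_;
    pos_ += size;
    return result;
  }
 private:
  static const size_t kSize = 1 << 16;
  alignas(8) char buffer_[kSize];
  size_t pos_ = 0;
};

struct DemoRange {
  DemoRange(int l, int u) : lower(l), upper(u) {}
  int lower;
  int upper;
};

DemoRange* NewRange(DemoZone* zone, int l, int u) {
  // Placement-new into the zone; nothing is ever freed individually.
  return new (zone->New(sizeof(DemoRange))) DemoRange(l, u);
}
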
 
 
@@ -2918,12 +3160,19 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
+  virtual HValue* Canonicalize();
+
+  static HInstruction* NewHSub(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Sub)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange();
+  virtual Range* InferRange(Zone* zone);
 };
 
 
@@ -2941,12 +3190,17 @@
     return !representation().IsTagged();
   }
 
+  static HInstruction* NewHMul(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Mul)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange();
+  virtual Range* InferRange(Zone* zone);
 };
 
 
@@ -2969,12 +3223,17 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
+  static HInstruction* NewHMod(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Mod)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange();
+  virtual Range* InferRange(Zone* zone);
 };
 
 
@@ -2988,61 +3247,52 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
+  static HInstruction* NewHDiv(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Div)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange();
+  virtual Range* InferRange(Zone* zone);
 };
 
 
-class HBitAnd: public HBitwiseBinaryOperation {
+class HBitwise: public HBitwiseBinaryOperation {
  public:
-  HBitAnd(HValue* context, HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(context, left, right) { }
+  HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right), op_(op) {
+    ASSERT(op == Token::BIT_AND ||
+           op == Token::BIT_OR ||
+           op == Token::BIT_XOR);
+  }
+
+  Token::Value op() const { return op_; }
 
   virtual bool IsCommutative() const { return true; }
-  virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(BitAnd)
+  virtual HValue* Canonicalize();
+
+  static HInstruction* NewHBitwise(Zone* zone,
+                                   Token::Value op,
+                                   HValue* context,
+                                   HValue* left,
+                                   HValue* right);
+
+  DECLARE_CONCRETE_INSTRUCTION(Bitwise)
 
  protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  virtual bool DataEquals(HValue* other) {
+    return op() == HBitwise::cast(other)->op();
+  }
 
-  virtual Range* InferRange();
-};
+  virtual Range* InferRange(Zone* zone);
 
-
-class HBitXor: public HBitwiseBinaryOperation {
- public:
-  HBitXor(HValue* context, HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(context, left, right) { }
-
-  virtual bool IsCommutative() const { return true; }
-  virtual HType CalculateInferredType();
-
-  DECLARE_CONCRETE_INSTRUCTION(BitXor)
-
- protected:
-  virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HBitOr: public HBitwiseBinaryOperation {
- public:
-  HBitOr(HValue* context, HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(context, left, right) { }
-
-  virtual bool IsCommutative() const { return true; }
-  virtual HType CalculateInferredType();
-
-  DECLARE_CONCRETE_INSTRUCTION(BitOr)
-
- protected:
-  virtual bool DataEquals(HValue* other) { return true; }
-
-  virtual Range* InferRange();
+ private:
+  Token::Value op_;
 };
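
Collapsing HBitAnd/HBitOr/HBitXor into one HBitwise makes the operator token part of the instruction's data, which is why DataEquals now compares op(): GVN may only merge instructions that agree on it. A toy version of the rule, illustrative rather than V8's actual GVN:

#include <cassert>

struct DemoBitwise {
  char op;          // '&', '|' or '^'
  int left, right;  // value numbers of the operands
  bool DataEquals(const DemoBitwise& other) const {
    return op == other.op && left == other.left && right == other.right;
  }
};

int main() {
  DemoBitwise a{'&', 1, 2};
  DemoBitwise b{'|', 1, 2};
  DemoBitwise c{'&', 1, 2};
  assert(!a.DataEquals(b));  // same operands, different operator: distinct
  assert(a.DataEquals(c));   // identical op and operands: redundant for GVN
  return 0;
}
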
 
 
@@ -3051,8 +3301,12 @@
   HShl(HValue* context, HValue* left, HValue* right)
       : HBitwiseBinaryOperation(context, left, right) { }
 
-  virtual Range* InferRange();
-  virtual HType CalculateInferredType();
+  virtual Range* InferRange(Zone* zone);
+
+  static HInstruction* NewHShl(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
 
   DECLARE_CONCRETE_INSTRUCTION(Shl)
 
@@ -3066,8 +3320,12 @@
   HShr(HValue* context, HValue* left, HValue* right)
       : HBitwiseBinaryOperation(context, left, right) { }
 
-  virtual Range* InferRange();
-  virtual HType CalculateInferredType();
+  virtual Range* InferRange(Zone* zone);
+
+  static HInstruction* NewHShr(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
 
   DECLARE_CONCRETE_INSTRUCTION(Shr)
 
@@ -3081,8 +3339,12 @@
   HSar(HValue* context, HValue* left, HValue* right)
       : HBitwiseBinaryOperation(context, left, right) { }
 
-  virtual Range* InferRange();
-  virtual HType CalculateInferredType();
+  virtual Range* InferRange(Zone* zone);
+
+  static HInstruction* NewHSar(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
 
   DECLARE_CONCRETE_INSTRUCTION(Sar)
 
@@ -3094,12 +3356,12 @@
 class HOsrEntry: public HTemplateInstruction<0> {
  public:
   explicit HOsrEntry(int ast_id) : ast_id_(ast_id) {
-    SetFlag(kChangesOsrEntries);
+    SetGVNFlag(kChangesOsrEntries);
   }
 
   int ast_id() const { return ast_id_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -3120,7 +3382,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -3152,7 +3414,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3166,27 +3428,41 @@
 
 class HUnknownOSRValue: public HTemplateInstruction<0> {
  public:
-  HUnknownOSRValue() { set_representation(Representation::Tagged()); }
+  HUnknownOSRValue()
+      : incoming_value_(NULL) {
+    set_representation(Representation::Tagged());
+  }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
+  void set_incoming_value(HPhi* value) {
+    incoming_value_ = value;
+  }
+
+  HPhi* incoming_value() {
+    return incoming_value_;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
+
+ private:
+  HPhi* incoming_value_;
 };
 
 
 class HLoadGlobalCell: public HTemplateInstruction<0> {
  public:
-  HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
-      : cell_(cell), check_hole_value_(check_hole_value) {
+  HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, PropertyDetails details)
+      : cell_(cell), details_(details) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnGlobalVars);
+    SetGVNFlag(kDependsOnGlobalVars);
   }
 
   Handle<JSGlobalPropertyCell>  cell() const { return cell_; }
-  bool check_hole_value() const { return check_hole_value_; }
+  bool RequiresHoleCheck();
 
   virtual void PrintDataTo(StringStream* stream);
 
@@ -3195,7 +3471,7 @@
     return reinterpret_cast<intptr_t>(*cell_);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -3209,7 +3485,7 @@
 
  private:
   Handle<JSGlobalPropertyCell> cell_;
-  bool check_hole_value_;
+  PropertyDetails details_;
 };
 
 
@@ -3234,7 +3510,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3246,21 +3522,33 @@
 };
 
 
+inline bool StoringValueNeedsWriteBarrier(HValue* value) {
+  return !value->type().IsBoolean()
+      && !value->type().IsSmi()
+      && !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable());
+}
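
StoringValueNeedsWriteBarrier, moved up here so stores of all kinds can share it, encodes a GC invariant: only stores of movable, collectable heap objects must be reported to the store buffer and incremental marker, while smis, booleans and the other immortal immovable roots in ImmortalImmovable() never need recording. A simplified sketch of the idea, not the real V8 heap interface:

enum class DemoValueKind { kSmi, kImmortalRoot, kMovableHeapObject };

// A generational or incremental collector must learn about new pointers
// to objects it might move or collect; nothing else needs recording.
inline bool DemoNeedsWriteBarrier(DemoValueKind kind) {
  return kind == DemoValueKind::kMovableHeapObject;
}

template <typename RecordWriteFn>
void DemoStore(void** slot, void* value, DemoValueKind kind,
               RecordWriteFn record_write) {
  *slot = value;
  if (DemoNeedsWriteBarrier(kind)) record_write(slot);
}
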
+
+
 class HStoreGlobalCell: public HUnaryOperation {
  public:
   HStoreGlobalCell(HValue* value,
                    Handle<JSGlobalPropertyCell> cell,
-                   bool check_hole_value)
+                   PropertyDetails details)
       : HUnaryOperation(value),
         cell_(cell),
-        check_hole_value_(check_hole_value) {
-    SetFlag(kChangesGlobalVars);
+        details_(details) {
+    SetGVNFlag(kChangesGlobalVars);
   }
 
   Handle<JSGlobalPropertyCell> cell() const { return cell_; }
-  bool check_hole_value() const { return check_hole_value_; }
+  bool RequiresHoleCheck() {
+    return !details_.IsDontDelete() || details_.IsReadOnly();
+  }
+  bool NeedsWriteBarrier() {
+    return StoringValueNeedsWriteBarrier(value());
+  }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -3269,7 +3557,7 @@
 
  private:
   Handle<JSGlobalPropertyCell> cell_;
-  bool check_hole_value_;
+  PropertyDetails details_;
 };
 
 
@@ -3279,9 +3567,9 @@
                       HValue* global_object,
                       Handle<Object> name,
                       HValue* value,
-                      bool strict_mode)
+                      StrictModeFlag strict_mode_flag)
       : name_(name),
-        strict_mode_(strict_mode) {
+        strict_mode_flag_(strict_mode_flag) {
     SetOperandAt(0, context);
     SetOperandAt(1, global_object);
     SetOperandAt(2, value);
@@ -3293,11 +3581,11 @@
   HValue* global_object() { return OperandAt(1); }
   Handle<Object> name() const { return name_; }
   HValue* value() { return OperandAt(2); }
-  bool strict_mode() { return strict_mode_; }
+  StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3305,22 +3593,56 @@
 
  private:
   Handle<Object> name_;
-  bool strict_mode_;
+  StrictModeFlag strict_mode_flag_;
 };
 
 
 class HLoadContextSlot: public HUnaryOperation {
  public:
-  HLoadContextSlot(HValue* context , int slot_index)
-      : HUnaryOperation(context), slot_index_(slot_index) {
+  enum Mode {
+    // Perform a normal load of the context slot without checking its value.
+    kNoCheck,
+    // Load and check the value of the context slot. Deoptimize if it's the
+    // hole value. This is used for checking for loading of uninitialized
+    // harmony bindings where we deoptimize into full-codegen generated code
+    // which will subsequently throw a reference error.
+    kCheckDeoptimize,
+    // Load and check the value of the context slot. Return undefined if it's
+    // the hole value. This is used for non-harmony const assignments
+    kCheckReturnUndefined
+  };
+
+  HLoadContextSlot(HValue* context, Variable* var)
+      : HUnaryOperation(context), slot_index_(var->index()) {
+    ASSERT(var->IsContextSlot());
+    switch (var->mode()) {
+      case LET:
+      case CONST_HARMONY:
+        mode_ = kCheckDeoptimize;
+        break;
+      case CONST:
+        mode_ = kCheckReturnUndefined;
+        break;
+      default:
+        mode_ = kNoCheck;
+    }
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnContextSlots);
+    SetGVNFlag(kDependsOnContextSlots);
   }
 
   int slot_index() const { return slot_index_; }
+  Mode mode() const { return mode_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  bool DeoptimizesOnHole() {
+    return mode_ == kCheckDeoptimize;
+  }
+
+  bool RequiresHoleCheck() {
+    return mode_ != kNoCheck;
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3336,34 +3658,50 @@
 
  private:
   int slot_index_;
+  Mode mode_;
 };
 
 
-static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
-  return !value->type().IsBoolean()
-      && !value->type().IsSmi()
-      && !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
-}
-
-
 class HStoreContextSlot: public HTemplateInstruction<2> {
  public:
-  HStoreContextSlot(HValue* context, int slot_index, HValue* value)
-      : slot_index_(slot_index) {
+  enum Mode {
+    // Perform a normal store to the context slot without checking its previous
+    // value.
+    kNoCheck,
+    // Check the previous value of the context slot and deoptimize if it's the
+    // hole value. This is used for checking for assignments to uninitialized
+    // harmony bindings where we deoptimize into full-codegen generated code
+    // which will subsequently throw a reference error.
+    kCheckDeoptimize,
+    // Check the previous value and ignore the assignment if it isn't the
+    // hole value.
+    kCheckIgnoreAssignment
+  };
+
+  HStoreContextSlot(HValue* context, int slot_index, Mode mode, HValue* value)
+      : slot_index_(slot_index), mode_(mode) {
     SetOperandAt(0, context);
     SetOperandAt(1, value);
-    SetFlag(kChangesContextSlots);
+    SetGVNFlag(kChangesContextSlots);
   }
 
   HValue* context() { return OperandAt(0); }
   HValue* value() { return OperandAt(1); }
   int slot_index() const { return slot_index_; }
+  Mode mode() const { return mode_; }
 
   bool NeedsWriteBarrier() {
     return StoringValueNeedsWriteBarrier(value());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  bool DeoptimizesOnHole() {
+    return mode_ == kCheckDeoptimize;
+  }
+
+  bool RequiresHoleCheck() {
+    return mode_ != kNoCheck;
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3373,6 +3711,7 @@
 
  private:
   int slot_index_;
+  Mode mode_;
 };
 
 
@@ -3384,11 +3723,11 @@
         offset_(offset) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnMaps);
     if (is_in_object) {
-      SetFlag(kDependsOnInobjectFields);
+      SetGVNFlag(kDependsOnInobjectFields);
     } else {
-      SetFlag(kDependsOnBackingStoreFields);
+      SetGVNFlag(kDependsOnBackingStoreFields);
     }
   }
 
@@ -3396,7 +3735,7 @@
   bool is_in_object() const { return is_in_object_; }
   int offset() const { return offset_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -3428,7 +3767,7 @@
   Handle<String> name() { return name_; }
   bool need_generic() { return need_generic_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3463,7 +3802,7 @@
   HValue* object() { return OperandAt(1); }
   Handle<Object> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3482,12 +3821,12 @@
       : HUnaryOperation(function) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnCalls);
+    SetGVNFlag(kDependsOnCalls);
   }
 
   HValue* function() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3500,18 +3839,23 @@
 
 class HLoadKeyedFastElement: public HTemplateInstruction<2> {
  public:
-  HLoadKeyedFastElement(HValue* obj, HValue* key) {
+  enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
+
+  HLoadKeyedFastElement(HValue* obj,
+                        HValue* key,
+                        HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
+      : hole_check_mode_(hole_check_mode) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     set_representation(Representation::Tagged());
-    SetFlag(kDependsOnArrayElements);
+    SetGVNFlag(kDependsOnArrayElements);
     SetFlag(kUseGVN);
   }
 
   HValue* object() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
     return index == 0
       ? Representation::Tagged()
@@ -3520,12 +3864,19 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  bool RequiresHoleCheck() const;
+  bool RequiresHoleCheck();
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)
 
  protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  virtual bool DataEquals(HValue* other) {
+    if (!other->IsLoadKeyedFastElement()) return false;
+    HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
+    return hole_check_mode_ == other_load->hole_check_mode_;
+  }
+
+ private:
+  HoleCheckMode hole_check_mode_;
 };
 
 
@@ -3535,14 +3886,14 @@
     SetOperandAt(0, elements);
     SetOperandAt(1, key);
     set_representation(Representation::Double());
-    SetFlag(kDependsOnDoubleArrayElements);
+    SetGVNFlag(kDependsOnDoubleArrayElements);
     SetFlag(kUseGVN);
   }
 
   HValue* elements() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
     return index == 0
       ? Representation::Tagged()
@@ -3551,8 +3902,6 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  bool RequiresHoleCheck() const;
-
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
 
  protected:
@@ -3574,15 +3923,15 @@
     } else {
       set_representation(Representation::Integer32());
     }
-    SetFlag(kDependsOnSpecializedArrayElements);
+    SetGVNFlag(kDependsOnSpecializedArrayElements);
     // Native code could change the specialized array.
-    SetFlag(kDependsOnCalls);
+    SetGVNFlag(kDependsOnCalls);
     SetFlag(kUseGVN);
   }
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32, but the base pointer
     // for the element load is a naked pointer.
     return index == 0
@@ -3594,6 +3943,8 @@
   HValue* key() { return OperandAt(1); }
   ElementsKind elements_kind() const { return elements_kind_; }
 
+  virtual Range* InferRange(Zone* zone);
+
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement)
 
  protected:
@@ -3625,10 +3976,12 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
+  virtual HValue* Canonicalize();
+
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
 };
 
@@ -3646,15 +3999,15 @@
     SetOperandAt(0, obj);
     SetOperandAt(1, val);
     if (is_in_object_) {
-      SetFlag(kChangesInobjectFields);
+      SetGVNFlag(kChangesInobjectFields);
     } else {
-      SetFlag(kChangesBackingStoreFields);
+      SetGVNFlag(kChangesBackingStoreFields);
     }
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -3686,9 +4039,9 @@
                      HValue* object,
                      Handle<String> name,
                      HValue* value,
-                     bool strict_mode)
+                     StrictModeFlag strict_mode_flag)
       : name_(name),
-        strict_mode_(strict_mode) {
+        strict_mode_flag_(strict_mode_flag) {
     SetOperandAt(0, object);
     SetOperandAt(1, value);
     SetOperandAt(2, context);
@@ -3699,11 +4052,11 @@
   HValue* value() { return OperandAt(1); }
   HValue* context() { return OperandAt(2); }
   Handle<String> name() { return name_; }
-  bool strict_mode() { return strict_mode_; }
+  StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3711,20 +4064,22 @@
 
  private:
   Handle<String> name_;
-  bool strict_mode_;
+  StrictModeFlag strict_mode_flag_;
 };
 
 
 class HStoreKeyedFastElement: public HTemplateInstruction<3> {
  public:
-  HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) {
+  HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
+                         ElementsKind elements_kind = FAST_ELEMENTS)
+      : elements_kind_(elements_kind) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
-    SetFlag(kChangesArrayElements);
+    SetGVNFlag(kChangesArrayElements);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
     return index == 1
         ? Representation::Integer32()
@@ -3734,14 +4089,24 @@
   HValue* object() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
+  bool value_is_smi() {
+    return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
+  }
 
   bool NeedsWriteBarrier() {
-    return StoringValueNeedsWriteBarrier(value());
+    if (value_is_smi()) {
+      return false;
+    } else {
+      return StoringValueNeedsWriteBarrier(value());
+    }
   }
 
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
+
+ private:
+  ElementsKind elements_kind_;
 };
 
 
@@ -3753,10 +4118,10 @@
     SetOperandAt(0, elements);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
-    SetFlag(kChangesDoubleArrayElements);
+    SetGVNFlag(kChangesDoubleArrayElements);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     if (index == 1) {
       return Representation::Integer32();
     } else if (index == 2) {
@@ -3787,7 +4152,7 @@
                                      HValue* val,
                                      ElementsKind elements_kind)
       : elements_kind_(elements_kind) {
-    SetFlag(kChangesSpecializedArrayElements);
+    SetGVNFlag(kChangesSpecializedArrayElements);
     SetOperandAt(0, external_elements);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
@@ -3795,7 +4160,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     if (index == 0) {
       return Representation::External();
     } else {
@@ -3828,8 +4193,8 @@
                      HValue* object,
                      HValue* key,
                      HValue* value,
-                     bool strict_mode)
-      : strict_mode_(strict_mode) {
+                     StrictModeFlag strict_mode_flag)
+      : strict_mode_flag_(strict_mode_flag) {
     SetOperandAt(0, object);
     SetOperandAt(1, key);
     SetOperandAt(2, value);
@@ -3841,9 +4206,9 @@
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
   HValue* context() { return OperandAt(3); }
-  bool strict_mode() { return strict_mode_; }
+  StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3852,7 +4217,46 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
 
  private:
-  bool strict_mode_;
+  StrictModeFlag strict_mode_flag_;
+};
+
+
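+// Transitions an object's map so that its elements kind changes (for
+// example, from FAST_SMI_ONLY_ELEMENTS to FAST_DOUBLE_ELEMENTS), which may
+// also reallocate the elements backing store.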
+class HTransitionElementsKind: public HTemplateInstruction<1> {
+ public:
+  HTransitionElementsKind(HValue* object,
+                          Handle<Map> original_map,
+                          Handle<Map> transitioned_map)
+      : original_map_(original_map),
+        transitioned_map_(transitioned_map) {
+    SetOperandAt(0, object);
+    SetFlag(kUseGVN);
+    SetGVNFlag(kChangesElementsKind);
+    SetGVNFlag(kChangesElementsPointer);
+    set_representation(Representation::Tagged());
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  HValue* object() { return OperandAt(0); }
+  Handle<Map> original_map() { return original_map_; }
+  Handle<Map> transitioned_map() { return transitioned_map_; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
+
+ protected:
+  virtual bool DataEquals(HValue* other) {
+    HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
+    return original_map_.is_identical_to(instr->original_map()) &&
+        transitioned_map_.is_identical_to(instr->transitioned_map());
+  }
+
+ private:
+  Handle<Map> original_map_;
+  Handle<Map> transitioned_map_;
 };
 
 
@@ -3862,10 +4266,10 @@
       : HBinaryOperation(context, left, right) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3888,10 +4292,10 @@
     SetOperandAt(2, index);
     set_representation(Representation::Integer32());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The index is supposed to be Integer32.
     return index == 2
         ? Representation::Integer32()
@@ -3907,8 +4311,8 @@
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange() {
-    return new Range(0, String::kMaxUC16CharCode);
+  virtual Range* InferRange(Zone* zone) {
+    return new(zone) Range(0, String::kMaxUtf16CodeUnit);
   }
 };
 
@@ -3918,15 +4322,16 @@
   HStringCharFromCode(HValue* context, HValue* char_code) {
     SetOperandAt(0, context);
     SetOperandAt(1, char_code);
-     set_representation(Representation::Tagged());
+    set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return index == 0
         ? Representation::Tagged()
         : Representation::Integer32();
   }
+  virtual HType CalculateInferredType();
 
   HValue* context() { return OperandAt(0); }
   HValue* value() { return OperandAt(1); }
@@ -3942,10 +4347,10 @@
   explicit HStringLength(HValue* string) : HUnaryOperation(string) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3959,12 +4364,35 @@
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange() {
-    return new Range(0, String::kMaxLength);
+  virtual Range* InferRange(Zone* zone) {
+    return new(zone) Range(0, String::kMaxLength);
   }
 };
 
 
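+// Allocates a new object for a known constructor function, based on the
+// constructor's initial map.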
+class HAllocateObject: public HTemplateInstruction<1> {
+ public:
+  HAllocateObject(HValue* context, Handle<JSFunction> constructor)
+      : constructor_(constructor) {
+    SetOperandAt(0, context);
+    set_representation(Representation::Tagged());
+  }
+
+  HValue* context() { return OperandAt(0); }
+  Handle<JSFunction> constructor() { return constructor_; }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+  virtual HType CalculateInferredType();
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
+
+ private:
+  Handle<JSFunction> constructor_;
+};
+
+
 template <int V>
 class HMaterializedLiteral: public HTemplateInstruction<V> {
  public:
@@ -3982,34 +4410,76 @@
 };
 
 
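+// Materializes a literal by fast deep-copying a boilerplate object graph,
+// bounded by kMaxLiteralDepth and kMaxLiteralProperties below.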
+class HFastLiteral: public HMaterializedLiteral<1> {
+ public:
+  HFastLiteral(HValue* context,
+               Handle<JSObject> boilerplate,
+               int total_size,
+               int literal_index,
+               int depth)
+      : HMaterializedLiteral<1>(literal_index, depth),
+        boilerplate_(boilerplate),
+        total_size_(total_size) {
+    SetOperandAt(0, context);
+  }
+
+  // Maximum depth and total number of elements and properties for literal
+  // graphs to be considered for fast deep-copying.
+  static const int kMaxLiteralDepth = 3;
+  static const int kMaxLiteralProperties = 8;
+
+  HValue* context() { return OperandAt(0); }
+  Handle<JSObject> boilerplate() const { return boilerplate_; }
+  int total_size() const { return total_size_; }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+  virtual HType CalculateInferredType();
+
+  DECLARE_CONCRETE_INSTRUCTION(FastLiteral)
+
+ private:
+  Handle<JSObject> boilerplate_;
+  int total_size_;
+};
+
+
 class HArrayLiteral: public HMaterializedLiteral<1> {
  public:
   HArrayLiteral(HValue* context,
-                Handle<FixedArray> constant_elements,
+                Handle<HeapObject> boilerplate_object,
                 int length,
                 int literal_index,
                 int depth)
       : HMaterializedLiteral<1>(literal_index, depth),
         length_(length),
-        constant_elements_(constant_elements) {
+        boilerplate_object_(boilerplate_object) {
     SetOperandAt(0, context);
   }
 
   HValue* context() { return OperandAt(0); }
-  Handle<FixedArray> constant_elements() const { return constant_elements_; }
+  ElementsKind boilerplate_elements_kind() const {
+    if (!boilerplate_object_->IsJSObject()) {
+      return FAST_ELEMENTS;
+    }
+    return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
+  }
+  Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; }
   int length() const { return length_; }
 
   bool IsCopyOnWrite() const;
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
+  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
 
  private:
   int length_;
-  Handle<FixedArray> constant_elements_;
+  Handle<HeapObject> boilerplate_object_;
 };
 
 
@@ -4035,9 +4505,10 @@
   bool fast_elements() const { return fast_elements_; }
   bool has_function() const { return has_function_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
+  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)
 
@@ -4065,9 +4536,10 @@
   Handle<String> pattern() { return pattern_; }
   Handle<String> flags() { return flags_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
+  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
 
@@ -4089,9 +4561,10 @@
 
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
+  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral)
 
@@ -4115,7 +4588,10 @@
   HValue* context() { return OperandAt(0); }
   HValue* value() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual HValue* Canonicalize();
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4129,11 +4605,11 @@
     // This instruction is not marked as having side effects, but
     // changes the map of the input operand. Use it only when creating
     // object literals.
-    ASSERT(value->IsObjectLiteral());
+    ASSERT(value->IsObjectLiteral() || value->IsFastLiteral());
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4147,7 +4623,7 @@
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4155,6 +4631,26 @@
 };
 
 
+class HDateField: public HUnaryOperation {
+ public:
+  HDateField(HValue* date, Smi* index)
+      : HUnaryOperation(date), index_(index) {
+    set_representation(Representation::Tagged());
+  }
+
+  Smi* index() const { return index_; }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DateField)
+
+ private:
+  Smi* index_;
+};
+
+
 class HDeleteProperty: public HBinaryOperation {
  public:
   HDeleteProperty(HValue* context, HValue* obj, HValue* key)
@@ -4163,7 +4659,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4190,7 +4686,7 @@
   HValue* key() { return OperandAt(1); }
   HValue* object() { return OperandAt(2); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4203,6 +4699,134 @@
   DECLARE_CONCRETE_INSTRUCTION(In)
 };
 
+
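+// Deoptimizing check that value's map is equal to the map operand; unlike
+// HCheckMap, the expected map is itself an SSA value.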
+class HCheckMapValue: public HTemplateInstruction<2> {
+ public:
+  HCheckMapValue(HValue* value,
+                 HValue* map) {
+    SetOperandAt(0, value);
+    SetOperandAt(1, map);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetGVNFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnElementsKind);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual HType CalculateInferredType() {
+    return HType::Tagged();
+  }
+
+  HValue* value() { return OperandAt(0); }
+  HValue* map() { return OperandAt(1); }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
+
+ protected:
+  virtual bool DataEquals(HValue* other) {
+    return true;
+  }
+};
+
+
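+// Prepares a for-in loop: validates the enumerable object and yields the
+// map to use for its enumeration cache; modeled with full side effects.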
+class HForInPrepareMap : public HTemplateInstruction<2> {
+ public:
+  HForInPrepareMap(HValue* context,
+                   HValue* object) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, object);
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  HValue* context() { return OperandAt(0); }
+  HValue* enumerable() { return OperandAt(1); }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual HType CalculateInferredType() {
+    return HType::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap);
+};
+
+
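+// Loads an enum cache array for the map of the enumerable; idx selects
+// which cache (property keys or field indices).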
+class HForInCacheArray : public HTemplateInstruction<2> {
+ public:
+  HForInCacheArray(HValue* enumerable,
+                   HValue* keys,
+                   int idx) : idx_(idx), index_cache_(NULL) {
+    SetOperandAt(0, enumerable);
+    SetOperandAt(1, keys);
+    set_representation(Representation::Tagged());
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  HValue* enumerable() { return OperandAt(0); }
+  HValue* map() { return OperandAt(1); }
+  int idx() { return idx_; }
+
+  HForInCacheArray* index_cache() {
+    return index_cache_;
+  }
+
+  void set_index_cache(HForInCacheArray* index_cache) {
+    index_cache_ = index_cache;
+  }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual HType CalculateInferredType() {
+    return HType::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray);
+
+ private:
+  int idx_;
+  HForInCacheArray* index_cache_;
+};
+
+
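+// Loads a property of the object by field index; paired with HCheckMapValue
+// on the fast path of for-in loop bodies.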
+class HLoadFieldByIndex : public HTemplateInstruction<2> {
+ public:
+  HLoadFieldByIndex(HValue* object,
+                    HValue* index) {
+    SetOperandAt(0, object);
+    SetOperandAt(1, index);
+    set_representation(Representation::Tagged());
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  HValue* object() { return OperandAt(0); }
+  HValue* index() { return OperandAt(1); }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual HType CalculateInferredType() {
+    return HType::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
+};
+
+
 #undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION
 
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index c625fba..921aba1 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -70,7 +70,8 @@
       deleted_phis_(4),
       parent_loop_header_(NULL),
       is_inline_return_target_(false),
-      is_deoptimizing_(false) { }
+      is_deoptimizing_(false),
+      dominates_loop_successors_(false) { }
 
 
 void HBasicBlock::AttachLoopInformation() {
@@ -96,7 +97,7 @@
   ASSERT(phi->block() == this);
   ASSERT(phis_.Contains(phi));
   ASSERT(phi->HasNoUses() || !phi->is_live());
-  phi->ClearOperands();
+  phi->Kill();
   phis_.RemoveElement(phi);
   phi->SetBlock(NULL);
 }
@@ -164,10 +165,10 @@
 }
 
 
-void HBasicBlock::Goto(HBasicBlock* block) {
+void HBasicBlock::Goto(HBasicBlock* block, bool drop_extra) {
   if (block->IsInlineReturnTarget()) {
     AddInstruction(new(zone()) HLeaveInlined);
-    last_environment_ = last_environment()->outer();
+    last_environment_ = last_environment()->DiscardInlined(drop_extra);
   }
   AddSimulate(AstNode::kNoNumber);
   HGoto* instr = new(zone()) HGoto(block);
@@ -175,11 +176,13 @@
 }
 
 
-void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
+void HBasicBlock::AddLeaveInlined(HValue* return_value,
+                                  HBasicBlock* target,
+                                  bool drop_extra) {
   ASSERT(target->IsInlineReturnTarget());
   ASSERT(return_value != NULL);
   AddInstruction(new(zone()) HLeaveInlined);
-  last_environment_ = last_environment()->outer();
+  last_environment_ = last_environment()->DiscardInlined(drop_extra);
   last_environment()->Push(return_value);
   AddSimulate(AstNode::kNoNumber);
   HGoto* instr = new(zone()) HGoto(target);
@@ -313,6 +316,62 @@
 }
 
 
+void HBasicBlock::AssignLoopSuccessorDominators() {
+  // Mark blocks that dominate all subsequent reachable blocks inside their
+  // loop. Exploit the fact that blocks are sorted in reverse post order. When
+  // the loop is visited in increasing block id order, if the number of
+  // non-loop-exiting successor edges at the dominator_candidate block doesn't
+  // exceed the number of previously encountered predecessor edges, there is no
+  // path from the loop header to any block with higher id that doesn't go
+  // through the dominator_candidate block. In this case, the
+  // dominator_candidate block is guaranteed to dominate all blocks reachable
+  // from it with higher ids.
+  HBasicBlock* last = loop_information()->GetLastBackEdge();
+  int outstanding_successors = 1;  // one edge from the pre-header
+  // Header always dominates everything.
+  MarkAsLoopSuccessorDominator();
+  for (int j = block_id(); j <= last->block_id(); ++j) {
+    HBasicBlock* dominator_candidate = graph_->blocks()->at(j);
+    for (HPredecessorIterator it(dominator_candidate); !it.Done();
+         it.Advance()) {
+      HBasicBlock* predecessor = it.Current();
+      // Don't count back edges.
+      if (predecessor->block_id() < dominator_candidate->block_id()) {
+        outstanding_successors--;
+      }
+    }
+
+    // If more successors than predecessors have been seen in the loop up to
+    // now, it's not possible to guarantee that the current block dominates
+    // all of the blocks with higher IDs. In this case, assume conservatively
+    // that those paths through the loop that don't go through the current block
+    // contain all of the loop's dependencies. Also be careful to record
+    // dominator information about the current loop that's being processed,
+    // and not nested loops, which will be processed when
+    // AssignLoopSuccessorDominators gets called on their header.
+    ASSERT(outstanding_successors >= 0);
+    HBasicBlock* parent_loop_header = dominator_candidate->parent_loop_header();
+    if (outstanding_successors == 0 &&
+        (parent_loop_header == this && !dominator_candidate->IsLoopHeader())) {
+      dominator_candidate->MarkAsLoopSuccessorDominator();
+    }
+    HControlInstruction* end = dominator_candidate->end();
+    for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
+      HBasicBlock* successor = it.Current();
+      // Only count successors that remain inside the loop and don't loop back
+      // to a loop header.
+      if (successor->block_id() > dominator_candidate->block_id() &&
+          successor->block_id() <= last->block_id()) {
+        // Backwards edges must land on loop headers.
+        ASSERT(successor->block_id() > dominator_candidate->block_id() ||
+               successor->IsLoopHeader());
+        outstanding_successors++;
+      }
+    }
+  }
+}
+
+
 int HBasicBlock::PredecessorIndexOf(HBasicBlock* predecessor) const {
   for (int i = 0; i < predecessors_.length(); ++i) {
     if (predecessors_[i] == predecessor) return i;
@@ -387,7 +446,7 @@
                        HBasicBlock* dont_visit)
       : visited_count_(0),
         stack_(16),
-        reachable_(block_count),
+        reachable_(block_count, ZONE),
         dont_visit_(dont_visit) {
     PushBlock(entry_block);
     Analyze();
@@ -422,7 +481,7 @@
 };
 
 
-void HGraph::Verify() const {
+void HGraph::Verify(bool do_full_verify) const {
   for (int i = 0; i < blocks_.length(); i++) {
     HBasicBlock* block = blocks_.at(i);
 
@@ -473,25 +532,27 @@
   // Check special property of first block to have no predecessors.
   ASSERT(blocks_.at(0)->predecessors()->is_empty());
 
-  // Check that the graph is fully connected.
-  ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
-  ASSERT(analyzer.visited_count() == blocks_.length());
+  if (do_full_verify) {
+    // Check that the graph is fully connected.
+    ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
+    ASSERT(analyzer.visited_count() == blocks_.length());
 
-  // Check that entry block dominator is NULL.
-  ASSERT(entry_block_->dominator() == NULL);
+    // Check that entry block dominator is NULL.
+    ASSERT(entry_block_->dominator() == NULL);
 
-  // Check dominators.
-  for (int i = 0; i < blocks_.length(); ++i) {
-    HBasicBlock* block = blocks_.at(i);
-    if (block->dominator() == NULL) {
-      // Only start block may have no dominator assigned to.
-      ASSERT(i == 0);
-    } else {
-      // Assert that block is unreachable if dominator must not be visited.
-      ReachabilityAnalyzer dominator_analyzer(entry_block_,
-                                              blocks_.length(),
-                                              block->dominator());
-      ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
+    // Check dominators.
+    for (int i = 0; i < blocks_.length(); ++i) {
+      HBasicBlock* block = blocks_.at(i);
+      if (block->dominator() == NULL) {
+        // Only start block may have no dominator assigned to.
+        ASSERT(i == 0);
+      } else {
+        // Assert that block is unreachable if dominator must not be visited.
+        ReachabilityAnalyzer dominator_analyzer(entry_block_,
+                                                blocks_.length(),
+                                                block->dominator());
+        ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
+      }
     }
   }
 }
@@ -539,7 +600,7 @@
 HGraphBuilder::HGraphBuilder(CompilationInfo* info,
                              TypeFeedbackOracle* oracle)
     : function_state_(NULL),
-      initial_function_state_(this, info, oracle),
+      initial_function_state_(this, info, oracle, NORMAL_RETURN),
       ast_context_(NULL),
       break_scope_(NULL),
       graph_(NULL),
@@ -621,28 +682,28 @@
 
 Handle<Code> HGraph::Compile(CompilationInfo* info) {
   int values = GetMaximumValueID();
-  if (values > LAllocator::max_initial_value_ids()) {
-    if (FLAG_trace_bailout) PrintF("Function is too big\n");
+  if (values > LUnallocated::kMaxVirtualRegisters) {
+    if (FLAG_trace_bailout) {
+      PrintF("Not enough virtual registers for (values).\n");
+    }
     return Handle<Code>::null();
   }
-
   LAllocator allocator(values, this);
   LChunkBuilder builder(info, this, &allocator);
   LChunk* chunk = builder.Build();
   if (chunk == NULL) return Handle<Code>::null();
 
-  if (!FLAG_alloc_lithium) return Handle<Code>::null();
-
-  allocator.Allocate(chunk);
-
-  if (!FLAG_use_lithium) return Handle<Code>::null();
+  if (!allocator.Allocate(chunk)) {
+    if (FLAG_trace_bailout) {
+      PrintF("Not enough virtual registers (regalloc).\n");
+    }
+    return Handle<Code>::null();
+  }
 
   MacroAssembler assembler(info->isolate(), NULL, 0);
   LCodeGen generator(chunk, &assembler, info);
 
-  if (FLAG_eliminate_empty_blocks) {
-    chunk->MarkEmptyBlocks();
-  }
+  chunk->MarkEmptyBlocks();
 
   if (generator.GenerateCode()) {
     if (FLAG_trace_codegen) {
@@ -669,7 +730,7 @@
 
 void HGraph::Canonicalize() {
   if (!FLAG_use_canonicalizing) return;
-  HPhase phase("Canonicalize", this);
+  HPhase phase("H_Canonicalize", this);
   for (int i = 0; i < blocks()->length(); ++i) {
     HInstruction* instr = blocks()->at(i)->first();
     while (instr != NULL) {
@@ -682,8 +743,8 @@
 
 
 void HGraph::OrderBlocks() {
-  HPhase phase("Block ordering");
-  BitVector visited(blocks_.length());
+  HPhase phase("H_Block ordering");
+  BitVector visited(blocks_.length(), zone());
 
   ZoneList<HBasicBlock*> reverse_result(8);
   HBasicBlock* start = blocks_[0];
@@ -728,6 +789,7 @@
       Postorder(it.Current(), visited, order, block);
     }
   } else {
+    ASSERT(block->IsFinished());
     for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
       Postorder(it.Current(), visited, order, loop_header);
     }
@@ -743,14 +805,16 @@
 
 
 void HGraph::AssignDominators() {
-  HPhase phase("Assign dominators", this);
+  HPhase phase("H_Assign dominators", this);
   for (int i = 0; i < blocks_.length(); ++i) {
-    if (blocks_[i]->IsLoopHeader()) {
+    HBasicBlock* block = blocks_[i];
+    if (block->IsLoopHeader()) {
       // Only the first predecessor of a loop header is from outside the loop.
       // All others are back edges, and thus cannot dominate the loop header.
-      blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->first());
+      block->AssignCommonDominator(block->predecessors()->first());
+      block->AssignLoopSuccessorDominators();
     } else {
-      for (int j = 0; j < blocks_[i]->predecessors()->length(); ++j) {
+      for (int j = blocks_[i]->predecessors()->length() - 1; j >= 0; --j) {
         blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j));
       }
     }
@@ -760,7 +824,7 @@
 // Mark all blocks that are dominated by an unconditional soft deoptimize to
 // prevent code motion across those blocks.
 void HGraph::PropagateDeoptimizingMark() {
-  HPhase phase("Propagate deoptimizing mark", this);
+  HPhase phase("H_Propagate deoptimizing mark", this);
   MarkAsDeoptimizingRecursively(entry_block());
 }
 
@@ -773,7 +837,7 @@
 }
 
 void HGraph::EliminateRedundantPhis() {
-  HPhase phase("Redundant phi elimination", this);
+  HPhase phase("H_Redundant phi elimination", this);
 
   // Worklist of phis that can potentially be eliminated. Initialized with
   // all phi nodes. When elimination of a phi node modifies another phi node
@@ -807,7 +871,7 @@
 
 
 void HGraph::EliminateUnreachablePhis() {
-  HPhase phase("Unreachable phi elimination", this);
+  HPhase phase("H_Unreachable phi elimination", this);
 
   // Initialize worklist.
   ZoneList<HPhi*> phi_list(blocks_.length());
@@ -850,7 +914,7 @@
 }
 
 
-bool HGraph::CheckPhis() {
+bool HGraph::CheckArgumentsPhiUses() {
   int block_count = blocks_.length();
   for (int i = 0; i < block_count; ++i) {
     for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
@@ -863,13 +927,11 @@
 }
 
 
-bool HGraph::CollectPhis() {
+bool HGraph::CheckConstPhiUses() {
   int block_count = blocks_.length();
-  phi_list_ = new ZoneList<HPhi*>(block_count);
   for (int i = 0; i < block_count; ++i) {
     for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
       HPhi* phi = blocks_[i]->phis()->at(j);
-      phi_list_->Add(phi);
       // Check for the hole value (from an uninitialized const).
       for (int k = 0; k < phi->OperandCount(); k++) {
         if (phi->OperandAt(k) == GetConstantHole()) return false;
@@ -880,8 +942,20 @@
 }
 
 
+void HGraph::CollectPhis() {
+  int block_count = blocks_.length();
+  phi_list_ = new ZoneList<HPhi*>(block_count);
+  for (int i = 0; i < block_count; ++i) {
+    for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
+      HPhi* phi = blocks_[i]->phis()->at(j);
+      phi_list_->Add(phi);
+    }
+  }
+}
+
+
 void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
-  BitVector in_worklist(GetMaximumValueID());
+  BitVector in_worklist(GetMaximumValueID(), zone());
   for (int i = 0; i < worklist->length(); ++i) {
     ASSERT(!in_worklist.Contains(worklist->at(i)->id()));
     in_worklist.Add(worklist->at(i)->id());
@@ -905,7 +979,8 @@
 
 class HRangeAnalysis BASE_EMBEDDED {
  public:
-  explicit HRangeAnalysis(HGraph* graph) : graph_(graph), changed_ranges_(16) {}
+  explicit HRangeAnalysis(HGraph* graph) :
+      graph_(graph), zone_(graph->isolate()->zone()), changed_ranges_(16) { }
 
   void Analyze();
 
@@ -919,6 +994,7 @@
   void AddRange(HValue* value, Range* range);
 
   HGraph* graph_;
+  Zone* zone_;
   ZoneList<HValue*> changed_ranges_;
 };
 
@@ -934,7 +1010,7 @@
 
 
 void HRangeAnalysis::Analyze() {
-  HPhase phase("Range analysis", graph_);
+  HPhase phase("H_Range analysis", graph_);
   Analyze(graph_->entry_block());
 }
 
@@ -1005,14 +1081,14 @@
 
   if (op == Token::EQ || op == Token::EQ_STRICT) {
     // The same range has to apply for value.
-    new_range = range->Copy();
+    new_range = range->Copy(zone_);
   } else if (op == Token::LT || op == Token::LTE) {
-    new_range = range->CopyClearLower();
+    new_range = range->CopyClearLower(zone_);
     if (op == Token::LT) {
       new_range->AddConstant(-1);
     }
   } else if (op == Token::GT || op == Token::GTE) {
-    new_range = range->CopyClearUpper();
+    new_range = range->CopyClearUpper(zone_);
     if (op == Token::GT) {
       new_range->AddConstant(1);
     }
@@ -1027,7 +1103,7 @@
 void HRangeAnalysis::InferRange(HValue* value) {
   ASSERT(!value->HasRange());
   if (!value->representation().IsNone()) {
-    value->ComputeInitialRange();
+    value->ComputeInitialRange(zone_);
     Range* range = value->range();
     TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
                value->id(),
@@ -1048,7 +1124,7 @@
 
 void HRangeAnalysis::AddRange(HValue* value, Range* range) {
   Range* original_range = value->range();
-  value->AddNewRange(range);
+  value->AddNewRange(range, zone_);
   changed_ranges_.Add(value);
   Range* new_range = value->range();
   TraceRange("Updated range of %d set to [%d,%d]\n",
@@ -1089,10 +1165,10 @@
 }
 
 
-void HValueMap::Kill(int flags) {
-  int depends_flags = HValue::ConvertChangesToDependsFlags(flags);
-  if ((present_flags_ & depends_flags) == 0) return;
-  present_flags_ = 0;
+void HValueMap::Kill(GVNFlagSet flags) {
+  GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
+  if (!present_flags_.ContainsAnyOf(depends_flags)) return;
+  present_flags_.RemoveAll();
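+  // Walk each hash chain, dropping every value whose DependsOn flags
+  // intersect the kill set, and rebuild present_flags_ from the survivors.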
   for (int i = 0; i < array_size_; ++i) {
     HValue* value = array_[i].value;
     if (value != NULL) {
@@ -1101,7 +1177,8 @@
       int next;
       for (int current = array_[i].next; current != kNil; current = next) {
         next = lists_[current].next;
-        if ((lists_[current].value->flags() & depends_flags) != 0) {
+        HValue* value = lists_[current].value;
+        if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
           // Drop it.
           count_--;
           lists_[current].next = free_list_head_;
@@ -1110,13 +1187,14 @@
           // Keep it.
           lists_[current].next = kept;
           kept = current;
-          present_flags_ |= lists_[current].value->flags();
+          present_flags_.Add(value->gvn_flags());
         }
       }
       array_[i].next = kept;
 
       // Now possibly drop directly indexed element.
-      if ((array_[i].value->flags() & depends_flags) != 0) {  // Drop it.
+      value = array_[i].value;
+      if (value->gvn_flags().ContainsAnyOf(depends_flags)) {  // Drop it.
         count_--;
         int head = array_[i].next;
         if (head == kNil) {
@@ -1128,7 +1206,7 @@
           free_list_head_ = head;
         }
       } else {
-        present_flags_ |= array_[i].value->flags();  // Keep it.
+        present_flags_.Add(value->gvn_flags());  // Keep it.
       }
     }
   }
@@ -1330,28 +1408,33 @@
   explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
       : graph_(graph),
         info_(info),
+        removed_side_effects_(false),
         block_side_effects_(graph->blocks()->length()),
         loop_side_effects_(graph->blocks()->length()),
         visited_on_paths_(graph->zone(), graph->blocks()->length()) {
     ASSERT(info->isolate()->heap()->allow_allocation(false));
-    block_side_effects_.AddBlock(0, graph_->blocks()->length());
-    loop_side_effects_.AddBlock(0, graph_->blocks()->length());
+    block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length());
+    loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length());
   }
   ~HGlobalValueNumberer() {
     ASSERT(!info_->isolate()->heap()->allow_allocation(true));
   }
 
-  void Analyze();
+  // Returns true if values with side effects are removed.
+  bool Analyze();
 
  private:
-  int CollectSideEffectsOnPathsToDominatedBlock(HBasicBlock* dominator,
-                                                HBasicBlock* dominated);
+  GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
+      HBasicBlock* dominator,
+      HBasicBlock* dominated);
   void AnalyzeBlock(HBasicBlock* block, HValueMap* map);
   void ComputeBlockSideEffects();
   void LoopInvariantCodeMotion();
   void ProcessLoopBlock(HBasicBlock* block,
                         HBasicBlock* before_loop,
-                        int loop_kills);
+                        GVNFlagSet loop_kills,
+                        GVNFlagSet* accumulated_first_time_depends,
+                        GVNFlagSet* accumulated_first_time_changes);
   bool AllowCodeMotion();
   bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
 
@@ -1361,12 +1444,13 @@
 
   HGraph* graph_;
   CompilationInfo* info_;
+  bool removed_side_effects_;
 
   // A map of block IDs to their side effects.
-  ZoneList<int> block_side_effects_;
+  ZoneList<GVNFlagSet> block_side_effects_;
 
   // A map of loop header block IDs to their loop's side effects.
-  ZoneList<int> loop_side_effects_;
+  ZoneList<GVNFlagSet> loop_side_effects_;
 
   // Used when collecting side effects on paths from dominator to
   // dominated.
@@ -1374,39 +1458,53 @@
 };
 
 
-void HGlobalValueNumberer::Analyze() {
+bool HGlobalValueNumberer::Analyze() {
+  removed_side_effects_ = false;
   ComputeBlockSideEffects();
   if (FLAG_loop_invariant_code_motion) {
     LoopInvariantCodeMotion();
   }
   HValueMap* map = new(zone()) HValueMap();
   AnalyzeBlock(graph_->entry_block(), map);
+  return removed_side_effects_;
 }
 
 
 void HGlobalValueNumberer::ComputeBlockSideEffects() {
+  // The Analyze phase of GVN can be called multiple times. Clear loop side
+  // effects before computing them to erase the contents from previous Analyze
+  // passes.
+  for (int i = 0; i < loop_side_effects_.length(); ++i) {
+    loop_side_effects_[i].RemoveAll();
+  }
   for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
     // Compute side effects for the block.
     HBasicBlock* block = graph_->blocks()->at(i);
     HInstruction* instr = block->first();
     int id = block->block_id();
-    int side_effects = 0;
+    GVNFlagSet side_effects;
     while (instr != NULL) {
-      side_effects |= instr->ChangesFlags();
+      side_effects.Add(instr->ChangesFlags());
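+      // A block that unconditionally (soft-)deoptimizes never flows on into
+      // optimized code, so its side effects can be ignored by GVN.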
+      if (instr->IsSoftDeoptimize()) {
+        block_side_effects_[id].RemoveAll();
+        side_effects.RemoveAll();
+        break;
+      }
       instr = instr->next();
     }
-    block_side_effects_[id] |= side_effects;
+    block_side_effects_[id].Add(side_effects);
 
     // Loop headers are part of their loop.
     if (block->IsLoopHeader()) {
-      loop_side_effects_[id] |= side_effects;
+      loop_side_effects_[id].Add(side_effects);
     }
 
     // Propagate loop side effects upwards.
     if (block->HasParentLoopHeader()) {
       int header_id = block->parent_loop_header()->block_id();
-      loop_side_effects_[header_id] |=
-          block->IsLoopHeader() ? loop_side_effects_[id] : side_effects;
+      loop_side_effects_[header_id].Add(block->IsLoopHeader()
+                                        ? loop_side_effects_[id]
+                                        : side_effects);
     }
   }
 }
@@ -1416,50 +1514,114 @@
   for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
     HBasicBlock* block = graph_->blocks()->at(i);
     if (block->IsLoopHeader()) {
-      int side_effects = loop_side_effects_[block->block_id()];
+      GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
       TraceGVN("Try loop invariant motion for block B%d effects=0x%x\n",
                block->block_id(),
-               side_effects);
+               side_effects.ToIntegral());
 
+      GVNFlagSet accumulated_first_time_depends;
+      GVNFlagSet accumulated_first_time_changes;
       HBasicBlock* last = block->loop_information()->GetLastBackEdge();
       for (int j = block->block_id(); j <= last->block_id(); ++j) {
-        ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects);
+        ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects,
+                         &accumulated_first_time_depends,
+                         &accumulated_first_time_changes);
       }
     }
   }
 }
 
 
-void HGlobalValueNumberer::ProcessLoopBlock(HBasicBlock* block,
-                                            HBasicBlock* loop_header,
-                                            int loop_kills) {
+void HGlobalValueNumberer::ProcessLoopBlock(
+    HBasicBlock* block,
+    HBasicBlock* loop_header,
+    GVNFlagSet loop_kills,
+    GVNFlagSet* first_time_depends,
+    GVNFlagSet* first_time_changes) {
   HBasicBlock* pre_header = loop_header->predecessors()->at(0);
-  int depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
+  GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
   TraceGVN("Loop invariant motion for B%d depends_flags=0x%x\n",
            block->block_id(),
-           depends_flags);
+           depends_flags.ToIntegral());
   HInstruction* instr = block->first();
   while (instr != NULL) {
     HInstruction* next = instr->next();
-    if (instr->CheckFlag(HValue::kUseGVN) &&
-        (instr->flags() & depends_flags) == 0) {
-      TraceGVN("Checking instruction %d (%s)\n",
+    bool hoisted = false;
+    if (instr->CheckFlag(HValue::kUseGVN)) {
+      TraceGVN("Checking instruction %d (%s) instruction GVN flags 0x%X, "
+               "loop kills 0x%X\n",
                instr->id(),
-               instr->Mnemonic());
-      bool inputs_loop_invariant = true;
-      for (int i = 0; i < instr->OperandCount(); ++i) {
-        if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
-          inputs_loop_invariant = false;
+               instr->Mnemonic(),
+               instr->gvn_flags().ToIntegral(),
+               depends_flags.ToIntegral());
+      bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
+      if (instr->IsTransitionElementsKind()) {
+        // It's possible to hoist transitions out of a loop as long as the
+        // hoisting wouldn't move the transition past an instruction that
+        // depends on one of its changes, or past any instruction that might
+        // change an object's map or elements contents.
+        GVNFlagSet changes = instr->ChangesFlags();
+        GVNFlagSet hoist_depends_blockers =
+            HValue::ConvertChangesToDependsFlags(changes);
+        // In addition, the transition must not be hoisted above instructions
+        // that change an object's map or that store into an elements backing
+        // store the transition might modify.
+        GVNFlagSet hoist_change_blockers = changes;
+        hoist_change_blockers.Add(kChangesMaps);
+        HTransitionElementsKind* trans = HTransitionElementsKind::cast(instr);
+        if (trans->original_map()->has_fast_double_elements()) {
+          hoist_change_blockers.Add(kChangesDoubleArrayElements);
         }
+        if (trans->transitioned_map()->has_fast_double_elements()) {
+          hoist_change_blockers.Add(kChangesArrayElements);
+        }
+        TraceGVN("Checking dependencies on HTransitionElementsKind %d (%s) "
+                 "hoist depends blockers 0x%X, hoist change blockers 0x%X, "
+                 "accumulated depends 0x%X, accumulated changes 0x%X\n",
+                 instr->id(),
+                 instr->Mnemonic(),
+                 hoist_depends_blockers.ToIntegral(),
+                 hoist_change_blockers.ToIntegral(),
+                 first_time_depends->ToIntegral(),
+                 first_time_changes->ToIntegral());
+        // A transition can be hoisted out of the current loop only if its
+        // block dominates all subsequent blocks in the same loop and no
+        // instruction with conflicting Changes/DependsOn flags intervenes
+        // between the beginning of the loop header and the transition.
+        bool in_nested_loop = block != loop_header &&
+            ((block->parent_loop_header() != loop_header) ||
+             block->IsLoopHeader());
+        can_hoist = !in_nested_loop &&
+            block->IsLoopSuccessorDominator() &&
+            !first_time_depends->ContainsAnyOf(hoist_depends_blockers) &&
+            !first_time_changes->ContainsAnyOf(hoist_change_blockers);
       }
 
-      if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
-        TraceGVN("Found loop invariant instruction %d\n", instr->id());
-        // Move the instruction out of the loop.
-        instr->Unlink();
-        instr->InsertBefore(pre_header->end());
+      if (can_hoist) {
+        bool inputs_loop_invariant = true;
+        for (int i = 0; i < instr->OperandCount(); ++i) {
+          if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
+            inputs_loop_invariant = false;
+          }
+        }
+
+        if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
+          TraceGVN("Hoisting loop invariant instruction %d\n", instr->id());
+          // Move the instruction out of the loop.
+          instr->Unlink();
+          instr->InsertBefore(pre_header->end());
+          if (instr->HasSideEffects()) removed_side_effects_ = true;
+          hoisted = true;
+        }
       }
     }
+    if (!hoisted) {
+      // If an instruction is not hoisted, we have to account for its side
+      // effects when hoisting later HTransitionElementsKind instructions.
+      first_time_depends->Add(instr->DependsOnFlags());
+      first_time_changes->Add(instr->ChangesFlags());
+    }
     instr = next;
   }
 }
@@ -1478,20 +1640,20 @@
 }
 
 
-int HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
+GVNFlagSet HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
     HBasicBlock* dominator, HBasicBlock* dominated) {
-  int side_effects = 0;
+  GVNFlagSet side_effects;
   for (int i = 0; i < dominated->predecessors()->length(); ++i) {
     HBasicBlock* block = dominated->predecessors()->at(i);
     if (dominator->block_id() < block->block_id() &&
         block->block_id() < dominated->block_id() &&
         visited_on_paths_.Add(block->block_id())) {
-      side_effects |= block_side_effects_[block->block_id()];
+      side_effects.Add(block_side_effects_[block->block_id()]);
       if (block->IsLoopHeader()) {
-        side_effects |= loop_side_effects_[block->block_id()];
+        side_effects.Add(loop_side_effects_[block->block_id()]);
       }
-      side_effects |= CollectSideEffectsOnPathsToDominatedBlock(
-          dominator, block);
+      side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
+          dominator, block));
     }
   }
   return side_effects;
@@ -1512,13 +1674,14 @@
   HInstruction* instr = block->first();
   while (instr != NULL) {
     HInstruction* next = instr->next();
-    int flags = instr->ChangesFlags();
-    if (flags != 0) {
-      ASSERT(!instr->CheckFlag(HValue::kUseGVN));
+    GVNFlagSet flags = instr->ChangesFlags();
+    if (!flags.IsEmpty()) {
       // Clear all instructions in the map that are affected by side effects.
       map->Kill(flags);
       TraceGVN("Instruction %d kills\n", instr->id());
-    } else if (instr->CheckFlag(HValue::kUseGVN)) {
+    }
+    if (instr->CheckFlag(HValue::kUseGVN)) {
+      ASSERT(!instr->HasObservableSideEffects());
       HValue* other = map->Lookup(instr);
       if (other != NULL) {
         ASSERT(instr->Equals(other) && other->Equals(instr));
@@ -1527,6 +1690,7 @@
                  instr->Mnemonic(),
                  other->id(),
                  other->Mnemonic());
+        if (instr->HasSideEffects()) removed_side_effects_ = true;
         instr->DeleteAndReplaceWith(other);
       } else {
         map->Add(instr);
@@ -1562,7 +1726,9 @@
 class HInferRepresentation BASE_EMBEDDED {
  public:
   explicit HInferRepresentation(HGraph* graph)
-      : graph_(graph), worklist_(8), in_worklist_(graph->GetMaximumValueID()) {}
+      : graph_(graph),
+        worklist_(8),
+        in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
 
   void Analyze();
 
@@ -1600,6 +1766,12 @@
   ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
   Representation inferred = current->InferredRepresentation();
   if (inferred.IsSpecialization()) {
+    if (FLAG_trace_representation) {
+      PrintF("Changing #%d representation %s -> %s based on inputs\n",
+             current->id(),
+             r.Mnemonic(),
+             inferred.Mnemonic());
+    }
     current->ChangeRepresentation(inferred);
     AddDependantsToWorklist(current);
   }
@@ -1627,6 +1799,12 @@
   Representation new_rep = TryChange(value);
   if (!new_rep.IsNone()) {
     if (!value->representation().Equals(new_rep)) {
+      if (FLAG_trace_representation) {
+        PrintF("Changing #%d representation %s -> %s based on uses\n",
+               value->id(),
+               r.Mnemonic(),
+               new_rep.Mnemonic());
+      }
       value->ChangeRepresentation(new_rep);
       AddDependantsToWorklist(value);
     }
@@ -1643,7 +1821,7 @@
     Representation rep = use->RequiredInputRepresentation(it.index());
     if (rep.IsNone()) continue;
     if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
-    ++use_count[rep.kind()];
+    use_count[rep.kind()] += use->LoopWeight();
   }
   int tagged_count = use_count[Representation::kTagged];
   int double_count = use_count[Representation::kDouble];
@@ -1656,7 +1834,7 @@
   }
 
   // Prefer unboxing over boxing, the latter is more expensive.
-  if (tagged_count > non_tagged_count) Representation::None();
+  if (tagged_count > non_tagged_count) return Representation::None();
 
   // Prefer Integer32 over Double, if possible.
   if (int32_count > 0 && value->IsConvertibleToInteger()) {
@@ -1670,7 +1848,7 @@
 
 
 void HInferRepresentation::Analyze() {
-  HPhase phase("Infer representations", graph_);
+  HPhase phase("H_Infer representations", graph_);
 
   // (1) Initialize bit vectors and count real uses. Each phi gets a
   // bit-vector of length <number of phis>.
@@ -1679,7 +1857,7 @@
   ZoneList<BitVector*> connected_phis(phi_count);
   for (int i = 0; i < phi_count; ++i) {
     phi_list->at(i)->InitRealUses(i);
-    BitVector* connected_set = new(zone()) BitVector(phi_count);
+    BitVector* connected_set = new(zone()) BitVector(phi_count, graph_->zone());
     connected_set->Add(i);
     connected_phis.Add(connected_set);
   }
@@ -1749,7 +1927,7 @@
 
 
 void HGraph::InitializeInferredTypes() {
-  HPhase phase("Inferring types", this);
+  HPhase phase("H_Inferring types", this);
   InitializeInferredTypes(0, this->blocks_.length() - 1);
 }
 
@@ -1851,7 +2029,7 @@
   }
 
   if (new_value == NULL) {
-    new_value = new(zone()) HChange(value, value->representation(), to,
+    new_value = new(zone()) HChange(value, to,
                                     is_truncating, deoptimize_on_undefined);
   }
 
@@ -1886,8 +2064,7 @@
 
 
 void HGraph::InsertRepresentationChanges() {
-  HPhase phase("Insert representation changes", this);
-
+  HPhase phase("H_Representation changes", this);
 
   // Compute truncation flag for phis: Initially assume that all
   // int32-phis allow truncation and iteratively remove the ones that
@@ -1906,13 +2083,9 @@
     for (int i = 0; i < phi_list()->length(); i++) {
       HPhi* phi = phi_list()->at(i);
       if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
-      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
-        HValue* use = it.value();
-        if (!use->CheckFlag(HValue::kTruncatingToInt32)) {
-          phi->ClearFlag(HValue::kTruncatingToInt32);
-          change = true;
-          break;
-        }
+      if (!phi->CheckUsesForFlag(HValue::kTruncatingToInt32)) {
+        phi->ClearFlag(HValue::kTruncatingToInt32);
+        change = true;
       }
     }
   }
@@ -1947,7 +2120,7 @@
 
 
 void HGraph::MarkDeoptimizeOnUndefined() {
-  HPhase phase("MarkDeoptimizeOnUndefined", this);
+  HPhase phase("H_MarkDeoptimizeOnUndefined", this);
   // Compute DeoptimizeOnUndefined flag for phis.
   // Any phi that can reach a use with DeoptimizeOnUndefined set must
   // have DeoptimizeOnUndefined set.  Currently only HCompareIDAndBranch, with
@@ -1969,7 +2142,7 @@
 
 
 void HGraph::ComputeMinusZeroChecks() {
-  BitVector visited(GetMaximumValueID());
+  BitVector visited(GetMaximumValueID(), zone());
   for (int i = 0; i < blocks_.length(); ++i) {
     for (HInstruction* current = blocks_[i]->first();
          current != NULL;
@@ -1996,11 +2169,13 @@
 // a (possibly inlined) function.
 FunctionState::FunctionState(HGraphBuilder* owner,
                              CompilationInfo* info,
-                             TypeFeedbackOracle* oracle)
+                             TypeFeedbackOracle* oracle,
+                             ReturnHandlingFlag return_handling)
     : owner_(owner),
       compilation_info_(info),
       oracle_(oracle),
       call_context_(NULL),
+      return_handling_(return_handling),
       function_return_(NULL),
       test_context_(NULL),
       outer_(owner->function_state()) {
@@ -2043,6 +2218,7 @@
       for_typeof_(false) {
   owner->set_ast_context(this);  // Push.
 #ifdef DEBUG
+  ASSERT(owner->environment()->frame_type() == JS_FUNCTION);
   original_length_ = owner->environment()->length();
 #endif
 }
@@ -2056,14 +2232,16 @@
 EffectContext::~EffectContext() {
   ASSERT(owner()->HasStackOverflow() ||
          owner()->current_block() == NULL ||
-         owner()->environment()->length() == original_length_);
+         (owner()->environment()->length() == original_length_ &&
+          owner()->environment()->frame_type() == JS_FUNCTION));
 }
 
 
 ValueContext::~ValueContext() {
   ASSERT(owner()->HasStackOverflow() ||
          owner()->current_block() == NULL ||
-         owner()->environment()->length() == original_length_ + 1);
+         (owner()->environment()->length() == original_length_ + 1 &&
+          owner()->environment()->frame_type() == JS_FUNCTION));
 }
 
 
@@ -2090,12 +2268,12 @@
 void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
   ASSERT(!instr->IsControlInstruction());
   owner()->AddInstruction(instr);
-  if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+  if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
 }
 
 
 void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
-  ASSERT(!instr->HasSideEffects());
+  ASSERT(!instr->HasObservableSideEffects());
   HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
   instr->SetSuccessorAt(0, empty_true);
@@ -2113,12 +2291,12 @@
   }
   owner()->AddInstruction(instr);
   owner()->Push(instr);
-  if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+  if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
 }
 
 
 void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
-  ASSERT(!instr->HasSideEffects());
+  ASSERT(!instr->HasObservableSideEffects());
   if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
     return owner()->Bailout("bad value context for arguments object value");
   }
@@ -2143,7 +2321,7 @@
   builder->AddInstruction(instr);
   // We expect a simulate after every expression with side effects, though
   // this one isn't actually needed (and wouldn't work if it were targeted).
-  if (instr->HasSideEffects()) {
+  if (instr->HasObservableSideEffects()) {
     builder->Push(instr);
     builder->AddSimulate(ast_id);
     builder->Pop();
@@ -2153,14 +2331,14 @@
 
 
 void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
-  ASSERT(!instr->HasSideEffects());
+  ASSERT(!instr->HasObservableSideEffects());
   HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
   instr->SetSuccessorAt(0, empty_true);
   instr->SetSuccessorAt(1, empty_false);
   owner()->current_block()->Finish(instr);
-  empty_true->Goto(if_true());
-  empty_false->Goto(if_false());
+  empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
+  empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
   owner()->set_current_block(NULL);
 }
 
@@ -2181,8 +2359,8 @@
   HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
   builder->current_block()->Finish(test);
 
-  empty_true->Goto(if_true());
-  empty_false->Goto(if_false());
+  empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
+  empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
   builder->set_current_block(NULL);
 }
 
@@ -2268,7 +2446,7 @@
   if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
 
   {
-    HPhase phase("Block building");
+    HPhase phase("H_Block building");
     current_block_ = graph()->entry_block();
 
     Scope* scope = info()->scope();
@@ -2276,7 +2454,11 @@
       Bailout("function with illegal redeclaration");
       return NULL;
     }
-    SetupScope(scope);
+    if (scope->calls_eval()) {
+      Bailout("function calls eval");
+      return NULL;
+    }
+    SetUpScope(scope);
 
     // Add an edge to the body entry.  This is warty: the graph's start
     // environment will be used by the Lithium translation as the initial
@@ -2302,7 +2484,7 @@
     // Handle implicit declaration of the function name in named function
     // expressions before other declarations.
     if (scope->is_function_scope() && scope->function() != NULL) {
-      HandleDeclaration(scope->function(), Variable::CONST, NULL);
+      HandleDeclaration(scope->function(), CONST, NULL, NULL);
     }
     VisitDeclarations(scope->declarations());
     AddSimulate(AstNode::kDeclarationsId);
@@ -2323,16 +2505,31 @@
 
   graph()->OrderBlocks();
   graph()->AssignDominators();
+
+#ifdef DEBUG
+  // Do a full verify after building the graph and computing dominators.
+  graph()->Verify(true);
+#endif
+
   graph()->PropagateDeoptimizingMark();
+  if (!graph()->CheckConstPhiUses()) {
+    Bailout("Unsupported phi use of const variable");
+    return NULL;
+  }
   graph()->EliminateRedundantPhis();
-  if (!graph()->CheckPhis()) {
-    Bailout("Unsupported phi use of arguments object");
+  if (!graph()->CheckArgumentsPhiUses()) {
+    Bailout("Unsupported phi use of arguments");
     return NULL;
   }
   if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
-  if (!graph()->CollectPhis()) {
-    Bailout("Unsupported phi use of uninitialized constant");
-    return NULL;
+  graph()->CollectPhis();
+
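+  // Let each OSR value know which loop-header phi it feeds; a phi's
+  // merged_index is the environment slot it shares with its OSR value.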
+  if (graph()->has_osr_loop_entry()) {
+    const ZoneList<HPhi*>* phis = graph()->osr_loop_entry()->phis();
+    for (int j = 0; j < phis->length(); j++) {
+      HPhi* phi = phis->at(j);
+      graph()->osr_values()->at(phi->merged_index())->set_incoming_value(phi);
+    }
   }
 
   HInferRepresentation rep(graph());
@@ -2346,9 +2543,16 @@
 
   // Perform common subexpression elimination and loop-invariant code motion.
   if (FLAG_use_gvn) {
-    HPhase phase("Global value numbering", graph());
+    HPhase phase("H_Global value numbering", graph());
     HGlobalValueNumberer gvn(graph(), info());
-    gvn.Analyze();
+    bool removed_side_effects = gvn.Analyze();
+    // Trigger a second analysis pass to further eliminate duplicate values that
+    // could only be discovered by removing side-effect-generating instructions
+    // during the first pass.
+    if (FLAG_smi_only_arrays && removed_side_effects) {
+      removed_side_effects = gvn.Analyze();
+      ASSERT(!removed_side_effects);
+    }
   }
 
   if (FLAG_use_range) {
@@ -2372,7 +2576,7 @@
 
 
 void HGraph::ReplaceCheckedValues() {
-  HPhase phase("Replace checked values", this);
+  HPhase phase("H_Replace checked values", this);
   for (int i = 0; i < blocks()->length(); ++i) {
     HInstruction* instr = blocks()->at(i)->first();
     while (instr != NULL) {
@@ -2412,8 +2616,8 @@
 }
 
 
-template <int V>
-HInstruction* HGraphBuilder::PreProcessCall(HCall<V>* call) {
+template <class Instruction>
+HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
   int count = call->argument_count();
   ZoneList<HValue*> arguments(count);
   for (int i = 0; i < count; ++i) {
@@ -2427,12 +2631,16 @@
 }
 
 
-void HGraphBuilder::SetupScope(Scope* scope) {
+void HGraphBuilder::SetUpScope(Scope* scope) {
   HConstant* undefined_constant = new(zone()) HConstant(
       isolate()->factory()->undefined_value(), Representation::Tagged());
   AddInstruction(undefined_constant);
   graph_->set_undefined_constant(undefined_constant);
 
+  HArgumentsObject* object = new(zone()) HArgumentsObject;
+  AddInstruction(object);
+  graph()->SetArgumentsObject(object);
+
   // Set the initial values of parameters including "this".  "This" has
   // parameter index 0.
   ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
@@ -2459,10 +2667,9 @@
     if (!scope->arguments()->IsStackAllocated()) {
       return Bailout("context-allocated arguments");
     }
-    HArgumentsObject* object = new(zone()) HArgumentsObject;
-    AddInstruction(object);
-    graph()->SetArgumentsObject(object);
-    environment()->Bind(scope->arguments(), object);
+
+    environment()->Bind(scope->arguments(),
+                        graph()->GetArgumentsObject());
   }
 }
 
@@ -2566,12 +2773,20 @@
 
 HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
     BreakableStatement* stmt,
-    BreakType type) {
+    BreakType type,
+    int* drop_extra) {
+  *drop_extra = 0;
   BreakAndContinueScope* current = this;
   while (current != NULL && current->info()->target() != stmt) {
+    *drop_extra += current->info()->drop_extra();
     current = current->next();
   }
   ASSERT(current != NULL);  // Always found (unless stack is malformed).
+
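+  // A break leaves the target statement entirely, so it must also drop the
+  // extra values the target itself keeps live on the expression stack (e.g.
+  // the for-in loop state); a continue stays inside the target and keeps
+  // them.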
+  if (type == BREAK) {
+    *drop_extra += current->info()->drop_extra();
+  }
+
   HBasicBlock* block = NULL;
   switch (type) {
     case BREAK:
@@ -2599,7 +2814,11 @@
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  HBasicBlock* continue_block = break_scope()->Get(stmt->target(), CONTINUE);
+  int drop_extra = 0;
+  HBasicBlock* continue_block = break_scope()->Get(stmt->target(),
+                                                   CONTINUE,
+                                                   &drop_extra);
+  Drop(drop_extra);
   current_block()->Goto(continue_block);
   set_current_block(NULL);
 }
@@ -2609,7 +2828,11 @@
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  HBasicBlock* break_block = break_scope()->Get(stmt->target(), BREAK);
+  int drop_extra = 0;
+  HBasicBlock* break_block = break_scope()->Get(stmt->target(),
+                                                BREAK,
+                                                &drop_extra);
+  Drop(drop_extra);
   current_block()->Goto(break_block);
   set_current_block(NULL);
 }
@@ -2625,7 +2848,38 @@
     CHECK_ALIVE(VisitForValue(stmt->expression()));
     HValue* result = environment()->Pop();
     current_block()->FinishExit(new(zone()) HReturn(result));
-    set_current_block(NULL);
+  } else if (function_state()->is_construct()) {
+    // Return from an inlined construct call.  In a test context the return
+    // value will always evaluate to true, in a value context the return value
+    // needs to be a JSObject.
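+    // For example, 'new F()' yields F's return value if that value is an
+    // object, and the freshly allocated receiver otherwise.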
+    if (context->IsTest()) {
+      TestContext* test = TestContext::cast(context);
+      CHECK_ALIVE(VisitForEffect(stmt->expression()));
+      current_block()->Goto(test->if_true(), function_state()->drop_extra());
+    } else if (context->IsEffect()) {
+      CHECK_ALIVE(VisitForEffect(stmt->expression()));
+      current_block()->Goto(function_return(), function_state()->drop_extra());
+    } else {
+      ASSERT(context->IsValue());
+      CHECK_ALIVE(VisitForValue(stmt->expression()));
+      HValue* return_value = Pop();
+      HValue* receiver = environment()->Lookup(0);
+      HHasInstanceTypeAndBranch* typecheck =
+          new(zone()) HHasInstanceTypeAndBranch(return_value,
+                                                FIRST_SPEC_OBJECT_TYPE,
+                                                LAST_SPEC_OBJECT_TYPE);
+      HBasicBlock* if_spec_object = graph()->CreateBasicBlock();
+      HBasicBlock* not_spec_object = graph()->CreateBasicBlock();
+      typecheck->SetSuccessorAt(0, if_spec_object);
+      typecheck->SetSuccessorAt(1, not_spec_object);
+      current_block()->Finish(typecheck);
+      if_spec_object->AddLeaveInlined(return_value,
+                                      function_return(),
+                                      function_state()->drop_extra());
+      not_spec_object->AddLeaveInlined(receiver,
+                                       function_return(),
+                                       function_state()->drop_extra());
+    }
   } else {
     // Return from an inlined function, visit the subexpression in the
     // expression context of the call.
@@ -2636,15 +2890,17 @@
                       test->if_false());
     } else if (context->IsEffect()) {
       CHECK_ALIVE(VisitForEffect(stmt->expression()));
-      current_block()->Goto(function_return());
+      current_block()->Goto(function_return(), function_state()->drop_extra());
     } else {
       ASSERT(context->IsValue());
       CHECK_ALIVE(VisitForValue(stmt->expression()));
-      HValue* return_value = environment()->Pop();
-      current_block()->AddLeaveInlined(return_value, function_return());
+      HValue* return_value = Pop();
+      current_block()->AddLeaveInlined(return_value,
+                                       function_return(),
+                                       function_state()->drop_extra());
     }
-    set_current_block(NULL);
   }
+  set_current_block(NULL);
 }
 
 
@@ -2669,43 +2925,98 @@
     return Bailout("SwitchStatement: too many clauses");
   }
 
+  HValue* context = environment()->LookupContext();
+
   CHECK_ALIVE(VisitForValue(stmt->tag()));
   AddSimulate(stmt->EntryId());
   HValue* tag_value = Pop();
   HBasicBlock* first_test_block = current_block();
 
-  // 1. Build all the tests, with dangling true branches.  Unconditionally
-  // deoptimize if we encounter a non-smi comparison.
+  SwitchType switch_type = UNKNOWN_SWITCH;
+
+  // 1. Extract the clause label type (all-smi or all-string).
   for (int i = 0; i < clause_count; ++i) {
     CaseClause* clause = clauses->at(i);
     if (clause->is_default()) continue;
-    if (!clause->label()->IsSmiLiteral()) {
-      return Bailout("SwitchStatement: non-literal switch label");
+
+    if (switch_type == UNKNOWN_SWITCH) {
+      if (clause->label()->IsSmiLiteral()) {
+        switch_type = SMI_SWITCH;
+      } else if (clause->label()->IsStringLiteral()) {
+        switch_type = STRING_SWITCH;
+      } else {
+        return Bailout("SwitchStatement: non-literal switch label");
+      }
+    } else if ((switch_type == STRING_SWITCH &&
+                !clause->label()->IsStringLiteral()) ||
+               (switch_type == SMI_SWITCH &&
+                !clause->label()->IsSmiLiteral())) {
+      return Bailout("SwitchStatemnt: mixed label types are not supported");
+    }
+  }
+
+  HUnaryControlInstruction* string_check = NULL;
+  HBasicBlock* not_string_block = NULL;
+
+  // If all clauses are string literals, check that the tag value is a
+  // string; a non-string tag branches to not_string_block, which is joined
+  // to the default clause (or the switch exit) below.
+  if (switch_type == STRING_SWITCH) {
+    string_check = new(zone()) HIsStringAndBranch(tag_value);
+    first_test_block = graph()->CreateBasicBlock();
+    not_string_block = graph()->CreateBasicBlock();
+
+    string_check->SetSuccessorAt(0, first_test_block);
+    string_check->SetSuccessorAt(1, not_string_block);
+    current_block()->Finish(string_check);
+
+    set_current_block(first_test_block);
+  }
+
+  // 2. Build all the tests, with dangling true branches
+  int default_id = AstNode::kNoNumber;
+  for (int i = 0; i < clause_count; ++i) {
+    CaseClause* clause = clauses->at(i);
+    if (clause->is_default()) {
+      default_id = clause->EntryId();
+      continue;
+    }
+    if (switch_type == SMI_SWITCH) {
+      clause->RecordTypeFeedback(oracle());
     }
 
-    // Unconditionally deoptimize on the first non-smi compare.
-    clause->RecordTypeFeedback(oracle());
-    if (!clause->IsSmiCompare()) {
-      // Finish with deoptimize and add uses of enviroment values to
-      // account for invisible uses.
-      current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
-      set_current_block(NULL);
-      break;
-    }
-
-    // Otherwise generate a compare and branch.
+    // Generate a compare and branch.
     CHECK_ALIVE(VisitForValue(clause->label()));
     HValue* label_value = Pop();
-    HCompareIDAndBranch* compare =
-        new(zone()) HCompareIDAndBranch(tag_value,
-                                        label_value,
-                                        Token::EQ_STRICT);
-    compare->SetInputRepresentation(Representation::Integer32());
-    HBasicBlock* body_block = graph()->CreateBasicBlock();
+
     HBasicBlock* next_test_block = graph()->CreateBasicBlock();
+    HBasicBlock* body_block = graph()->CreateBasicBlock();
+
+    HControlInstruction* compare;
+
+    if (switch_type == SMI_SWITCH) {
+      if (!clause->IsSmiCompare()) {
+        // Finish with deoptimize and add uses of environment values to
+        // account for invisible uses.
+        current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
+        set_current_block(NULL);
+        break;
+      }
+
+      HCompareIDAndBranch* compare_id =
+          new(zone()) HCompareIDAndBranch(tag_value,
+                                          label_value,
+                                          Token::EQ_STRICT);
+      compare_id->SetInputRepresentation(Representation::Integer32());
+      compare = compare_id;
+    } else {
+      compare = new(zone()) HStringCompareAndBranch(context, tag_value,
+                                                     label_value,
+                                                     Token::EQ_STRICT);
+    }
+
     compare->SetSuccessorAt(0, body_block);
     compare->SetSuccessorAt(1, next_test_block);
     current_block()->Finish(compare);
+
     set_current_block(next_test_block);
   }
 
@@ -2713,10 +3024,18 @@
   // exit.  This block is NULL if we deoptimized.
   HBasicBlock* last_block = current_block();
 
-  // 2. Loop over the clauses and the linked list of tests in lockstep,
+  if (not_string_block != NULL) {
+    int join_id = (default_id != AstNode::kNoNumber)
+        ? default_id
+        : stmt->ExitId();
+    last_block = CreateJoin(last_block, not_string_block, join_id);
+  }
+
+  // 3. Loop over the clauses and the linked list of tests in lockstep,
   // translating the clause bodies.
   HBasicBlock* curr_test_block = first_test_block;
   HBasicBlock* fall_through_block = NULL;
+
   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
     for (int i = 0; i < clause_count; ++i) {
@@ -2785,8 +3104,8 @@
 }
 
 
-void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
-  if (!HasOsrEntryAt(statement)) return;
+bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
+  if (!HasOsrEntryAt(statement)) return false;
 
   HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
   HBasicBlock* osr_entry = graph()->CreateBasicBlock();
@@ -2799,15 +3118,30 @@
 
   set_current_block(osr_entry);
   int osr_entry_id = statement->OsrEntryId();
-  // We want the correct environment at the OsrEntry instruction.  Build
-  // it explicitly.  The expression stack should be empty.
-  ASSERT(environment()->ExpressionStackIsEmpty());
-  for (int i = 0; i < environment()->length(); ++i) {
+  int first_expression_index = environment()->first_expression_index();
+  int length = environment()->length();
+  ZoneList<HUnknownOSRValue*>* osr_values =
+      new(zone()) ZoneList<HUnknownOSRValue*>(length);
+
+  for (int i = 0; i < first_expression_index; ++i) {
     HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
     AddInstruction(osr_value);
     environment()->Bind(i, osr_value);
+    osr_values->Add(osr_value);
   }
 
+  if (first_expression_index != length) {
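+    // Expression stack slots cannot be rebound in place, so drop them and
+    // push fresh HUnknownOSRValues instead.  The stack is non-empty at
+    // for-in loop headers, which keep their loop state on it.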
+    environment()->Drop(length - first_expression_index);
+    for (int i = first_expression_index; i < length; ++i) {
+      HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
+      AddInstruction(osr_value);
+      environment()->Push(osr_value);
+      osr_values->Add(osr_value);
+    }
+  }
+
+  graph()->set_osr_values(osr_values);
+
   AddSimulate(osr_entry_id);
   AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
   HContext* context = new(zone()) HContext;
@@ -2816,6 +3150,7 @@
   current_block()->Goto(loop_predecessor);
   loop_predecessor->SetJoinId(statement->EntryId());
   set_current_block(loop_predecessor);
+  return true;
 }
 
 
@@ -2839,10 +3174,11 @@
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
   ASSERT(current_block() != NULL);
-  PreProcessOsrEntry(stmt);
+  bool osr_entry = PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
   current_block()->Goto(loop_entry);
   set_current_block(loop_entry);
+  if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
 
   BreakAndContinueInfo break_info(stmt);
   CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
@@ -2881,10 +3217,12 @@
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
   ASSERT(current_block() != NULL);
-  PreProcessOsrEntry(stmt);
+  bool osr_entry = PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
   current_block()->Goto(loop_entry);
   set_current_block(loop_entry);
+  if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
   // If the condition is constant true, do not generate a branch.
   HBasicBlock* loop_successor = NULL;
@@ -2905,7 +3243,6 @@
 
   BreakAndContinueInfo break_info(stmt);
   if (current_block() != NULL) {
-    BreakAndContinueScope push(&break_info, this);
     CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
   }
   HBasicBlock* body_exit =
@@ -2927,10 +3264,11 @@
     CHECK_ALIVE(Visit(stmt->init()));
   }
   ASSERT(current_block() != NULL);
-  PreProcessOsrEntry(stmt);
+  bool osr_entry = PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
   current_block()->Goto(loop_entry);
   set_current_block(loop_entry);
+  if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
 
   HBasicBlock* loop_successor = NULL;
   if (stmt->cond() != NULL) {
@@ -2950,7 +3288,6 @@
 
   BreakAndContinueInfo break_info(stmt);
   if (current_block() != NULL) {
-    BreakAndContinueScope push(&break_info, this);
     CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
   }
   HBasicBlock* body_exit =
@@ -2975,7 +3312,119 @@
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  return Bailout("ForInStatement");
+
+  if (!FLAG_optimize_for_in) {
+    return Bailout("ForInStatement optimization is disabled");
+  }
+
+  if (!oracle()->IsForInFastCase(stmt)) {
+    return Bailout("ForInStatement is not fast case");
+  }
+
+  if (!stmt->each()->IsVariableProxy() ||
+      !stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
+    return Bailout("ForInStatement with non-local each variable");
+  }
+
+  Variable* each_var = stmt->each()->AsVariableProxy()->var();
+
+  CHECK_ALIVE(VisitForValue(stmt->enumerable()));
+  HValue* enumerable = Top();  // Leave enumerable at the top.
+
+  HInstruction* map = AddInstruction(new(zone()) HForInPrepareMap(
+      environment()->LookupContext(), enumerable));
+  AddSimulate(stmt->PrepareId());
+
+  HInstruction* array = AddInstruction(
+      new(zone()) HForInCacheArray(
+          enumerable,
+          map,
+          DescriptorArray::kEnumCacheBridgeCacheIndex));
+
+  HInstruction* array_length = AddInstruction(
+      new(zone()) HFixedArrayBaseLength(array));
+
+  HInstruction* start_index = AddInstruction(new(zone()) HConstant(
+      Handle<Object>(Smi::FromInt(0)), Representation::Integer32()));
+
+  Push(map);
+  Push(array);
+  Push(array_length);
+  Push(start_index);
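+
+  // The expression stack now holds, from bottom to top: enumerable, map,
+  // array (the enum cache), array_length and index.  These five values stay
+  // live for the whole loop and are dropped again on exit and on break.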
+
+  HInstruction* index_cache = AddInstruction(
+      new(zone()) HForInCacheArray(
+          enumerable,
+          map,
+          DescriptorArray::kEnumCacheBridgeIndicesCacheIndex));
+  HForInCacheArray::cast(array)->set_index_cache(
+      HForInCacheArray::cast(index_cache));
+
+  bool osr_entry = PreProcessOsrEntry(stmt);
+  HBasicBlock* loop_entry = CreateLoopHeaderBlock();
+  current_block()->Goto(loop_entry);
+  set_current_block(loop_entry);
+  if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
+
+  HValue* index = environment()->ExpressionStackAt(0);
+  HValue* limit = environment()->ExpressionStackAt(1);
+
+  // Check that we still have more keys.
+  HCompareIDAndBranch* compare_index =
+      new(zone()) HCompareIDAndBranch(index, limit, Token::LT);
+  compare_index->SetInputRepresentation(Representation::Integer32());
+
+  HBasicBlock* loop_body = graph()->CreateBasicBlock();
+  HBasicBlock* loop_successor = graph()->CreateBasicBlock();
+
+  compare_index->SetSuccessorAt(0, loop_body);
+  compare_index->SetSuccessorAt(1, loop_successor);
+  current_block()->Finish(compare_index);
+
+  set_current_block(loop_successor);
+  Drop(5);
+
+  set_current_block(loop_body);
+
+  HValue* key = AddInstruction(
+      new(zone()) HLoadKeyedFastElement(
+          environment()->ExpressionStackAt(2),  // Enum cache.
+          environment()->ExpressionStackAt(0),  // Iteration index.
+          HLoadKeyedFastElement::OMIT_HOLE_CHECK));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not just deoptimize.
+  AddInstruction(new(zone()) HCheckMapValue(
+      environment()->ExpressionStackAt(4),
+      environment()->ExpressionStackAt(3)));
+
+  Bind(each_var, key);
+
+  BreakAndContinueInfo break_info(stmt, 5);
+  CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
+
+  HBasicBlock* body_exit =
+      JoinContinue(stmt, current_block(), break_info.continue_block());
+
+  if (body_exit != NULL) {
+    set_current_block(body_exit);
+
+    HValue* current_index = Pop();
+    HInstruction* new_index = new(zone()) HAdd(environment()->LookupContext(),
+                                               current_index,
+                                               graph()->GetConstant1());
+    new_index->AssumeRepresentation(Representation::Integer32());
+    PushAndAdd(new_index);
+    body_exit = current_block();
+  }
+
+  HBasicBlock* loop_exit = CreateLoop(stmt,
+                                      loop_entry,
+                                      body_exit,
+                                      loop_successor,
+                                      break_info.break_block());
+
+  set_current_block(loop_exit);
 }
 
 
@@ -3096,7 +3545,7 @@
   }
   Handle<GlobalObject> global(info()->global_object());
   global->Lookup(*var->name(), lookup);
-  if (!lookup->IsProperty() ||
+  if (!lookup->IsFound() ||
       lookup->type() != NORMAL ||
       (is_store && lookup->IsReadOnly()) ||
       lookup->holder() != *global) {
@@ -3125,12 +3574,22 @@
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
   Variable* variable = expr->var();
-  if (variable->mode() == Variable::LET) {
-    return Bailout("reference to let variable");
-  }
   switch (variable->location()) {
     case Variable::UNALLOCATED: {
-      LookupResult lookup;
+      if (variable->mode() == LET || variable->mode() == CONST_HARMONY) {
+        return Bailout("reference to global harmony declared variable");
+      }
+      // Handle known global constants like 'undefined' specially to avoid a
+      // load from a global cell for them.
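+      // (GlobalConstantFor covers, e.g., 'undefined', 'NaN' and 'Infinity'.)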
+      Handle<Object> constant_value =
+          isolate()->factory()->GlobalConstantFor(variable->name());
+      if (!constant_value.is_null()) {
+        HConstant* instr =
+            new(zone()) HConstant(constant_value, Representation::Tagged());
+        return ast_context()->ReturnInstruction(instr, expr->id());
+      }
+
+      LookupResult lookup(isolate());
       GlobalPropertyAccess type =
           LookupGlobalProperty(variable, &lookup, false);
 
@@ -3142,8 +3601,8 @@
       if (type == kUseCell) {
         Handle<GlobalObject> global(info()->global_object());
         Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-        bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
-        HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
+        HLoadGlobalCell* instr =
+            new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
         return ast_context()->ReturnInstruction(instr, expr->id());
       } else {
         HValue* context = environment()->LookupContext();
@@ -3162,20 +3621,18 @@
     case Variable::PARAMETER:
     case Variable::LOCAL: {
       HValue* value = environment()->Lookup(variable);
-      if (variable->mode() == Variable::CONST &&
-          value == graph()->GetConstantHole()) {
-        return Bailout("reference to uninitialized const variable");
+      if (value == graph()->GetConstantHole()) {
+        ASSERT(variable->mode() == CONST ||
+               variable->mode() == CONST_HARMONY ||
+               variable->mode() == LET);
+        return Bailout("reference to uninitialized variable");
       }
       return ast_context()->ReturnValue(value);
     }
 
     case Variable::CONTEXT: {
-      if (variable->mode() == Variable::CONST) {
-        return Bailout("reference to const context slot");
-      }
       HValue* context = BuildContextChainWalk(variable);
-      HLoadContextSlot* instr =
-          new(zone()) HLoadContextSlot(context, variable->index());
+      HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, variable);
       return ast_context()->ReturnInstruction(instr, expr->id());
     }
 
@@ -3209,18 +3666,99 @@
 }
 
 
+// Determines whether the given array or object literal boilerplate satisfies
+// all limits to be considered for fast deep-copying and computes the total
+// size of all objects that are part of the graph.
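+// The computed total size is what HFastLiteral later uses to allocate the
+// copy of the whole object graph in one piece.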
+static bool IsFastLiteral(Handle<JSObject> boilerplate,
+                          int max_depth,
+                          int* max_properties,
+                          int* total_size) {
+  ASSERT(max_depth >= 0 && *max_properties >= 0);
+  if (max_depth == 0) return false;
+
+  Handle<FixedArrayBase> elements(boilerplate->elements());
+  if (elements->length() > 0 &&
+      elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
+    if (boilerplate->HasFastDoubleElements()) {
+      *total_size += FixedDoubleArray::SizeFor(elements->length());
+    } else if (boilerplate->HasFastElements()) {
+      int length = elements->length();
+      for (int i = 0; i < length; i++) {
+        if ((*max_properties)-- == 0) return false;
+        Handle<Object> value = JSObject::GetElement(boilerplate, i);
+        if (value->IsJSObject()) {
+          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+          if (!IsFastLiteral(value_object,
+                             max_depth - 1,
+                             max_properties,
+                             total_size)) {
+            return false;
+          }
+        }
+      }
+      *total_size += FixedArray::SizeFor(length);
+    } else {
+      return false;
+    }
+  }
+
+  Handle<FixedArray> properties(boilerplate->properties());
+  if (properties->length() > 0) {
+    return false;
+  } else {
+    int nof = boilerplate->map()->inobject_properties();
+    for (int i = 0; i < nof; i++) {
+      if ((*max_properties)-- == 0) return false;
+      Handle<Object> value(boilerplate->InObjectPropertyAt(i));
+      if (value->IsJSObject()) {
+        Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+        if (!IsFastLiteral(value_object,
+                           max_depth - 1,
+                           max_properties,
+                           total_size)) {
+          return false;
+        }
+      }
+    }
+  }
+
+  *total_size += boilerplate->map()->instance_size();
+  return true;
+}
+
+
 void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
+  Handle<JSFunction> closure = function_state()->compilation_info()->closure();
   HValue* context = environment()->LookupContext();
-  HObjectLiteral* literal =
-      new(zone()) HObjectLiteral(context,
-                                 expr->constant_properties(),
-                                 expr->fast_elements(),
-                                 expr->literal_index(),
-                                 expr->depth(),
-                                 expr->has_function());
+  HInstruction* literal;
+
+  // Check whether to use fast or slow deep-copying for boilerplate.
+  int total_size = 0;
+  int max_properties = HFastLiteral::kMaxLiteralProperties;
+  Handle<Object> boilerplate(closure->literals()->get(expr->literal_index()));
+  if (boilerplate->IsJSObject() &&
+      IsFastLiteral(Handle<JSObject>::cast(boilerplate),
+                    HFastLiteral::kMaxLiteralDepth,
+                    &max_properties,
+                    &total_size)) {
+    Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate);
+    literal = new(zone()) HFastLiteral(context,
+                                       boilerplate_object,
+                                       total_size,
+                                       expr->literal_index(),
+                                       expr->depth());
+  } else {
+    literal = new(zone()) HObjectLiteral(context,
+                                         expr->constant_properties(),
+                                         expr->fast_elements(),
+                                         expr->literal_index(),
+                                         expr->depth(),
+                                         expr->has_function());
+  }
+
   // The object is expected in the bailout environment during computation
   // of the property values and is the value of the entire expression.
   PushAndAdd(literal);
@@ -3241,18 +3779,12 @@
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
           if (property->emit_store()) {
+            property->RecordTypeFeedback(oracle());
             CHECK_ALIVE(VisitForValue(value));
             HValue* value = Pop();
-            Handle<String> name = Handle<String>::cast(key->handle());
-            HStoreNamedGeneric* store =
-                new(zone()) HStoreNamedGeneric(
-                                context,
-                                literal,
-                                name,
-                                value,
-                                function_strict_mode());
+            HInstruction* store = BuildStoreNamed(literal, value, property);
             AddInstruction(store);
-            AddSimulate(key->id());
+            if (store->HasObservableSideEffects()) AddSimulate(key->id());
           } else {
             CHECK_ALIVE(VisitForEffect(value));
           }
@@ -3289,12 +3821,48 @@
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
   HValue* context = environment()->LookupContext();
+  HInstruction* literal;
 
-  HArrayLiteral* literal = new(zone()) HArrayLiteral(context,
-                                                     expr->constant_elements(),
-                                                     length,
-                                                     expr->literal_index(),
-                                                     expr->depth());
+  Handle<FixedArray> literals(environment()->closure()->literals());
+  Handle<Object> raw_boilerplate(literals->get(expr->literal_index()));
+
+  if (raw_boilerplate->IsUndefined()) {
+    raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
+        isolate(), literals, expr->constant_elements());
+    if (raw_boilerplate.is_null()) {
+      return Bailout("array boilerplate creation failed");
+    }
+    literals->set(expr->literal_index(), *raw_boilerplate);
+    if (JSObject::cast(*raw_boilerplate)->elements()->map() ==
+        isolate()->heap()->fixed_cow_array_map()) {
+      isolate()->counters()->cow_arrays_created_runtime()->Increment();
+    }
+  }
+
+  Handle<JSObject> boilerplate = Handle<JSObject>::cast(raw_boilerplate);
+  ElementsKind boilerplate_elements_kind = boilerplate->GetElementsKind();
+
+  // Check whether to use fast or slow deep-copying for boilerplate.
+  int total_size = 0;
+  int max_properties = HFastLiteral::kMaxLiteralProperties;
+  if (IsFastLiteral(boilerplate,
+                    HFastLiteral::kMaxLiteralDepth,
+                    &max_properties,
+                    &total_size)) {
+    literal = new(zone()) HFastLiteral(context,
+                                       boilerplate,
+                                       total_size,
+                                       expr->literal_index(),
+                                       expr->depth());
+  } else {
+    literal = new(zone()) HArrayLiteral(context,
+                                        boilerplate,
+                                        length,
+                                        expr->literal_index(),
+                                        expr->depth());
+  }
+
   // The array is expected in the bailout environment during computation
   // of the property values and is the value of the entire expression.
   PushAndAdd(literal);
@@ -3311,16 +3879,35 @@
     HValue* value = Pop();
     if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
 
-    // Load the elements array before the first store.
-    if (elements == NULL)  {
-      elements = new(zone()) HLoadElements(literal);
-      AddInstruction(elements);
-    }
+    elements = new(zone()) HLoadElements(literal);
+    AddInstruction(elements);
 
     HValue* key = AddInstruction(
         new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
                               Representation::Integer32()));
-    AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
+
+    switch (boilerplate_elements_kind) {
+      case FAST_SMI_ONLY_ELEMENTS:
+        // Smi-only arrays need a smi check.
+        AddInstruction(new(zone()) HCheckSmi(value));
+        // Fall through.
+      case FAST_ELEMENTS:
+        AddInstruction(new(zone()) HStoreKeyedFastElement(
+            elements,
+            key,
+            value,
+            boilerplate_elements_kind));
+        break;
+      case FAST_DOUBLE_ELEMENTS:
+        AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
+                                                                key,
+                                                                value));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+
     AddSimulate(expr->GetIdForElement(i));
   }
   return ast_context()->ReturnValue(Pop());
@@ -3332,7 +3919,7 @@
                                Handle<String> name,
                                LookupResult* lookup) {
   type->LookupInDescriptors(NULL, *name, lookup);
-  if (!lookup->IsPropertyOrTransition()) return false;
+  if (!lookup->IsFound()) return false;
   if (lookup->type() == FIELD) return true;
   return (lookup->type() == MAP_TRANSITION) &&
       (type->unused_property_fields() > 0);
@@ -3360,7 +3947,8 @@
                                                   bool smi_and_map_check) {
   if (smi_and_map_check) {
     AddInstruction(new(zone()) HCheckNonSmi(object));
-    AddInstruction(new(zone()) HCheckMap(object, type));
+    AddInstruction(new(zone()) HCheckMap(object, type, NULL,
+                                         ALLOW_ELEMENT_TRANSITION_MAPS));
   }
 
   int index = ComputeStoredFieldIndex(type, name, lookup);
@@ -3380,7 +3968,7 @@
     instr->set_transition(transition);
     // TODO(fschneider): Record the new map type of the object in the IR to
     // enable elimination of redundant checks after the transition store.
-    instr->SetFlag(HValue::kChangesMaps);
+    instr->SetGVNFlag(kChangesMaps);
   }
   return instr;
 }
@@ -3395,7 +3983,26 @@
                          object,
                          name,
                          value,
-                         function_strict_mode());
+                         function_strict_mode_flag());
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
+                                             HValue* value,
+                                             ObjectLiteral::Property* prop) {
+  Literal* key = prop->key()->AsLiteral();
+  Handle<String> name = Handle<String>::cast(key->handle());
+  ASSERT(!name.is_null());
+
+  LookupResult lookup(isolate());
+  Handle<Map> type = prop->GetReceiverType();
+  bool is_monomorphic = prop->IsMonomorphic() &&
+      ComputeStoredField(type, name, &lookup);
+
+  return is_monomorphic
+      ? BuildStoreNamedField(object, name, value, type, &lookup,
+                             true)  // Needs smi and map check.
+      : BuildStoreNamedGeneric(object, name, value);
 }
 
 
@@ -3409,7 +4016,7 @@
   Handle<String> name = Handle<String>::cast(key->handle());
   ASSERT(!name.is_null());
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   SmallMapList* types = expr->GetReceiverTypes();
   bool is_monomorphic = expr->IsMonomorphic() &&
       ComputeStoredField(types->first(), name, &lookup);
@@ -3433,7 +4040,7 @@
   HBasicBlock* join = NULL;
   for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
     Handle<Map> map = types->at(i);
-    LookupResult lookup;
+    LookupResult lookup(isolate());
     if (ComputeStoredField(map, name, &lookup)) {
       if (count == 0) {
         AddInstruction(new(zone()) HCheckNonSmi(object));  // Only needed once.
@@ -3476,7 +4083,7 @@
       // The HSimulate for the store should not see the stored value in
       // effect contexts (it is not materialized at expr->id() in the
       // unoptimized code).
-      if (instr->HasSideEffects()) {
+      if (instr->HasObservableSideEffects()) {
         if (ast_context()->IsEffect()) {
           AddSimulate(expr->id());
         } else {
@@ -3516,7 +4123,7 @@
     ASSERT(!name.is_null());
 
     SmallMapList* types = expr->GetReceiverTypes();
-    LookupResult lookup;
+    LookupResult lookup(isolate());
 
     if (expr->IsMonomorphic()) {
       instr = BuildStoreNamed(object, value, expr);
@@ -3549,7 +4156,7 @@
   Push(value);
   instr->set_position(expr->position());
   AddInstruction(instr);
-  if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+  if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
   return ast_context()->ReturnValue(Pop());
 }
 
@@ -3561,16 +4168,16 @@
                                                    HValue* value,
                                                    int position,
                                                    int ast_id) {
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
   if (type == kUseCell) {
-    bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
     Handle<GlobalObject> global(info()->global_object());
     Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-    HInstruction* instr = new(zone()) HStoreGlobalCell(value, cell, check_hole);
+    HInstruction* instr =
+        new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
     instr->set_position(position);
     AddInstruction(instr);
-    if (instr->HasSideEffects()) AddSimulate(ast_id);
+    if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
   } else {
     HValue* context =  environment()->LookupContext();
     HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -3580,11 +4187,11 @@
                                         global_object,
                                         var->name(),
                                         value,
-                                        function_strict_mode());
+                                        function_strict_mode_flag());
     instr->set_position(position);
     AddInstruction(instr);
-    ASSERT(instr->HasSideEffects());
-    if (instr->HasSideEffects()) AddSimulate(ast_id);
+    ASSERT(instr->HasObservableSideEffects());
+    if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
   }
 }
 
@@ -3601,8 +4208,8 @@
 
   if (proxy != NULL) {
     Variable* var = proxy->var();
-    if (var->mode() == Variable::CONST || var->mode() == Variable::LET)  {
-      return Bailout("unsupported let or const compound assignment");
+    if (var->mode() == LET)  {
+      return Bailout("unsupported let compound assignment");
     }
 
     CHECK_ALIVE(VisitForValue(operation));
@@ -3617,6 +4224,9 @@
 
       case Variable::PARAMETER:
       case Variable::LOCAL:
+        if (var->mode() == CONST)  {
+          return Bailout("unsupported const compound assignment");
+        }
         Bind(var, Top());
         break;
 
@@ -3637,11 +4247,29 @@
           }
         }
 
+        HStoreContextSlot::Mode mode;
+
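+        // LET context slots hold the hole until they are initialized, so a
+        // store must check for the hole and deoptimize, leaving the error
+        // case to the unoptimized code.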
+        switch (var->mode()) {
+          case LET:
+            mode = HStoreContextSlot::kCheckDeoptimize;
+            break;
+          case CONST:
+            return ast_context()->ReturnValue(Pop());
+          case CONST_HARMONY:
+            // This case is checked statically, so no runtime check is
+            // needed here.
+            UNREACHABLE();
+          default:
+            mode = HStoreContextSlot::kNoCheck;
+        }
+
         HValue* context = BuildContextChainWalk(var);
         HStoreContextSlot* instr =
-            new(zone()) HStoreContextSlot(context, var->index(), Top());
+            new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
         AddInstruction(instr);
-        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+        if (instr->HasObservableSideEffects()) {
+          AddSimulate(expr->AssignmentId());
+        }
         break;
       }
 
@@ -3667,7 +4295,7 @@
         load = BuildLoadNamedGeneric(obj, prop);
       }
       PushAndAdd(load);
-      if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
+      if (load->HasObservableSideEffects()) AddSimulate(expr->CompoundLoadId());
 
       CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* right = Pop();
@@ -3675,14 +4303,14 @@
 
       HInstruction* instr = BuildBinaryOperation(operation, left, right);
       PushAndAdd(instr);
-      if (instr->HasSideEffects()) AddSimulate(operation->id());
+      if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
 
       HInstruction* store = BuildStoreNamed(obj, instr, prop);
       AddInstruction(store);
       // Drop the simulated receiver and value.  Return the value.
       Drop(2);
       Push(instr);
-      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+      if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
       return ast_context()->ReturnValue(Pop());
 
     } else {
@@ -3707,7 +4335,7 @@
 
       HInstruction* instr = BuildBinaryOperation(operation, left, right);
       PushAndAdd(instr);
-      if (instr->HasSideEffects()) AddSimulate(operation->id());
+      if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
 
       expr->RecordTypeFeedback(oracle());
       HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
@@ -3746,19 +4374,23 @@
     HandlePropertyAssignment(expr);
   } else if (proxy != NULL) {
     Variable* var = proxy->var();
-    if (var->mode() == Variable::CONST) {
+
+    if (var->mode() == CONST) {
       if (expr->op() != Token::INIT_CONST) {
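+        // Legacy (non-harmony) const: a non-initializing assignment
+        // evaluates the right-hand side for its side effects but leaves the
+        // variable unchanged.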
+        CHECK_ALIVE(VisitForValue(expr->value()));
+        return ast_context()->ReturnValue(Pop());
+      }
+
+      if (var->IsStackAllocated()) {
+        // We insert a use of the old value to detect unsupported uses of const
+        // variables (e.g. initialization inside a loop).
+        HValue* old_value = environment()->Lookup(var);
+        AddInstruction(new HUseConst(old_value));
+      }
+    } else if (var->mode() == CONST_HARMONY) {
+      if (expr->op() != Token::INIT_CONST_HARMONY) {
         return Bailout("non-initializer assignment to const");
       }
-      if (!var->IsStackAllocated()) {
-        return Bailout("assignment to const context slot");
-      }
-      // We insert a use of the old value to detect unsupported uses of const
-      // variables (e.g. initialization inside a loop).
-      HValue* old_value = environment()->Lookup(var);
-      AddInstruction(new HUseConst(old_value));
-    } else if (var->mode() == Variable::LET) {
-      return Bailout("unsupported assignment to let");
     }
 
     if (proxy->IsArguments()) return Bailout("assignment to arguments");
@@ -3775,6 +4407,14 @@
 
       case Variable::PARAMETER:
       case Variable::LOCAL: {
+        // Perform an initialization check for let declared variables
+        // or parameters.
+        if (var->mode() == LET && expr->op() == Token::ASSIGN) {
+          HValue* env_value = environment()->Lookup(var);
+          if (env_value == graph()->GetConstantHole()) {
+            return Bailout("assignment to let variable before initialization");
+          }
+        }
         // We do not allow the arguments object to occur in a context where it
         // may escape, but assignments to stack-allocated locals are
         // permitted.
@@ -3785,7 +4425,6 @@
       }
 
       case Variable::CONTEXT: {
-        ASSERT(var->mode() != Variable::CONST);
         // Bail out if we try to mutate a parameter value in a function using
         // the arguments object.  We do not (yet) correctly handle the
         // arguments property of the function.
@@ -3801,11 +4440,38 @@
         }
 
         CHECK_ALIVE(VisitForValue(expr->value()));
+        HStoreContextSlot::Mode mode;
+        if (expr->op() == Token::ASSIGN) {
+          switch (var->mode()) {
+            case LET:
+              mode = HStoreContextSlot::kCheckDeoptimize;
+              break;
+            case CONST:
+              return ast_context()->ReturnValue(Pop());
+            case CONST_HARMONY:
+              // This case is checked statically, so no runtime check is
+              // needed here.
+              UNREACHABLE();
+            default:
+              mode = HStoreContextSlot::kNoCheck;
+          }
+        } else if (expr->op() == Token::INIT_VAR ||
+                   expr->op() == Token::INIT_LET ||
+                   expr->op() == Token::INIT_CONST_HARMONY) {
+          mode = HStoreContextSlot::kNoCheck;
+        } else {
+          ASSERT(expr->op() == Token::INIT_CONST);
+
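+          // A legacy const is initialized at most once; if the slot no
+          // longer holds the hole, the initializing store is ignored.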
+          mode = HStoreContextSlot::kCheckIgnoreAssignment;
+        }
+
         HValue* context = BuildContextChainWalk(var);
-        HStoreContextSlot* instr =
-            new(zone()) HStoreContextSlot(context, var->index(), Top());
+        HStoreContextSlot* instr = new(zone()) HStoreContextSlot(
+            context, var->index(), mode, Top());
         AddInstruction(instr);
-        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+        if (instr->HasObservableSideEffects()) {
+          AddSimulate(expr->AssignmentId());
+        }
         return ast_context()->ReturnValue(Pop());
       }
 
@@ -3846,7 +4512,8 @@
                                                     bool smi_and_map_check) {
   if (smi_and_map_check) {
     AddInstruction(new(zone()) HCheckNonSmi(object));
-    AddInstruction(new(zone()) HCheckMap(object, type));
+    AddInstruction(new(zone()) HCheckMap(object, type, NULL,
+                                         ALLOW_ELEMENT_TRANSITION_MAPS));
   }
 
   int index = lookup->GetLocalFieldIndexFromMap(*type);
@@ -3865,6 +4532,10 @@
 
 HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
                                                    Property* expr) {
+  if (expr->IsUninitialized() && !FLAG_always_opt) {
+    AddInstruction(new(zone()) HSoftDeoptimize);
+    current_block()->MarkAsDeoptimizing();
+  }
   ASSERT(expr->key()->IsPropertyName());
   Handle<Object> name = expr->key()->AsLiteral()->handle();
   HValue* context = environment()->LookupContext();
@@ -3876,17 +4547,18 @@
                                             Property* expr,
                                             Handle<Map> map,
                                             Handle<String> name) {
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   map->LookupInDescriptors(NULL, *name, &lookup);
-  if (lookup.IsProperty() && lookup.type() == FIELD) {
+  if (lookup.IsFound() && lookup.type() == FIELD) {
     return BuildLoadNamedField(obj,
                                expr,
                                map,
                                &lookup,
                                true);
-  } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
+  } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
     AddInstruction(new(zone()) HCheckNonSmi(obj));
-    AddInstruction(new(zone()) HCheckMap(obj, map));
+    AddInstruction(new(zone()) HCheckMap(obj, map, NULL,
+                                         ALLOW_ELEMENT_TRANSITION_MAPS));
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
     return new(zone()) HConstant(function, Representation::Tagged());
   } else {
@@ -3912,9 +4584,7 @@
     ASSERT(val != NULL);
     switch (elements_kind) {
       case EXTERNAL_PIXEL_ELEMENTS: {
-        HClampToUint8* clamp = new(zone()) HClampToUint8(val);
-        AddInstruction(clamp);
-        val = clamp;
+        val = AddInstruction(new(zone()) HClampToUint8(val));
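+        // Stores to pixel arrays clamp the value to the range [0, 255].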
         break;
       }
       case EXTERNAL_BYTE_ELEMENTS:
@@ -3923,14 +4593,19 @@
       case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
       case EXTERNAL_INT_ELEMENTS:
       case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
-        HToInt32* floor_val = new(zone()) HToInt32(val);
-        AddInstruction(floor_val);
-        val = floor_val;
+        if (!val->representation().IsInteger32()) {
+          val = AddInstruction(new(zone()) HChange(
+              val,
+              Representation::Integer32(),
+              true,  // Truncate to int32.
+              false));  // Don't deoptimize undefined (irrelevant here).
+        }
         break;
       }
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
         break;
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
@@ -3941,30 +4616,55 @@
     return new(zone()) HStoreKeyedSpecializedArrayElement(
         external_elements, checked_key, val, elements_kind);
   } else {
+    ASSERT(val == NULL);
     return new(zone()) HLoadKeyedSpecializedArrayElement(
         external_elements, checked_key, elements_kind);
   }
 }
 
 
+HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
+                                                    HValue* checked_key,
+                                                    HValue* val,
+                                                    ElementsKind elements_kind,
+                                                    bool is_store) {
+  if (is_store) {
+    ASSERT(val != NULL);
+    switch (elements_kind) {
+      case FAST_DOUBLE_ELEMENTS:
+        return new(zone()) HStoreKeyedFastDoubleElement(
+            elements, checked_key, val);
+      case FAST_SMI_ONLY_ELEMENTS:
+        // Smi-only arrays need a smi check.
+        AddInstruction(new(zone()) HCheckSmi(val));
+        // Fall through.
+      case FAST_ELEMENTS:
+        return new(zone()) HStoreKeyedFastElement(
+            elements, checked_key, val, elements_kind);
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+  // It's an element load (!is_store).
+  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+    return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
+  } else {  // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
+    return new(zone()) HLoadKeyedFastElement(elements, checked_key);
+  }
+}
+
+
 HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
                                                            HValue* key,
                                                            HValue* val,
-                                                           Expression* expr,
+                                                           Handle<Map> map,
                                                            bool is_store) {
-  ASSERT(expr->IsMonomorphic());
-  Handle<Map> map = expr->GetMonomorphicReceiverType();
-  if (!map->has_fast_elements() &&
-      !map->has_fast_double_elements() &&
-      !map->has_external_array_elements()) {
-    return is_store ? BuildStoreKeyedGeneric(object, key, val)
-                    : BuildLoadKeyedGeneric(object, key);
-  }
-  AddInstruction(new(zone()) HCheckNonSmi(object));
   HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
+  bool fast_smi_only_elements = map->has_fast_smi_only_elements();
+  bool fast_elements = map->has_fast_elements();
   HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
-  bool fast_double_elements = map->has_fast_double_elements();
-  if (is_store && map->has_fast_elements()) {
+  if (is_store && (fast_elements || fast_smi_only_elements)) {
     AddInstruction(new(zone()) HCheckMap(
         elements, isolate()->factory()->fixed_array_map()));
   }
@@ -3979,28 +4679,17 @@
     return BuildExternalArrayElementAccess(external_elements, checked_key,
                                            val, map->elements_kind(), is_store);
   }
-  ASSERT(map->has_fast_elements() || fast_double_elements);
+  ASSERT(fast_smi_only_elements ||
+         fast_elements ||
+         map->has_fast_double_elements());
   if (map->instance_type() == JS_ARRAY_TYPE) {
     length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
   } else {
     length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
   }
   checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-  if (is_store) {
-    if (fast_double_elements) {
-      return new(zone()) HStoreKeyedFastDoubleElement(elements,
-                                                      checked_key,
-                                                      val);
-    } else {
-      return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
-    }
-  } else {
-    if (fast_double_elements) {
-      return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
-    } else {
-      return new(zone()) HLoadKeyedFastElement(elements, checked_key);
-    }
-  }
+  return BuildFastElementAccess(elements, checked_key, val,
+                                map->elements_kind(), is_store);
 }
 
 
@@ -4014,7 +4703,6 @@
                                                       bool* has_side_effects) {
   *has_side_effects = false;
   AddInstruction(new(zone()) HCheckNonSmi(object));
-  AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
   SmallMapList* maps = prop->GetReceiverTypes();
   bool todo_external_array = false;
 
@@ -4024,15 +4712,61 @@
     type_todo[i] = false;
   }
 
+  // Elements_kind transition support.
+  MapHandleList transition_target(maps->length());
+  // Collect possible transition targets.
+  MapHandleList possible_transitioned_maps(maps->length());
   for (int i = 0; i < maps->length(); ++i) {
-    ASSERT(maps->at(i)->IsMap());
-    type_todo[maps->at(i)->elements_kind()] = true;
-    if (maps->at(i)->elements_kind()
-        >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
-      todo_external_array = true;
+    Handle<Map> map = maps->at(i);
+    ElementsKind elements_kind = map->elements_kind();
+    if (elements_kind == FAST_DOUBLE_ELEMENTS ||
+        elements_kind == FAST_ELEMENTS) {
+      possible_transitioned_maps.Add(map);
+    }
+  }
+  // Get transition target for each map (NULL == no transition).
+  for (int i = 0; i < maps->length(); ++i) {
+    Handle<Map> map = maps->at(i);
+    Handle<Map> transitioned_map =
+        map->FindTransitionedMap(&possible_transitioned_maps);
+    transition_target.Add(transitioned_map);
+  }
+
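+  // Maps with a transition target are migrated eagerly below via
+  // HTransitionElementsKind and need no access path of their own; only the
+  // remaining untransitionable maps get dedicated element accesses.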
+  int num_untransitionable_maps = 0;
+  Handle<Map> untransitionable_map;
+  for (int i = 0; i < maps->length(); ++i) {
+    Handle<Map> map = maps->at(i);
+    ASSERT(map->IsMap());
+    if (!transition_target.at(i).is_null()) {
+      AddInstruction(new(zone()) HTransitionElementsKind(
+          object, map, transition_target.at(i)));
+    } else {
+      type_todo[map->elements_kind()] = true;
+      if (map->elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
+        todo_external_array = true;
+      }
+      num_untransitionable_maps++;
+      untransitionable_map = map;
     }
   }
 
+  // If only one map is left after transitioning, handle this case
+  // monomorphically.
+  if (num_untransitionable_maps == 1) {
+    HInstruction* instr = NULL;
+    if (untransitionable_map->has_slow_elements_kind()) {
+      instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
+                                      : BuildLoadKeyedGeneric(object, key));
+    } else {
+      instr = AddInstruction(BuildMonomorphicElementAccess(
+          object, key, val, untransitionable_map, is_store));
+    }
+    *has_side_effects |= instr->HasObservableSideEffects();
+    instr->set_position(position);
+    return is_store ? NULL : instr;
+  }
+
+  AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
   HBasicBlock* join = graph()->CreateBasicBlock();
 
   HInstruction* elements_kind_instr =
@@ -4042,14 +4776,20 @@
   HLoadExternalArrayPointer* external_elements = NULL;
   HInstruction* checked_key = NULL;
 
-  // FAST_ELEMENTS is assumed to be the first case.
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  // Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
+  // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external
+  // arrays.
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
 
-  for (ElementsKind elements_kind = FAST_ELEMENTS;
+  for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
        elements_kind <= LAST_ELEMENTS_KIND;
        elements_kind = ElementsKind(elements_kind + 1)) {
-    // After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we
-    // need to add some code that's executed for all external array cases.
+    // After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS,
+    // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code
+    // that's executed for all external array cases.
     STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
                   LAST_ELEMENTS_KIND);
     if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@@ -4071,15 +4811,22 @@
 
       set_current_block(if_true);
       HInstruction* access;
-      if (elements_kind == FAST_ELEMENTS ||
+      if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+          elements_kind == FAST_ELEMENTS ||
           elements_kind == FAST_DOUBLE_ELEMENTS) {
-        bool fast_double_elements =
-            elements_kind == FAST_DOUBLE_ELEMENTS;
-        if (is_store && elements_kind == FAST_ELEMENTS) {
+        if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
           AddInstruction(new(zone()) HCheckMap(
               elements, isolate()->factory()->fixed_array_map(),
               elements_kind_branch));
         }
+        // TODO(jkummerow): The need for these two blocks could be avoided
+        // in one of two ways:
+        // (1) Introduce ElementsKinds for JSArrays that are distinct from
+        //     those for fast objects.
+        // (2) Put the common instructions into a third "join" block. This
+        //     requires additional AST IDs that we can deopt to from inside
+        //     that join block. They must be added to the Property class (when
+        //     it's a keyed property) and registered in the full codegen.
         HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
         HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
         HHasInstanceTypeAndBranch* typecheck =
@@ -4089,30 +4836,16 @@
         current_block()->Finish(typecheck);
 
         set_current_block(if_jsarray);
-        HInstruction* length = new(zone()) HJSArrayLength(object, typecheck);
-        AddInstruction(length);
+        HInstruction* length = AddInstruction(
+            new(zone()) HJSArrayLength(object, typecheck));
         checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-        if (is_store) {
-          if (fast_double_elements) {
-            access = AddInstruction(
-                new(zone()) HStoreKeyedFastDoubleElement(elements,
-                                                         checked_key,
-                                                         val));
-          } else {
-            access = AddInstruction(
-                new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
-          }
-        } else {
-          if (fast_double_elements) {
-            access = AddInstruction(
-                new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
-          } else {
-            access = AddInstruction(
-                new(zone()) HLoadKeyedFastElement(elements, checked_key));
-          }
+        access = AddInstruction(BuildFastElementAccess(
+            elements, checked_key, val, elements_kind, is_store));
+        if (!is_store) {
           Push(access);
         }
-        *has_side_effects |= access->HasSideEffects();
+
+        *has_side_effects |= access->HasObservableSideEffects();
         if (position != -1) {
           access->set_position(position);
         }
@@ -4121,25 +4854,8 @@
         set_current_block(if_fastobject);
         length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
         checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-        if (is_store) {
-          if (fast_double_elements) {
-            access = AddInstruction(
-                new(zone()) HStoreKeyedFastDoubleElement(elements,
-                                                         checked_key,
-                                                         val));
-          } else {
-            access = AddInstruction(
-                new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
-          }
-        } else {
-          if (fast_double_elements) {
-            access = AddInstruction(
-                new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
-          } else {
-            access = AddInstruction(
-                new(zone()) HLoadKeyedFastElement(elements, checked_key));
-          }
-        }
+        access = AddInstruction(BuildFastElementAccess(
+            elements, checked_key, val, elements_kind, is_store));
       } else if (elements_kind == DICTIONARY_ELEMENTS) {
         if (is_store) {
           access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
@@ -4150,7 +4866,7 @@
         access = AddInstruction(BuildExternalArrayElementAccess(
             external_elements, checked_key, val, elements_kind, is_store));
       }
-      *has_side_effects |= access->HasSideEffects();
+      *has_side_effects |= access->HasObservableSideEffects();
       access->set_position(position);
       if (!is_store) {
         Push(access);
@@ -4179,7 +4895,14 @@
   ASSERT(!expr->IsPropertyName());
   HInstruction* instr = NULL;
   if (expr->IsMonomorphic()) {
-    instr = BuildMonomorphicElementAccess(obj, key, val, expr, is_store);
+    Handle<Map> map = expr->GetMonomorphicReceiverType();
+    if (map->has_slow_elements_kind()) {
+      instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
+                       : BuildLoadKeyedGeneric(obj, key);
+    } else {
+      AddInstruction(new(zone()) HCheckNonSmi(obj));
+      instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
+    }
   } else if (expr->GetReceiverTypes() != NULL &&
              !expr->GetReceiverTypes()->is_empty()) {
     return HandlePolymorphicElementAccess(
@@ -4193,7 +4916,7 @@
   }
   instr->set_position(position);
   AddInstruction(instr);
-  *has_side_effects = instr->HasSideEffects();
+  *has_side_effects = instr->HasObservableSideEffects();
   return instr;
 }
 
@@ -4207,7 +4930,7 @@
                          object,
                          key,
                          value,
-                         function_strict_mode());
+                         function_strict_mode_flag());
 }
 
 bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
@@ -4260,7 +4983,7 @@
   CHECK_ALIVE(VisitForValue(expr->obj()));
 
   HInstruction* instr = NULL;
-  if (expr->IsArrayLength()) {
+  if (expr->AsProperty()->IsArrayLength()) {
     HValue* array = Pop();
     AddInstruction(new(zone()) HCheckNonSmi(array));
     HInstruction* mapcheck =
@@ -4338,7 +5061,8 @@
   // its prototypes.
   if (smi_and_map_check) {
     AddInstruction(new(zone()) HCheckNonSmi(receiver));
-    AddInstruction(new(zone()) HCheckMap(receiver, receiver_map));
+    AddInstruction(new(zone()) HCheckMap(receiver, receiver_map, NULL,
+                                         ALLOW_ELEMENT_TRANSITION_MAPS));
   }
   if (!expr->holder().is_null()) {
     AddInstruction(new(zone()) HCheckPrototypeMaps(
@@ -4379,7 +5103,7 @@
         PrintF("Trying to inline the polymorphic call to %s\n",
                *name->ToCString());
       }
-      if (FLAG_polymorphic_inlining && TryInline(expr)) {
+      if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
         // Trying to inline will signal that we should bailout from the
         // entire compilation by setting stack overflow on the visitor.
         if (HasStackOverflow()) return;
@@ -4449,24 +5173,24 @@
 }
 
 
-bool HGraphBuilder::TryInline(Call* expr) {
+bool HGraphBuilder::TryInline(CallKind call_kind,
+                              Handle<JSFunction> target,
+                              ZoneList<Expression*>* arguments,
+                              HValue* receiver,
+                              int ast_id,
+                              int return_id,
+                              ReturnHandlingFlag return_handling) {
   if (!FLAG_use_inlining) return false;
 
-  // The function call we are inlining is a method call if the call
-  // is a property call.
-  CallKind call_kind = (expr->expression()->AsProperty() == NULL)
-      ? CALL_AS_FUNCTION
-      : CALL_AS_METHOD;
-
   // Precondition: call is monomorphic and we have found a target with the
   // appropriate arity.
   Handle<JSFunction> caller = info()->closure();
-  Handle<JSFunction> target = expr->target();
   Handle<SharedFunctionInfo> target_shared(target->shared());
 
   // Do a quick check on source code length to avoid parsing large
   // inlining candidates.
-  if (FLAG_limit_inlining && target->shared()->SourceSize() > kMaxSourceSize) {
+  if ((FLAG_limit_inlining && target_shared->SourceSize() > kMaxSourceSize)
+      || target_shared->SourceSize() > kUnlimitedMaxSourceSize) {
     TraceInline(target, caller, "target text too big");
     return false;
   }
@@ -4476,8 +5200,20 @@
     TraceInline(target, caller, "target not inlineable");
     return false;
   }
+  if (target_shared->dont_inline() || target_shared->dont_optimize()) {
+    TraceInline(target, caller, "target contains unsupported syntax [early]");
+    return false;
+  }
 
-  // No context change required.
+  int nodes_added = target_shared->ast_node_count();
+  if ((FLAG_limit_inlining && nodes_added > kMaxInlinedSize) ||
+      nodes_added > kUnlimitedMaxInlinedSize) {
+    TraceInline(target, caller, "target AST is too large [early]");
+    return false;
+  }
+
+#if !defined(V8_TARGET_ARCH_IA32)
+  // Target must be able to use caller's context.
   CompilationInfo* outer_info = info();
   if (target->context() != outer_info->closure()->context() ||
       outer_info->scope()->contains_with() ||
@@ -4485,6 +5221,8 @@
     TraceInline(target, caller, "target requires context change");
     return false;
   }
+#endif
+
 
   // Don't inline deeper than kMaxInliningLevels calls.
   HEnvironment* env = environment();
@@ -4494,32 +5232,37 @@
       TraceInline(target, caller, "inline depth limit reached");
       return false;
     }
-    current_level++;
+    if (env->outer()->frame_type() == JS_FUNCTION) {
+      current_level++;
+    }
     env = env->outer();
   }
 
   // Don't inline recursive functions.
-  if (*target_shared == outer_info->closure()->shared()) {
-    TraceInline(target, caller, "target is recursive");
-    return false;
+  for (FunctionState* state = function_state();
+       state != NULL;
+       state = state->outer()) {
+    if (state->compilation_info()->closure()->shared() == *target_shared) {
+      TraceInline(target, caller, "target is recursive");
+      return false;
+    }
   }
 
   // We don't want to add more than a certain number of nodes from inlining.
-  if (FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) {
+  if ((FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) ||
+      inlined_count_ > kUnlimitedMaxInlinedNodes) {
     TraceInline(target, caller, "cumulative AST node limit reached");
     return false;
   }
 
-  int count_before = AstNode::Count();
-
   // Parse and allocate variables.
   CompilationInfo target_info(target);
-  if (!ParserApi::Parse(&target_info) ||
+  if (!ParserApi::Parse(&target_info, kNoParsingFlags) ||
       !Scope::Analyze(&target_info)) {
     if (target_info.isolate()->has_pending_exception()) {
       // Parse or scope error, never optimize this function.
       SetStackOverflow();
-      target_shared->DisableOptimization(*target);
+      target_shared->DisableOptimization();
     }
     TraceInline(target, caller, "parse failure");
     return false;
@@ -4531,20 +5274,35 @@
   }
   FunctionLiteral* function = target_info.function();
 
-  // Count the number of AST nodes added by inlining this call.
-  int nodes_added = AstNode::Count() - count_before;
-  if (FLAG_limit_inlining && nodes_added > kMaxInlinedSize) {
-    TraceInline(target, caller, "target AST is too large");
+  // The following conditions must be checked again after re-parsing, because
+  // earlier the information might not have been complete due to lazy parsing.
+  nodes_added = function->ast_node_count();
+  if ((FLAG_limit_inlining && nodes_added > kMaxInlinedSize) ||
+      nodes_added > kUnlimitedMaxInlinedSize) {
+    TraceInline(target, caller, "target AST is too large [late]");
+    return false;
+  }
+  AstProperties::Flags* flags(function->flags());
+  if (flags->Contains(kDontInline) || flags->Contains(kDontOptimize)) {
+    TraceInline(target, caller, "target contains unsupported syntax [late]");
     return false;
   }
 
-  // Don't inline functions that uses the arguments object or that
-  // have a mismatching number of parameters.
-  int arity = expr->arguments()->length();
-  if (function->scope()->arguments() != NULL ||
-      arity != target_shared->formal_parameter_count()) {
-    TraceInline(target, caller, "target requires special argument handling");
-    return false;
+  // If the function uses the arguments object, check that inlining of
+  // functions with an arguments object is enabled and that the arguments
+  // variable is stack allocated.
+  if (function->scope()->arguments() != NULL) {
+    if (!FLAG_inline_arguments) {
+      TraceInline(target, caller, "target uses arguments object");
+      return false;
+    }
+
+    if (!function->scope()->arguments()->IsStackAllocated()) {
+      TraceInline(target,
+                  caller,
+                  "target uses non-stackallocated arguments object");
+      return false;
+    }
   }
 
   // All declarations must be inlineable.
@@ -4556,13 +5314,6 @@
       return false;
     }
   }
-  // All statements in the body must be inlineable.
-  for (int i = 0, count = function->body()->length(); i < count; ++i) {
-    if (!function->body()->at(i)->IsInlineable()) {
-      TraceInline(target, caller, "target contains unsupported syntax");
-      return false;
-    }
-  }
 
   // Generate the deoptimization data for the unoptimized version of
   // the target function if we don't already have it.
@@ -4574,11 +5325,11 @@
       TraceInline(target, caller, "could not generate deoptimization info");
       return false;
     }
-    if (target_shared->scope_info() == SerializedScopeInfo::Empty()) {
+    if (target_shared->scope_info() == ScopeInfo::Empty()) {
       // The scope info might not have been set if a lazily compiled
       // function is inlined before being called for the first time.
-      Handle<SerializedScopeInfo> target_scope_info =
-          SerializedScopeInfo::Create(target_info.scope());
+      Handle<ScopeInfo> target_scope_info =
+          ScopeInfo::Create(target_info.scope());
       target_shared->set_scope_info(*target_scope_info);
     }
     target_shared->EnableDeoptimizationSupport(*target_info.code());
@@ -4596,30 +5347,55 @@
   ASSERT(target_shared->has_deoptimization_support());
   TypeFeedbackOracle target_oracle(
       Handle<Code>(target_shared->code()),
-      Handle<Context>(target->context()->global_context()));
-  FunctionState target_state(this, &target_info, &target_oracle);
+      Handle<Context>(target->context()->global_context()),
+      isolate());
+  // The function state is allocated with new because we need to delete it
+  // in two different places.
+  FunctionState* target_state = new FunctionState(
+      this, &target_info, &target_oracle, return_handling);
 
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner_env =
       environment()->CopyForInlining(target,
+                                     arguments->length(),
                                      function,
                                      undefined,
-                                     call_kind);
-  HBasicBlock* body_entry = CreateBasicBlock(inner_env);
-  current_block()->Goto(body_entry);
-  body_entry->SetJoinId(expr->ReturnId());
-  set_current_block(body_entry);
+                                     call_kind,
+                                     function_state()->is_construct());
+#ifdef V8_TARGET_ARCH_IA32
+  // IA32 only, overwrite the caller's context in the deoptimization
+  // environment with the correct one.
+  //
+  // TODO(kmillikin): implement the same inlining on other platforms so we
+  // can remove the unsightly ifdefs in this function.
+  HConstant* context = new HConstant(Handle<Context>(target->context()),
+                                     Representation::Tagged());
+  AddInstruction(context);
+  inner_env->BindContext(context);
+#endif
+  AddSimulate(return_id);
+  current_block()->UpdateEnvironment(inner_env);
   AddInstruction(new(zone()) HEnterInlined(target,
+                                           arguments->length(),
                                            function,
-                                           call_kind));
+                                           call_kind,
+                                           function_state()->is_construct(),
+                                           function->scope()->arguments()));
+  // If the function uses the arguments object, create and bind one.
+  if (function->scope()->arguments() != NULL) {
+    ASSERT(function->scope()->arguments()->IsStackAllocated());
+    environment()->Bind(function->scope()->arguments(),
+                        graph()->GetArgumentsObject());
+  }
   VisitDeclarations(target_info.scope()->declarations());
   VisitStatements(function->body());
   if (HasStackOverflow()) {
     // Bail out if the inline function did, as we cannot residualize a call
     // instead.
     TraceInline(target, caller, "inline graph construction failed");
-    target_shared->DisableOptimization(*target);
+    target_shared->DisableOptimization();
     inline_bailout_ = true;
+    delete target_state;
     return true;
   }
 
@@ -4629,29 +5405,27 @@
   TraceInline(target, caller, NULL);
 
   if (current_block() != NULL) {
-    // Add a return of undefined if control can fall off the body.  In a
-    // test context, undefined is false.
-    if (inlined_test_context() == NULL) {
+    // Add default return value (i.e. undefined for normal calls or the newly
+    // allocated receiver for construct calls) if control can fall off the
+    // body.  In a test context, undefined is false and any JSObject is true.
+    if (call_context()->IsValue()) {
       ASSERT(function_return() != NULL);
-      ASSERT(call_context()->IsEffect() || call_context()->IsValue());
-      if (call_context()->IsEffect()) {
-        current_block()->Goto(function_return());
-      } else {
-        current_block()->AddLeaveInlined(undefined, function_return());
-      }
+      HValue* return_value = function_state()->is_construct()
+          ? receiver
+          : undefined;
+      current_block()->AddLeaveInlined(return_value,
+                                       function_return(),
+                                       function_state()->drop_extra());
+    } else if (call_context()->IsEffect()) {
+      ASSERT(function_return() != NULL);
+      current_block()->Goto(function_return(), function_state()->drop_extra());
     } else {
-      // The graph builder assumes control can reach both branches of a
-      // test, so we materialize the undefined value and test it rather than
-      // simply jumping to the false target.
-      //
-      // TODO(3168478): refactor to avoid this.
-      HBasicBlock* empty_true = graph()->CreateBasicBlock();
-      HBasicBlock* empty_false = graph()->CreateBasicBlock();
-      HBranch* test = new(zone()) HBranch(undefined, empty_true, empty_false);
-      current_block()->Finish(test);
-
-      empty_true->Goto(inlined_test_context()->if_true());
-      empty_false->Goto(inlined_test_context()->if_false());
+      ASSERT(call_context()->IsTest());
+      ASSERT(inlined_test_context() != NULL);
+      HBasicBlock* target = function_state()->is_construct()
+          ? inlined_test_context()->if_true()
+          : inlined_test_context()->if_false();
+      current_block()->Goto(target, function_state()->drop_extra());
     }
   }
 
@@ -4663,35 +5437,96 @@
     // Pop the return test context from the expression context stack.
     ASSERT(ast_context() == inlined_test_context());
     ClearInlinedTestContext();
+    delete target_state;
 
     // Forward to the real test context.
     if (if_true->HasPredecessor()) {
-      if_true->SetJoinId(expr->id());
+      if_true->SetJoinId(ast_id);
       HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
-      if_true->Goto(true_target);
+      if_true->Goto(true_target, function_state()->drop_extra());
     }
     if (if_false->HasPredecessor()) {
-      if_false->SetJoinId(expr->id());
+      if_false->SetJoinId(ast_id);
       HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
-      if_false->Goto(false_target);
+      if_false->Goto(false_target, function_state()->drop_extra());
     }
     set_current_block(NULL);
+    return true;
 
   } else if (function_return()->HasPredecessor()) {
-    function_return()->SetJoinId(expr->id());
+    function_return()->SetJoinId(ast_id);
     set_current_block(function_return());
   } else {
     set_current_block(NULL);
   }
-
+  delete target_state;
   return true;
 }
 
 
-bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
-                                             HValue* receiver,
-                                             Handle<Map> receiver_map,
-                                             CheckType check_type) {
+bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
+  // The function call we are inlining is a method call if the call
+  // is a property call.
+  CallKind call_kind = (expr->expression()->AsProperty() == NULL)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+
+  return TryInline(call_kind,
+                   expr->target(),
+                   expr->arguments(),
+                   NULL,
+                   expr->id(),
+                   expr->ReturnId(),
+                   drop_extra ? DROP_EXTRA_ON_RETURN : NORMAL_RETURN);
+}
+
+
+bool HGraphBuilder::TryInlineConstruct(CallNew* expr, HValue* receiver) {
+  return TryInline(CALL_AS_FUNCTION,
+                   expr->target(),
+                   expr->arguments(),
+                   receiver,
+                   expr->id(),
+                   expr->ReturnId(),
+                   CONSTRUCT_CALL_RETURN);
+}
+
+
+bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
+  if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
+  BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
+  switch (id) {
+    case kMathRound:
+    case kMathAbs:
+    case kMathSqrt:
+    case kMathLog:
+    case kMathSin:
+    case kMathCos:
+    case kMathTan:
+      if (expr->arguments()->length() == 1) {
+        HValue* argument = Pop();
+        HValue* context = environment()->LookupContext();
+        Drop(1);  // Receiver.
+        HUnaryMathOperation* op =
+            new(zone()) HUnaryMathOperation(context, argument, id);
+        op->set_position(expr->position());
+        if (drop_extra) Drop(1);  // Optionally drop the function.
+        ast_context()->ReturnInstruction(op, expr->id());
+        return true;
+      }
+      break;
+    default:
+      // Not supported for inlining yet.
+      break;
+  }
+  return false;
+}
+
+
+bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
+                                               HValue* receiver,
+                                               Handle<Map> receiver_map,
+                                               CheckType check_type) {
   ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
   // Try to inline calls like Math.* as operations in the calling function.
   if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
@@ -4728,6 +5563,7 @@
     case kMathLog:
     case kMathSin:
     case kMathCos:
+    case kMathTan:
       if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
         AddCheckConstantFunction(expr, receiver, receiver_map, true);
         HValue* argument = Pop();
@@ -4764,7 +5600,7 @@
             AddInstruction(square_root);
             // MathPowHalf doesn't have side effects so there's no need for
             // an environment simulation here.
-            ASSERT(!square_root->HasSideEffects());
+            ASSERT(!square_root->HasObservableSideEffects());
             result = new(zone()) HDiv(context, double_one, square_root);
           } else if (exponent == 2.0) {
             result = new(zone()) HMul(context, left, left);
@@ -4782,6 +5618,100 @@
         return true;
       }
       break;
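+    // Examples of the strength reduction above (illustrative only):
+    //   Math.pow(x, 0.5)  -> MathPowHalf (a sqrt-style operation)
+    //   Math.pow(x, -0.5) -> 1 / MathPowHalf(x)
+    //   Math.pow(x, 2)    -> x * x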
+    case kMathRandom:
+      if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) {
+        AddCheckConstantFunction(expr, receiver, receiver_map, true);
+        Drop(1);  // Receiver.
+        HValue* context = environment()->LookupContext();
+        HGlobalObject* global_object = new(zone()) HGlobalObject(context);
+        AddInstruction(global_object);
+        HRandom* result = new(zone()) HRandom(global_object);
+        ast_context()->ReturnInstruction(result, expr->id());
+        return true;
+      }
+      break;
+    case kMathMax:
+    case kMathMin:
+      if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
+        AddCheckConstantFunction(expr, receiver, receiver_map, true);
+        HValue* right = Pop();
+        HValue* left = Pop();
+        Pop();  // Pop receiver.
+
+        HValue* left_operand = left;
+        HValue* right_operand = right;
+
+        // If we do not have two integers, we convert to double for comparison.
+        if (!left->representation().IsInteger32() ||
+            !right->representation().IsInteger32()) {
+          if (!left->representation().IsDouble()) {
+            HChange* left_convert = new(zone()) HChange(
+                left,
+                Representation::Double(),
+                false,  // Do not truncate when converting to double.
+                true);  // Deoptimize for undefined.
+            left_convert->SetFlag(HValue::kBailoutOnMinusZero);
+            left_operand = AddInstruction(left_convert);
+          }
+          if (!right->representation().IsDouble()) {
+            HChange* right_convert = new(zone()) HChange(
+                right,
+                Representation::Double(),
+                false,  // Do not truncate when converting to double.
+                true);  // Deoptimize for undefined.
+            right_convert->SetFlag(HValue::kBailoutOnMinusZero);
+            right_operand = AddInstruction(right_convert);
+          }
+        }
+
+        ASSERT(left_operand->representation().Equals(
+               right_operand->representation()));
+        ASSERT(!left_operand->representation().IsTagged());
+
+        Token::Value op = (id == kMathMin) ? Token::LT : Token::GT;
+
+        HCompareIDAndBranch* compare =
+            new(zone()) HCompareIDAndBranch(left_operand, right_operand, op);
+        compare->SetInputRepresentation(left_operand->representation());
+
+        HBasicBlock* return_left = graph()->CreateBasicBlock();
+        HBasicBlock* return_right = graph()->CreateBasicBlock();
+
+        compare->SetSuccessorAt(0, return_left);
+        compare->SetSuccessorAt(1, return_right);
+        current_block()->Finish(compare);
+
+        set_current_block(return_left);
+        Push(left);
+        set_current_block(return_right);
+        // The branch above always returns the right operand if either of
+        // them is NaN, but the spec requires that max/min(NaN, X) = NaN.
+        // We add another branch that checks if the left operand is NaN or not.
+        if (left_operand->representation().IsDouble()) {
+          // If left_operand != left_operand then it is NaN.
+          HCompareIDAndBranch* compare_nan = new(zone()) HCompareIDAndBranch(
+              left_operand, left_operand, Token::EQ);
+          compare_nan->SetInputRepresentation(left_operand->representation());
+          HBasicBlock* left_is_number = graph()->CreateBasicBlock();
+          HBasicBlock* left_is_nan = graph()->CreateBasicBlock();
+          compare_nan->SetSuccessorAt(0, left_is_number);
+          compare_nan->SetSuccessorAt(1, left_is_nan);
+          current_block()->Finish(compare_nan);
+          set_current_block(left_is_nan);
+          Push(left);
+          set_current_block(left_is_number);
+          Push(right);
+          return_right = CreateJoin(left_is_number, left_is_nan, expr->id());
+        } else {
+          Push(right);
+        }
+
+        HBasicBlock* join = CreateJoin(return_left, return_right, expr->id());
+        set_current_block(join);
+        ast_context()->ReturnValue(Pop());
+        return true;
+      }
+      break;
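+    // Illustrative JS semantics the kMathMax/kMathMin graph above implements:
+    //   Math.min(1, 2)   -> 1    (plain compare-and-select)
+    //   Math.min(NaN, 1) -> NaN  (the comparison is false, so the right
+    //                             branch is taken; the left != left re-check
+    //                             then selects the NaN operand)
+    //   Math.max(1, NaN) -> NaN  (the right operand is returned directly)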
     default:
       // Not yet supported for inlining.
       break;
@@ -4815,13 +5745,6 @@
   HValue* arg_two_value = environment()->Lookup(arg_two->var());
   if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
 
-  // Our implementation of arguments (based on this stack frame or an
-  // adapter below it) does not work for inlined functions.
-  if (function_state()->outer() != NULL) {
-    Bailout("Function.prototype.apply optimization in inlined function");
-    return true;
-  }
-
   // Found pattern f.apply(receiver, arguments).
   VisitForValue(prop->obj());
   if (HasStackOverflow() || current_block() == NULL) return true;
@@ -4832,13 +5755,46 @@
   VisitForValue(args->at(0));
   if (HasStackOverflow() || current_block() == NULL) return true;
   HValue* receiver = Pop();
-  HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
-  HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
-  HInstruction* result =
-      new(zone()) HApplyArguments(function, receiver, length, elements);
-  result->set_position(expr->position());
-  ast_context()->ReturnInstruction(result, expr->id());
-  return true;
+
+  if (function_state()->outer() == NULL) {
+    HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+    HInstruction* length =
+        AddInstruction(new(zone()) HArgumentsLength(elements));
+    HValue* wrapped_receiver =
+        AddInstruction(new(zone()) HWrapReceiver(receiver, function));
+    HInstruction* result =
+        new(zone()) HApplyArguments(function,
+                                    wrapped_receiver,
+                                    length,
+                                    elements);
+    result->set_position(expr->position());
+    ast_context()->ReturnInstruction(result, expr->id());
+    return true;
+  } else {
+    // We are inside an inlined function and know exactly what is inside
+    // the arguments object.
+    HValue* context = environment()->LookupContext();
+
+    HValue* wrapped_receiver =
+        AddInstruction(new(zone()) HWrapReceiver(receiver, function));
+    PushAndAdd(new(zone()) HPushArgument(wrapped_receiver));
+
+    HEnvironment* arguments_env = environment()->arguments_environment();
+
+    int parameter_count = arguments_env->parameter_count();
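+    // Parameter 0 of the arguments environment is the receiver, which was
+    // already pushed (wrapped) above, so copy only the actual arguments.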
+    for (int i = 1; i < parameter_count; i++) {
+      PushAndAdd(new(zone()) HPushArgument(arguments_env->Lookup(i)));
+    }
+
+    HInvokeFunction* call = new(zone()) HInvokeFunction(
+        context,
+        function,
+        parameter_count);
+    Drop(parameter_count);
+    call->set_position(expr->position());
+    ast_context()->ReturnInstruction(call, expr->id());
+    return true;
+  }
 }
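+// Illustrative call pattern handled by the code above (example only):
+//   function outer(a, b) { return target.apply(this, arguments); }
+// When outer has itself been inlined, the contents of 'arguments' are known
+// exactly, so the apply is lowered to direct HPushArgument instructions and
+// an HInvokeFunction instead of the generic HApplyArguments path.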
 
 
@@ -4890,14 +5846,19 @@
       Handle<Map> receiver_map = (types == NULL || types->is_empty())
           ? Handle<Map>::null()
           : types->first();
-      if (TryInlineBuiltinFunction(expr,
-                                   receiver,
-                                   receiver_map,
-                                   expr->check_type())) {
+      if (TryInlineBuiltinMethodCall(expr,
+                                     receiver,
+                                     receiver_map,
+                                     expr->check_type())) {
+        if (FLAG_trace_inlining) {
+          PrintF("Inlining builtin ");
+          expr->target()->ShortPrint();
+          PrintF("\n");
+        }
         return;
       }
 
-      if (CallStubCompiler::HasCustomCallGenerator(*expr->target()) ||
+      if (CallStubCompiler::HasCustomCallGenerator(expr->target()) ||
           expr->check_type() != RECEIVER_MAP_CHECK) {
         // When the target has a custom call IC generator, use the IC,
         // because it is likely to generate better code.  Also use the IC
@@ -4908,7 +5869,7 @@
       } else {
         AddCheckConstantFunction(expr, receiver, receiver_map, true);
 
-        if (TryInline(expr)) return;
+        if (TryInlineCall(expr)) return;
         call = PreProcessCall(
             new(zone()) HCallConstantFunction(expr->target(),
                                               argument_count));
@@ -4925,17 +5886,21 @@
     }
 
   } else {
+    expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
     VariableProxy* proxy = expr->expression()->AsVariableProxy();
-    // FIXME.
     bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
 
+    if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+      return Bailout("possible direct call to eval");
+    }
+
     if (global_call) {
       Variable* var = proxy->var();
       bool known_global_function = false;
       // If there is a global property cell for the name at compile time and
       // access check is not enabled we assume that the function will not change
       // and generate optimized code for calling the function.
-      LookupResult lookup;
+      LookupResult lookup(isolate());
       GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
       if (type == kUseCell &&
           !info()->global_object()->IsAccessCheckNeeded()) {
@@ -4964,7 +5929,15 @@
                IsGlobalObject());
         environment()->SetExpressionStackAt(receiver_index, global_receiver);
 
-        if (TryInline(expr)) return;
+        if (TryInlineBuiltinFunctionCall(expr, false)) {  // Nothing to drop.
+          if (FLAG_trace_inlining) {
+            PrintF("Inlining builtin ");
+            expr->target()->ShortPrint();
+            PrintF("\n");
+          }
+          return;
+        }
+        if (TryInlineCall(expr)) return;
         call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
                                                            argument_count));
       } else {
@@ -4978,8 +5951,40 @@
         Drop(argument_count);
       }
 
+    } else if (expr->IsMonomorphic()) {
+      // The function is on the stack in the unoptimized code during
+      // evaluation of the arguments.
+      CHECK_ALIVE(VisitForValue(expr->expression()));
+      HValue* function = Top();
+      HValue* context = environment()->LookupContext();
+      HGlobalObject* global = new(zone()) HGlobalObject(context);
+      HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
+      AddInstruction(global);
+      PushAndAdd(receiver);
+      CHECK_ALIVE(VisitExpressions(expr->arguments()));
+      AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
+
+      if (TryInlineBuiltinFunctionCall(expr, true)) {  // Drop the function.
+        if (FLAG_trace_inlining) {
+          PrintF("Inlining builtin ");
+          expr->target()->ShortPrint();
+          PrintF("\n");
+        }
+        return;
+      }
+
+      if (TryInlineCall(expr, true)) {   // Drop function from environment.
+        return;
+      } else {
+        call = PreProcessCall(new(zone()) HInvokeFunction(context,
+                                                          function,
+                                                          argument_count));
+        Drop(1);  // The function.
+      }
+
     } else {
-      CHECK_ALIVE(VisitArgument(expr->expression()));
+      CHECK_ALIVE(VisitForValue(expr->expression()));
+      HValue* function = Top();
       HValue* context = environment()->LookupContext();
       HGlobalObject* global_object = new(zone()) HGlobalObject(context);
       HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
@@ -4988,9 +5993,7 @@
       PushAndAdd(new(zone()) HPushArgument(receiver));
       CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
-      // The function to call is treated as an argument to the call function
-      // stub.
-      call = new(zone()) HCallFunction(context, argument_count + 1);
+      call = new(zone()) HCallFunction(context, function, argument_count);
       Drop(argument_count + 1);
     }
   }
@@ -5000,25 +6003,72 @@
 }
 
 
+// Checks whether allocation using the given constructor can be inlined.
+static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
+  return constructor->has_initial_map() &&
+      constructor->initial_map()->instance_type() == JS_OBJECT_TYPE;
+}
+
+
 void HGraphBuilder::VisitCallNew(CallNew* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  // The constructor function is also used as the receiver argument to the
-  // JS construct call builtin.
-  HValue* constructor = NULL;
-  CHECK_ALIVE(constructor = VisitArgument(expr->expression()));
-  CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-
+  expr->RecordTypeFeedback(oracle());
+  int argument_count = expr->arguments()->length() + 1;  // Plus constructor.
   HValue* context = environment()->LookupContext();
 
-  // The constructor is both an operand to the instruction and an argument
-  // to the construct call.
-  int arg_count = expr->arguments()->length() + 1;  // Plus constructor.
-  HCallNew* call = new(zone()) HCallNew(context, constructor, arg_count);
-  call->set_position(expr->position());
-  Drop(arg_count);
-  return ast_context()->ReturnInstruction(call, expr->id());
+  if (FLAG_inline_construct &&
+      expr->IsMonomorphic() &&
+      IsAllocationInlineable(expr->target())) {
+    // The constructor function is on the stack in the unoptimized code
+    // during evaluation of the arguments.
+    CHECK_ALIVE(VisitForValue(expr->expression()));
+    HValue* function = Top();
+    CHECK_ALIVE(VisitExpressions(expr->arguments()));
+    Handle<JSFunction> constructor = expr->target();
+    HValue* check = AddInstruction(
+        new(zone()) HCheckFunction(function, constructor));
+
+    // Force completion of inobject slack tracking before generating
+    // allocation code to finalize instance size.
+    if (constructor->shared()->IsInobjectSlackTrackingInProgress()) {
+      constructor->shared()->CompleteInobjectSlackTracking();
+    }
+
+    // Replace the constructor function with a newly allocated receiver.
+    HInstruction* receiver = new(zone()) HAllocateObject(context, constructor);
+    // Index of the receiver from the top of the expression stack.
+    const int receiver_index = argument_count - 1;
+    AddInstruction(receiver);
+    ASSERT(environment()->ExpressionStackAt(receiver_index) == function);
+    environment()->SetExpressionStackAt(receiver_index, receiver);
+
+    if (TryInlineConstruct(expr, receiver)) return;
+
+    // TODO(mstarzinger): For now we remove the previous HAllocateObject and
+    // add HPushArgument for the arguments in case inlining failed.  What we
+    // actually should do is emit HInvokeFunction on the constructor instead
+    // of using HCallNew as a fallback.
+    receiver->DeleteAndReplaceWith(NULL);
+    check->DeleteAndReplaceWith(NULL);
+    environment()->SetExpressionStackAt(receiver_index, function);
+    HInstruction* call = PreProcessCall(
+        new(zone()) HCallNew(context, function, argument_count));
+    call->set_position(expr->position());
+    return ast_context()->ReturnInstruction(call, expr->id());
+  } else {
+    // The constructor function is both an operand to the instruction and an
+    // argument to the construct call.
+    HValue* constructor = NULL;
+    CHECK_ALIVE(constructor = VisitArgument(expr->expression()));
+    CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+    HInstruction* call =
+        new(zone()) HCallNew(context, constructor, argument_count);
+    Drop(argument_count);
+    call->set_position(expr->position());
+    return ast_context()->ReturnInstruction(call, expr->id());
+  }
 }
 
 
@@ -5185,7 +6235,6 @@
 
 
 void HGraphBuilder::VisitNot(UnaryOperation* expr) {
-  // TODO(svenpanne) Perhaps a switch/virtual function is nicer here.
   if (ast_context()->IsTest()) {
     TestContext* context = TestContext::cast(ast_context());
     VisitForControl(expr->expression(),
@@ -5207,7 +6256,7 @@
                                 materialize_true));
 
   if (materialize_false->HasPredecessor()) {
-    materialize_false->SetJoinId(expr->expression()->id());
+    materialize_false->SetJoinId(expr->MaterializeFalseId());
     set_current_block(materialize_false);
     Push(graph()->GetConstantFalse());
   } else {
@@ -5215,7 +6264,7 @@
   }
 
   if (materialize_true->HasPredecessor()) {
-    materialize_true->SetJoinId(expr->expression()->id());
+    materialize_true->SetJoinId(expr->MaterializeTrueId());
     set_current_block(materialize_true);
     Push(graph()->GetConstantTrue());
   } else {
@@ -5284,7 +6333,7 @@
 
   if (proxy != NULL) {
     Variable* var = proxy->var();
-    if (var->mode() == Variable::CONST)  {
+    if (var->mode() == CONST) {
       return Bailout("unsupported count operation with const");
     }
     // Argument of the count operation is a variable, not a property.
@@ -5325,10 +6374,15 @@
         }
 
         HValue* context = BuildContextChainWalk(var);
+        HStoreContextSlot::Mode mode =
+            (var->mode() == LET || var->mode() == CONST_HARMONY)
+            ? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
         HStoreContextSlot* instr =
-            new(zone()) HStoreContextSlot(context, var->index(), after);
+            new(zone()) HStoreContextSlot(context, var->index(), mode, after);
         AddInstruction(instr);
-        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+        if (instr->HasObservableSideEffects()) {
+          AddSimulate(expr->AssignmentId());
+        }
         break;
       }
 
@@ -5357,7 +6411,7 @@
         load = BuildLoadNamedGeneric(obj, prop);
       }
       PushAndAdd(load);
-      if (load->HasSideEffects()) AddSimulate(expr->CountId());
+      if (load->HasObservableSideEffects()) AddSimulate(expr->CountId());
 
       after = BuildIncrement(returns_original_input, expr);
       input = Pop();
@@ -5370,7 +6424,7 @@
       // necessary.
       environment()->SetExpressionStackAt(0, after);
       if (returns_original_input) environment()->SetExpressionStackAt(1, input);
-      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+      if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
 
     } else {
       // Keyed property.
@@ -5447,38 +6501,34 @@
         AddInstruction(HCheckInstanceType::NewIsString(right));
         instr = new(zone()) HStringAdd(context, left, right);
       } else {
-        instr = new(zone()) HAdd(context, left, right);
+        instr = HAdd::NewHAdd(zone(), context, left, right);
       }
       break;
     case Token::SUB:
-      instr = new(zone()) HSub(context, left, right);
+      instr = HSub::NewHSub(zone(), context, left, right);
       break;
     case Token::MUL:
-      instr = new(zone()) HMul(context, left, right);
+      instr = HMul::NewHMul(zone(), context, left, right);
       break;
     case Token::MOD:
-      instr = new(zone()) HMod(context, left, right);
+      instr = HMod::NewHMod(zone(), context, left, right);
       break;
     case Token::DIV:
-      instr = new(zone()) HDiv(context, left, right);
+      instr = HDiv::NewHDiv(zone(), context, left, right);
       break;
     case Token::BIT_XOR:
-      instr = new(zone()) HBitXor(context, left, right);
-      break;
     case Token::BIT_AND:
-      instr = new(zone()) HBitAnd(context, left, right);
-      break;
     case Token::BIT_OR:
-      instr = new(zone()) HBitOr(context, left, right);
+      instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right);
       break;
     case Token::SAR:
-      instr = new(zone()) HSar(context, left, right);
+      instr = HSar::NewHSar(zone(), context, left, right);
       break;
     case Token::SHR:
-      instr = new(zone()) HShr(context, left, right);
+      instr = HShr::NewHShr(zone(), context, left, right);
       break;
     case Token::SHL:
-      instr = new(zone()) HShl(context, left, right);
+      instr = HShl::NewHShl(zone(), context, left, right);
       break;
     default:
       UNREACHABLE();
@@ -5671,26 +6721,75 @@
 }
 
 
-void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
-                                               Expression* expr,
+void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
+                                               HTypeof* typeof_expr,
                                                Handle<String> check) {
-  CHECK_ALIVE(VisitForTypeOf(expr));
-  HValue* expr_value = Pop();
-  HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check);
-  instr->set_position(compare_expr->position());
-  return ast_context()->ReturnControl(instr, compare_expr->id());
+  // Note: The HTypeof itself is removed during canonicalization, if possible.
+  HValue* value = typeof_expr->value();
+  HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
+  instr->set_position(expr->position());
+  return ast_context()->ReturnControl(instr, expr->id());
 }
 
 
-void HGraphBuilder::HandleLiteralCompareUndefined(
-    CompareOperation* compare_expr, Expression* expr) {
-  CHECK_ALIVE(VisitForValue(expr));
-  HValue* lhs = Pop();
-  HValue* rhs = graph()->GetConstantUndefined();
-  HCompareObjectEqAndBranch* instr =
-      new(zone()) HCompareObjectEqAndBranch(lhs, rhs);
-  instr->set_position(compare_expr->position());
-  return ast_context()->ReturnControl(instr, compare_expr->id());
+static bool MatchLiteralCompareNil(HValue* left,
+                                   Token::Value op,
+                                   HValue* right,
+                                   Handle<Object> nil,
+                                   HValue** expr) {
+  if (left->IsConstant() &&
+      HConstant::cast(left)->handle().is_identical_to(nil) &&
+      Token::IsEqualityOp(op)) {
+    *expr = right;
+    return true;
+  }
+  return false;
+}
+
+
+static bool MatchLiteralCompareTypeof(HValue* left,
+                                      Token::Value op,
+                                      HValue* right,
+                                      HTypeof** typeof_expr,
+                                      Handle<String>* check) {
+  if (left->IsTypeof() &&
+      Token::IsEqualityOp(op) &&
+      right->IsConstant() &&
+      HConstant::cast(right)->HasStringValue()) {
+    *typeof_expr = HTypeof::cast(left);
+    *check = Handle<String>::cast(HConstant::cast(right)->handle());
+    return true;
+  }
+  return false;
+}
+
+
+static bool IsLiteralCompareTypeof(HValue* left,
+                                   Token::Value op,
+                                   HValue* right,
+                                   HTypeof** typeof_expr,
+                                   Handle<String>* check) {
+  return MatchLiteralCompareTypeof(left, op, right, typeof_expr, check) ||
+      MatchLiteralCompareTypeof(right, op, left, typeof_expr, check);
+}
+
+
+static bool IsLiteralCompareNil(HValue* left,
+                                Token::Value op,
+                                HValue* right,
+                                Handle<Object> nil,
+                                HValue** expr) {
+  return MatchLiteralCompareNil(left, op, right, nil, expr) ||
+      MatchLiteralCompareNil(right, op, left, nil, expr);
+}
+
+
+static bool IsLiteralCompareBool(HValue* left,
+                                 Token::Value op,
+                                 HValue* right) {
+  return op == Token::EQ_STRICT &&
+      ((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) ||
+       (right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean()));
 }
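+// JS comparison patterns the matchers above recognize (examples only):
+//   typeof x == "function"      -> IsLiteralCompareTypeof
+//   x === undefined, x == null  -> IsLiteralCompareNil
+//   x === true, x === false     -> IsLiteralCompareBool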
 
 
@@ -5711,21 +6810,9 @@
     return ast_context()->ReturnControl(instr, expr->id());
   }
 
-  // Check for special cases that compare against literals.
-  Expression *sub_expr;
-  Handle<String> check;
-  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
-    HandleLiteralCompareTypeof(expr, sub_expr, check);
-    return;
-  }
-
-  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
-    HandleLiteralCompareUndefined(expr, sub_expr);
-    return;
-  }
-
   TypeInfo type_info = oracle()->CompareType(expr);
   // Check if this expression was ever executed according to type feedback.
+  // Note that for the special typeof/null/undefined cases we get unknown here.
   if (type_info.IsUninitialized()) {
     AddInstruction(new(zone()) HSoftDeoptimize);
     current_block()->MarkAsDeoptimizing();
@@ -5740,6 +6827,26 @@
   HValue* left = Pop();
   Token::Value op = expr->op();
 
+  HTypeof* typeof_expr = NULL;
+  Handle<String> check;
+  if (IsLiteralCompareTypeof(left, op, right, &typeof_expr, &check)) {
+    return HandleLiteralCompareTypeof(expr, typeof_expr, check);
+  }
+  HValue* sub_expr = NULL;
+  Factory* f = graph()->isolate()->factory();
+  if (IsLiteralCompareNil(left, op, right, f->undefined_value(), &sub_expr)) {
+    return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
+  }
+  if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
+    return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
+  }
+  if (IsLiteralCompareBool(left, op, right)) {
+    HCompareObjectEqAndBranch* result =
+        new(zone()) HCompareObjectEqAndBranch(left, right);
+    result->set_position(expr->position());
+    return ast_context()->ReturnControl(result, expr->id());
+  }
+
   if (op == Token::INSTANCEOF) {
     // Check to see if the rhs of the instanceof is a global function not
     // residing in new space. If it is we assume that the function will stay the
@@ -5752,9 +6859,9 @@
         !info()->global_object()->IsAccessCheckNeeded()) {
       Handle<String> name = proxy->name();
       Handle<GlobalObject> global(info()->global_object());
-      LookupResult lookup;
+      LookupResult lookup(isolate());
       global->Lookup(*name, &lookup);
-      if (lookup.IsProperty() &&
+      if (lookup.IsFound() &&
           lookup.type() == NORMAL &&
           lookup.GetValue()->IsJSFunction()) {
         Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
@@ -5787,14 +6894,29 @@
     switch (op) {
       case Token::EQ:
       case Token::EQ_STRICT: {
-        AddInstruction(new(zone()) HCheckNonSmi(left));
-        AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
-        AddInstruction(new(zone()) HCheckNonSmi(right));
-        AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
-        HCompareObjectEqAndBranch* result =
-            new(zone()) HCompareObjectEqAndBranch(left, right);
-        result->set_position(expr->position());
-        return ast_context()->ReturnControl(result, expr->id());
+        // Can we get away with a map check instead of an instance type check?
+        Handle<Map> map = oracle()->GetCompareMap(expr);
+        if (!map.is_null()) {
+          AddInstruction(new(zone()) HCheckNonSmi(left));
+          AddInstruction(new(zone()) HCheckMap(left, map, NULL,
+                                               ALLOW_ELEMENT_TRANSITION_MAPS));
+          AddInstruction(new(zone()) HCheckNonSmi(right));
+          AddInstruction(new(zone()) HCheckMap(right, map, NULL,
+                                               ALLOW_ELEMENT_TRANSITION_MAPS));
+          HCompareObjectEqAndBranch* result =
+              new(zone()) HCompareObjectEqAndBranch(left, right);
+          result->set_position(expr->position());
+          return ast_context()->ReturnControl(result, expr->id());
+        } else {
+          AddInstruction(new(zone()) HCheckNonSmi(left));
+          AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
+          AddInstruction(new(zone()) HCheckNonSmi(right));
+          AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
+          HCompareObjectEqAndBranch* result =
+              new(zone()) HCompareObjectEqAndBranch(left, right);
+          result->set_position(expr->position());
+          return ast_context()->ReturnControl(result, expr->id());
+        }
       }
       default:
         return Bailout("Unsupported non-primitive compare");
@@ -5827,14 +6949,16 @@
 }
 
 
-void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
+void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
+                                            HValue* value,
+                                            NilValue nil) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  CHECK_ALIVE(VisitForValue(expr->expression()));
-  HValue* value = Pop();
-  HIsNullAndBranch* instr =
-      new(zone()) HIsNullAndBranch(value, expr->is_strict());
+  EqualityKind kind =
+      expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
+  HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
+  instr->set_position(expr->position());
   return ast_context()->ReturnControl(instr, expr->id());
 }
 
@@ -5843,41 +6967,94 @@
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  HThisFunction* self = new(zone()) HThisFunction;
+  HThisFunction* self = new(zone()) HThisFunction(
+      function_state()->compilation_info()->closure());
   return ast_context()->ReturnInstruction(self, expr->id());
 }
 
 
-void HGraphBuilder::VisitDeclaration(Declaration* decl) {
-  HandleDeclaration(decl->proxy(), decl->mode(), decl->fun());
+void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+  int length = declarations->length();
+  int global_count = 0;
+  for (int i = 0; i < length; i++) {
+    Declaration* decl = declarations->at(i);
+    FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
+    HandleDeclaration(decl->proxy(),
+                      decl->mode(),
+                      fun_decl != NULL ? fun_decl->fun() : NULL,
+                      &global_count);
+  }
+
+  // Batch declare global functions and variables.
+  if (global_count > 0) {
+    Handle<FixedArray> array =
+        isolate()->factory()->NewFixedArray(2 * global_count, TENURED);
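+    // Layout of the pairs written below (derived from the loop that follows):
+    //   array[2*k]     = name of the k-th unallocated global
+    //   array[2*k + 1] = its SharedFunctionInfo, the hole, or undefined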
+    for (int j = 0, i = 0; i < length; i++) {
+      Declaration* decl = declarations->at(i);
+      Variable* var = decl->proxy()->var();
+
+      if (var->IsUnallocated()) {
+        array->set(j++, *(var->name()));
+        FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
+        if (fun_decl == NULL) {
+          if (var->binding_needs_init()) {
+            // In case this binding needs initialization use the hole.
+            array->set_the_hole(j++);
+          } else {
+            array->set_undefined(j++);
+          }
+        } else {
+          Handle<SharedFunctionInfo> function =
+              Compiler::BuildFunctionInfo(fun_decl->fun(), info()->script());
+          // Check for stack-overflow exception.
+          if (function.is_null()) {
+            SetStackOverflow();
+            return;
+          }
+          array->set(j++, *function);
+        }
+      }
+    }
+    int flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
+                DeclareGlobalsNativeFlag::encode(info()->is_native()) |
+                DeclareGlobalsLanguageMode::encode(info()->language_mode());
+    HInstruction* result =
+        new(zone()) HDeclareGlobals(environment()->LookupContext(),
+                                    array,
+                                    flags);
+    AddInstruction(result);
+  }
 }
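
The array built above packs global declarations as (name, value) pairs: slot
2*i holds the variable's name and slot 2*i + 1 holds a SharedFunctionInfo for
a function declaration, the hole when the binding still needs initialization,
or undefined otherwise. A minimal sketch of a consumer of that layout, with a
hypothetical 'array' handle on the runtime side (not part of this patch):

  // Walk the (name, value) pairs produced by VisitDeclarations.
  for (int i = 0; i < array->length(); i += 2) {
    Object* name  = array->get(i);      // variable name (a String)
    Object* value = array->get(i + 1);  // SharedFunctionInfo, hole, or undefined
    // ... declare 'name' with 'value' on the global object ...
  }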
 
 
 void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
-                                      Variable::Mode mode,
-                                      FunctionLiteral* function) {
-  if (mode == Variable::LET) return Bailout("unsupported let declaration");
+                                      VariableMode mode,
+                                      FunctionLiteral* function,
+                                      int* global_count) {
   Variable* var = proxy->var();
+  bool binding_needs_init =
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (var->location()) {
     case Variable::UNALLOCATED:
-      return Bailout("unsupported global declaration");
+      ++(*global_count);
+      return;
     case Variable::PARAMETER:
     case Variable::LOCAL:
     case Variable::CONTEXT:
-      if (mode == Variable::CONST || function != NULL) {
+      if (binding_needs_init || function != NULL) {
         HValue* value = NULL;
-        if (mode == Variable::CONST) {
-          value = graph()->GetConstantHole();
-        } else {
-          VisitForValue(function);
+        if (function != NULL) {
+          CHECK_ALIVE(VisitForValue(function));
           value = Pop();
+        } else {
+          value = graph()->GetConstantHole();
         }
         if (var->IsContextSlot()) {
           HValue* context = environment()->LookupContext();
-          HStoreContextSlot* store =
-              new HStoreContextSlot(context, var->index(), value);
+          HStoreContextSlot* store = new HStoreContextSlot(
+              context, var->index(), HStoreContextSlot::kNoCheck, value);
           AddInstruction(store);
-          if (store->HasSideEffects()) AddSimulate(proxy->id());
+          if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
         } else {
           environment()->Bind(var, value);
         }
@@ -5889,6 +7066,51 @@
 }
 
 
+void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void HGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
+  // TODO(rossberg)
+}
+
+
+void HGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
+  // TODO(rossberg)
+}
+
+
+void HGraphBuilder::VisitModulePath(ModulePath* module) {
+  // TODO(rossberg)
+}
+
+
+void HGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
+  // TODO(rossberg)
+}
+
+
 // Generators for inline runtime functions.
 // Support for types.
 void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
@@ -5917,9 +7139,7 @@
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceTypeAndBranch* result =
-      new(zone()) HHasInstanceTypeAndBranch(value,
-                                            JS_FUNCTION_TYPE,
-                                            JS_FUNCTION_PROXY_TYPE);
+      new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
   return ast_context()->ReturnControl(result, call->id());
 }
 
@@ -5989,10 +7209,11 @@
 void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 0);
   if (function_state()->outer() != NULL) {
-    // We are generating graph for inlined function. Currently
-    // constructor inlining is not supported and we can just return
-    // false from %_IsConstructCall().
-    return ast_context()->ReturnValue(graph()->GetConstantFalse());
+    // We are generating the graph for an inlined function.
+    HValue* value = function_state()->is_construct()
+        ? graph()->GetConstantTrue()
+        : graph()->GetConstantFalse();
+    return ast_context()->ReturnValue(value);
   } else {
     return ast_context()->ReturnControl(new(zone()) HIsConstructCallAndBranch,
                                         call->id());
@@ -6046,8 +7267,56 @@
 }
 
 
+void HGraphBuilder::GenerateDateField(CallRuntime* call) {
+  ASSERT(call->arguments()->length() == 2);
+  ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
+  Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* date = Pop();
+  HDateField* result = new(zone()) HDateField(date, index);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
 void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
-  return Bailout("inlined runtime function: SetValueOf");
+  ASSERT(call->arguments()->length() == 2);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* value = Pop();
+  HValue* object = Pop();
+  // Check whether the object is a smi; skip the store if it is.
+  HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(object);
+  HBasicBlock* if_smi = graph()->CreateBasicBlock();
+  HBasicBlock* if_heap_object = graph()->CreateBasicBlock();
+  HBasicBlock* join = graph()->CreateBasicBlock();
+  smicheck->SetSuccessorAt(0, if_smi);
+  smicheck->SetSuccessorAt(1, if_heap_object);
+  current_block()->Finish(smicheck);
+  if_smi->Goto(join);
+
+  // Check if object is a JSValue.
+  set_current_block(if_heap_object);
+  HHasInstanceTypeAndBranch* typecheck =
+      new(zone()) HHasInstanceTypeAndBranch(object, JS_VALUE_TYPE);
+  HBasicBlock* if_js_value = graph()->CreateBasicBlock();
+  HBasicBlock* not_js_value = graph()->CreateBasicBlock();
+  typecheck->SetSuccessorAt(0, if_js_value);
+  typecheck->SetSuccessorAt(1, not_js_value);
+  current_block()->Finish(typecheck);
+  not_js_value->Goto(join);
+
+  // Create an in-object property store to kValueOffset.
+  set_current_block(if_js_value);
+  Handle<String> name = isolate()->factory()->undefined_symbol();
+  AddInstruction(new HStoreNamedField(object,
+                                      name,
+                                      value,
+                                      true,  // in-object store.
+                                      JSValue::kValueOffset));
+  if_js_value->Goto(join);
+  join->SetJoinId(call->id());
+  set_current_block(join);
+  return ast_context()->ReturnValue(value);
 }
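
Both checks above use the same two-way branch shape; a condensed sketch of the
pattern, restricted to the calls already used in this function ('branch' and
'ast_id' stand in for the concrete control instruction and id):

  HBasicBlock* if_true  = graph()->CreateBasicBlock();
  HBasicBlock* if_false = graph()->CreateBasicBlock();
  HBasicBlock* join     = graph()->CreateBasicBlock();
  branch->SetSuccessorAt(0, if_true);   // taken successor
  branch->SetSuccessorAt(1, if_false);  // not-taken successor
  current_block()->Finish(branch);      // the branch terminates this block
  // ... fill in if_true and if_false, each ending with Goto(join) ...
  join->SetJoinId(ast_id);              // simulate point for deoptimization
  set_current_block(join);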
 
 
@@ -6113,7 +7382,11 @@
 
 // Fast support for Math.random().
 void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
-  return Bailout("inlined runtime function: RandomHeapNumber");
+  HValue* context = environment()->LookupContext();
+  HGlobalObject* global_object = new(zone()) HGlobalObject(context);
+  AddInstruction(global_object);
+  HRandom* result = new(zone()) HRandom(global_object);
+  return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
@@ -6210,12 +7483,37 @@
     CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
   }
   CHECK_ALIVE(VisitForValue(call->arguments()->last()));
+
   HValue* function = Pop();
   HValue* context = environment()->LookupContext();
-  HInvokeFunction* result =
-      new(zone()) HInvokeFunction(context, function, arg_count);
+
+  // Branch for function proxies or other non-functions.
+  HHasInstanceTypeAndBranch* typecheck =
+      new(zone()) HHasInstanceTypeAndBranch(function, JS_FUNCTION_TYPE);
+  HBasicBlock* if_jsfunction = graph()->CreateBasicBlock();
+  HBasicBlock* if_nonfunction = graph()->CreateBasicBlock();
+  HBasicBlock* join = graph()->CreateBasicBlock();
+  typecheck->SetSuccessorAt(0, if_jsfunction);
+  typecheck->SetSuccessorAt(1, if_nonfunction);
+  current_block()->Finish(typecheck);
+
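+  // Both arms drop the arguments and push their own result, so the join
+  // block merges exactly one value, which Pop() below hands back.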
+  set_current_block(if_jsfunction);
+  HInstruction* invoke_result = AddInstruction(
+      new(zone()) HInvokeFunction(context, function, arg_count));
   Drop(arg_count);
-  return ast_context()->ReturnInstruction(result, call->id());
+  Push(invoke_result);
+  if_jsfunction->Goto(join);
+
+  set_current_block(if_nonfunction);
+  HInstruction* call_result = AddInstruction(
+      new(zone()) HCallFunction(context, function, arg_count));
+  Drop(arg_count);
+  Push(call_result);
+  if_nonfunction->Goto(join);
+
+  set_current_block(join);
+  join->SetJoinId(call->id());
+  return ast_context()->ReturnValue(Pop());
 }
 
 
@@ -6255,6 +7553,18 @@
 }
 
 
+void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
+  ASSERT_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
+  HCallStub* result =
+      new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+  result->set_transcendental_type(TranscendentalCache::TAN);
+  Drop(1);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
 void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
@@ -6302,6 +7612,7 @@
     : closure_(closure),
       values_(0),
       assigned_variables_(4),
+      frame_type_(JS_FUNCTION),
       parameter_count_(0),
       specials_count_(1),
       local_count_(0),
@@ -6316,6 +7627,7 @@
 HEnvironment::HEnvironment(const HEnvironment* other)
     : values_(0),
       assigned_variables_(0),
+      frame_type_(JS_FUNCTION),
       parameter_count_(0),
       specials_count_(1),
       local_count_(0),
@@ -6327,6 +7639,23 @@
 }
 
 
+HEnvironment::HEnvironment(HEnvironment* outer,
+                           Handle<JSFunction> closure,
+                           FrameType frame_type,
+                           int arguments)
+    : closure_(closure),
+      values_(arguments),
+      assigned_variables_(0),
+      frame_type_(frame_type),
+      parameter_count_(arguments),
+      local_count_(0),
+      outer_(outer),
+      pop_count_(0),
+      push_count_(0),
+      ast_id_(AstNode::kNoNumber) {
+}
+
+
 void HEnvironment::Initialize(int parameter_count,
                               int local_count,
                               int stack_height) {
@@ -6344,6 +7673,7 @@
   closure_ = other->closure();
   values_.AddAll(other->values_);
   assigned_variables_.AddAll(other->assigned_variables_);
+  frame_type_ = other->frame_type_;
   parameter_count_ = other->parameter_count_;
   local_count_ = other->local_count_;
   if (other->outer_ != NULL) outer_ = other->outer_->Copy();  // Deep copy.
@@ -6398,9 +7728,8 @@
 
 
 bool HEnvironment::ExpressionStackIsEmpty() const {
-  int first_expression = parameter_count() + specials_count() + local_count();
-  ASSERT(length() >= first_expression);
-  return length() == first_expression;
+  ASSERT(length() >= first_expression_index());
+  return length() == first_expression_index();
 }
 
 
@@ -6451,32 +7780,66 @@
 }
 
 
+HEnvironment* HEnvironment::CreateStubEnvironment(HEnvironment* outer,
+                                                  Handle<JSFunction> target,
+                                                  FrameType frame_type,
+                                                  int arguments) const {
+  HEnvironment* new_env = new(closure()->GetIsolate()->zone())
+      HEnvironment(outer, target, frame_type, arguments + 1);
+  for (int i = 0; i <= arguments; ++i) {  // Include receiver.
+    new_env->Push(ExpressionStackAt(arguments - i));
+  }
+  new_env->ClearHistory();
+  return new_env;
+}
+
+
 HEnvironment* HEnvironment::CopyForInlining(
     Handle<JSFunction> target,
+    int arguments,
     FunctionLiteral* function,
     HConstant* undefined,
-    CallKind call_kind) const {
+    CallKind call_kind,
+    bool is_construct) const {
+  ASSERT(frame_type() == JS_FUNCTION);
+
+  Zone* zone = closure()->GetIsolate()->zone();
+
   // Outer environment is a copy of this one without the arguments.
   int arity = function->scope()->num_parameters();
+
   HEnvironment* outer = Copy();
-  outer->Drop(arity + 1);  // Including receiver.
+  outer->Drop(arguments + 1);  // Including receiver.
   outer->ClearHistory();
-  Zone* zone = closure()->GetIsolate()->zone();
+
+  if (is_construct) {
+    // Create an artificial constructor stub environment.  The receiver
+    // should actually be the constructor function, but we pass the newly
+    // allocated object instead; DoComputeConstructStubFrame() relies on that.
+    outer = CreateStubEnvironment(outer, target, JS_CONSTRUCT, arguments);
+  }
+
+  if (arity != arguments) {
+    // Create artificial arguments adaptation environment.
+    outer = CreateStubEnvironment(outer, target, ARGUMENTS_ADAPTOR, arguments);
+  }
+
   HEnvironment* inner =
       new(zone) HEnvironment(outer, function->scope(), target);
   // Get the argument values from the original environment.
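+  // If the call site passed fewer actuals than the formal parameter count,
+  // the missing parameters are filled with undefined below.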
   for (int i = 0; i <= arity; ++i) {  // Include receiver.
-    HValue* push = ExpressionStackAt(arity - i);
+    HValue* push = (i <= arguments) ?
+        ExpressionStackAt(arguments - i) : undefined;
     inner->SetValueAt(i, push);
   }
   // If the function we are inlining is a strict mode function or a
   // builtin function, pass undefined as the receiver for function
   // calls (instead of the global receiver).
-  if ((target->shared()->native() || function->strict_mode()) &&
-      call_kind == CALL_AS_FUNCTION) {
+  if ((target->shared()->native() || !function->is_classic_mode()) &&
+      call_kind == CALL_AS_FUNCTION && !is_construct) {
     inner->SetValueAt(0, undefined);
   }
-  inner->SetValueAt(arity + 1, outer->LookupContext());
+  inner->SetValueAt(arity + 1, LookupContext());
   for (int i = arity + 2; i < inner->length(); ++i) {
     inner->SetValueAt(i, undefined);
   }
@@ -6492,7 +7855,7 @@
     if (i == parameter_count()) stream->Add("specials\n");
     if (i == parameter_count() + specials_count()) stream->Add("locals\n");
     if (i == parameter_count() + specials_count() + local_count()) {
-      stream->Add("expressions");
+      stream->Add("expressions\n");
     }
     HValue* val = values_.at(i);
     stream->Add("%d: ", i);
@@ -6503,6 +7866,7 @@
     }
     stream->Add("\n");
   }
+  PrintF("\n");
 }
 
 
@@ -6567,7 +7931,10 @@
     }
 
     PrintEmptyProperty("xhandlers");
-    PrintEmptyProperty("flags");
+    const char* flags = current->IsLoopSuccessorDominator()
+        ? "dom-loop-succ"
+        : "";
+    PrintStringProperty("flags", flags);
 
     if (current->dominator() != NULL) {
       PrintBlockProperty("dominator", current->dominator()->block_id());
@@ -6668,7 +8035,7 @@
     PrintIndent();
     trace_.Add("%d %s", range->id(), type);
     if (range->HasRegisterAssigned()) {
-      LOperand* op = range->CreateAssignedOperand();
+      LOperand* op = range->CreateAssignedOperand(ZONE);
       int assigned_reg = op->index();
       if (op->IsDoubleRegister()) {
         trace_.Add(" \"%s\"",
@@ -6694,7 +8061,9 @@
     }
     LOperand* op = range->FirstHint();
     int hint_index = -1;
-    if (op != NULL && op->IsUnallocated()) hint_index = op->VirtualRegister();
+    if (op != NULL && op->IsUnallocated()) {
+      hint_index = LUnallocated::cast(op)->virtual_register();
+    }
     trace_.Add(" %d %d", parent_index, hint_index);
     UseInterval* cur_interval = range->first_interval();
     while (cur_interval != NULL && range->Covers(cur_interval->start())) {
@@ -6810,7 +8179,10 @@
     HStatistics::Instance()->SaveTiming(name_, end - start_, size);
   }
 
-  if (FLAG_trace_hydrogen) {
+  // Produce trace output if the flag is set and the first letter of the
+  // phase name matches the command-line parameter FLAG_trace_phase.
+  if (FLAG_trace_hydrogen &&
+      OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL) {
     if (graph_ != NULL) HTracer::Instance()->TraceHydrogen(name_, graph_);
     if (chunk_ != NULL) HTracer::Instance()->TraceLithium(name_, chunk_);
     if (allocator_ != NULL) {
@@ -6819,7 +8191,7 @@
   }
 
 #ifdef DEBUG
-  if (graph_ != NULL) graph_->Verify();
+  if (graph_ != NULL) graph_->Verify(false);  // No full verify.
   if (allocator_ != NULL) allocator_->Verify();
 #endif
 }
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 03fbc73..e2779bb 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -121,11 +121,12 @@
 
   void Finish(HControlInstruction* last);
   void FinishExit(HControlInstruction* instruction);
-  void Goto(HBasicBlock* block);
+  void Goto(HBasicBlock* block, bool drop_extra = false);
 
   int PredecessorIndexOf(HBasicBlock* predecessor) const;
   void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
   void AssignCommonDominator(HBasicBlock* other);
+  void AssignLoopSuccessorDominators();
 
   void FinishExitWithDeoptimization(HDeoptimize::UseEnvironment has_uses) {
     FinishExit(CreateDeoptimize(has_uses));
@@ -133,7 +134,9 @@
 
   // Add the inlined function exit sequence, adding an HLeaveInlined
   // instruction and updating the bailout environment.
-  void AddLeaveInlined(HValue* return_value, HBasicBlock* target);
+  void AddLeaveInlined(HValue* return_value,
+                       HBasicBlock* target,
+                       bool drop_extra = false);
 
   // If a target block is tagged as an inline function return, all
   // predecessors should contain the inlined exit sequence:
@@ -147,6 +150,13 @@
   bool IsDeoptimizing() const { return is_deoptimizing_; }
   void MarkAsDeoptimizing() { is_deoptimizing_ = true; }
 
+  bool IsLoopSuccessorDominator() const {
+    return dominates_loop_successors_;
+  }
+  void MarkAsLoopSuccessorDominator() {
+    dominates_loop_successors_ = true;
+  }
+
   inline Zone* zone();
 
 #ifdef DEBUG
@@ -180,6 +190,22 @@
   HBasicBlock* parent_loop_header_;
   bool is_inline_return_target_;
   bool is_deoptimizing_;
+  bool dominates_loop_successors_;
+};
+
+
+class HPredecessorIterator BASE_EMBEDDED {
+ public:
+  explicit HPredecessorIterator(HBasicBlock* block)
+      : predecessor_list_(block->predecessors()), current_(0) { }
+
+  bool Done() { return current_ >= predecessor_list_->length(); }
+  HBasicBlock* Current() { return predecessor_list_->at(current_); }
+  void Advance() { current_++; }
+
+ private:
+  const ZoneList<HBasicBlock*>* predecessor_list_;
+  int current_;
 };
 
 
@@ -243,11 +269,13 @@
 
   // Returns false if there are phi-uses of the arguments-object
   // which are not supported by the optimizing compiler.
-  bool CheckPhis();
+  bool CheckArgumentsPhiUses();
 
-  // Returns false if there are phi-uses of hole values comming
-  // from uninitialized consts.
-  bool CollectPhis();
+  // Returns false if there are phi-uses of an uninitialized const
+  // which are not supported by the optimizing compiler.
+  bool CheckConstPhiUses();
+
+  void CollectPhis();
 
   Handle<Code> Compile(CompilationInfo* info);
 
@@ -265,7 +293,6 @@
   HArgumentsObject* GetArgumentsObject() const {
     return arguments_object_.get();
   }
-  bool HasArgumentsObject() const { return arguments_object_.is_set(); }
 
   void SetArgumentsObject(HArgumentsObject* object) {
     arguments_object_.set(object);
@@ -283,9 +310,29 @@
   }
 
 #ifdef DEBUG
-  void Verify() const;
+  void Verify(bool do_full_verify) const;
 #endif
 
+  bool has_osr_loop_entry() {
+    return osr_loop_entry_.is_set();
+  }
+
+  HBasicBlock* osr_loop_entry() {
+    return osr_loop_entry_.get();
+  }
+
+  void set_osr_loop_entry(HBasicBlock* entry) {
+    osr_loop_entry_.set(entry);
+  }
+
+  ZoneList<HUnknownOSRValue*>* osr_values() {
+    return osr_values_.get();
+  }
+
+  void set_osr_values(ZoneList<HUnknownOSRValue*>* values) {
+    osr_values_.set(values);
+  }
+
  private:
   void Postorder(HBasicBlock* block,
                  BitVector* visited,
@@ -326,6 +373,9 @@
   SetOncePointer<HConstant> constant_hole_;
   SetOncePointer<HArgumentsObject> arguments_object_;
 
+  SetOncePointer<HBasicBlock> osr_loop_entry_;
+  SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
+
   DISALLOW_COPY_AND_ASSIGN(HGraph);
 };
 
@@ -333,18 +383,34 @@
 Zone* HBasicBlock::zone() { return graph_->zone(); }
 
 
+// Type of stack frame an environment might refer to.
+enum FrameType { JS_FUNCTION, JS_CONSTRUCT, ARGUMENTS_ADAPTOR };
+
+
 class HEnvironment: public ZoneObject {
  public:
   HEnvironment(HEnvironment* outer,
                Scope* scope,
                Handle<JSFunction> closure);
 
+  HEnvironment* DiscardInlined(bool drop_extra) {
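+    // Pops any artificial frames (arguments adaptor, construct stub) pushed
+    // for an inlined call, returning the enclosing real JS function frame.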
+    HEnvironment* outer = outer_;
+    while (outer->frame_type() != JS_FUNCTION) outer = outer->outer_;
+    if (drop_extra) outer->Drop(1);
+    return outer;
+  }
+
+  HEnvironment* arguments_environment() {
+    return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
+  }
+
   // Simple accessors.
   Handle<JSFunction> closure() const { return closure_; }
   const ZoneList<HValue*>* values() const { return &values_; }
   const ZoneList<int>* assigned_variables() const {
     return &assigned_variables_;
   }
+  FrameType frame_type() const { return frame_type_; }
   int parameter_count() const { return parameter_count_; }
   int specials_count() const { return specials_count_; }
   int local_count() const { return local_count_; }
@@ -360,6 +426,10 @@
     return i >= parameter_count() && i < parameter_count() + specials_count();
   }
 
+  int first_expression_index() const {
+    return parameter_count() + specials_count() + local_count();
+  }
+
   void Bind(Variable* variable, HValue* value) {
     Bind(IndexFor(variable), value);
   }
@@ -423,9 +493,11 @@
   // environment is the outer environment but the top expression stack
   // elements are moved to an inner environment as parameters.
   HEnvironment* CopyForInlining(Handle<JSFunction> target,
+                                int arguments,
                                 FunctionLiteral* function,
                                 HConstant* undefined,
-                                CallKind call_kind) const;
+                                CallKind call_kind,
+                                bool is_construct) const;
 
   void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
 
@@ -446,6 +518,18 @@
  private:
   explicit HEnvironment(const HEnvironment* other);
 
+  HEnvironment(HEnvironment* outer,
+               Handle<JSFunction> closure,
+               FrameType frame_type,
+               int arguments);
+
+  // Create an artificial stub environment (e.g. for an arguments adaptor or
+  // a constructor stub).
+  HEnvironment* CreateStubEnvironment(HEnvironment* outer,
+                                      Handle<JSFunction> target,
+                                      FrameType frame_type,
+                                      int arguments) const;
+
   // True if index is included in the expression stack part of the environment.
   bool HasExpressionAt(int index) const;
 
@@ -467,6 +551,7 @@
   // Value array [parameters] [specials] [locals] [temporaries].
   ZoneList<HValue*> values_;
   ZoneList<int> assigned_variables_;
+  FrameType frame_type_;
   int parameter_count_;
   int specials_count_;
   int local_count_;
@@ -601,16 +686,26 @@
 };
 
 
-class FunctionState BASE_EMBEDDED {
+enum ReturnHandlingFlag {
+  NORMAL_RETURN,
+  DROP_EXTRA_ON_RETURN,
+  CONSTRUCT_CALL_RETURN
+};
+
+
+class FunctionState {
  public:
   FunctionState(HGraphBuilder* owner,
                 CompilationInfo* info,
-                TypeFeedbackOracle* oracle);
+                TypeFeedbackOracle* oracle,
+                ReturnHandlingFlag return_handling);
   ~FunctionState();
 
   CompilationInfo* compilation_info() { return compilation_info_; }
   TypeFeedbackOracle* oracle() { return oracle_; }
   AstContext* call_context() { return call_context_; }
+  bool drop_extra() { return return_handling_ == DROP_EXTRA_ON_RETURN; }
+  bool is_construct() { return return_handling_ == CONSTRUCT_CALL_RETURN; }
   HBasicBlock* function_return() { return function_return_; }
   TestContext* test_context() { return test_context_; }
   void ClearInlinedTestContext() {
@@ -630,7 +725,13 @@
   // inlined. NULL when not inlining.
   AstContext* call_context_;
 
-  // When inlining in an effect of value context, this is the return block.
+  // Indicates whether special handling is needed on return from inlined
+  // functions:
+  // - DROP_EXTRA_ON_RETURN: Drop an extra value from the environment.
+  // - CONSTRUCT_CALL_RETURN: Either use allocated receiver or return value.
+  ReturnHandlingFlag return_handling_;
+
+  // When inlining in an effect or value context, this is the return block.
   // It is NULL otherwise.  When inlining in a test context, there are a
   // pair of return blocks in the context.  When not inlining, there is no
   // local return point.
@@ -647,14 +748,19 @@
 class HGraphBuilder: public AstVisitor {
  public:
   enum BreakType { BREAK, CONTINUE };
+  enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
 
   // A class encapsulating (lazily-allocated) break and continue blocks for
   // a breakable statement.  Separated from BreakAndContinueScope so that it
   // can have a separate lifetime.
   class BreakAndContinueInfo BASE_EMBEDDED {
    public:
-    explicit BreakAndContinueInfo(BreakableStatement* target)
-      : target_(target), break_block_(NULL), continue_block_(NULL) {
+    explicit BreakAndContinueInfo(BreakableStatement* target,
+                                  int drop_extra = 0)
+        : target_(target),
+          break_block_(NULL),
+          continue_block_(NULL),
+          drop_extra_(drop_extra) {
     }
 
     BreakableStatement* target() { return target_; }
@@ -662,11 +768,13 @@
     void set_break_block(HBasicBlock* block) { break_block_ = block; }
     HBasicBlock* continue_block() { return continue_block_; }
     void set_continue_block(HBasicBlock* block) { continue_block_ = block; }
+    int drop_extra() { return drop_extra_; }
 
    private:
     BreakableStatement* target_;
     HBasicBlock* break_block_;
     HBasicBlock* continue_block_;
+    int drop_extra_;
   };
 
   // A helper class to maintain a stack of current BreakAndContinueInfo
@@ -685,7 +793,7 @@
     BreakAndContinueScope* next() { return next_; }
 
     // Search the break stack for a break or continue target.
-    HBasicBlock* Get(BreakableStatement* stmt, BreakType type);
+    HBasicBlock* Get(BreakableStatement* stmt, BreakType type, int* drop_extra);
 
    private:
     BreakAndContinueInfo* info_;
@@ -726,6 +834,10 @@
 
   TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
 
+  FunctionState* function_state() const { return function_state_; }
+
+  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+
  private:
   // Type of a member function that generates inline code for a native function.
   typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -743,8 +855,13 @@
   static const int kMaxInlinedSize = 196;
   static const int kMaxSourceSize = 600;
 
+  // Even in the 'unlimited' case we still need some limit so that we do not
+  // overflow the stack.
+  static const int kUnlimitedMaxInlinedNodes = 1000;
+  static const int kUnlimitedMaxInlinedSize = 1000;
+  static const int kUnlimitedMaxSourceSize = 600;
+
   // Simple accessors.
-  FunctionState* function_state() const { return function_state_; }
   void set_function_state(FunctionState* state) { function_state_ = state; }
 
   AstContext* ast_context() const { return ast_context_; }
@@ -754,7 +871,6 @@
   CompilationInfo* info() const {
     return function_state()->compilation_info();
   }
-
   AstContext* call_context() const {
     return function_state()->call_context();
   }
@@ -767,8 +883,9 @@
   void ClearInlinedTestContext() {
     function_state()->ClearInlinedTestContext();
   }
-  bool function_strict_mode() {
-    return function_state()->compilation_info()->is_strict_mode();
+  StrictModeFlag function_strict_mode_flag() {
+    return function_state()->compilation_info()->is_classic_mode()
+        ? kNonStrictMode : kStrictMode;
   }
 
   // Generators for inline runtime functions.
@@ -780,8 +897,9 @@
 #undef INLINE_FUNCTION_GENERATOR_DECLARATION
 
   void HandleDeclaration(VariableProxy* proxy,
-                         Variable::Mode mode,
-                         FunctionLiteral* function);
+                         VariableMode mode,
+                         FunctionLiteral* function,
+                         int* global_count);
 
   void VisitDelete(UnaryOperation* expr);
   void VisitVoid(UnaryOperation* expr);
@@ -795,7 +913,7 @@
   void VisitLogicalExpression(BinaryOperation* expr);
   void VisitArithmeticExpression(BinaryOperation* expr);
 
-  void PreProcessOsrEntry(IterationStatement* statement);
+  bool PreProcessOsrEntry(IterationStatement* statement);
   // True iff we are compiling for OSR and the statement is the entry.
   bool HasOsrEntryAt(IterationStatement* statement);
   void VisitLoopBody(IterationStatement* stmt,
@@ -849,7 +967,7 @@
 
   // Remove the arguments from the bailout environment and emit instructions
   // to push them as outgoing parameters.
-  template <int V> HInstruction* PreProcessCall(HCall<V>* call);
+  template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
 
   void TraceRepresentation(Token::Value op,
                            TypeInfo info,
@@ -857,7 +975,7 @@
                            Representation rep);
   static Representation ToRepresentation(TypeInfo info);
 
-  void SetupScope(Scope* scope);
+  void SetUpScope(Scope* scope);
   virtual void VisitStatements(ZoneList<Statement*>* statements);
 
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
@@ -881,11 +999,21 @@
   // Try to optimize fun.apply(receiver, arguments) pattern.
   bool TryCallApply(Call* expr);
 
-  bool TryInline(Call* expr);
-  bool TryInlineBuiltinFunction(Call* expr,
-                                HValue* receiver,
-                                Handle<Map> receiver_map,
-                                CheckType check_type);
+  bool TryInline(CallKind call_kind,
+                 Handle<JSFunction> target,
+                 ZoneList<Expression*>* arguments,
+                 HValue* receiver,
+                 int ast_id,
+                 int return_id,
+                 ReturnHandlingFlag return_handling);
+
+  bool TryInlineCall(Call* expr, bool drop_extra = false);
+  bool TryInlineConstruct(CallNew* expr, HValue* receiver);
+  bool TryInlineBuiltinMethodCall(Call* expr,
+                                  HValue* receiver,
+                                  Handle<Map> receiver_map,
+                                  CheckType check_type);
+  bool TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra);
 
   // If --trace-inlining, print a line of the inlining trace.  Inlining
   // succeeded if the reason string is NULL and failed if there is a
@@ -910,11 +1038,12 @@
                                   HValue* receiver,
                                   SmallMapList* types,
                                   Handle<String> name);
-  void HandleLiteralCompareTypeof(CompareOperation* compare_expr,
-                                  Expression* expr,
+  void HandleLiteralCompareTypeof(CompareOperation* expr,
+                                  HTypeof* typeof_expr,
                                   Handle<String> check);
-  void HandleLiteralCompareUndefined(CompareOperation* compare_expr,
-                                     Expression* expr);
+  void HandleLiteralCompareNil(CompareOperation* expr,
+                               HValue* value,
+                               NilValue nil);
 
   HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
                                            HValue* string,
@@ -938,11 +1067,16 @@
       HValue* val,
       ElementsKind elements_kind,
       bool is_store);
+  HInstruction* BuildFastElementAccess(HValue* elements,
+                                       HValue* checked_key,
+                                       HValue* val,
+                                       ElementsKind elements_kind,
+                                       bool is_store);
 
   HInstruction* BuildMonomorphicElementAccess(HValue* object,
                                               HValue* key,
                                               HValue* val,
-                                              Expression* expr,
+                                              Handle<Map> map,
                                               bool is_store);
   HValue* HandlePolymorphicElementAccess(HValue* object,
                                          HValue* key,
@@ -969,6 +1103,9 @@
   HInstruction* BuildStoreNamed(HValue* object,
                                 HValue* value,
                                 Expression* expr);
+  HInstruction* BuildStoreNamed(HValue* object,
+                                HValue* value,
+                                ObjectLiteral::Property* prop);
   HInstruction* BuildStoreNamedField(HValue* object,
                                      Handle<String> name,
                                      HValue* value,
@@ -1037,10 +1174,10 @@
     Resize(kInitialSize);
   }
 
-  void Kill(int flags);
+  void Kill(GVNFlagSet flags);
 
   void Add(HValue* value) {
-    present_flags_ |= value->flags();
+    present_flags_.Add(value->gvn_flags());
     Insert(value);
   }
 
@@ -1073,7 +1210,8 @@
   int array_size_;
   int lists_size_;
   int count_;  // The number of values stored in the HValueMap.
-  int present_flags_;  // All flags that are in any value in the HValueMap.
+  GVNFlagSet present_flags_;  // All flags that are in any value in the
+                              // HValueMap.
   HValueMapListElement* array_;  // Primary store - contains the first value
   // with a given hash.  Colliding elements are stored in linked lists.
   HValueMapListElement* lists_;  // The linked lists containing hash collisions.
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 0ca2d6b..3cf0d00 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -30,13 +30,15 @@
 
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 // A light-weight IA32 Assembler.
 
 #ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
 #define V8_IA32_ASSEMBLER_IA32_INL_H_
 
+#include "ia32/assembler-ia32.h"
+
 #include "cpu.h"
 #include "debug.h"
 
@@ -78,19 +80,26 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+                              || rmode_ == EMBEDDED_OBJECT
+                              || rmode_ == EXTERNAL_REFERENCE);
   return reinterpret_cast<Address>(pc_);
 }
 
 
 int RelocInfo::target_address_size() {
-  return Assembler::kExternalTargetSize;
+  return Assembler::kSpecialTargetSize;
 }
 
 
-void RelocInfo::set_target_address(Address target) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
   Assembler::set_target_address_at(pc_, target);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -112,10 +121,16 @@
 }
 
 
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Memory::Object_at(pc_) = target;
   CPU::FlushICache(pc_, sizeof(Address));
+  if (mode == UPDATE_WRITE_BARRIER &&
+      host() != NULL &&
+      target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
 }
 
 
@@ -142,11 +157,18 @@
 }
 
 
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+                                WriteBarrierMode mode) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
   CPU::FlushICache(pc_, sizeof(Address));
+  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+    // TODO(1550): We are passing NULL as the slot because the cell can never
+    // be on an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), NULL, cell);
+  }
 }
 
 
@@ -161,6 +183,11 @@
   ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Assembler::set_target_address_at(pc_ + 1, target);
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -194,14 +221,14 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitPointer(target_object_address());
+    visitor->VisitEmbeddedPointer(this);
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    visitor->VisitExternalReference(target_reference_address());
+    visitor->VisitExternalReference(this);
     CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
@@ -222,14 +249,14 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitEmbeddedPointer(heap, this);
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    StaticVisitor::VisitExternalReference(target_reference_address());
+    StaticVisitor::VisitExternalReference(this);
     CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 9996474..a42f632 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -32,7 +32,7 @@
 
 // The original source code covered by the above license has been modified
 // significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -55,6 +55,8 @@
 uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
+// The Probe method needs executable memory, so it uses Heap::CreateCode.
+// Allocation failure is silent and leads to a safe default.
 void CpuFeatures::Probe() {
   ASSERT(!initialized_);
   ASSERT(supported_ == 0);
@@ -86,23 +88,23 @@
   __ pushfd();
   __ push(ecx);
   __ push(ebx);
-  __ mov(ebp, Operand(esp));
+  __ mov(ebp, esp);
 
   // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
   __ pushfd();
   __ pop(eax);
-  __ mov(edx, Operand(eax));
+  __ mov(edx, eax);
   __ xor_(eax, 0x200000);  // Flip bit 21.
   __ push(eax);
   __ popfd();
   __ pushfd();
   __ pop(eax);
-  __ xor_(eax, Operand(edx));  // Different if CPUID is supported.
+  __ xor_(eax, edx);  // Different if CPUID is supported.
   __ j(not_zero, &cpuid);
 
   // CPUID not supported. Clear the supported features in edx:eax.
-  __ xor_(eax, Operand(eax));
-  __ xor_(edx, Operand(edx));
+  __ xor_(eax, eax);
+  __ xor_(edx, edx);
   __ jmp(&done);
 
   // Invoke CPUID with 1 in eax to get feature information in
@@ -118,13 +120,13 @@
 
   // Move the result from ecx:edx to edx:eax and make sure to mark the
   // CPUID feature as supported.
-  __ mov(eax, Operand(edx));
+  __ mov(eax, edx);
   __ or_(eax, 1 << CPUID);
-  __ mov(edx, Operand(ecx));
+  __ mov(edx, ecx);
 
   // Done.
   __ bind(&done);
-  __ mov(esp, Operand(ebp));
+  __ mov(esp, ebp);
   __ pop(ebx);
   __ pop(ecx);
   __ popfd();
@@ -286,6 +288,18 @@
       && ((buf_[0] & 0x07) == reg.code());  // register codes match.
 }
 
+
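+// An Operand wraps a bare register iff its ModR/M byte has mod (bits 7..6)
+// equal to 11, i.e. register-direct addressing. The reg field (bits 5..3) is
+// still zero at this point -- it is ORed in later by emit_operand -- so
+// masking with 0xF8 and comparing against 0xC0 is sufficient; the register
+// code itself sits in the r/m field (bits 2..0).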
+bool Operand::is_reg_only() const {
+  return (buf_[0] & 0xF8) == 0xC0;  // Addressing mode is register only.
+}
+
+
+Register Operand::reg() const {
+  ASSERT(is_reg_only());
+  return Register::from_code(buf_[0] & 0x07);
+}
+
+
 // -----------------------------------------------------------------------------
 // Implementation of Assembler.
 
@@ -336,7 +350,7 @@
   }
 #endif
 
-  // Setup buffer pointers.
+  // Set up buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -363,7 +377,7 @@
   // Finalize code (at this point overflow() may be true, but the gap ensures
   // that we are still not overlapping instructions and relocation info).
   ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
-  // Setup code descriptor.
+  // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -374,8 +388,91 @@
 
 void Assembler::Align(int m) {
   ASSERT(IsPowerOf2(m));
-  while ((pc_offset() & (m - 1)) != 0) {
-    nop();
+  int mask = m - 1;
+  int addr = pc_offset();
+  Nop((m - (addr & mask)) & mask);
+}
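
The padding computation (m - (addr & mask)) & mask gives the number of bytes
to the next m-byte boundary, and the outer mask makes it zero when addr is
already aligned. A self-contained check of the formula (plain C++, assuming
nothing from V8):

  #include <cassert>

  // Mirrors the padding computation in Assembler::Align above.
  static int PaddingFor(int addr, int m) {
    int mask = m - 1;  // m must be a power of two
    return (m - (addr & mask)) & mask;
  }

  int main() {
    assert(PaddingFor(32, 16) == 0);   // already aligned
    assert(PaddingFor(33, 16) == 15);  // one byte past a boundary
    assert(PaddingFor(45, 16) == 3);   // 45 is padded up to 48
    return 0;
  }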
+
+
+bool Assembler::IsNop(Address addr) {
+  Address a = addr;
+  while (*a == 0x66) a++;
+  if (*a == 0x90) return true;
+  if (a[0] == 0xf && a[1] == 0x1f) return true;
+  return false;
+}
+
+
+void Assembler::Nop(int bytes) {
+  EnsureSpace ensure_space(this);
+
+  if (!CpuFeatures::IsSupported(SSE2)) {
+    // Older CPUs that do not support SSE2 may not support multibyte NOP
+    // instructions.
+    for (; bytes > 0; bytes--) {
+      EMIT(0x90);
+    }
+    return;
+  }
+
+  // Multi-byte NOPs from http://support.amd.com/us/Processor_TechDocs/40546.pdf
+  while (bytes > 0) {
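+    // Note the deliberate fall-throughs: a 0x66 operand-size prefix makes
+    // the following NOP one byte longer (cases 2, 6, and 9 through 11), and
+    // anything longer than 11 bytes is emitted as repeated long NOPs.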
+    switch (bytes) {
+      case 2:
+        EMIT(0x66);
+      case 1:
+        EMIT(0x90);
+        return;
+      case 3:
+        EMIT(0xf);
+        EMIT(0x1f);
+        EMIT(0);
+        return;
+      case 4:
+        EMIT(0xf);
+        EMIT(0x1f);
+        EMIT(0x40);
+        EMIT(0);
+        return;
+      case 6:
+        EMIT(0x66);
+      case 5:
+        EMIT(0xf);
+        EMIT(0x1f);
+        EMIT(0x44);
+        EMIT(0);
+        EMIT(0);
+        return;
+      case 7:
+        EMIT(0xf);
+        EMIT(0x1f);
+        EMIT(0x80);
+        EMIT(0);
+        EMIT(0);
+        EMIT(0);
+        EMIT(0);
+        return;
+      default:
+      case 11:
+        EMIT(0x66);
+        bytes--;
+      case 10:
+        EMIT(0x66);
+        bytes--;
+      case 9:
+        EMIT(0x66);
+        bytes--;
+      case 8:
+        EMIT(0xf);
+        EMIT(0x1f);
+        EMIT(0x84);
+        EMIT(0);
+        EMIT(0);
+        EMIT(0);
+        EMIT(0);
+        EMIT(0);
+        bytes -= 8;
+    }
   }
 }
 
@@ -449,13 +546,6 @@
 }
 
 
-void Assembler::push(Handle<Object> handle) {
-  EnsureSpace ensure_space(this);
-  EMIT(0x68);
-  emit(handle);
-}
-
-
 void Assembler::pop(Register dst) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
   EnsureSpace ensure_space(this);
@@ -485,7 +575,7 @@
 
 
 void Assembler::mov_b(Register dst, const Operand& src) {
-  ASSERT(dst.code() < 4);
+  CHECK(dst.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x8A);
   emit_operand(dst, src);
@@ -501,7 +591,7 @@
 
 
 void Assembler::mov_b(const Operand& dst, Register src) {
-  ASSERT(src.code() < 4);
+  CHECK(src.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x88);
   emit_operand(src, dst);
@@ -614,26 +704,6 @@
 }
 
 
-void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
-  ASSERT(CpuFeatures::IsEnabled(CMOV));
-  EnsureSpace ensure_space(this);
-  UNIMPLEMENTED();
-  USE(cc);
-  USE(dst);
-  USE(imm32);
-}
-
-
-void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
-  ASSERT(CpuFeatures::IsEnabled(CMOV));
-  EnsureSpace ensure_space(this);
-  UNIMPLEMENTED();
-  USE(cc);
-  USE(dst);
-  USE(handle);
-}
-
-
 void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
@@ -701,6 +771,13 @@
 }
 
 
+void Assembler::add(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x01);
+  emit_operand(src, dst);
+}
+
+
 void Assembler::add(const Operand& dst, const Immediate& x) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
   EnsureSpace ensure_space(this);
@@ -741,25 +818,29 @@
 
 void Assembler::cmpb(const Operand& op, int8_t imm8) {
   EnsureSpace ensure_space(this);
-  EMIT(0x80);
-  emit_operand(edi, op);  // edi == 7
+  if (op.is_reg(eax)) {
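+    // Short form: CMP AL, imm8 has a dedicated one-byte opcode (0x3C).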
+    EMIT(0x3C);
+  } else {
+    EMIT(0x80);
+    emit_operand(edi, op);  // edi == 7
+  }
   EMIT(imm8);
 }
 
 
-void Assembler::cmpb(const Operand& dst, Register src) {
-  ASSERT(src.is_byte_register());
+void Assembler::cmpb(const Operand& op, Register reg) {
+  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x38);
-  emit_operand(src, dst);
+  emit_operand(reg, op);
 }
 
 
-void Assembler::cmpb(Register dst, const Operand& src) {
-  ASSERT(dst.is_byte_register());
+void Assembler::cmpb(Register reg, const Operand& op) {
+  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x3A);
-  emit_operand(dst, src);
+  emit_operand(reg, op);
 }
 
 
@@ -820,6 +901,7 @@
 
 
 void Assembler::dec_b(Register dst) {
+  CHECK(dst.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0xFE);
   EMIT(0xC8 | dst.code());
@@ -1069,18 +1151,6 @@
 }
 
 
-void Assembler::subb(const Operand& op, int8_t imm8) {
-  EnsureSpace ensure_space(this);
-  if (op.is_reg(eax)) {
-    EMIT(0x2c);
-  } else {
-    EMIT(0x80);
-    emit_operand(ebp, op);  // ebp == 5
-  }
-  EMIT(imm8);
-}
-
-
 void Assembler::sub(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
   emit_arith(5, dst, x);
@@ -1094,14 +1164,6 @@
 }
 
 
-void Assembler::subb(Register dst, const Operand& src) {
-  ASSERT(dst.code() < 4);
-  EnsureSpace ensure_space(this);
-  EMIT(0x2A);
-  emit_operand(dst, src);
-}
-
-
 void Assembler::sub(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   EMIT(0x29);
@@ -1113,7 +1175,9 @@
   EnsureSpace ensure_space(this);
   // Only use test against byte for registers that have a byte
   // variant: eax, ebx, ecx, and edx.
-  if (imm.rmode_ == RelocInfo::NONE && is_uint8(imm.x_) && reg.code() < 4) {
+  if (imm.rmode_ == RelocInfo::NONE &&
+      is_uint8(imm.x_) &&
+      reg.is_byte_register()) {
     uint8_t imm8 = imm.x_;
     if (reg.is(eax)) {
       EMIT(0xA8);
@@ -1143,6 +1207,7 @@
 
 
 void Assembler::test_b(Register reg, const Operand& op) {
+  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x84);
   emit_operand(reg, op);
@@ -1158,6 +1223,10 @@
 
 
 void Assembler::test_b(const Operand& op, uint8_t imm8) {
+  if (op.is_reg_only() && !op.reg().is_byte_register()) {
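+    // Registers without an 8-bit variant (esp, ebp, esi, edi) cannot be
+    // encoded here; a 32-bit test with the zero-extended immediate sets the
+    // zero flag identically.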
+    test(op, Immediate(imm8));
+    return;
+  }
   EnsureSpace ensure_space(this);
   EMIT(0xF6);
   emit_operand(eax, op);
@@ -1178,10 +1247,10 @@
 }
 
 
-void Assembler::xor_(const Operand& src, Register dst) {
+void Assembler::xor_(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   EMIT(0x31);
-  emit_operand(dst, src);
+  emit_operand(src, dst);
 }
 
 
@@ -1637,6 +1706,13 @@
 }
 
 
+void Assembler::fptan() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  EMIT(0xF2);
+}
+
+
 void Assembler::fyl2x() {
   EnsureSpace ensure_space(this);
   EMIT(0xD9);
@@ -1644,6 +1720,27 @@
 }
 
 
+void Assembler::f2xm1() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  EMIT(0xF0);
+}
+
+
+void Assembler::fscale() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  EMIT(0xFD);
+}
+
+
+void Assembler::fninit() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xDB);
+  EMIT(0xE3);
+}
+
+
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
   emit_farith(0xDC, 0xC0, i);
@@ -1957,6 +2054,16 @@
 }
 
 
+void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x2E);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
   ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
@@ -2162,6 +2269,19 @@
 }
 
 
+void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
+  ASSERT(CpuFeatures::IsSupported(SSE4_1));
+  ASSERT(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x3A);
+  EMIT(0x17);
+  emit_sse_operand(dst, src);
+  EMIT(imm8);
+}
+
+
 void Assembler::pand(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2341,7 +2461,7 @@
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
-  // Setup new buffer.
+  // Set up new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
   desc.instr_size = pc_offset();
   desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
@@ -2471,7 +2591,7 @@
       return;
     }
   }
-  RelocInfo rinfo(pc_, rmode, data);
+  RelocInfo rinfo(pc_, rmode, data, NULL);
   reloc_info_writer.Write(&rinfo);
 }
 
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 4698e3e..929b485 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -75,6 +75,8 @@
   static inline Register FromAllocationIndex(int index);
 
   static Register from_code(int code) {
+    ASSERT(code >= 0);
+    ASSERT(code < kNumRegisters);
     Register r = { code };
     return r;
   }
@@ -95,16 +97,25 @@
   int code_;
 };
 
+const int kRegister_eax_Code = 0;
+const int kRegister_ecx_Code = 1;
+const int kRegister_edx_Code = 2;
+const int kRegister_ebx_Code = 3;
+const int kRegister_esp_Code = 4;
+const int kRegister_ebp_Code = 5;
+const int kRegister_esi_Code = 6;
+const int kRegister_edi_Code = 7;
+const int kRegister_no_reg_Code = -1;
 
-const Register eax = { 0 };
-const Register ecx = { 1 };
-const Register edx = { 2 };
-const Register ebx = { 3 };
-const Register esp = { 4 };
-const Register ebp = { 5 };
-const Register esi = { 6 };
-const Register edi = { 7 };
-const Register no_reg = { -1 };
+const Register eax = { kRegister_eax_Code };
+const Register ecx = { kRegister_ecx_Code };
+const Register edx = { kRegister_edx_Code };
+const Register ebx = { kRegister_ebx_Code };
+const Register esp = { kRegister_esp_Code };
+const Register ebp = { kRegister_ebp_Code };
+const Register esi = { kRegister_esi_Code };
+const Register edi = { kRegister_edi_Code };
+const Register no_reg = { kRegister_no_reg_Code };
 
 
 inline const char* Register::AllocationIndexToString(int index) {
@@ -300,9 +311,6 @@
 
 class Operand BASE_EMBEDDED {
  public:
-  // reg
-  INLINE(explicit Operand(Register reg));
-
   // XMM reg
   INLINE(explicit Operand(XMMRegister xmm_reg));
 
@@ -347,12 +355,16 @@
   // Returns true if this Operand is a wrapper for the specified register.
   bool is_reg(Register reg) const;
 
+  // Returns true if this Operand is a wrapper for one register.
+  bool is_reg_only() const;
+
+  // Asserts that this Operand is a wrapper for one register and returns the
+  // register.
+  Register reg() const;
+
  private:
-  byte buf_[6];
-  // The number of bytes in buf_.
-  unsigned int len_;
-  // Only valid if len_ > 4.
-  RelocInfo::Mode rmode_;
+  // reg
+  INLINE(explicit Operand(Register reg));
 
   // Set the ModRM byte without an encoded 'reg' register. The
   // register is encoded later as part of the emit_operand operation.
@@ -362,7 +374,15 @@
   inline void set_disp8(int8_t disp);
   inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
 
+  byte buf_[6];
+  // The number of bytes in buf_.
+  unsigned int len_;
+  // Only valid if len_ > 4.
+  RelocInfo::Mode rmode_;
+
   friend class Assembler;
+  friend class MacroAssembler;
+  friend class LCodeGen;
 };
 
 
@@ -578,8 +598,8 @@
 
   // This sets the branch destination (which is in the instruction on x86).
   // This is for calls and branches within generated code.
-  inline static void set_target_at(Address instruction_payload,
-                                   Address target) {
+  inline static void deserialization_set_special_target_at(
+      Address instruction_payload, Address target) {
     set_target_address_at(instruction_payload, target);
   }
 
@@ -590,8 +610,7 @@
     set_target_address_at(instruction_payload, target);
   }
 
-  static const int kCallTargetSize = kPointerSize;
-  static const int kExternalTargetSize = kPointerSize;
+  static const int kSpecialTargetSize = kPointerSize;
 
   // Distance between the address of the code target in the call instruction
   // and the return address
@@ -610,8 +629,6 @@
   // The debug break slot must be able to contain a call instruction.
   static const int kDebugBreakSlotLength = kCallInstructionLength;
 
-  // One byte opcode for test eax,0xXXXXXXXX.
-  static const byte kTestEaxByte = 0xA9;
   // One byte opcode for test al, 0xXX.
   static const byte kTestAlByte = 0xA8;
   // One byte opcode for nop.
@@ -648,6 +665,7 @@
   // possible to align the pc offset to a multiple
   // of m. m must be a power of 2.
   void Align(int m);
+  void Nop(int bytes = 1);
   // Aligns code to something that's optimal for a jump target for the platform.
   void CodeTargetAlign();
 
@@ -662,7 +680,6 @@
   void push_imm32(int32_t imm32);
   void push(Register src);
   void push(const Operand& src);
-  void push(Handle<Object> handle);
 
   void pop(Register dst);
   void pop(const Operand& dst);
@@ -671,7 +688,9 @@
   void leave();
 
   // Moves
+  void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
   void mov_b(Register dst, const Operand& src);
+  void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
   void mov_b(const Operand& dst, int8_t imm8);
   void mov_b(const Operand& dst, Register src);
 
@@ -687,17 +706,22 @@
   void mov(const Operand& dst, Handle<Object> handle);
   void mov(const Operand& dst, Register src);
 
+  void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
   void movsx_b(Register dst, const Operand& src);
 
+  void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
   void movsx_w(Register dst, const Operand& src);
 
+  void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
   void movzx_b(Register dst, const Operand& src);
 
+  void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
   void movzx_w(Register dst, const Operand& src);
 
   // Conditional moves
-  void cmov(Condition cc, Register dst, int32_t imm32);
-  void cmov(Condition cc, Register dst, Handle<Object> handle);
+  void cmov(Condition cc, Register dst, Register src) {
+    cmov(cc, dst, Operand(src));
+  }
   void cmov(Condition cc, Register dst, const Operand& src);
 
   // Flag management.
@@ -715,24 +739,31 @@
   void adc(Register dst, int32_t imm32);
   void adc(Register dst, const Operand& src);
 
+  void add(Register dst, Register src) { add(dst, Operand(src)); }
   void add(Register dst, const Operand& src);
+  void add(const Operand& dst, Register src);
+  void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
   void add(const Operand& dst, const Immediate& x);
 
   void and_(Register dst, int32_t imm32);
   void and_(Register dst, const Immediate& x);
+  void and_(Register dst, Register src) { and_(dst, Operand(src)); }
   void and_(Register dst, const Operand& src);
-  void and_(const Operand& src, Register dst);
+  void and_(const Operand& dst, Register src);
   void and_(const Operand& dst, const Immediate& x);
 
+  void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
   void cmpb(const Operand& op, int8_t imm8);
-  void cmpb(Register src, const Operand& dst);
-  void cmpb(const Operand& dst, Register src);
+  void cmpb(Register reg, const Operand& op);
+  void cmpb(const Operand& op, Register reg);
   void cmpb_al(const Operand& op);
   void cmpw_ax(const Operand& op);
   void cmpw(const Operand& op, Immediate imm16);
   void cmp(Register reg, int32_t imm32);
   void cmp(Register reg, Handle<Object> handle);
+  void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
   void cmp(Register reg, const Operand& op);
+  void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
   void cmp(const Operand& op, const Immediate& imm);
   void cmp(const Operand& op, Handle<Object> handle);
 
@@ -748,6 +779,7 @@
 
   // Signed multiply instructions.
   void imul(Register src);                               // edx:eax = eax * src.
+  void imul(Register dst, Register src) { imul(dst, Operand(src)); }
   void imul(Register dst, const Operand& src);           // dst = dst * src.
   void imul(Register dst, Register src, int32_t imm32);  // dst = src * imm32.
 
@@ -764,8 +796,10 @@
   void not_(Register dst);
 
   void or_(Register dst, int32_t imm32);
+  void or_(Register dst, Register src) { or_(dst, Operand(src)); }
   void or_(Register dst, const Operand& src);
   void or_(const Operand& dst, Register src);
+  void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
   void or_(const Operand& dst, const Immediate& x);
 
   void rcl(Register dst, uint8_t imm8);
@@ -776,35 +810,42 @@
 
   void sbb(Register dst, const Operand& src);
 
+  void shld(Register dst, Register src) { shld(dst, Operand(src)); }
   void shld(Register dst, const Operand& src);
 
   void shl(Register dst, uint8_t imm8);
   void shl_cl(Register dst);
 
+  void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
   void shrd(Register dst, const Operand& src);
 
   void shr(Register dst, uint8_t imm8);
   void shr_cl(Register dst);
 
-  void subb(const Operand& dst, int8_t imm8);
-  void subb(Register dst, const Operand& src);
+  void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
   void sub(const Operand& dst, const Immediate& x);
+  void sub(Register dst, Register src) { sub(dst, Operand(src)); }
   void sub(Register dst, const Operand& src);
   void sub(const Operand& dst, Register src);
 
   void test(Register reg, const Immediate& imm);
+  void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
   void test(Register reg, const Operand& op);
   void test_b(Register reg, const Operand& op);
   void test(const Operand& op, const Immediate& imm);
+  void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
   void test_b(const Operand& op, uint8_t imm8);
 
   void xor_(Register dst, int32_t imm32);
+  void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
   void xor_(Register dst, const Operand& src);
-  void xor_(const Operand& src, Register dst);
+  void xor_(const Operand& dst, Register src);
+  void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
   void xor_(const Operand& dst, const Immediate& x);
 
   // Bit operations.
   void bt(const Operand& dst, Register src);
+  void bts(Register dst, Register src) { bts(Operand(dst), src); }
   void bts(const Operand& dst, Register src);
 
   // Miscellaneous
@@ -835,6 +876,7 @@
   void call(Label* L);
   void call(byte* entry, RelocInfo::Mode rmode);
   int CallSize(const Operand& adr);
+  void call(Register reg) { call(Operand(reg)); }
   void call(const Operand& adr);
   int CallSize(Handle<Code> code, RelocInfo::Mode mode);
   void call(Handle<Code> code,
@@ -845,6 +887,7 @@
   // unconditional jump to L
   void jmp(Label* L, Label::Distance distance = Label::kFar);
   void jmp(byte* entry, RelocInfo::Mode rmode);
+  void jmp(Register reg) { jmp(Operand(reg)); }
   void jmp(const Operand& adr);
   void jmp(Handle<Code> code, RelocInfo::Mode rmode);
 
@@ -887,7 +930,11 @@
   void fchs();
   void fcos();
   void fsin();
+  void fptan();
   void fyl2x();
+  void f2xm1();
+  void fscale();
+  void fninit();
 
   void fadd(int i);
   void fsub(int i);
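
Editorial note: the newly declared x87 helpers round out the transcendental
toolkit: fptan (partial tangent), f2xm1 (2^x - 1 for |x| <= 1), fscale
(multiply by 2^trunc(st1)), and fninit (reset the FPU). As a numeric sanity
check of how f2xm1 and fscale compose into a full 2^x, here is a hedged
standalone model; the split into integer and fractional parts is what keeps
the argument inside f2xm1's domain:

    #include <cmath>
    #include <cstdio>

    // Models st(0) = (f2xm1(frac) + 1) scaled by 2^int, i.e. 2^x.
    double TwoToThe(double x) {
      double i = std::floor(x);             // integer part, for fscale
      double f = x - i;                     // fractional part, in [0, 1)
      double f2xm1 = std::exp2(f) - 1.0;    // what F2XM1 computes
      return (f2xm1 + 1.0) * std::exp2(i);  // what FSCALE then applies
    }

    int main() {
      std::printf("%.6f\n", TwoToThe(3.5));  // 11.313708 (= 8 * sqrt(2))
      return 0;
    }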
@@ -929,6 +976,7 @@
   void cvttss2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, const Operand& src);
 
+  void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
   void cvtsi2sd(XMMRegister dst, const Operand& src);
   void cvtss2sd(XMMRegister dst, XMMRegister src);
   void cvtsd2ss(XMMRegister dst, XMMRegister src);
@@ -944,6 +992,7 @@
   void andpd(XMMRegister dst, XMMRegister src);
 
   void ucomisd(XMMRegister dst, XMMRegister src);
+  void ucomisd(XMMRegister dst, const Operand& src);
 
   enum RoundingMode {
     kRoundToNearest = 0x0,
@@ -969,13 +1018,16 @@
   void movdbl(XMMRegister dst, const Operand& src);
   void movdbl(const Operand& dst, XMMRegister src);
 
+  void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
   void movd(XMMRegister dst, const Operand& src);
-  void movd(const Operand& src, XMMRegister dst);
+  void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
+  void movd(const Operand& dst, XMMRegister src);
   void movsd(XMMRegister dst, XMMRegister src);
 
   void movss(XMMRegister dst, const Operand& src);
-  void movss(const Operand& src, XMMRegister dst);
+  void movss(const Operand& dst, XMMRegister src);
   void movss(XMMRegister dst, XMMRegister src);
+  void extractps(Register dst, XMMRegister src, byte imm8);
 
   void pand(XMMRegister dst, XMMRegister src);
   void pxor(XMMRegister dst, XMMRegister src);
@@ -987,11 +1039,17 @@
   void psrlq(XMMRegister reg, int8_t shift);
   void psrlq(XMMRegister dst, XMMRegister src);
   void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
+  void pextrd(Register dst, XMMRegister src, int8_t offset) {
+    pextrd(Operand(dst), src, offset);
+  }
   void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+  void pinsrd(XMMRegister dst, Register src, int8_t offset) {
+    pinsrd(dst, Operand(src), offset);
+  }
   void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
 
   // Parallel XMM operations.
-  void movntdqa(XMMRegister src, const Operand& dst);
+  void movntdqa(XMMRegister dst, const Operand& src);
   void movntdq(const Operand& dst, XMMRegister src);
   // Prefetch src position into cache level.
   // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
@@ -1033,7 +1091,7 @@
   // Get the number of bytes available in the buffer.
   inline int available_space() const { return reloc_info_writer.pos() - pc_; }
 
-  static bool IsNop(Address addr) { return *addr == 0x90; }
+  static bool IsNop(Address addr);
 
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
@@ -1045,6 +1103,9 @@
   static const int kMaximalBufferSize = 512*MB;
   static const int kMinimalBufferSize = 4*KB;
 
+  byte byte_at(int pos)  { return buffer_[pos]; }
+  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+
  protected:
   bool emit_debug_code() const { return emit_debug_code_; }
 
@@ -1057,9 +1118,8 @@
 
   byte* addr_at(int pos) { return buffer_ + pos; }
 
+
  private:
-  byte byte_at(int pos)  { return buffer_[pos]; }
-  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
     return *reinterpret_cast<uint32_t*>(addr_at(pos));
   }
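
Editorial note: IsNop loses its inline one-byte definition (*addr == 0x90)
because Nop(int bytes) above can now emit multi-byte nop sequences, so
recognizing a nop takes more than inspecting the first byte. The encodings V8
actually emits are not visible in this diff; as an assumption-labeled sketch,
the standard x86 one- and two-byte forms look like:

    #include <cstdint>

    // Hedged sketch only: the real IsNop may recognize longer forms too.
    bool IsNopSketch(const uint8_t* addr) {
      if (addr[0] == 0x90) return true;                     // nop
      if (addr[0] == 0x66 && addr[1] == 0x90) return true;  // 66 nop
      return false;
    }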
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 310ea3d..a5d42cf 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -69,309 +69,294 @@
 
   // JumpToExternalReference expects eax to contain the number of arguments
   // including the receiver and the extra arguments.
-  __ add(Operand(eax), Immediate(num_extra_args + 1));
+  __ add(eax, Immediate(num_extra_args + 1));
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
 
 
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax: number of arguments
-  //  -- edi: constructor function
-  // -----------------------------------
-
-  Label non_function_call;
-  // Check that function is not a smi.
-  __ JumpIfSmi(edi, &non_function_call);
-  // Check that function is a JSFunction.
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &non_function_call);
-
-  // Jump to the function-specific construct stub.
-  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
-  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
-  __ jmp(Operand(ebx));
-
-  // edi: called object
-  // eax: number of arguments
-  __ bind(&non_function_call);
-  // Set expected number of arguments to zero (not changing eax).
-  __ Set(ebx, Immediate(0));
-  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-  Handle<Code> arguments_adaptor =
-      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
-  __ SetCallKind(ecx, CALL_AS_METHOD);
-  __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
-}
-
-
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
                                            bool count_constructions) {
+  // ----------- S t a t e -------------
+  //  -- eax: number of arguments
+  //  -- edi: constructor function
+  // -----------------------------------
+
   // Should never count constructions for api objects.
   ASSERT(!is_api_function || !count_constructions);
 
   // Enter a construct frame.
-  __ EnterConstructFrame();
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-  // Store a smi-tagged arguments count on the stack.
-  __ SmiTag(eax);
-  __ push(eax);
+    // Store a smi-tagged arguments count on the stack.
+    __ SmiTag(eax);
+    __ push(eax);
 
-  // Push the function to invoke on the stack.
-  __ push(edi);
+    // Push the function to invoke on the stack.
+    __ push(edi);
 
-  // Try to allocate the object without transitioning into C code. If any of the
-  // preconditions is not met, the code bails out to the runtime call.
-  Label rt_call, allocated;
-  if (FLAG_inline_new) {
-    Label undo_allocation;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    Label rt_call, allocated;
+    if (FLAG_inline_new) {
+      Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(masm->isolate());
-    __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
-    __ j(not_equal, &rt_call);
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(masm->isolate());
+      __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+      __ j(not_equal, &rt_call);
 #endif
 
-    // Verified that the constructor is a JSFunction.
-    // Load the initial map and verify that it is in fact a map.
-    // edi: constructor
-    __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
-    // Will both indicate a NULL and a Smi
-    __ JumpIfSmi(eax, &rt_call);
-    // edi: constructor
-    // eax: initial map (if proven valid below)
-    __ CmpObjectType(eax, MAP_TYPE, ebx);
-    __ j(not_equal, &rt_call);
+      // Verified that the constructor is a JSFunction.
+      // Load the initial map and verify that it is in fact a map.
+      // edi: constructor
+      __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+      // The smi check below catches both a NULL and a Smi.
+      __ JumpIfSmi(eax, &rt_call);
+      // edi: constructor
+      // eax: initial map (if proven valid below)
+      __ CmpObjectType(eax, MAP_TYPE, ebx);
+      __ j(not_equal, &rt_call);
 
-    // Check that the constructor is not constructing a JSFunction (see comments
-    // in Runtime_NewObject in runtime.cc). In which case the initial map's
-    // instance type would be JS_FUNCTION_TYPE.
-    // edi: constructor
-    // eax: initial map
-    __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
-    __ j(equal, &rt_call);
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc), in which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // edi: constructor
+      // eax: initial map
+      __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+      __ j(equal, &rt_call);
 
-    if (count_constructions) {
-      Label allocate;
-      // Decrease generous allocation count.
-      __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-      __ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset));
-      __ j(not_zero, &allocate);
-
-      __ push(eax);
-      __ push(edi);
-
-      __ push(edi);  // constructor
-      // The call will replace the stub, so the countdown is only done once.
-      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-      __ pop(edi);
-      __ pop(eax);
-
-      __ bind(&allocate);
-    }
-
-    // Now allocate the JSObject on the heap.
-    // edi: constructor
-    // eax: initial map
-    __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
-    __ shl(edi, kPointerSizeLog2);
-    __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-    // Allocated the JSObject, now initialize the fields.
-    // eax: initial map
-    // ebx: JSObject
-    // edi: start of next object
-    __ mov(Operand(ebx, JSObject::kMapOffset), eax);
-    Factory* factory = masm->isolate()->factory();
-    __ mov(ecx, factory->empty_fixed_array());
-    __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
-    __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
-    // Set extra fields in the newly allocated object.
-    // eax: initial map
-    // ebx: JSObject
-    // edi: start of next object
-    { Label loop, entry;
-      // To allow for truncation.
       if (count_constructions) {
-        __ mov(edx, factory->one_pointer_filler_map());
-      } else {
-        __ mov(edx, factory->undefined_value());
+        Label allocate;
+        // Decrease generous allocation count.
+        __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+        __ dec_b(FieldOperand(ecx,
+                              SharedFunctionInfo::kConstructionCountOffset));
+        __ j(not_zero, &allocate);
+
+        __ push(eax);
+        __ push(edi);
+
+        __ push(edi);  // constructor
+        // The call will replace the stub, so the countdown is only done once.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ pop(edi);
+        __ pop(eax);
+
+        __ bind(&allocate);
       }
+
+      // Now allocate the JSObject on the heap.
+      // edi: constructor
+      // eax: initial map
+      __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+      __ shl(edi, kPointerSizeLog2);
+      __ AllocateInNewSpace(
+          edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+      // Allocated the JSObject, now initialize the fields.
+      // eax: initial map
+      // ebx: JSObject
+      // edi: start of next object
+      __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+      Factory* factory = masm->isolate()->factory();
+      __ mov(ecx, factory->empty_fixed_array());
+      __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+      __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+      // Set extra fields in the newly allocated object.
+      // eax: initial map
+      // ebx: JSObject
+      // edi: start of next object
       __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ mov(Operand(ecx, 0), edx);
-      __ add(Operand(ecx), Immediate(kPointerSize));
-      __ bind(&entry);
-      __ cmp(ecx, Operand(edi));
-      __ j(less, &loop);
-    }
-
-    // Add the object tag to make the JSObject real, so that we can continue and
-    // jump into the continuation code at any time from now on. Any failures
-    // need to undo the allocation, so that the heap is in a consistent state
-    // and verifiable.
-    // eax: initial map
-    // ebx: JSObject
-    // edi: start of next object
-    __ or_(Operand(ebx), Immediate(kHeapObjectTag));
-
-    // Check if a non-empty properties array is needed.
-    // Allocate and initialize a FixedArray if it is.
-    // eax: initial map
-    // ebx: JSObject
-    // edi: start of next object
-    // Calculate the total number of properties described by the map.
-    __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
-    __ movzx_b(ecx, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
-    __ add(edx, Operand(ecx));
-    // Calculate unused properties past the end of the in-object properties.
-    __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
-    __ sub(edx, Operand(ecx));
-    // Done if no extra properties are to be allocated.
-    __ j(zero, &allocated);
-    __ Assert(positive, "Property allocation count failed.");
-
-    // Scale the number of elements by pointer size and add the header for
-    // FixedArrays to the start of the next object calculation from above.
-    // ebx: JSObject
-    // edi: start of next object (will be start of FixedArray)
-    // edx: number of elements in properties array
-    __ AllocateInNewSpace(FixedArray::kHeaderSize,
-                          times_pointer_size,
-                          edx,
-                          edi,
-                          ecx,
-                          no_reg,
-                          &undo_allocation,
-                          RESULT_CONTAINS_TOP);
-
-    // Initialize the FixedArray.
-    // ebx: JSObject
-    // edi: FixedArray
-    // edx: number of elements
-    // ecx: start of next object
-    __ mov(eax, factory->fixed_array_map());
-    __ mov(Operand(edi, FixedArray::kMapOffset), eax);  // setup the map
-    __ SmiTag(edx);
-    __ mov(Operand(edi, FixedArray::kLengthOffset), edx);  // and length
-
-    // Initialize the fields to undefined.
-    // ebx: JSObject
-    // edi: FixedArray
-    // ecx: start of next object
-    { Label loop, entry;
       __ mov(edx, factory->undefined_value());
-      __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ mov(Operand(eax, 0), edx);
-      __ add(Operand(eax), Immediate(kPointerSize));
-      __ bind(&entry);
-      __ cmp(eax, Operand(ecx));
-      __ j(below, &loop);
+      if (count_constructions) {
+        __ movzx_b(esi,
+                   FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+        __ lea(esi,
+               Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
+        // esi: offset of first field after pre-allocated fields
+        if (FLAG_debug_code) {
+          __ cmp(esi, edi);
+          __ Assert(less_equal,
+                    "Unexpected number of pre-allocated property fields.");
+        }
+        __ InitializeFieldsWithFiller(ecx, esi, edx);
+        __ mov(edx, factory->one_pointer_filler_map());
+      }
+      __ InitializeFieldsWithFiller(ecx, edi, edx);
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      // eax: initial map
+      // ebx: JSObject
+      // edi: start of next object
+      __ or_(ebx, Immediate(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed.
+      // Allocate and initialize a FixedArray if it is.
+      // eax: initial map
+      // ebx: JSObject
+      // edi: start of next object
+      // Calculate the total number of properties described by the map.
+      __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+      __ movzx_b(ecx,
+                 FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+      __ add(edx, ecx);
+      // Calculate unused properties past the end of the in-object properties.
+      __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
+      __ sub(edx, ecx);
+      // Done if no extra properties are to be allocated.
+      __ j(zero, &allocated);
+      __ Assert(positive, "Property allocation count failed.");
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // ebx: JSObject
+      // edi: start of next object (will be start of FixedArray)
+      // edx: number of elements in properties array
+      __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                            times_pointer_size,
+                            edx,
+                            edi,
+                            ecx,
+                            no_reg,
+                            &undo_allocation,
+                            RESULT_CONTAINS_TOP);
+
+      // Initialize the FixedArray.
+      // ebx: JSObject
+      // edi: FixedArray
+      // edx: number of elements
+      // ecx: start of next object
+      __ mov(eax, factory->fixed_array_map());
+      __ mov(Operand(edi, FixedArray::kMapOffset), eax);  // set up the map
+      __ SmiTag(edx);
+      __ mov(Operand(edi, FixedArray::kLengthOffset), edx);  // and length
+
+      // Initialize the fields to undefined.
+      // ebx: JSObject
+      // edi: FixedArray
+      // ecx: start of next object
+      { Label loop, entry;
+        __ mov(edx, factory->undefined_value());
+        __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+        __ jmp(&entry);
+        __ bind(&loop);
+        __ mov(Operand(eax, 0), edx);
+        __ add(eax, Immediate(kPointerSize));
+        __ bind(&entry);
+        __ cmp(eax, ecx);
+        __ j(below, &loop);
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject
+      // ebx: JSObject
+      // edi: FixedArray
+      __ or_(edi, Immediate(kHeapObjectTag));  // add the heap tag
+      __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+
+
+      // Continue with JSObject being successfully allocated
+      // ebx: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+      // ebx: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(ebx);
     }
 
-    // Store the initialized FixedArray into the properties field of
-    // the JSObject
-    // ebx: JSObject
-    // edi: FixedArray
-    __ or_(Operand(edi), Immediate(kHeapObjectTag));  // add the heap tag
-    __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+    // Allocate the new receiver object using the runtime call.
+    __ bind(&rt_call);
+    // Must restore edi (constructor) before calling runtime.
+    __ mov(edi, Operand(esp, 0));
+    // edi: function (constructor)
+    __ push(edi);
+    __ CallRuntime(Runtime::kNewObject, 1);
+    __ mov(ebx, eax);  // store result in ebx
 
+    // New object allocated.
+    // ebx: newly allocated object
+    __ bind(&allocated);
+    // Retrieve the function from the stack.
+    __ pop(edi);
 
-    // Continue with JSObject being successfully allocated
-    // ebx: JSObject
-    __ jmp(&allocated);
+    // Retrieve smi-tagged arguments count from the stack.
+    __ mov(eax, Operand(esp, 0));
+    __ SmiUntag(eax);
 
-    // Undo the setting of the new top so that the heap is verifiable. For
-    // example, the map's unused properties potentially do not match the
-    // allocated objects unused properties.
-    // ebx: JSObject (previous new top)
-    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(ebx);
+    // Push the allocated receiver to the stack. We need two copies
+    // because we may have to return the original one and the calling
+    // conventions dictate that the called function pops the receiver.
+    __ push(ebx);
+    __ push(ebx);
+
+    // Set up pointer to last argument.
+    __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    Label loop, entry;
+    __ mov(ecx, eax);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ push(Operand(ebx, ecx, times_4, 0));
+    __ bind(&entry);
+    __ dec(ecx);
+    __ j(greater_equal, &loop);
+
+    // Call the function.
+    if (is_api_function) {
+      __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      ParameterCount expected(0);
+      __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+                    CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+    } else {
+      ParameterCount actual(eax);
+      __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+
+    // Store offset of return address for deoptimizer.
+    if (!is_api_function && !count_constructions) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    __ JumpIfSmi(eax, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+    __ j(above_equal, &exit);
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ mov(eax, Operand(esp, 0));
+
+    // Restore the arguments count and leave the construct frame.
+    __ bind(&exit);
+    __ mov(ebx, Operand(esp, kPointerSize));  // Get arguments count.
+
+    // Leave construct frame.
   }
 
-  // Allocate the new receiver object using the runtime call.
-  __ bind(&rt_call);
-  // Must restore edi (constructor) before calling runtime.
-  __ mov(edi, Operand(esp, 0));
-  // edi: function (constructor)
-  __ push(edi);
-  __ CallRuntime(Runtime::kNewObject, 1);
-  __ mov(ebx, Operand(eax));  // store result in ebx
-
-  // New object allocated.
-  // ebx: newly allocated object
-  __ bind(&allocated);
-  // Retrieve the function from the stack.
-  __ pop(edi);
-
-  // Retrieve smi-tagged arguments count from the stack.
-  __ mov(eax, Operand(esp, 0));
-  __ SmiUntag(eax);
-
-  // Push the allocated receiver to the stack. We need two copies
-  // because we may have to return the original one and the calling
-  // conventions dictate that the called function pops the receiver.
-  __ push(ebx);
-  __ push(ebx);
-
-  // Setup pointer to last argument.
-  __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
-  // Copy arguments and receiver to the expression stack.
-  Label loop, entry;
-  __ mov(ecx, Operand(eax));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ push(Operand(ebx, ecx, times_4, 0));
-  __ bind(&entry);
-  __ dec(ecx);
-  __ j(greater_equal, &loop);
-
-  // Call the function.
-  if (is_api_function) {
-    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-    Handle<Code> code =
-        masm->isolate()->builtins()->HandleApiCallConstruct();
-    ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
-                  CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-  } else {
-    ParameterCount actual(eax);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Restore context from the frame.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
-  // If the result is an object (in the ECMA sense), we should get rid
-  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-  // on page 74.
-  Label use_receiver, exit;
-
-  // If the result is a smi, it is *not* an object in the ECMA sense.
-  __ JumpIfSmi(eax, &use_receiver);
-
-  // If the type of the result (stored in its map) is less than
-  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
-  __ j(above_equal, &exit);
-
-  // Throw away the result of the constructor invocation and use the
-  // on-stack receiver as the result.
-  __ bind(&use_receiver);
-  __ mov(eax, Operand(esp, 0));
-
-  // Restore the arguments count and leave the construct frame.
-  __ bind(&exit);
-  __ mov(ebx, Operand(esp, kPointerSize));  // get arguments count
-  __ LeaveConstructFrame();
-
   // Remove caller arguments from the stack and return.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ pop(ecx);
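
Editorial note: most of this hunk is a mechanical re-indent. The paired
EnterConstructFrame()/LeaveConstructFrame() calls become a FrameScope whose
C++ block scope brackets the code generated inside the frame, so the leave
sequence is emitted by the destructor. A minimal standalone model of the RAII
shape (the printfs stand in for the emitted enter/leave code; only the
constructor-argument shape matches the diff):

    #include <cstdio>

    struct FrameScopeModel {
      explicit FrameScopeModel(const char* type) {
        std::printf("emit: enter %s frame\n", type);
      }
      ~FrameScopeModel() { std::printf("emit: leave frame\n"); }
    };

    void GenerateStubSketch() {
      {
        FrameScopeModel scope("CONSTRUCT");
        // ... code generated while the frame is live ...
      }  // leave sequence emitted here, replacing LeaveConstructFrame()
    }

One consequence visible later in this file: a path that returns while the
scope is still open must emit the teardown explicitly, which is what
frame_scope.GenerateLeaveFrame() does in Generate_FunctionApply.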
@@ -399,57 +384,58 @@
 
 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                              bool is_construct) {
-  // Clear the context before we push it when entering the JS frame.
+  // Clear the context before we push it when entering the internal frame.
   __ Set(esi, Immediate(0));
 
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Load the previous frame pointer (ebx) to access C arguments
-  __ mov(ebx, Operand(ebp, 0));
+    // Load the previous frame pointer (ebx) to access C arguments
+    __ mov(ebx, Operand(ebp, 0));
 
-  // Get the function from the frame and setup the context.
-  __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
-  __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
+    // Get the function from the frame and set up the context.
+    __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+    __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
 
-  // Push the function and the receiver onto the stack.
-  __ push(ecx);
-  __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+    // Push the function and the receiver onto the stack.
+    __ push(ecx);
+    __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
 
-  // Load the number of arguments and setup pointer to the arguments.
-  __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
-  __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+    // Load the number of arguments and set up pointer to the arguments.
+    __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+    __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
 
-  // Copy arguments to the stack in a loop.
-  Label loop, entry;
-  __ Set(ecx, Immediate(0));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
-  __ push(Operand(edx, 0));  // dereference handle
-  __ inc(Operand(ecx));
-  __ bind(&entry);
-  __ cmp(ecx, Operand(eax));
-  __ j(not_equal, &loop);
+    // Copy arguments to the stack in a loop.
+    Label loop, entry;
+    __ Set(ecx, Immediate(0));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
+    __ push(Operand(edx, 0));  // dereference handle
+    __ inc(ecx);
+    __ bind(&entry);
+    __ cmp(ecx, eax);
+    __ j(not_equal, &loop);
 
-  // Get the function from the stack and call it.
-  __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize));  // +1 ~ receiver
+    // Get the function from the stack and call it.
+    // kPointerSize for the receiver.
+    __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
 
-  // Invoke the code.
-  if (is_construct) {
-    __ call(masm->isolate()->builtins()->JSConstructCall(),
-            RelocInfo::CODE_TARGET);
-  } else {
-    ParameterCount actual(eax);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the code.
+    if (is_construct) {
+      CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+      __ CallStub(&stub);
+    } else {
+      ParameterCount actual(eax);
+      __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+
+    // Exit the internal frame. Notice that this also removes the empty
+    // context and the function left on the stack by the code
+    // invocation.
   }
-
-  // Exit the JS frame. Notice that this also removes the empty
-  // context and the function left on the stack by the code
-  // invocation.
-  __ LeaveInternalFrame();
-  __ ret(1 * kPointerSize);  // remove receiver
+  __ ret(kPointerSize);  // Remove receiver.
 }
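
Editorial note: the argument-copy loop above reads each argv slot and pushes
*slot, since every slot is a handle (a pointer to the location holding the
actual value). A standalone model of that loop, with plain pointers standing
in for handles and the machine stack:

    #include <vector>

    // argv[i] is a handle: a pointer to the slot holding the real value.
    void CopyArgumentsSketch(std::vector<void*>* stack, void** argv, int argc) {
      for (int i = 0; i < argc; ++i) {
        void** handle = reinterpret_cast<void**>(argv[i]);
        stack->push_back(*handle);  // the "dereference handle" step
      }
    }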
 
 
@@ -464,68 +450,68 @@
 
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push a copy of the function.
-  __ push(edi);
-  // Push call kind information.
-  __ push(ecx);
+    // Push a copy of the function.
+    __ push(edi);
+    // Push call kind information.
+    __ push(ecx);
 
-  __ push(edi);  // Function is also the parameter to the runtime call.
-  __ CallRuntime(Runtime::kLazyCompile, 1);
+    __ push(edi);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kLazyCompile, 1);
 
-  // Restore call kind information.
-  __ pop(ecx);
-  // Restore receiver.
-  __ pop(edi);
+    // Restore call kind information.
+    __ pop(ecx);
+    // Restore receiver.
+    __ pop(edi);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(Operand(eax));
+  __ jmp(eax);
 }
 
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push a copy of the function onto the stack.
-  __ push(edi);
-  // Push call kind information.
-  __ push(ecx);
+    // Push a copy of the function onto the stack.
+    __ push(edi);
+    // Push call kind information.
+    __ push(ecx);
 
-  __ push(edi);  // Function is also the parameter to the runtime call.
-  __ CallRuntime(Runtime::kLazyRecompile, 1);
+    __ push(edi);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kLazyRecompile, 1);
 
-  // Restore call kind information.
-  __ pop(ecx);
-  // Restore receiver.
-  __ pop(edi);
+    // Restore call kind information.
+    __ pop(ecx);
+    // Restore receiver.
+    __ pop(edi);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(Operand(eax));
+  __ jmp(eax);
 }
 
 
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Pass the function and deoptimization type to the runtime system.
-  __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
-  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+    // Pass deoptimization type to the runtime system.
+    __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Get the full codegen state from the stack and untag it.
   __ mov(ecx, Operand(esp, 1 * kPointerSize));
@@ -566,9 +552,10 @@
   // the registers without worrying about which of them contain
   // pointers. This seems a bit fragile.
   __ pushad();
-  __ EnterInternalFrame();
-  __ CallRuntime(Runtime::kNotifyOSR, 0);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyOSR, 0);
+  }
   __ popad();
   __ ret(0);
 }
@@ -579,7 +566,7 @@
 
   // 1. Make sure we have at least one argument.
   { Label done;
-    __ test(eax, Operand(eax));
+    __ test(eax, eax);
     __ j(not_zero, &done);
     __ pop(ebx);
     __ push(Immediate(factory->undefined_value()));
@@ -631,18 +618,21 @@
     __ j(above_equal, &shift_arguments);
 
     __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ SmiTag(eax);
-    __ push(eax);
 
-    __ push(ebx);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(ebx, eax);
-    __ Set(edx, Immediate(0));  // restore
+    {  // In order to preserve the argument count.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(eax);
+      __ push(eax);
 
-    __ pop(eax);
-    __ SmiUntag(eax);
-    __ LeaveInternalFrame();
+      __ push(ebx);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ mov(ebx, eax);
+      __ Set(edx, Immediate(0));  // restore
+
+      __ pop(eax);
+      __ SmiUntag(eax);
+    }
+
     // Restore the function to edi.
     __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
     __ jmp(&patch_receiver);
@@ -695,22 +685,23 @@
   // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
   //     or a function proxy via CALL_FUNCTION_PROXY.
   { Label function, non_proxy;
-    __ test(edx, Operand(edx));
+    __ test(edx, edx);
     __ j(zero, &function);
     __ Set(ebx, Immediate(0));
-    __ SetCallKind(ecx, CALL_AS_METHOD);
-    __ cmp(Operand(edx), Immediate(1));
+    __ cmp(edx, Immediate(1));
     __ j(not_equal, &non_proxy);
 
     __ pop(edx);   // return address
     __ push(edi);  // re-add proxy object as additional argument
     __ push(edx);
     __ inc(eax);
+    __ SetCallKind(ecx, CALL_AS_FUNCTION);
     __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
     __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
 
     __ bind(&non_proxy);
+    __ SetCallKind(ecx, CALL_AS_METHOD);
     __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
     __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
@@ -726,13 +717,13 @@
   __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
   __ SmiUntag(ebx);
   __ SetCallKind(ecx, CALL_AS_METHOD);
-  __ cmp(eax, Operand(ebx));
+  __ cmp(eax, ebx);
   __ j(not_equal,
        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
 
   ParameterCount expected(0);
-  __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION,
-                NullCallWrapper(), CALL_AS_METHOD);
+  __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
+                CALL_AS_METHOD);
 }
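
Editorial note: the tail of this builtin performs the classic arity check.
The actual argument count (eax) is compared against the expected count
untagged into ebx; only an exact match jumps straight to the function's code
entry, and any mismatch routes through the arguments adaptor trampoline. As
plain control flow:

    enum class CallTarget { kCodeEntry, kArgumentsAdaptor };

    CallTarget ChooseTarget(int actual_args, int expected_args) {
      return actual_args == expected_args ? CallTarget::kCodeEntry
                                          : CallTarget::kArgumentsAdaptor;
    }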
 
 
@@ -740,163 +731,160 @@
   static const int kArgumentsOffset = 2 * kPointerSize;
   static const int kReceiverOffset = 3 * kPointerSize;
   static const int kFunctionOffset = 4 * kPointerSize;
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
 
-  __ EnterInternalFrame();
+    __ push(Operand(ebp, kFunctionOffset));  // push this
+    __ push(Operand(ebp, kArgumentsOffset));  // push arguments
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  __ push(Operand(ebp, kFunctionOffset));  // push this
-  __ push(Operand(ebp, kArgumentsOffset));  // push arguments
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    ExternalReference real_stack_limit =
+        ExternalReference::address_of_real_stack_limit(masm->isolate());
+    __ mov(edi, Operand::StaticVariable(real_stack_limit));
+    // Make ecx the space we have left. The stack might already be overflowed
+    // here, which will cause ecx to become negative.
+    __ mov(ecx, esp);
+    __ sub(ecx, edi);
+    // Make edx the space we need for the array when it is unrolled onto the
+    // stack.
+    __ mov(edx, eax);
+    __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+    // Check if the arguments will overflow the stack.
+    __ cmp(ecx, edx);
+    __ j(greater, &okay);  // Signed comparison.
 
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  ExternalReference real_stack_limit =
-      ExternalReference::address_of_real_stack_limit(masm->isolate());
-  __ mov(edi, Operand::StaticVariable(real_stack_limit));
-  // Make ecx the space we have left. The stack might already be overflowed
-  // here which will cause ecx to become negative.
-  __ mov(ecx, Operand(esp));
-  __ sub(ecx, Operand(edi));
-  // Make edx the space we need for the array when it is unrolled onto the
-  // stack.
-  __ mov(edx, Operand(eax));
-  __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
-  // Check if the arguments will overflow the stack.
-  __ cmp(ecx, Operand(edx));
-  __ j(greater, &okay);  // Signed comparison.
+    // Out of stack space.
+    __ push(Operand(ebp, 4 * kPointerSize));  // push this
+    __ push(eax);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    __ bind(&okay);
+    // End of stack check.
 
-  // Out of stack space.
-  __ push(Operand(ebp, 4 * kPointerSize));  // push this
-  __ push(eax);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  __ bind(&okay);
-  // End of stack check.
+    // Push current index and limit.
+    const int kLimitOffset =
+        StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+    const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+    __ push(eax);  // limit
+    __ push(Immediate(0));  // index
 
-  // Push current index and limit.
-  const int kLimitOffset =
-      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
-  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
-  __ push(eax);  // limit
-  __ push(Immediate(0));  // index
+    // Get the receiver.
+    __ mov(ebx, Operand(ebp, kReceiverOffset));
 
-  // Get the receiver.
-  __ mov(ebx, Operand(ebp, kReceiverOffset));
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ mov(edi, Operand(ebp, kFunctionOffset));
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+    __ j(not_equal, &push_receiver);
 
-  // Check that the function is a JS function (otherwise it must be a proxy).
-  Label push_receiver;
-  __ mov(edi, Operand(ebp, kFunctionOffset));
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &push_receiver);
+    // Change context eagerly to get the right global object if necessary.
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
-  // Change context eagerly to get the right global object if necessary.
-  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+    // Compute the receiver.
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_receiver;
+    __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+    __ j(not_equal, &push_receiver);
 
-  // Compute the receiver.
-  // Do not transform the receiver for strict mode functions.
-  Label call_to_object, use_global_receiver;
-  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
-            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
-  __ j(not_equal, &push_receiver);
+    Factory* factory = masm->isolate()->factory();
 
-  Factory* factory = masm->isolate()->factory();
+    // Do not transform the receiver for natives (shared already in ecx).
+    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+              1 << SharedFunctionInfo::kNativeBitWithinByte);
+    __ j(not_equal, &push_receiver);
 
-  // Do not transform the receiver for natives (shared already in ecx).
-  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
-            1 << SharedFunctionInfo::kNativeBitWithinByte);
-  __ j(not_equal, &push_receiver);
+    // Compute the receiver in non-strict mode.
+    // Call ToObject on the receiver if it is not an object, or use the
+    // global object if it is null or undefined.
+    __ JumpIfSmi(ebx, &call_to_object);
+    __ cmp(ebx, factory->null_value());
+    __ j(equal, &use_global_receiver);
+    __ cmp(ebx, factory->undefined_value());
+    __ j(equal, &use_global_receiver);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+    __ j(above_equal, &push_receiver);
 
-  // Compute the receiver in non-strict mode.
-  // Call ToObject on the receiver if it is not an object, or use the
-  // global object if it is null or undefined.
-  __ JumpIfSmi(ebx, &call_to_object);
-  __ cmp(ebx, factory->null_value());
-  __ j(equal, &use_global_receiver);
-  __ cmp(ebx, factory->undefined_value());
-  __ j(equal, &use_global_receiver);
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
-  __ j(above_equal, &push_receiver);
+    __ bind(&call_to_object);
+    __ push(ebx);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(ebx, eax);
+    __ jmp(&push_receiver);
 
-  __ bind(&call_to_object);
-  __ push(ebx);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ mov(ebx, Operand(eax));
-  __ jmp(&push_receiver);
+    // Use the current global receiver object as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalOffset =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ mov(ebx, FieldOperand(esi, kGlobalOffset));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+    __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
 
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ mov(ebx, FieldOperand(esi, kGlobalOffset));
-  __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
-  __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
-  __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+    // Push the receiver.
+    __ bind(&push_receiver);
+    __ push(ebx);
 
-  // Push the receiver.
-  __ bind(&push_receiver);
-  __ push(ebx);
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ mov(eax, Operand(ebp, kIndexOffset));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
 
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ mov(eax, Operand(ebp, kIndexOffset));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
+    // Use inline caching to speed up access to arguments.
+    Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // It is important that we do not have a test instruction after the
+    // call.  A test instruction after the call is used to indicate that
+    // we have generated an inline version of the keyed load.  In this
+    // case, we know that we are not generating a test instruction next.
 
-  // Use inline caching to speed up access to arguments.
-  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET);
-  // It is important that we do not have a test instruction after the
-  // call.  A test instruction after the call is used to indicate that
-  // we have generated an inline version of the keyed load.  In this
-  // case, we know that we are not generating a test instruction next.
+    // Push the nth argument.
+    __ push(eax);
 
-  // Push the nth argument.
-  __ push(eax);
+    // Update the index on the stack and in register eax.
+    __ mov(eax, Operand(ebp, kIndexOffset));
+    __ add(eax, Immediate(1 << kSmiTagSize));
+    __ mov(Operand(ebp, kIndexOffset), eax);
 
-  // Update the index on the stack and in register eax.
-  __ mov(eax, Operand(ebp, kIndexOffset));
-  __ add(Operand(eax), Immediate(1 << kSmiTagSize));
-  __ mov(Operand(ebp, kIndexOffset), eax);
+    __ bind(&entry);
+    __ cmp(eax, Operand(ebp, kLimitOffset));
+    __ j(not_equal, &loop);
 
-  __ bind(&entry);
-  __ cmp(eax, Operand(ebp, kLimitOffset));
-  __ j(not_equal, &loop);
+    // Invoke the function.
+    Label call_proxy;
+    ParameterCount actual(eax);
+    __ SmiUntag(eax);
+    __ mov(edi, Operand(ebp, kFunctionOffset));
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+    __ j(not_equal, &call_proxy);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
 
-  // Invoke the function.
-  Label call_proxy;
-  ParameterCount actual(eax);
-  __ SmiUntag(eax);
-  __ mov(edi, Operand(ebp, kFunctionOffset));
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &call_proxy);
-  __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+    frame_scope.GenerateLeaveFrame();
+    __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 
-  __ LeaveInternalFrame();
-  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+    // Invoke the function proxy.
+    __ bind(&call_proxy);
+    __ push(edi);  // add function proxy as last argument
+    __ inc(eax);
+    __ Set(ebx, Immediate(0));
+    __ SetCallKind(ecx, CALL_AS_METHOD);
+    __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+    __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
 
-  // Invoke the function proxy.
-  __ bind(&call_proxy);
-  __ push(edi);  // add function proxy as last argument
-  __ inc(eax);
-  __ Set(ebx, Immediate(0));
-  __ SetCallKind(ecx, CALL_AS_METHOD);
-  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
-  __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
-
-  __ LeaveInternalFrame();
+    // Leave internal frame.
+  }
   __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 }
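
Editorial note: the stack-overflow check near the top of this function is
plain pointer arithmetic. The space left above the real stack limit must
exceed what the smi-tagged argument count will occupy once unrolled; on ia32
a smi carries its value shifted left by kSmiTagSize (1), so one further shift
by kPointerSizeLog2 - kSmiTagSize turns the smi directly into a byte count.
A standalone model:

    #include <cstdint>

    bool ArgumentsFitOnStack(uintptr_t esp, uintptr_t real_stack_limit,
                             intptr_t argc_smi) {
      const int kPointerSizeLog2 = 2;  // ia32: 4-byte pointers
      const int kSmiTagSize = 1;
      intptr_t space_left = static_cast<intptr_t>(esp - real_stack_limit);
      intptr_t space_needed = argc_smi << (kPointerSizeLog2 - kSmiTagSize);
      return space_left > space_needed;  // signed, like j(greater, &okay)
    }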
 
 
-// Number of empty elements to allocate for an empty array.
-static const int kPreallocatedArrayElements = 4;
-
-
 // Allocate an empty JSArray. The allocated array is put into the result
 // register. If the parameter initial_capacity is larger than zero an elements
 // backing store is allocated with this size and filled with the hole values.
@@ -907,13 +895,11 @@
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
-                                 int initial_capacity,
                                  Label* gc_required) {
-  ASSERT(initial_capacity >= 0);
+  const int initial_capacity = JSArray::kPreallocatedArrayElements;
+  STATIC_ASSERT(initial_capacity >= 0);
 
-  // Load the initial map from the array function.
-  __ mov(scratch1, FieldOperand(array_function,
-                                JSFunction::kPrototypeOrInitialMapOffset));
+  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
@@ -968,7 +954,6 @@
   // Fill the FixedArray with the hole value. Inline the code if short.
   // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
   static const int kLoopUnfoldLimit = 4;
-  STATIC_ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
   if (initial_capacity <= kLoopUnfoldLimit) {
     // Use a scratch register here to have only one reloc info when unfolding
     // the loop.
@@ -980,13 +965,17 @@
     }
   } else {
     Label loop, entry;
+    __ mov(scratch2, Immediate(initial_capacity));
     __ jmp(&entry);
     __ bind(&loop);
-    __ mov(Operand(scratch1, 0), factory->the_hole_value());
-    __ add(Operand(scratch1), Immediate(kPointerSize));
+    __ mov(FieldOperand(scratch1,
+                        scratch2,
+                        times_pointer_size,
+                        FixedArray::kHeaderSize),
+           factory->the_hole_value());
     __ bind(&entry);
-    __ cmp(scratch1, Operand(scratch2));
-    __ j(below, &loop);
+    __ dec(scratch2);
+    __ j(not_sign, &loop);
   }
 }
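
Editorial note: the rewritten fill loop drops the moving pointer plus
end-pointer compare in favor of an index counting down from initial_capacity:
the indexed FieldOperand store plus dec and j(not_sign) exits as soon as the
index underflows past zero. The equivalent C loop:

    // dec scratch2 / j(not_sign) == keep looping while the index is >= 0.
    void FillWithHole(void** elements, int initial_capacity, void* hole) {
      for (int i = initial_capacity - 1; i >= 0; --i) {
        elements[i] = hole;
      }
    }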
 
@@ -1013,10 +1002,7 @@
   ASSERT(!fill_with_hole || array_size.is(ecx));  // rep stos count
   ASSERT(!fill_with_hole || !result.is(eax));  // result is never eax
 
-  // Load the initial map from the array function.
-  __ mov(elements_array,
-         FieldOperand(array_function,
-                      JSFunction::kPrototypeOrInitialMapOffset));
+  __ LoadInitialArrayMap(array_function, scratch, elements_array);
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested elements.
@@ -1082,7 +1068,7 @@
     __ bind(&loop);
     __ stos();
     __ bind(&entry);
-    __ cmp(edi, Operand(elements_array_end));
+    __ cmp(edi, elements_array_end);
     __ j(below, &loop);
     __ bind(&done);
   }
@@ -1107,7 +1093,7 @@
                             bool construct_call,
                             Label* call_generic_code) {
   Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
-        empty_array, not_empty_array;
+      empty_array, not_empty_array, finish, cant_transition_map, not_double;
 
   // Push the constructor and argc. No need to tag argc as a smi, as there will
   // be no garbage collection with this on the stack.
@@ -1120,7 +1106,7 @@
   __ push(eax);
 
   // Check for array construction with zero arguments.
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(not_zero, &argc_one_or_more);
 
   __ bind(&empty_array);
@@ -1131,7 +1117,6 @@
                        ebx,
                        ecx,
                        edi,
-                       kPreallocatedArrayElements,
                        &prepare_generic_code_call);
   __ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
   __ pop(ebx);
@@ -1147,7 +1132,7 @@
   __ j(not_equal, &argc_two_or_more);
   STATIC_ASSERT(kSmiTag == 0);
   __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(not_zero, &not_empty_array);
 
   // The single argument passed is zero, so we jump to the code above used to
@@ -1160,7 +1145,7 @@
     __ mov(eax, Operand(esp, i * kPointerSize));
     __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
   }
-  __ add(Operand(esp), Immediate(2 * kPointerSize));  // Drop two stack slots.
+  __ Drop(2);  // Drop two stack slots.
   __ push(Immediate(0));  // Treat this as a call with argc of zero.
   __ jmp(&empty_array);
 
@@ -1218,39 +1203,44 @@
                   false,
                   &prepare_generic_code_call);
   __ IncrementCounter(counters->array_function_native(), 1);
-  __ mov(eax, ebx);
-  __ pop(ebx);
-  if (construct_call) {
-    __ pop(edi);
-  }
-  __ push(eax);
-  // eax: JSArray
+  __ push(ebx);
+  __ mov(ebx, Operand(esp, kPointerSize));
   // ebx: argc
   // edx: elements_array_end (untagged)
   // esp[0]: JSArray
-  // esp[4]: return address
-  // esp[8]: last argument
+  // esp[4]: argc
+  // esp[8]: constructor (only if construct_call)
+  // esp[12]: return address
+  // esp[16]: last argument
 
   // Location of the last argument
-  __ lea(edi, Operand(esp, 2 * kPointerSize));
+  int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
+  __ lea(edi, Operand(esp, last_arg_offset));
 
   // Location of the first array element (Parameter fill_with_hole to
-  // AllocateJSArrayis false, so the FixedArray is returned in ecx).
+  // AllocateJSArray is false, so the FixedArray is returned in ecx).
   __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
 
+  Label has_non_smi_element;
+
   // ebx: argc
   // edx: location of the first array element
   // edi: location of the last argument
   // esp[0]: JSArray
-  // esp[4]: return address
-  // esp[8]: last argument
+  // esp[4]: argc
+  // esp[8]: constructor (only if construct_call)
+  // esp[12]: return address
+  // esp[16]: last argument
   Label loop, entry;
   __ mov(ecx, ebx);
   __ jmp(&entry);
   __ bind(&loop);
   __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+  if (FLAG_smi_only_arrays) {
+    __ JumpIfNotSmi(eax, &has_non_smi_element);
+  }
   __ mov(Operand(edx, 0), eax);
-  __ add(Operand(edx), Immediate(kPointerSize));
+  __ add(edx, Immediate(kPointerSize));
   __ bind(&entry);
   __ dec(ecx);
   __ j(greater_equal, &loop);
@@ -1258,13 +1248,56 @@
   // Remove caller arguments from the stack and return.
   // ebx: argc
   // esp[0]: JSArray
-  // esp[4]: return address
-  // esp[8]: last argument
+  // esp[4]: argc
+  // esp[8]: constructor (only if construct_call)
+  // esp[12]: return address
+  // esp[16]: last argument
+  __ bind(&finish);
+  __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
   __ pop(eax);
-  __ pop(ecx);
-  __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
-  __ push(ecx);
-  __ ret(0);
+  __ pop(ebx);
+  __ lea(esp, Operand(esp, ebx, times_pointer_size,
+                      last_arg_offset - kPointerSize));
+  __ jmp(ecx);
+
+  __ bind(&has_non_smi_element);
+  // Double values are handled by the runtime.
+  __ CheckMap(eax,
+              masm->isolate()->factory()->heap_number_map(),
+              &not_double,
+              DONT_DO_SMI_CHECK);
+  __ bind(&cant_transition_map);
+  // Throw away the array that's only been partially constructed.
+  __ pop(eax);
+  __ UndoAllocationInNewSpace(eax);
+  __ jmp(&prepare_generic_code_call);
+
+  __ bind(&not_double);
+  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+  __ mov(ebx, Operand(esp, 0));
+  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ LoadTransitionedArrayMapConditional(
+      FAST_SMI_ONLY_ELEMENTS,
+      FAST_ELEMENTS,
+      edi,
+      eax,
+      &cant_transition_map);
+  __ mov(FieldOperand(ebx, HeapObject::kMapOffset), edi);
+  __ RecordWriteField(ebx, HeapObject::kMapOffset, edi, eax,
+                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+  // Prepare to re-enter the loop.
+  __ lea(edi, Operand(esp, last_arg_offset));
+
+  // Finish the array initialization loop.
+  Label loop2;
+  __ bind(&loop2);
+  __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+  __ mov(Operand(edx, 0), eax);
+  __ add(edx, Immediate(kPointerSize));
+  __ dec(ecx);
+  __ j(greater_equal, &loop2);
+  __ jmp(&finish);
 
   // Restore argc and constructor before running the generic code.
   __ bind(&prepare_generic_code_call);
@@ -1276,6 +1309,40 @@
 }
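
The non-smi handling added to ArrayNativeCode above amounts to: copy arguments
while they are smis; on the first non-smi, bail to the runtime for doubles,
otherwise transition the elements map once and finish copying with no further
checks. A rough C++ sketch under that reading (IsSmi, IsHeapNumber and the
transition helper are hypothetical stand-ins, not V8's API):

    #include <cstdint>

    bool IsSmi(void* v) { return (reinterpret_cast<uintptr_t>(v) & 1) == 0; }
    bool IsHeapNumber(void* v);              // assumed: heap_number_map check
    void TransitionSmiOnlyToFastElements();  // assumed: map change + barrier

    bool FillFromArguments(void** args, int argc, void** dst) {
      int i = argc - 1;                      // ecx counts down, as in the stub
      for (; i >= 0; --i) {
        if (!IsSmi(args[i])) {
          if (IsHeapNumber(args[i])) return false;  // undo allocation, go generic
          TransitionSmiOnlyToFastElements();        // one-time transition
          break;
        }
        *dst++ = args[i];
      }
      for (; i >= 0; --i) *dst++ = args[i];  // loop2: no more smi checks
      return true;
    }
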
 
 
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argc
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the InternalArray function.
+  __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray function should be a map.
+    __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // This check catches both a NULL and a Smi.
+    __ test(ebx, Immediate(kSmiTagMask));
+    __ Assert(not_zero, "Unexpected initial map for InternalArray function");
+    __ CmpObjectType(ebx, MAP_TYPE, ecx);
+    __ Assert(equal, "Unexpected initial map for InternalArray function");
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  ArrayNativeCode(masm, false, &generic_array_code);
+
+  // Jump to the generic internal array code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_array_code);
+  Handle<Code> array_code =
+      masm->isolate()->builtins()->InternalArrayCodeGeneric();
+  __ jmp(array_code, RelocInfo::CODE_TARGET);
+}
+
+
 void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : argc
@@ -1288,7 +1355,7 @@
   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
 
   if (FLAG_debug_code) {
-    // Initial map for the builtin Array function shoud be a map.
+    // Initial map for the builtin Array function should be a map.
     __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
     // This check catches both a NULL and a Smi.
     __ test(ebx, Immediate(kSmiTagMask));
@@ -1356,14 +1423,14 @@
 
   if (FLAG_debug_code) {
     __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
-    __ cmp(edi, Operand(ecx));
+    __ cmp(edi, ecx);
     __ Assert(equal, "Unexpected String function");
   }
 
   // Load the first argument into eax and get rid of the rest
   // (including the receiver).
   Label no_arguments;
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(zero, &no_arguments);
   __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
   __ pop(ecx);
@@ -1439,12 +1506,13 @@
   // Invoke the conversion builtin and put the result into ebx.
   __ bind(&convert_argument);
   __ IncrementCounter(counters->string_ctor_conversions(), 1);
-  __ EnterInternalFrame();
-  __ push(edi);  // Preserve the function.
-  __ push(eax);
-  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-  __ pop(edi);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(edi);  // Preserve the function.
+    __ push(eax);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+    __ pop(edi);
+  }
   __ mov(ebx, eax);
   __ jmp(&argument_is_string);
 
@@ -1461,17 +1529,18 @@
   // create a string wrapper.
   __ bind(&gc_required);
   __ IncrementCounter(counters->string_ctor_gc_required(), 1);
-  __ EnterInternalFrame();
-  __ push(ebx);
-  __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(ebx);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
   __ ret(0);
 }
 
 
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ push(ebp);
-  __ mov(ebp, Operand(esp));
+  __ mov(ebp, esp);
 
   // Store the arguments adaptor context sentinel.
   __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -1515,7 +1584,7 @@
   __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
 
   Label enough, too_few;
-  __ cmp(eax, Operand(ebx));
+  __ cmp(eax, ebx);
   __ j(less, &too_few);
   __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
   __ j(equal, &dont_adapt_arguments);
@@ -1533,8 +1602,8 @@
     __ bind(&copy);
     __ inc(edi);
     __ push(Operand(eax, 0));
-    __ sub(Operand(eax), Immediate(kPointerSize));
-    __ cmp(edi, Operand(ebx));
+    __ sub(eax, Immediate(kPointerSize));
+    __ cmp(edi, ebx);
     __ j(less, &copy);
     __ jmp(&invoke);
   }
@@ -1547,17 +1616,17 @@
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(edi, Operand(ebp, eax, times_4, offset));
     // ebx = expected - actual.
-    __ sub(ebx, Operand(eax));
+    __ sub(ebx, eax);
     // eax = -actual - 1
     __ neg(eax);
-    __ sub(Operand(eax), Immediate(1));
+    __ sub(eax, Immediate(1));
 
     Label copy;
     __ bind(&copy);
     __ inc(eax);
     __ push(Operand(edi, 0));
-    __ sub(Operand(edi), Immediate(kPointerSize));
-    __ test(eax, Operand(eax));
+    __ sub(edi, Immediate(kPointerSize));
+    __ test(eax, eax);
     __ j(not_zero, &copy);
 
     // Fill remaining expected arguments with undefined values.
@@ -1565,7 +1634,7 @@
     __ bind(&fill);
     __ inc(eax);
     __ push(Immediate(masm->isolate()->factory()->undefined_value()));
-    __ cmp(eax, Operand(ebx));
+    __ cmp(eax, ebx);
     __ j(less, &fill);
   }
 
@@ -1573,7 +1642,10 @@
   __ bind(&invoke);
   // Restore function pointer.
   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ call(Operand(edx));
+  __ call(edx);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
 
   // Leave frame and return.
   LeaveArgumentsAdaptorFrame(masm);
@@ -1583,13 +1655,13 @@
   // Don't adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
-  __ jmp(Operand(edx));
+  __ jmp(edx);
 }
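
The two copy loops implement the usual adaptor policy: with enough actual
arguments, copy exactly the expected number; with too few, copy them all and
pad with undefined. The same policy in C++, with std::vector standing in for
stack slots and an intptr_t for a tagged value:

    #include <cstdint>
    #include <vector>

    using Value = intptr_t;  // illustrative stand-in for a tagged pointer

    std::vector<Value> AdaptArguments(const std::vector<Value>& actual,
                                      int expected, Value undefined) {
      std::vector<Value> frame;
      if (static_cast<int>(actual.size()) >= expected) {
        // "enough": copy exactly the expected number of arguments.
        frame.assign(actual.begin(), actual.begin() + expected);
      } else {
        // "too few": copy all actuals, then fill the rest with undefined.
        frame = actual;
        frame.resize(expected, undefined);
      }
      return frame;
    }
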
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
   CpuFeatures::TryForceFeatureScope scope(SSE2);
-  if (!CpuFeatures::IsSupported(SSE2)) {
+  if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) {
     __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
     return;
   }
@@ -1616,20 +1688,22 @@
 
   // Pass the function to optimize as the argument to the on-stack
   // replacement runtime function.
-  __ EnterInternalFrame();
-  __ push(eax);
-  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(eax);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
   Label skip;
-  __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
+  __ cmp(eax, Immediate(Smi::FromInt(-1)));
   __ j(not_equal, &skip, Label::kNear);
   __ ret(0);
 
-  // If we decide not to perform on-stack replacement we perform a
-  // stack guard check to enable interrupts.
+  // Insert a stack guard check so that if we decide not to perform
+  // on-stack replacement right away, the function calling this stub can
+  // still be interrupted.
   __ bind(&stack_check);
   Label ok;
   ExternalReference stack_limit =
@@ -1638,7 +1712,9 @@
   __ j(above_equal, &ok, Label::kNear);
   StackCheckStub stub;
   __ TailCallStub(&stub);
-  __ Abort("Unreachable code: returned from tail call.");
+  if (FLAG_debug_code) {
+    __ Abort("Unreachable code: returned from tail call.");
+  }
   __ bind(&ok);
   __ ret(0);
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 8a5bd50..4faa6a4 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,6 +34,8 @@
 #include "isolate.h"
 #include "jsregexp.h"
 #include "regexp-macro-assembler.h"
+#include "stub-cache.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -49,7 +51,7 @@
   __ bind(&check_heap_number);
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   Factory* factory = masm->isolate()->factory();
-  __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+  __ cmp(ebx, Immediate(factory->heap_number_map()));
   __ j(not_equal, &call_builtin, Label::kNear);
   __ ret(0);
 
@@ -70,9 +72,9 @@
   // Get the function info from the stack.
   __ mov(edx, Operand(esp, 1 * kPointerSize));
 
-  int map_index = strict_mode_ == kStrictMode
-      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
-      : Context::FUNCTION_MAP_INDEX;
+  int map_index = (language_mode_ == CLASSIC_MODE)
+      ? Context::FUNCTION_MAP_INDEX
+      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
@@ -126,14 +128,14 @@
   // Get the function from the stack.
   __ mov(ecx, Operand(esp, 1 * kPointerSize));
 
-  // Setup the object header.
+  // Set up the object header.
   Factory* factory = masm->isolate()->factory();
   __ mov(FieldOperand(eax, HeapObject::kMapOffset),
          factory->function_context_map());
   __ mov(FieldOperand(eax, Context::kLengthOffset),
          Immediate(Smi::FromInt(length)));
 
-  // Setup the fixed slots.
+  // Set up the fixed slots.
   __ Set(ebx, Immediate(0));  // Set to NULL.
   __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
   __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
@@ -150,7 +152,7 @@
   }
 
   // Return and remove the on-stack parameter.
-  __ mov(esi, Operand(eax));
+  __ mov(esi, eax);
   __ ret(1 * kPointerSize);
 
   // Need to collect. Call into runtime system.
@@ -159,6 +161,139 @@
 }
 
 
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [esp + (1 * kPointerSize)]: function
+  // [esp + (2 * kPointerSize)]: serialized scope info
+
+  // Try to allocate the context in new space.
+  Label gc;
+  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+  __ AllocateInNewSpace(FixedArray::SizeFor(length),
+                        eax, ebx, ecx, &gc, TAG_OBJECT);
+
+  // Get the function or sentinel from the stack.
+  __ mov(ecx, Operand(esp, 1 * kPointerSize));
+
+  // Get the serialized scope info from the stack.
+  __ mov(ebx, Operand(esp, 2 * kPointerSize));
+
+  // Set up the object header.
+  Factory* factory = masm->isolate()->factory();
+  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+         factory->block_context_map());
+  __ mov(FieldOperand(eax, Context::kLengthOffset),
+         Immediate(Smi::FromInt(length)));
+
+  // If this block context is nested in the global context, we get a smi
+  // sentinel instead of a function. The block context should get the
+  // canonical empty function of the global context as its closure, which
+  // we still have to look up.
+  Label after_sentinel;
+  __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
+  if (FLAG_debug_code) {
+    const char* message = "Expected 0 as a Smi sentinel";
+    __ cmp(ecx, 0);
+    __ Assert(equal, message);
+  }
+  __ mov(ecx, GlobalObjectOperand());
+  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
+  __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
+  __ bind(&after_sentinel);
+
+  // Set up the fixed slots.
+  __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
+  __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
+  __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
+
+  // Copy the global object from the previous context.
+  __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX));
+  __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx);
+
+  // Initialize the rest of the slots to the hole value.
+  if (slots_ == 1) {
+    __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
+           factory->the_hole_value());
+  } else {
+    __ mov(ebx, factory->the_hole_value());
+    for (int i = 0; i < slots_; i++) {
+      __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
+    }
+  }
+
+  // Return and remove the on-stack parameters.
+  __ mov(esi, eax);
+  __ ret(2 * kPointerSize);
+
+  // Need to collect. Call into runtime system.
+  __ bind(&gc);
+  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
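
For orientation, the object this stub builds has the following shape (a
sketch only; the field names are illustrative, and the fixed-slot order
follows Context::CLOSURE_INDEX through Context::GLOBAL_INDEX):

    #include <cstdint>

    struct BlockContextSketch {
      void*    map;        // block_context_map
      intptr_t length;     // Smi::FromInt(slots_ + Context::MIN_CONTEXT_SLOTS)
      void*    closure;    // the function, or the canonical empty function
                           // looked up through the global context
      void*    previous;   // esi on entry
      void*    extension;  // the serialized scope info
      void*    global;     // copied from the previous context
      // ...followed by slots_ entries, all initialized to the hole value.
    };
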
+
+
+static void GenerateFastCloneShallowArrayCommon(
+    MacroAssembler* masm,
+    int length,
+    FastCloneShallowArrayStub::Mode mode,
+    Label* fail) {
+  // Registers on entry:
+  //
+  // ecx: boilerplate literal array.
+  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = 0;
+  if (length > 0) {
+    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        ? FixedDoubleArray::SizeFor(length)
+        : FixedArray::SizeFor(length);
+  }
+  int size = JSArray::kSize + elements_size;
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
+      __ mov(ebx, FieldOperand(ecx, i));
+      __ mov(FieldOperand(eax, i), ebx);
+    }
+  }
+
+  if (length > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
+    __ lea(edx, Operand(eax, JSArray::kSize));
+    __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
+
+    // Copy the elements array.
+    if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
+      for (int i = 0; i < elements_size; i += kPointerSize) {
+        __ mov(ebx, FieldOperand(ecx, i));
+        __ mov(FieldOperand(edx, i), ebx);
+      }
+    } else {
+      ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
+      int i;
+      for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
+        __ mov(ebx, FieldOperand(ecx, i));
+        __ mov(FieldOperand(edx, i), ebx);
+      }
+      while (i < elements_size) {
+        __ fld_d(FieldOperand(ecx, i));
+        __ fstp_d(FieldOperand(edx, i));
+        i += kDoubleSize;
+      }
+      ASSERT(i == elements_size);
+    }
+  }
+}
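
The copy above runs in two regimes: pointer-size moves for the JSArray header
and plain elements, and 8-byte x87 fld_d/fstp_d pairs for a double payload.
Roughly, in portable C++ (memcpy stands in for the register moves):

    #include <cstdint>
    #include <cstring>

    void CopyDoubleElementsArray(const uint8_t* src, uint8_t* dst,
                                 int header_size, int total_size) {
      int i = 0;
      for (; i < header_size; i += sizeof(void*)) {  // FixedDoubleArray header
        std::memcpy(dst + i, src + i, sizeof(void*));
      }
      for (; i < total_size; i += sizeof(double)) {  // payload, 8 bytes a step
        std::memcpy(dst + i, src + i, sizeof(double));
      }
    }
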
+
+
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   // Stack layout on entry:
   //
@@ -166,13 +301,8 @@
   // [esp + (2 * kPointerSize)]: literal index.
   // [esp + (3 * kPointerSize)]: literals array.
 
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
-  int size = JSArray::kSize + elements_size;
-
   // Load boilerplate object into ecx and check if we need to create a
   // boilerplate.
-  Label slow_case;
   __ mov(ecx, Operand(esp, 3 * kPointerSize));
   __ mov(eax, Operand(esp, 2 * kPointerSize));
   STATIC_ASSERT(kPointerSize == 4);
@@ -182,16 +312,43 @@
                            FixedArray::kHeaderSize));
   Factory* factory = masm->isolate()->factory();
   __ cmp(ecx, factory->undefined_value());
+  Label slow_case;
   __ j(equal, &slow_case);
 
+  FastCloneShallowArrayStub::Mode mode = mode_;
+  // ecx is boilerplate object.
+  if (mode == CLONE_ANY_ELEMENTS) {
+    Label double_elements, check_fast_elements;
+    __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
+    __ CheckMap(ebx, factory->fixed_cow_array_map(),
+                &check_fast_elements, DONT_DO_SMI_CHECK);
+    GenerateFastCloneShallowArrayCommon(masm, 0,
+                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&check_fast_elements);
+    __ CheckMap(ebx, factory->fixed_array_map(),
+                &double_elements, DONT_DO_SMI_CHECK);
+    GenerateFastCloneShallowArrayCommon(masm, length_,
+                                        CLONE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&double_elements);
+    mode = CLONE_DOUBLE_ELEMENTS;
+    // Fall through to generate the code to handle double elements.
+  }
+
   if (FLAG_debug_code) {
     const char* message;
     Handle<Map> expected_map;
-    if (mode_ == CLONE_ELEMENTS) {
+    if (mode == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map = factory->fixed_array_map();
+    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+      message = "Expected (writable) fixed double array";
+      expected_map = factory->fixed_double_array_map();
     } else {
-      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map = factory->fixed_cow_array_map();
     }
@@ -202,32 +359,7 @@
     __ pop(ecx);
   }
 
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
-      __ mov(ebx, FieldOperand(ecx, i));
-      __ mov(FieldOperand(eax, i), ebx);
-    }
-  }
-
-  if (length_ > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
-    __ lea(edx, Operand(eax, JSArray::kSize));
-    __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
-
-    // Copy the elements array.
-    for (int i = 0; i < elements_size; i += kPointerSize) {
-      __ mov(ebx, FieldOperand(ecx, i));
-      __ mov(FieldOperand(edx, i), ebx);
-    }
-  }
-
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
   // Return and remove the on-stack parameters.
   __ ret(3 * kPointerSize);
 
@@ -236,9 +368,57 @@
 }
 
 
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [esp + kPointerSize]: object literal flags.
+  // [esp + (2 * kPointerSize)]: constant properties.
+  // [esp + (3 * kPointerSize)]: literal index.
+  // [esp + (4 * kPointerSize)]: literals array.
+
+  // Load boilerplate object into ecx and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ mov(ecx, Operand(esp, 4 * kPointerSize));
+  __ mov(eax, Operand(esp, 3 * kPointerSize));
+  STATIC_ASSERT(kPointerSize == 4);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
+                           FixedArray::kHeaderSize));
+  Factory* factory = masm->isolate()->factory();
+  __ cmp(ecx, factory->undefined_value());
+  __ j(equal, &slow_case);
+
+  // Check that the boilerplate contains only fast properties and we can
+  // statically determine the instance size.
+  int size = JSObject::kHeaderSize + length_ * kPointerSize;
+  __ mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceSizeOffset));
+  __ cmp(eax, Immediate(size >> kPointerSizeLog2));
+  __ j(not_equal, &slow_case);
+
+  // Allocate the JS object and copy header together with all in-object
+  // properties from the boilerplate.
+  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
+  for (int i = 0; i < size; i += kPointerSize) {
+    __ mov(ebx, FieldOperand(ecx, i));
+    __ mov(FieldOperand(eax, i), ebx);
+  }
+
+  // Return and remove the on-stack parameters.
+  __ ret(4 * kPointerSize);
+
+  __ bind(&slow_case);
+  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+}
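
The inline-clone guard works because a Map stores the instance size in words
in a single byte, so one movzx_b and a compare against size >>
kPointerSizeLog2 decide whether the boilerplate can be copied here. As plain
C++ (the constants assume ia32: 4-byte pointers and a three-word JSObject
header):

    #include <cstdint>

    const int kPointerSize = 4;
    const int kPointerSizeLog2 = 2;
    const int kJSObjectHeaderSize = 3 * kPointerSize;  // map, properties, elements

    bool CanCloneShallowObjectInline(uint8_t instance_size_in_words, int length) {
      int size = kJSObjectHeaderSize + length * kPointerSize;
      return instance_size_in_words == (size >> kPointerSizeLog2);
    }
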
+
+
 // The stub expects its argument on the stack and returns its result in tos_:
 // zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   Label patch;
   Factory* factory = masm->isolate()->factory();
   const Register argument = eax;
@@ -336,6 +516,41 @@
 }
 
 
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ pushad();
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      __ movdbl(Operand(esp, i * kDoubleSize), reg);
+    }
+  }
+  const int argument_count = 1;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, ecx);
+  __ mov(Operand(esp, 0 * kPointerSize),
+         Immediate(ExternalReference::isolate_address()));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      __ movdbl(reg, Operand(esp, i * kDoubleSize));
+    }
+    __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+  }
+  __ popad();
+  __ ret(0);
+}
+
+
 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                  Type type,
                                  Heap::RootListIndex value,
@@ -470,27 +685,27 @@
     // Check whether the exponent is too big for a 64 bit signed integer.
     static const uint32_t kTooBigExponent =
         (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
-    __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+    __ cmp(scratch2, Immediate(kTooBigExponent));
     __ j(greater_equal, conversion_failure);
     // Load x87 register with heap number.
     __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
     // Reserve space for 64 bit answer.
-    __ sub(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
+    __ sub(esp, Immediate(sizeof(uint64_t)));  // Nolint.
     // Do conversion, which cannot fail because we checked the exponent.
     __ fisttp_d(Operand(esp, 0));
     __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
-    __ add(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
+    __ add(esp, Immediate(sizeof(uint64_t)));  // Nolint.
   } else {
     // Load ecx with zero.  We use this either for the final shift or
     // for the answer.
-    __ xor_(ecx, Operand(ecx));
+    __ xor_(ecx, ecx);
     // Check whether the exponent matches a 32 bit signed int that cannot be
     // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
     // exponent is 30 (biased).  This is the exponent that we are fastest at and
     // also the highest exponent we can handle here.
     const uint32_t non_smi_exponent =
         (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-    __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+    __ cmp(scratch2, Immediate(non_smi_exponent));
     // If we have a match of the int32-but-not-Smi exponent then skip some
     // logic.
     __ j(equal, &right_exponent, Label::kNear);
@@ -503,7 +718,7 @@
       // >>> operator has a tendency to generate numbers with an exponent of 31.
       const uint32_t big_non_smi_exponent =
           (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
-      __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+      __ cmp(scratch2, Immediate(big_non_smi_exponent));
       __ j(not_equal, conversion_failure);
       // We have the big exponent, typically from >>>.  This means the number is
       // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
@@ -522,9 +737,9 @@
       // Shift down 21 bits to get the most significant 11 bits or the low
       // mantissa word.
       __ shr(ecx, 32 - big_shift_distance);
-      __ or_(ecx, Operand(scratch2));
+      __ or_(ecx, scratch2);
       // We have the answer in ecx, but we may need to negate it.
-      __ test(scratch, Operand(scratch));
+      __ test(scratch, scratch);
       __ j(positive, &done, Label::kNear);
       __ neg(ecx);
       __ jmp(&done, Label::kNear);
@@ -534,18 +749,18 @@
     // Exponent word in scratch, exponent part of exponent word in scratch2.
     // Zero in ecx.
     // We know the exponent is smaller than 30 (biased).  If it is less than
-    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
     // it rounds to zero.
     const uint32_t zero_exponent =
         (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
-    __ sub(Operand(scratch2), Immediate(zero_exponent));
+    __ sub(scratch2, Immediate(zero_exponent));
     // ecx already has a Smi zero.
     __ j(less, &done, Label::kNear);
 
     // We have a shifted exponent between 0 and 30 in scratch2.
     __ shr(scratch2, HeapNumber::kExponentShift);
     __ mov(ecx, Immediate(30));
-    __ sub(ecx, Operand(scratch2));
+    __ sub(ecx, scratch2);
 
     __ bind(&right_exponent);
     // Here ecx is the shift, scratch is the exponent word.
@@ -565,19 +780,19 @@
     // Shift down 22 bits to get the most significant 10 bits or the low
     // mantissa word.
     __ shr(scratch2, 32 - shift_distance);
-    __ or_(scratch2, Operand(scratch));
+    __ or_(scratch2, scratch);
     // Move down according to the exponent.
     __ shr_cl(scratch2);
     // Now the unsigned answer is in scratch2.  We need to move it to ecx and
     // we may need to fix the sign.
     Label negative;
-    __ xor_(ecx, Operand(ecx));
+    __ xor_(ecx, ecx);
     __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
     __ j(greater, &negative, Label::kNear);
     __ mov(ecx, scratch2);
     __ jmp(&done, Label::kNear);
     __ bind(&negative);
-    __ sub(ecx, Operand(scratch2));
+    __ sub(ecx, scratch2);
     __ bind(&done);
   }
 }
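
The exponent/mantissa arithmetic in the non-SSE3 path can be written out in
portable C++. This sketch covers only the branch that reaches &done with a
biased exponent of at most 30; larger exponents and the SSE3 fisttp path are
handled separately above:

    #include <cstdint>
    #include <cstring>

    int32_t DoubleToInt32Small(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // exponent word
      uint32_t lo = static_cast<uint32_t>(bits);        // low mantissa word
      int exponent = static_cast<int>((hi >> 20) & 0x7FF) - 1023;
      if (exponent < 0) return 0;  // smaller in magnitude than 1.0: zero
      // Top 31 significand bits, implicit leading 1 included:
      uint32_t mantissa = (((hi & 0xFFFFF) | 0x100000) << 10) | (lo >> 22);
      uint32_t magnitude = mantissa >> (30 - exponent);  // the shr_cl step
      return (hi & 0x80000000u) ? -static_cast<int32_t>(magnitude)
                                : static_cast<int32_t>(magnitude);
    }
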
@@ -679,13 +894,13 @@
   __ JumpIfNotSmi(eax, non_smi, non_smi_near);
 
   // We can't handle -0 with smis, so use a type transition for that case.
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(zero, slow, slow_near);
 
   // Try optimistic subtraction '0 - value', saving operand in eax for undo.
-  __ mov(edx, Operand(eax));
+  __ mov(edx, eax);
   __ Set(eax, Immediate(0));
-  __ sub(eax, Operand(edx));
+  __ sub(eax, edx);
   __ j(overflow, undo, undo_near);
   __ ret(0);
 }
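
Spelled out, the two bail-outs: negation is computed as 0 - value, which must
fail for 0 (the result would be -0, which no smi can represent) and on signed
overflow (the most negative value). A sketch on untagged values, using a
GCC/Clang builtin:

    #include <cstdint>

    // Returns false where the stub jumps to 'slow' or 'undo'.
    bool TryNegateSmiValue(int32_t value, int32_t* result) {
      if (value == 0) return false;  // -0 needs a heap number
      if (__builtin_sub_overflow(0, value, result)) {
        return false;                // overflow: undo, take the slow path
      }
      return true;
    }
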
@@ -706,7 +921,7 @@
 
 
 void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
-  __ mov(eax, Operand(edx));
+  __ mov(eax, edx);
 }
 
 
@@ -760,7 +975,7 @@
     __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
             Immediate(HeapNumber::kSignMask));  // Flip sign.
   } else {
-    __ mov(edx, Operand(eax));
+    __ mov(edx, eax);
     // edx: operand
 
     Label slow_allocate_heapnumber, heapnumber_allocated;
@@ -768,11 +983,12 @@
     __ jmp(&heapnumber_allocated, Label::kNear);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(edx);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ pop(edx);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(edx);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ pop(edx);
+    }
 
     __ bind(&heapnumber_allocated);
     // eax: allocated 'empty' number
@@ -815,15 +1031,16 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    // Push the original HeapNumber on the stack. The integer value can't
-    // be stored since it's untagged and not in the smi range (so we can't
-    // smi-tag it). We'll recalculate the value after the GC instead.
-    __ push(ebx);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    // New HeapNumber is in eax.
-    __ pop(edx);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Push the original HeapNumber on the stack. The integer value can't
+      // be stored since it's untagged and not in the smi range (so we can't
+      // smi-tag it). We'll recalculate the value after the GC instead.
+      __ push(ebx);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      // New HeapNumber is in eax.
+      __ pop(edx);
+    }
     // IntegerConvert uses ebx and edi as scratch registers.
     // This conversion won't go slow-case.
     IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
@@ -833,7 +1050,7 @@
   }
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope use_sse2(SSE2);
-    __ cvtsi2sd(xmm0, Operand(ecx));
+    __ cvtsi2sd(xmm0, ecx);
     __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
   } else {
     __ push(ecx);
@@ -947,6 +1164,10 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
+  // Explicitly allow generation of nested stubs. It is safe here because
+  // generation code does not use any raw pointers.
+  AllowStubCallsScope allow_stub_calls(masm, true);
+
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -1022,7 +1243,7 @@
       // eax in case the result is not a smi.
       ASSERT(!left.is(ecx) && !right.is(ecx));
       __ mov(ecx, right);
-      __ or_(right, Operand(left));  // Bitwise or is commutative.
+      __ or_(right, left);  // Bitwise or is commutative.
       combined = right;
       break;
 
@@ -1034,7 +1255,7 @@
     case Token::DIV:
     case Token::MOD:
       __ mov(combined, right);
-      __ or_(combined, Operand(left));
+      __ or_(combined, left);
       break;
 
     case Token::SHL:
@@ -1044,7 +1265,7 @@
       // for the smi check register.
       ASSERT(!left.is(ecx) && !right.is(ecx));
       __ mov(ecx, right);
-      __ or_(right, Operand(left));
+      __ or_(right, left);
       combined = right;
       break;
 
@@ -1067,12 +1288,12 @@
 
     case Token::BIT_XOR:
       ASSERT(right.is(eax));
-      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
+      __ xor_(right, left);  // Bitwise xor is commutative.
       break;
 
     case Token::BIT_AND:
       ASSERT(right.is(eax));
-      __ and_(right, Operand(left));  // Bitwise and is commutative.
+      __ and_(right, left);  // Bitwise and is commutative.
       break;
 
     case Token::SHL:
@@ -1121,12 +1342,12 @@
 
     case Token::ADD:
       ASSERT(right.is(eax));
-      __ add(right, Operand(left));  // Addition is commutative.
+      __ add(right, left);  // Addition is commutative.
       __ j(overflow, &use_fp_on_smis);
       break;
 
     case Token::SUB:
-      __ sub(left, Operand(right));
+      __ sub(left, right);
       __ j(overflow, &use_fp_on_smis);
       __ mov(eax, left);
       break;
@@ -1140,7 +1361,7 @@
       // Remove tag from one of the operands (but keep sign).
       __ SmiUntag(right);
       // Do multiplication.
-      __ imul(right, Operand(left));  // Multiplication is commutative.
+      __ imul(right, left);  // Multiplication is commutative.
       __ j(overflow, &use_fp_on_smis);
       // Check for negative zero result.  Use combined = left | right.
       __ NegativeZeroTest(right, combined, &use_fp_on_smis);
@@ -1151,7 +1372,7 @@
       // save the left operand.
       __ mov(edi, left);
       // Check for 0 divisor.
-      __ test(right, Operand(right));
+      __ test(right, right);
       __ j(zero, &use_fp_on_smis);
       // Sign extend left into edx:eax.
       ASSERT(left.is(eax));
@@ -1167,7 +1388,7 @@
       // Check for negative zero result.  Use combined = left | right.
       __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
       // Check that the remainder is zero.
-      __ test(edx, Operand(edx));
+      __ test(edx, edx);
       __ j(not_zero, &use_fp_on_smis);
       // Tag the result and store it in register eax.
       __ SmiTag(eax);
@@ -1175,7 +1396,7 @@
 
     case Token::MOD:
       // Check for 0 divisor.
-      __ test(right, Operand(right));
+      __ test(right, right);
       __ j(zero, &not_smis);
 
       // Sign extend left into edx:eax.
@@ -1226,11 +1447,11 @@
         break;
       case Token::ADD:
         // Revert right = right + left.
-        __ sub(right, Operand(left));
+        __ sub(right, left);
         break;
       case Token::SUB:
         // Revert left = left - right.
-        __ add(left, Operand(right));
+        __ add(left, right);
         break;
       case Token::MUL:
         // Right was clobbered but a copy is in ebx.
@@ -1268,7 +1489,7 @@
           ASSERT_EQ(Token::SHL, op_);
           if (CpuFeatures::IsSupported(SSE2)) {
             CpuFeatures::Scope use_sse2(SSE2);
-            __ cvtsi2sd(xmm0, Operand(left));
+            __ cvtsi2sd(xmm0, left);
             __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
           } else {
             __ mov(Operand(esp, 1 * kPointerSize), left);
@@ -1290,11 +1511,11 @@
         switch (op_) {
           case Token::ADD:
             // Revert right = right + left.
-            __ sub(right, Operand(left));
+            __ sub(right, left);
             break;
           case Token::SUB:
             // Revert left = left - right.
-            __ add(left, Operand(right));
+            __ add(left, right);
             break;
           case Token::MUL:
             // Right was clobbered but a copy is in ebx.
@@ -1486,7 +1707,7 @@
         // Check result type if it is currently Int32.
         if (result_type_ <= BinaryOpIC::INT32) {
           __ cvttsd2si(ecx, Operand(xmm0));
-          __ cvtsi2sd(xmm2, Operand(ecx));
+          __ cvtsi2sd(xmm2, ecx);
           __ ucomisd(xmm0, xmm2);
           __ j(not_zero, &not_int32);
           __ j(carry, &not_int32);
@@ -1548,9 +1769,9 @@
       FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
                                                         &not_int32);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::BIT_OR:  __ or_(eax, ecx); break;
+        case Token::BIT_AND: __ and_(eax, ecx); break;
+        case Token::BIT_XOR: __ xor_(eax, ecx); break;
         case Token::SAR: __ sar_cl(eax); break;
         case Token::SHL: __ shl_cl(eax); break;
         case Token::SHR: __ shr_cl(eax); break;
@@ -1574,7 +1795,7 @@
       if (op_ != Token::SHR) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
-        __ mov(ebx, Operand(eax));  // ebx: result
+        __ mov(ebx, eax);  // ebx: result
         Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
@@ -1594,7 +1815,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1675,7 +1896,7 @@
   __ cmp(edx, factory->undefined_value());
   __ j(not_equal, &check, Label::kNear);
   if (Token::IsBitOp(op_)) {
-    __ xor_(edx, Operand(edx));
+    __ xor_(edx, edx);
   } else {
     __ mov(edx, Immediate(factory->nan_value()));
   }
@@ -1684,7 +1905,7 @@
   __ cmp(eax, factory->undefined_value());
   __ j(not_equal, &done, Label::kNear);
   if (Token::IsBitOp(op_)) {
-    __ xor_(eax, Operand(eax));
+    __ xor_(eax, eax);
   } else {
     __ mov(eax, Immediate(factory->nan_value()));
   }
@@ -1762,9 +1983,9 @@
                                                   use_sse3_,
                                                   &not_floats);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::BIT_OR:  __ or_(eax, ecx); break;
+        case Token::BIT_AND: __ and_(eax, ecx); break;
+        case Token::BIT_XOR: __ xor_(eax, ecx); break;
         case Token::SAR: __ sar_cl(eax); break;
         case Token::SHL: __ shl_cl(eax); break;
         case Token::SHR: __ shr_cl(eax); break;
@@ -1788,7 +2009,7 @@
       if (op_ != Token::SHR) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
-        __ mov(ebx, Operand(eax));  // ebx: result
+        __ mov(ebx, eax);  // ebx: result
         Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
@@ -1808,7 +2029,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1961,9 +2182,9 @@
                                                   use_sse3_,
                                                   &call_runtime);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::BIT_OR:  __ or_(eax, ecx); break;
+        case Token::BIT_AND: __ and_(eax, ecx); break;
+        case Token::BIT_XOR: __ xor_(eax, ecx); break;
         case Token::SAR: __ sar_cl(eax); break;
         case Token::SHL: __ shl_cl(eax); break;
         case Token::SHR: __ shr_cl(eax); break;
@@ -1987,7 +2208,7 @@
       if (op_ != Token::SHR) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
-        __ mov(ebx, Operand(eax));  // ebx: result
+        __ mov(ebx, eax);  // ebx: result
         Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
@@ -2007,7 +2228,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2117,10 +2338,10 @@
       __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
      // Now edx can be overwritten, losing one of the arguments, as we
      // are now done and will not need it any more.
-      __ mov(edx, Operand(ebx));
+      __ mov(edx, ebx);
       __ bind(&skip_allocation);
       // Use object in edx as a result holder
-      __ mov(eax, Operand(edx));
+      __ mov(eax, edx);
       break;
     }
     case OVERWRITE_RIGHT:
@@ -2178,7 +2399,7 @@
     // Then load the low and high words of the double into ebx, edx.
     STATIC_ASSERT(kSmiTagSize == 1);
     __ sar(eax, 1);
-    __ sub(Operand(esp), Immediate(2 * kPointerSize));
+    __ sub(esp, Immediate(2 * kPointerSize));
     __ mov(Operand(esp, 0), eax);
     __ fild_s(Operand(esp, 0));
     __ fst_d(Operand(esp, 0));
@@ -2189,7 +2410,7 @@
     // Check if input is a HeapNumber.
     __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
     Factory* factory = masm->isolate()->factory();
-    __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+    __ cmp(ebx, Immediate(factory->heap_number_map()));
     __ j(not_equal, &runtime_call);
     // Input is a HeapNumber. Push it on the FPU stack and load its
     // low and high words into ebx, edx.
@@ -2201,12 +2422,12 @@
   } else {  // UNTAGGED.
     if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope sse4_scope(SSE4_1);
-      __ pextrd(Operand(edx), xmm1, 0x1);  // copy xmm1[63..32] to edx.
+      __ pextrd(edx, xmm1, 0x1);  // copy xmm1[63..32] to edx.
     } else {
       __ pshufd(xmm0, xmm1, 0x1);
-      __ movd(Operand(edx), xmm0);
+      __ movd(edx, xmm0);
     }
-    __ movd(Operand(ebx), xmm1);
+    __ movd(ebx, xmm1);
   }
 
   // ST[0] or xmm1  == double value
@@ -2215,15 +2436,15 @@
   // Compute hash (the shifts are arithmetic):
   //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
   __ mov(ecx, ebx);
-  __ xor_(ecx, Operand(edx));
+  __ xor_(ecx, edx);
   __ mov(eax, ecx);
   __ sar(eax, 16);
-  __ xor_(ecx, Operand(eax));
+  __ xor_(ecx, eax);
   __ mov(eax, ecx);
   __ sar(eax, 8);
-  __ xor_(ecx, Operand(eax));
+  __ xor_(ecx, eax);
   ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
-  __ and_(Operand(ecx),
+  __ and_(ecx,
           Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
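
The same hash in C++ (the intermediate is signed because the stub uses sar,
i.e. arithmetic shifts; kCacheSize is a power of two, so the final mask
replaces a modulo):

    #include <cstdint>

    uint32_t TranscendentalCacheIndex(int32_t low, int32_t high,
                                      uint32_t cache_size) {
      int32_t h = low ^ high;  // xor of the double's two 32-bit words
      h ^= h >> 16;            // >> on a negative int is arithmetic on the
      h ^= h >> 8;             // compilers V8 targets, matching sar
      return static_cast<uint32_t>(h) & (cache_size - 1);
    }
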
 
   // ST[0] or xmm1 == double value.
@@ -2238,7 +2459,7 @@
   __ mov(eax, Operand(eax, cache_array_index));
   // Eax points to the cache for the type type_.
   // If NULL, the cache hasn't been initialized yet, so go through runtime.
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(zero, &runtime_call_clear_stack);
 #ifdef DEBUG
   // Check that the layout of cache elements matches expectations.
@@ -2264,6 +2485,8 @@
   __ cmp(edx, Operand(ecx, kIntSize));
   __ j(not_equal, &cache_miss, Label::kNear);
   // Cache hit!
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
   __ mov(eax, Operand(ecx, 2 * kIntSize));
   if (tagged) {
     __ fstp(0);
@@ -2274,6 +2497,7 @@
   }
 
   __ bind(&cache_miss);
+  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
   // Update cache with new value.
   // We are short on registers, so use no_reg as scratch.
   // This gives slightly larger code.
@@ -2281,12 +2505,12 @@
     __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
   } else {  // UNTAGGED.
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
-    __ sub(Operand(esp), Immediate(kDoubleSize));
+    __ sub(esp, Immediate(kDoubleSize));
     __ movdbl(Operand(esp, 0), xmm1);
     __ fld_d(Operand(esp, 0));
-    __ add(Operand(esp), Immediate(kDoubleSize));
+    __ add(esp, Immediate(kDoubleSize));
   }
-  GenerateOperation(masm);
+  GenerateOperation(masm, type_);
   __ mov(Operand(ecx, 0), ebx);
   __ mov(Operand(ecx, kIntSize), edx);
   __ mov(Operand(ecx, 2 * kIntSize), eax);
@@ -2299,20 +2523,21 @@
 
     // Skip cache and return answer directly, only in untagged case.
     __ bind(&skip_cache);
-    __ sub(Operand(esp), Immediate(kDoubleSize));
+    __ sub(esp, Immediate(kDoubleSize));
     __ movdbl(Operand(esp, 0), xmm1);
     __ fld_d(Operand(esp, 0));
-    GenerateOperation(masm);
+    GenerateOperation(masm, type_);
     __ fstp_d(Operand(esp, 0));
     __ movdbl(xmm1, Operand(esp, 0));
-    __ add(Operand(esp), Immediate(kDoubleSize));
+    __ add(esp, Immediate(kDoubleSize));
     // We return the value in xmm1 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
-    // Allocate an unused object bigger than a HeapNumber.
-    __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Allocate an unused object bigger than a HeapNumber.
+      __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 
@@ -2329,10 +2554,11 @@
     __ bind(&runtime_call);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
     __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
-    __ EnterInternalFrame();
-    __ push(eax);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(eax);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
   }
@@ -2343,6 +2569,7 @@
   switch (type_) {
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -2351,12 +2578,15 @@
 }
 
 
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
+void TranscendentalCacheStub::GenerateOperation(
+    MacroAssembler* masm, TranscendentalCache::Type type) {
   // Only free register is edi.
   // Input value is on FP stack, and also in ebx/edx.
   // Input value is possibly in xmm1.
   // Address of result (a newly allocated HeapNumber) may be in eax.
-  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
+  if (type == TranscendentalCache::SIN ||
+      type == TranscendentalCache::COS ||
+      type == TranscendentalCache::TAN) {
     // Both fsin and fcos require arguments in the range +/-2^63 and
     // return NaN for infinities and NaN. They can share all code except
     // the actual fsin/fcos operation.
@@ -2364,13 +2594,13 @@
     // If argument is outside the range -2^63..2^63, fsin/cos doesn't
     // work. We must reduce it to the appropriate range.
     __ mov(edi, edx);
-    __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
+    __ and_(edi, Immediate(0x7ff00000));  // Exponent only.
     int supported_exponent_limit =
         (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
-    __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+    __ cmp(edi, Immediate(supported_exponent_limit));
     __ j(below, &in_range, Label::kNear);
     // Check for infinity and NaN. Both return NaN for sin.
-    __ cmp(Operand(edi), Immediate(0x7ff00000));
+    __ cmp(edi, Immediate(0x7ff00000));
     Label non_nan_result;
     __ j(not_equal, &non_nan_result, Label::kNear);
     // Input is +/-Infinity or NaN. Result is NaN.
@@ -2379,7 +2609,7 @@
     __ push(Immediate(0x7ff80000));
     __ push(Immediate(0));
     __ fld_d(Operand(esp, 0));
-    __ add(Operand(esp), Immediate(2 * kPointerSize));
+    __ add(esp, Immediate(2 * kPointerSize));
     __ jmp(&done, Label::kNear);
 
     __ bind(&non_nan_result);
@@ -2395,7 +2625,7 @@
       __ fwait();
       __ fnstsw_ax();
       // Clear if Illegal Operand or Zero Division exceptions are set.
-      __ test(Operand(eax), Immediate(5));
+      __ test(eax, Immediate(5));
       __ j(zero, &no_exceptions, Label::kNear);
       __ fnclex();
       __ bind(&no_exceptions);
@@ -2408,7 +2638,7 @@
       __ fprem1();
       __ fwait();
       __ fnstsw_ax();
-      __ test(Operand(eax), Immediate(0x400 /* C2 */));
+      __ test(eax, Immediate(0x400 /* C2 */));
       // If C2 is set, computation only has partial result. Loop to
       // continue computation.
       __ j(not_zero, &partial_remainder_loop);
@@ -2420,19 +2650,25 @@
 
     // FPU Stack: input % 2*pi
     __ bind(&in_range);
-    switch (type_) {
+    switch (type) {
       case TranscendentalCache::SIN:
         __ fsin();
         break;
       case TranscendentalCache::COS:
         __ fcos();
         break;
+      case TranscendentalCache::TAN:
+        // FPTAN calculates the tangent of st(0) and pushes 1.0 onto the
+        // FP register stack.
+        __ fptan();
+        __ fstp(0);  // Pop FP register stack.
+        break;
       default:
         UNREACHABLE();
     }
     __ bind(&done);
   } else {
-    ASSERT(type_ == TranscendentalCache::LOG);
+    ASSERT(type == TranscendentalCache::LOG);
     __ fldln2();
     __ fxch();
     __ fyl2x();
@@ -2541,13 +2777,13 @@
 
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
   __ jmp(&load_eax);
 
   __ bind(&load_smi_eax);
   __ SmiUntag(eax);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm1, Operand(eax));
+  __ cvtsi2sd(xmm1, eax);
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
 
   __ bind(&done);
@@ -2571,12 +2807,12 @@
   __ jmp(not_numbers);  // Argument in eax is not a number.
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
   __ jmp(&load_eax);
   __ bind(&load_smi_eax);
   __ SmiUntag(eax);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm1, Operand(eax));
+  __ cvtsi2sd(xmm1, eax);
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
   __ jmp(&done, Label::kNear);
   __ bind(&load_float_eax);
@@ -2592,11 +2828,11 @@
   __ mov(scratch, left);
   ASSERT(!scratch.is(right));  // We're about to clobber scratch.
   __ SmiUntag(scratch);
-  __ cvtsi2sd(xmm0, Operand(scratch));
+  __ cvtsi2sd(xmm0, scratch);
 
   __ mov(scratch, right);
   __ SmiUntag(scratch);
-  __ cvtsi2sd(xmm1, Operand(scratch));
+  __ cvtsi2sd(xmm1, scratch);
 }
 
 
@@ -2604,12 +2840,12 @@
                                                     Label* non_int32,
                                                     Register scratch) {
   __ cvttsd2si(scratch, Operand(xmm0));
-  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ cvtsi2sd(xmm2, scratch);
   __ ucomisd(xmm0, xmm2);
   __ j(not_zero, non_int32);
   __ j(carry, non_int32);
   __ cvttsd2si(scratch, Operand(xmm1));
-  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ cvtsi2sd(xmm2, scratch);
   __ ucomisd(xmm1, xmm2);
   __ j(not_zero, non_int32);
   __ j(carry, non_int32);
@@ -2703,157 +2939,263 @@
 
 
 void MathPowStub::Generate(MacroAssembler* masm) {
-  // Registers are used as follows:
-  // edx = base
-  // eax = exponent
-  // ecx = temporary, result
-
   CpuFeatures::Scope use_sse2(SSE2);
-  Label allocate_return, call_runtime;
-
-  // Load input parameters.
-  __ mov(edx, Operand(esp, 2 * kPointerSize));
-  __ mov(eax, Operand(esp, 1 * kPointerSize));
-
-  // Save 1 in xmm3 - we need this several times later on.
-  __ mov(ecx, Immediate(1));
-  __ cvtsi2sd(xmm3, Operand(ecx));
-
-  Label exponent_nonsmi;
-  Label base_nonsmi;
-  // If the exponent is a heap number go to that specific case.
-  __ JumpIfNotSmi(eax, &exponent_nonsmi);
-  __ JumpIfNotSmi(edx, &base_nonsmi);
-
-  // Optimized version when both exponent and base are smis.
-  Label powi;
-  __ SmiUntag(edx);
-  __ cvtsi2sd(xmm0, Operand(edx));
-  __ jmp(&powi);
-  // exponent is smi and base is a heapnumber.
-  __ bind(&base_nonsmi);
   Factory* factory = masm->isolate()->factory();
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         factory->heap_number_map());
-  __ j(not_equal, &call_runtime);
+  const Register exponent = eax;
+  const Register base = edx;
+  const Register scratch = ecx;
+  const XMMRegister double_result = xmm3;
+  const XMMRegister double_base = xmm2;
+  const XMMRegister double_exponent = xmm1;
+  const XMMRegister double_scratch = xmm4;
 
-  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+  Label call_runtime, done, exponent_not_smi, int_exponent;
 
-  // Optimized version of pow if exponent is a smi.
-  // xmm0 contains the base.
-  __ bind(&powi);
-  __ SmiUntag(eax);
+  // Save 1 in double_result - we need this several times later on.
+  __ mov(scratch, Immediate(1));
+  __ cvtsi2sd(double_result, scratch);
 
-  // Save exponent in base as we need to check if exponent is negative later.
-  // We know that base and exponent are in different registers.
-  __ mov(edx, eax);
+  if (exponent_type_ == ON_STACK) {
+    Label base_is_smi, unpack_exponent;
+    // The exponent and base are supplied as arguments on the stack.
+    // This can only happen if the stub is called from non-optimized code.
+    // Load input parameters from stack.
+    __ mov(base, Operand(esp, 2 * kPointerSize));
+    __ mov(exponent, Operand(esp, 1 * kPointerSize));
+
+    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
+    __ cmp(FieldOperand(base, HeapObject::kMapOffset),
+           factory->heap_number_map());
+    __ j(not_equal, &call_runtime);
+
+    __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+    __ jmp(&unpack_exponent, Label::kNear);
+
+    __ bind(&base_is_smi);
+    __ SmiUntag(base);
+    __ cvtsi2sd(double_base, base);
+
+    __ bind(&unpack_exponent);
+    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+    __ SmiUntag(exponent);
+    __ jmp(&int_exponent);
+
+    __ bind(&exponent_not_smi);
+    __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
+           factory->heap_number_map());
+    __ j(not_equal, &call_runtime);
+    __ movdbl(double_exponent,
+              FieldOperand(exponent, HeapNumber::kValueOffset));
+  } else if (exponent_type_ == TAGGED) {
+    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+    __ SmiUntag(exponent);
+    __ jmp(&int_exponent);
+
+    __ bind(&exponent_not_smi);
+    __ movdbl(double_exponent,
+              FieldOperand(exponent, HeapNumber::kValueOffset));
+  }
+
+  if (exponent_type_ != INTEGER) {
+    Label fast_power;
+    // Detect integer exponents stored as double.
+    __ cvttsd2si(exponent, Operand(double_exponent));
+    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
+    __ cmp(exponent, Immediate(0x80000000u));
+    __ j(equal, &call_runtime);
+    __ cvtsi2sd(double_scratch, exponent);
+    // Already ruled out NaNs for exponent.
+    __ ucomisd(double_exponent, double_scratch);
+    __ j(equal, &int_exponent);
+
+    if (exponent_type_ == ON_STACK) {
+      // Detect square root case.  Crankshaft detects constant +/-0.5 at
+      // compile time and uses DoMathPowHalf instead.  We then skip this check
+      // for non-constant cases of +/-0.5, as these rarely occur.

+      Label continue_sqrt, continue_rsqrt, not_plus_half;
+      // Test for 0.5.
+      // Load double_scratch with 0.5.
+      __ mov(scratch, Immediate(0x3F000000u));
+      __ movd(double_scratch, scratch);
+      __ cvtss2sd(double_scratch, double_scratch);
+      // Already ruled out NaNs for exponent.
+      __ ucomisd(double_scratch, double_exponent);
+      __ j(not_equal, &not_plus_half, Label::kNear);
+
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+      // According to IEEE-754, single-precision -Infinity has the highest
+      // 9 bits set and the lowest 23 bits cleared.
+      __ mov(scratch, 0xFF800000u);
+      __ movd(double_scratch, scratch);
+      __ cvtss2sd(double_scratch, double_scratch);
+      __ ucomisd(double_base, double_scratch);
+      // Comparing -Infinity with NaN results in "unordered", which sets the
+      // zero flag as if both were equal.  However, it also sets the carry flag.
+      __ j(not_equal, &continue_sqrt, Label::kNear);
+      __ j(carry, &continue_sqrt, Label::kNear);
+
+      // Set result to Infinity in the special case.
+      __ xorps(double_result, double_result);
+      __ subsd(double_result, double_scratch);
+      __ jmp(&done);
+
+      __ bind(&continue_sqrt);
+      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+      __ xorps(double_scratch, double_scratch);
+      __ addsd(double_scratch, double_base);  // Convert -0 to +0.
+      __ sqrtsd(double_result, double_scratch);
+      __ jmp(&done);
+
+      // Test for -0.5.
+      __ bind(&not_plus_half);
+      // Load double_scratch with -0.5 by subtracting 1 (double_result is 1).
+      __ subsd(double_scratch, double_result);
+      // Already ruled out NaNs for exponent.
+      __ ucomisd(double_scratch, double_exponent);
+      __ j(not_equal, &fast_power, Label::kNear);
+
+      // Calculates reciprocal of square root of base.  Check for the special
+      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+      // According to IEEE-754, single-precision -Infinity has the highest
+      // 9 bits set and the lowest 23 bits cleared.
+      __ mov(scratch, 0xFF800000u);
+      __ movd(double_scratch, scratch);
+      __ cvtss2sd(double_scratch, double_scratch);
+      __ ucomisd(double_base, double_scratch);
+      // Comparing -Infinity with NaN results in "unordered", which sets the
+      // zero flag as if both were equal.  However, it also sets the carry flag.
+      __ j(not_equal, &continue_rsqrt, Label::kNear);
+      __ j(carry, &continue_rsqrt, Label::kNear);
+
+      // Set result to 0 in the special case.
+      __ xorps(double_result, double_result);
+      __ jmp(&done);
+
+      __ bind(&continue_rsqrt);
+      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+      __ xorps(double_exponent, double_exponent);
+      __ addsd(double_exponent, double_base);  // Convert -0 to +0.
+      __ sqrtsd(double_exponent, double_exponent);
+      __ divsd(double_result, double_exponent);
+      __ jmp(&done);
+    }
+
+    // Using FPU instructions to calculate power.
+    Label fast_power_failed;
+    __ bind(&fast_power);
+    __ fnclex();  // Clear flags to catch exceptions later.
+    // Transfer (B)ase and (E)xponent onto the FPU register stack.
+    __ sub(esp, Immediate(kDoubleSize));
+    __ movdbl(Operand(esp, 0), double_exponent);
+    __ fld_d(Operand(esp, 0));  // E
+    __ movdbl(Operand(esp, 0), double_base);
+    __ fld_d(Operand(esp, 0));  // B, E
+
+    // Exponent is in st(1) and base is in st(0).
+    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
+    // FYL2X calculates st(1) * log2(st(0))
+    __ fyl2x();    // X
+    __ fld(0);     // X, X
+    __ frndint();  // rnd(X), X
+    __ fsub(1);    // rnd(X), X-rnd(X)
+    __ fxch(1);    // X - rnd(X), rnd(X)
+    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
+    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
+    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
+    __ faddp(1);   // 1, 2^(X-rnd(X)), rnd(X)
+    // FSCALE calculates st(0) * 2^st(1)
+    __ fscale();   // 2^X, rnd(X)
+    __ fstp(1);
+    // Bail out to runtime in case of exceptions in the status word.
+    __ fnstsw_ax();
+    __ test_b(eax, 0x5F);  // We check for all but the precision exception.
+    __ j(not_zero, &fast_power_failed, Label::kNear);
+    __ fstp_d(Operand(esp, 0));
+    __ movdbl(double_result, Operand(esp, 0));
+    __ add(esp, Immediate(kDoubleSize));
+    __ jmp(&done);
+
+    __ bind(&fast_power_failed);
+    __ fninit();
+    __ add(esp, Immediate(kDoubleSize));
+    __ jmp(&call_runtime);
+  }
+
+  // Calculate power with integer exponent.
+  __ bind(&int_exponent);
+  const XMMRegister double_scratch2 = double_exponent;
+  __ mov(scratch, exponent);  // Back up exponent.
+  __ movsd(double_scratch, double_base);  // Back up base.
+  __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.
 
   // Get absolute value of exponent.
-  Label no_neg;
-  __ cmp(eax, 0);
-  __ j(greater_equal, &no_neg, Label::kNear);
-  __ neg(eax);
+  Label no_neg, while_true, no_multiply;
+  __ test(scratch, scratch);
+  __ j(positive, &no_neg, Label::kNear);
+  __ neg(scratch);
   __ bind(&no_neg);
 
-  // Load xmm1 with 1.
-  __ movsd(xmm1, xmm3);
-  Label while_true;
-  Label no_multiply;
-
   __ bind(&while_true);
-  __ shr(eax, 1);
+  __ shr(scratch, 1);
   __ j(not_carry, &no_multiply, Label::kNear);
-  __ mulsd(xmm1, xmm0);
+  __ mulsd(double_result, double_scratch);
   __ bind(&no_multiply);
-  __ mulsd(xmm0, xmm0);
+
+  __ mulsd(double_scratch, double_scratch);
   __ j(not_zero, &while_true);
 
-  // base has the original value of the exponent - if the exponent  is
-  // negative return 1/result.
-  __ test(edx, Operand(edx));
-  __ j(positive, &allocate_return);
-  // Special case if xmm1 has reached infinity.
-  __ mov(ecx, Immediate(0x7FB00000));
-  __ movd(xmm0, Operand(ecx));
-  __ cvtss2sd(xmm0, xmm0);
-  __ ucomisd(xmm0, xmm1);
-  __ j(equal, &call_runtime);
-  __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ jmp(&allocate_return);
+  // scratch has the original value of the exponent - if the exponent is
+  // negative, return 1/result.
+  __ test(exponent, exponent);
+  __ j(positive, &done);
+  __ divsd(double_scratch2, double_result);
+  __ movsd(double_result, double_scratch2);
+  // If the result is zero, bail out so the runtime can check for a subnormal
+  // result.  Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+  __ xorps(double_scratch2, double_scratch2);
+  __ ucomisd(double_scratch2, double_result);  // Result cannot be NaN.
+  // double_exponent, aliased as double_scratch2, has already been overwritten
+  // and, when the exponent is a smi, may never have held the exponent value
+  // at all.  We reset it with the exponent value before bailing out.
+  __ j(not_equal, &done);
+  __ cvtsi2sd(double_exponent, exponent);
 
-  // exponent (or both) is a heapnumber - no matter what we should now work
-  // on doubles.
-  __ bind(&exponent_nonsmi);
-  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-         factory->heap_number_map());
-  __ j(not_equal, &call_runtime);
-  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
-  // Test if exponent is nan.
-  __ ucomisd(xmm1, xmm1);
-  __ j(parity_even, &call_runtime);
+  // Returning or bailing out.
+  Counters* counters = masm->isolate()->counters();
+  if (exponent_type_ == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
 
-  Label base_not_smi;
-  Label handle_special_cases;
-  __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
-  __ SmiUntag(edx);
-  __ cvtsi2sd(xmm0, Operand(edx));
-  __ jmp(&handle_special_cases, Label::kNear);
+    // The stub is called from non-optimized code, which expects the result
+    // as a heap number in the exponent register.
+    __ bind(&done);
+    __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
+    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+    __ IncrementCounter(counters->math_pow(), 1);
+    __ ret(2 * kPointerSize);
+  } else {
+    __ bind(&call_runtime);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(4, scratch);
+      __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
+      __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()), 4);
+    }
+    // Return value is in st(0) on ia32.
+    // Store it into the (fixed) result register.
+    __ sub(esp, Immediate(kDoubleSize));
+    __ fstp_d(Operand(esp, 0));
+    __ movdbl(double_result, Operand(esp, 0));
+    __ add(esp, Immediate(kDoubleSize));
 
-  __ bind(&base_not_smi);
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         factory->heap_number_map());
-  __ j(not_equal, &call_runtime);
-  __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
-  __ and_(ecx, HeapNumber::kExponentMask);
-  __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
-  // base is NaN or +/-Infinity
-  __ j(greater_equal, &call_runtime);
-  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
-  // base is in xmm0 and exponent is in xmm1.
-  __ bind(&handle_special_cases);
-  Label not_minus_half;
-  // Test for -0.5.
-  // Load xmm2 with -0.5.
-  __ mov(ecx, Immediate(0xBF000000));
-  __ movd(xmm2, Operand(ecx));
-  __ cvtss2sd(xmm2, xmm2);
-  // xmm2 now has -0.5.
-  __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &not_minus_half, Label::kNear);
-
-  // Calculates reciprocal of square root.
-  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorps(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
-  __ sqrtsd(xmm1, xmm1);
-  __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ jmp(&allocate_return);
-
-  // Test for 0.5.
-  __ bind(&not_minus_half);
-  // Load xmm2 with 0.5.
-  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
-  __ addsd(xmm2, xmm3);
-  // xmm2 now has 0.5.
-  __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &call_runtime);
-  // Calculates square root.
-  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorps(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
-  __ sqrtsd(xmm1, xmm1);
-
-  __ bind(&allocate_return);
-  __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
-  __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
-  __ mov(eax, ecx);
-  __ ret(2 * kPointerSize);
-
-  __ bind(&call_runtime);
-  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+    __ bind(&done);
+    __ IncrementCounter(counters->math_pow(), 1);
+    __ ret(0);
+  }
 }
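Two ideas carry the rewritten MathPowStub. For non-integer exponents it leans on the x87 identity B^E = 2^(E * log2(B)): fyl2x computes X = E * log2(B); because f2xm1 only accepts |x| < 1, X is split into rnd(X) + (X - rnd(X)), the fractional part goes through f2xm1, and fscale multiplies by 2^rnd(X). For integer exponents it uses square-and-multiply. A hedged C++ sketch of the integer path (names are illustrative; the stub's extra subnormal bail-out for negative exponents is omitted here):

#include <cstdint>

// Square-and-multiply, mirroring the stub's while_true loop.
double PowIntExponent(double base, int32_t exponent) {
  int64_t e = exponent;          // Widen so negating INT32_MIN is safe.
  bool negative = e < 0;
  if (negative) e = -e;          // neg(scratch): work on |exponent|.
  double result = 1.0;           // double_result starts at 1.
  while (e != 0) {
    if (e & 1) result *= base;   // Carry from shr set -> multiply in.
    base *= base;                // Square the base every iteration.
    e >>= 1;                     // shr(scratch, 1).
  }
  return negative ? 1.0 / result : result;
}

Each loop iteration consumes one exponent bit, so the cost is O(log |exponent|) multiplications rather than O(|exponent|).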
 
 
@@ -2873,13 +3215,13 @@
   Label adaptor;
   __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor, Label::kNear);
 
   // Check index against formal parameters count limit passed in
   // through register eax. Use unsigned comparison to get negative
   // check for free.
-  __ cmp(edx, Operand(eax));
+  __ cmp(edx, eax);
   __ j(above_equal, &slow, Label::kNear);
 
   // Read the argument from the stack and return it.
@@ -2895,7 +3237,7 @@
   // comparison to get negative check for free.
   __ bind(&adaptor);
   __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ cmp(edx, Operand(ecx));
+  __ cmp(edx, ecx);
   __ j(above_equal, &slow, Label::kNear);
 
   // Read the argument from the stack and return it.
@@ -2926,7 +3268,7 @@
   Label runtime;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &runtime, Label::kNear);
 
   // Patch the arguments.length and the parameters pointer.
@@ -2957,7 +3299,7 @@
   Label adaptor_frame, try_allocate;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
   // No adaptor, parameter count = argument count.
@@ -2976,7 +3318,7 @@
   // esp[4] = parameter count (tagged)
   // esp[8] = address of receiver argument
   // Compute the mapped parameter count = min(ebx, ecx) in ebx.
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(less_equal, &try_allocate, Label::kNear);
   __ mov(ebx, ecx);
 
@@ -2990,7 +3332,7 @@
   const int kParameterMapHeaderSize =
       FixedArray::kHeaderSize + 2 * kPointerSize;
   Label no_parameter_map;
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(zero, &no_parameter_map, Label::kNear);
   __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
   __ bind(&no_parameter_map);
@@ -2999,7 +3341,7 @@
   __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
 
   // 3. Arguments object.
-  __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
+  __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
 
   // Do the allocation of all three objects in one go.
   __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
@@ -3014,7 +3356,7 @@
   __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
   __ mov(ebx, Operand(esp, 0 * kPointerSize));
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(not_zero, &has_mapped_parameters, Label::kNear);
   __ mov(edi, Operand(edi,
          Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
@@ -3038,7 +3380,7 @@
     __ mov(FieldOperand(eax, i), edx);
   }
 
-  // Setup the callee in-object property.
+  // Set up the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ mov(edx, Operand(esp, 4 * kPointerSize));
   __ mov(FieldOperand(eax, JSObject::kHeaderSize +
@@ -3051,7 +3393,7 @@
                       Heap::kArgumentsLengthIndex * kPointerSize),
          ecx);
 
-  // Setup the elements pointer in the allocated arguments object.
+  // Set up the elements pointer in the allocated arguments object.
   // If we allocated a parameter map, edi will point there, otherwise to the
   // backing store.
   __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
@@ -3069,7 +3411,7 @@
 
   // Initialize parameter map. If there are no mapped arguments, we're done.
   Label skip_parameter_map;
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(zero, &skip_parameter_map);
 
   __ mov(FieldOperand(edi, FixedArray::kMapOffset),
@@ -3093,7 +3435,7 @@
   __ mov(eax, Operand(esp, 2 * kPointerSize));
   __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
   __ add(ebx, Operand(esp, 4 * kPointerSize));
-  __ sub(ebx, Operand(eax));
+  __ sub(ebx, eax);
   __ mov(ecx, FACTORY->the_hole_value());
   __ mov(edx, edi);
   __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
@@ -3110,12 +3452,12 @@
   __ jmp(&parameters_test, Label::kNear);
 
   __ bind(&parameters_loop);
-  __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+  __ sub(eax, Immediate(Smi::FromInt(1)));
   __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
   __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
-  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+  __ add(ebx, Immediate(Smi::FromInt(1)));
   __ bind(&parameters_test);
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(not_zero, &parameters_loop, Label::kNear);
   __ pop(ecx);
 
@@ -3135,18 +3477,18 @@
   Label arguments_loop, arguments_test;
   __ mov(ebx, Operand(esp, 1 * kPointerSize));
   __ mov(edx, Operand(esp, 4 * kPointerSize));
-  __ sub(Operand(edx), ebx);  // Is there a smarter way to do negative scaling?
-  __ sub(Operand(edx), ebx);
+  __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
+  __ sub(edx, ebx);
   __ jmp(&arguments_test, Label::kNear);
 
   __ bind(&arguments_loop);
-  __ sub(Operand(edx), Immediate(kPointerSize));
+  __ sub(edx, Immediate(kPointerSize));
   __ mov(eax, Operand(edx, 0));
   __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
-  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+  __ add(ebx, Immediate(Smi::FromInt(1)));
 
   __ bind(&arguments_test);
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(less, &arguments_loop, Label::kNear);
 
   // Restore.
@@ -3174,7 +3516,7 @@
   Label adaptor_frame, try_allocate, runtime;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
   // Get the length from the frame.
@@ -3193,11 +3535,11 @@
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(zero, &add_arguments_object, Label::kNear);
   __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
+  __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
 
   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -3224,13 +3566,13 @@
 
   // If there are no actual arguments, we're done.
   Label done;
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(zero, &done, Label::kNear);
 
   // Get the parameters pointer from the stack.
   __ mov(edx, Operand(esp, 2 * kPointerSize));
 
-  // Setup the elements pointer in the allocated arguments object and
+  // Set up the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
   __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
   __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
@@ -3246,8 +3588,8 @@
   __ bind(&loop);
   __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
   __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
-  __ add(Operand(edi), Immediate(kPointerSize));
-  __ sub(Operand(edx), Immediate(kPointerSize));
+  __ add(edi, Immediate(kPointerSize));
+  __ sub(edx, Immediate(kPointerSize));
   __ dec(ecx);
   __ j(not_zero, &loop);
 
@@ -3268,10 +3610,6 @@
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
-  if (!FLAG_regexp_entry_native) {
-    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-    return;
-  }
 
   // Stack frame on entry.
   //  esp[0]: return address
@@ -3294,7 +3632,7 @@
   ExternalReference address_of_regexp_stack_memory_size =
       ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
   __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(zero, &runtime);
 
   // Check that the first argument is a JSRegExp object.
@@ -3315,7 +3653,7 @@
   // ecx: RegExp data (FixedArray)
   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
   __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
-  __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+  __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
   __ j(not_equal, &runtime);
 
   // ecx: RegExp data (FixedArray)
@@ -3325,7 +3663,7 @@
   // uses the assumption that smis are 2 * their untagged value.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ add(Operand(edx), Immediate(2));  // edx was a smi.
+  __ add(edx, Immediate(2));  // edx was a smi.
   // Check that the static offsets vector buffer is large enough.
   __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
   __ j(above, &runtime);
@@ -3347,7 +3685,7 @@
   // string length. A negative value will be greater (unsigned comparison).
   __ mov(eax, Operand(esp, kPreviousIndexOffset));
   __ JumpIfNotSmi(eax, &runtime);
-  __ cmp(eax, Operand(ebx));
+  __ cmp(eax, ebx);
   __ j(above_equal, &runtime);
 
   // ecx: RegExp data (FixedArray)
@@ -3367,8 +3705,8 @@
   // additional information.
   __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
   __ SmiUntag(eax);
-  __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
-  __ cmp(edx, Operand(eax));
+  __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
+  __ cmp(edx, eax);
   __ j(greater, &runtime);
 
   // Reset offset for possibly sliced string.
@@ -3380,27 +3718,40 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
   // First check for flat two byte string.
-  __ and_(ebx,
-          kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+  __ and_(ebx, kIsNotStringMask |
+               kStringRepresentationMask |
+               kStringEncodingMask |
+               kShortExternalStringMask);
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be a flat ascii string.
-  __ and_(Operand(ebx),
-          Immediate(kIsNotStringMask | kStringRepresentationMask));
+  // Any other flat string must be a flat ASCII string.  None of the following
+  // string type tests will succeed if the subject is not a string or a short
+  // external string.
+  __ and_(ebx, Immediate(kIsNotStringMask |
+                         kStringRepresentationMask |
+                         kShortExternalStringMask));
   __ j(zero, &seq_ascii_string, Label::kNear);
 
+  // ebx: whether the subject is a string and, if so, its string representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string its offset has to be taken into account.
-  Label cons_string, check_encoding;
+  Label cons_string, external_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ cmp(Operand(ebx), Immediate(kExternalStringTag));
+  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+  __ cmp(ebx, Immediate(kExternalStringTag));
   __ j(less, &cons_string);
-  __ j(equal, &runtime);
+  __ j(equal, &external_string);
+
+  // Catch non-string subject or short external string.
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+  __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
+  __ j(not_zero, &runtime);
 
   // String is sliced.
   __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
@@ -3422,16 +3773,16 @@
             kStringRepresentationMask | kStringEncodingMask);
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be ascii.
+  // Any other flat string must be sequential ASCII or external.
   __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
             kStringRepresentationMask);
-  __ j(not_zero, &runtime);
+  __ j(not_zero, &external_string);
 
   __ bind(&seq_ascii_string);
-  // eax: subject string (flat ascii)
+  // eax: subject string (flat ASCII)
   // ecx: RegExp data (FixedArray)
   __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
-  __ Set(ecx, Immediate(1));  // Type is ascii.
+  __ Set(ecx, Immediate(1));  // Type is ASCII.
   __ jmp(&check_code, Label::kNear);
 
   __ bind(&seq_two_byte_string);
@@ -3448,7 +3799,7 @@
 
   // eax: subject string
   // edx: code
-  // ecx: encoding of subject string (1 if ascii, 0 if two_byte);
+  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
   __ mov(ebx, Operand(esp, kPreviousIndexOffset));
@@ -3457,7 +3808,7 @@
   // eax: subject string
   // ebx: previous index
   // edx: code
-  // ecx: encoding of subject string (1 if ascii 0 if two_byte);
+  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
   // All checks done. Now push arguments for native regexp code.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -3497,21 +3848,21 @@
   // esi: original subject string
   // eax: underlying subject string
   // ebx: previous index
-  // ecx: encoding of subject string (1 if ascii 0 if two_byte);
+  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
   // edx: code
   // Argument 4: End of string data
   // Argument 3: Start of string data
   // Prepare start and end index of the input.
   // Load the length from the original sliced string if the subject is sliced.
   __ mov(esi, FieldOperand(esi, String::kLengthOffset));
-  __ add(esi, Operand(edi));  // Calculate input end wrt offset.
+  __ add(esi, edi);  // Calculate input end wrt offset.
   __ SmiUntag(edi);
-  __ add(ebx, Operand(edi));  // Calculate input start wrt offset.
+  __ add(ebx, edi);  // Calculate input start wrt offset.
 
   // ebx: start index of the input string
   // esi: end index of the input string
   Label setup_two_byte, setup_rest;
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(zero, &setup_two_byte, Label::kNear);
   __ SmiUntag(esi);
   __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
@@ -3531,8 +3882,8 @@
   __ bind(&setup_rest);
 
   // Locate the code entry and call it.
-  __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
-  __ call(Operand(edx));
+  __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ call(edx);
 
   // Drop arguments and come back to JS mode.
   __ LeaveApiExitFrame();
@@ -3553,11 +3904,9 @@
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       masm->isolate());
-  __ mov(edx,
-         Operand::StaticVariable(ExternalReference::the_hole_value_location(
-             masm->isolate())));
+  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
   __ mov(eax, Operand::StaticVariable(pending_exception));
-  __ cmp(edx, Operand(eax));
+  __ cmp(edx, eax);
   __ j(equal, &runtime);
   // For exception, throw the exception again.
 
@@ -3574,11 +3923,11 @@
   __ Throw(eax);
 
   __ bind(&throw_termination_exception);
-  __ ThrowUncatchable(TERMINATION, eax);
+  __ ThrowUncatchable(eax);
 
   __ bind(&failure);
   // For failure to match, return null.
-  __ mov(Operand(eax), factory->null_value());
+  __ mov(eax, factory->null_value());
   __ ret(4 * kPointerSize);
 
   // Load RegExp data.
@@ -3589,7 +3938,7 @@
   // Calculate number of capture registers (number_of_captures + 1) * 2.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ add(Operand(edx), Immediate(2));  // edx was a smi.
+  __ add(edx, Immediate(2));  // edx was a smi.
 
   // edx: Number of capture registers
   // Load last_match_info which is still known to be a fast case JSArray.
@@ -3605,12 +3954,18 @@
   // Store last subject and last input.
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
-  __ mov(ecx, ebx);
-  __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
+  __ RecordWriteField(ebx,
+                      RegExpImpl::kLastSubjectOffset,
+                      eax,
+                      edi,
+                      kDontSaveFPRegs);
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
-  __ mov(ecx, ebx);
-  __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
+  __ RecordWriteField(ebx,
+                      RegExpImpl::kLastInputOffset,
+                      eax,
+                      edi,
+                      kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -3624,7 +3979,7 @@
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
   __ bind(&next_capture);
-  __ sub(Operand(edx), Immediate(1));
+  __ sub(edx, Immediate(1));
   __ j(negative, &done, Label::kNear);
   // Read the value from the static offsets vector buffer.
   __ mov(edi, Operand(ecx, edx, times_int_size, 0));
@@ -3642,6 +3997,27 @@
   __ mov(eax, Operand(esp, kLastMatchInfoOffset));
   __ ret(4 * kPointerSize);
 
+  // External string.  Short external strings have already been ruled out.
+  // eax: subject string (expected to be external)
+  // ebx: scratch
+  __ bind(&external_string);
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ test_b(ebx, kIsIndirectStringMask);
+    __ Assert(zero, "external string expected, but not found");
+  }
+  __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ test_b(ebx, kStringEncodingMask);
+  __ j(not_zero, &seq_ascii_string);
+  __ jmp(&seq_two_byte_string);
+
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
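The new external-string path above rests on a pointer trick: after loading the resource data, the stub subtracts SeqTwoByteString::kHeaderSize - kHeapObjectTag so that the sequential-string addressing used later (base + header size - tag + index) lands on the external characters. A sketch under those assumptions (hypothetical helper, not V8 API; the offsets are parameters rather than real constants):

// Bias an external string's character pointer so that code written for
// sequential strings reads the right characters.
const char* BiasExternalChars(const char* resource_data,
                              int seq_header_size,
                              int heap_object_tag) {
  return resource_data - (seq_header_size - heap_object_tag);
}

With the bias applied, biased[seq_header_size - heap_object_tag + i] is character i, exactly as it would be for a sequential string, so the seq_ascii_string and seq_two_byte_string code can be reused unchanged.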
@@ -3655,7 +4031,7 @@
   Label done;
   __ mov(ebx, Operand(esp, kPointerSize * 3));
   __ JumpIfNotSmi(ebx, &slowcase);
-  __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
+  __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
   __ j(above, &slowcase);
   // Smi-tagging is equivalent to multiplying by 2.
   STATIC_ASSERT(kSmiTag == 0);
@@ -3715,10 +4091,10 @@
   // ebx: Start of elements in FixedArray.
   // edx: the hole.
   Label loop;
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ bind(&loop);
   __ j(less_equal, &done, Label::kNear);  // Jump if ecx is negative or zero.
-  __ sub(Operand(ecx), Immediate(1));
+  __ sub(ecx, Immediate(1));
   __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
   __ jmp(&loop);
 
@@ -3743,16 +4119,16 @@
   Register scratch = scratch2;
 
   // Load the number string cache.
-  ExternalReference roots_address =
-      ExternalReference::roots_address(masm->isolate());
+  ExternalReference roots_array_start =
+      ExternalReference::roots_array_start(masm->isolate());
   __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
   __ mov(number_string_cache,
-         Operand::StaticArray(scratch, times_pointer_size, roots_address));
+         Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
   // Make the hash mask from the length of the number string cache. It
   // contains two elements (number and string) for each cache entry.
   __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
   __ shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
-  __ sub(Operand(mask), Immediate(1));  // Make mask.
+  __ sub(mask, Immediate(1));  // Make mask.
 
   // Calculate the entry in the number string cache. The hash value in the
   // number string cache for smis is just the smi value, and the hash for
@@ -3778,7 +4154,7 @@
     __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
     __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
     // Object is heap number and hash is now in scratch. Calculate cache index.
-    __ and_(scratch, Operand(mask));
+    __ and_(scratch, mask);
     Register index = scratch;
     Register probe = mask;
     __ mov(probe,
@@ -3804,7 +4180,7 @@
 
   __ bind(&smi_hash_calculated);
   // Object is smi and hash is now in scratch. Calculate cache index.
-  __ and_(scratch, Operand(mask));
+  __ and_(scratch, mask);
   Register index = scratch;
   // Check if the entry is the smi we are looking for.
   __ cmp(object,
@@ -3856,10 +4232,10 @@
   // Compare two smis if required.
   if (include_smi_compare_) {
     Label non_smi, smi_done;
-    __ mov(ecx, Operand(edx));
-    __ or_(ecx, Operand(eax));
+    __ mov(ecx, edx);
+    __ or_(ecx, eax);
     __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
-    __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
+    __ sub(edx, eax);  // Return on the result of the subtraction.
     __ j(no_overflow, &smi_done, Label::kNear);
     __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
     __ bind(&smi_done);
@@ -3867,8 +4243,8 @@
     __ ret(0);
     __ bind(&non_smi);
   } else if (FLAG_debug_code) {
-    __ mov(ecx, Operand(edx));
-    __ or_(ecx, Operand(eax));
+    __ mov(ecx, edx);
+    __ or_(ecx, eax);
     __ test(ecx, Immediate(kSmiTagMask));
     __ Assert(not_zero, "Unexpected smi operands.");
   }
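The smi fast path in the hunk above compares by subtracting: edx - eax already has the correct sign unless the subtraction overflows, and in that case a single NOT repairs the sign. Because smis have a zero low tag bit, the overflowed 32-bit value can never be -1, so NOT never yields 0. A C++ sketch of the same idea (hypothetical helper, not V8 code; two's-complement narrowing is assumed):

#include <cstdint>

// Returns a value whose sign orders two tagged smis, mirroring
// sub + j(no_overflow) + not_ above.
int32_t SmiCompare(int32_t lhs, int32_t rhs) {  // Both have tag bit 0.
  int64_t wide = static_cast<int64_t>(lhs) - rhs;
  int32_t narrow = static_cast<int32_t>(wide);   // Wraps like the CPU sub.
  if (wide != narrow) narrow = ~narrow;          // Overflow: flip the sign.
  return narrow;                                 // <0, ==0, >0 comparator.
}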
@@ -3880,7 +4256,7 @@
   // for NaN and undefined.
   {
     Label not_identical;
-    __ cmp(eax, Operand(edx));
+    __ cmp(eax, edx);
     __ j(not_equal, &not_identical);
 
     if (cc_ != equal) {
@@ -3929,7 +4305,7 @@
       __ Set(eax, Immediate(0));
       // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
       // bits.
-      __ add(edx, Operand(edx));
+      __ add(edx, edx);
       __ cmp(edx, kQuietNaNHighBitsMask << 1);
       if (cc_ == equal) {
         STATIC_ASSERT(EQUAL != 1);
@@ -3963,19 +4339,19 @@
     STATIC_ASSERT(kSmiTag == 0);
     ASSERT_EQ(0, Smi::FromInt(0));
     __ mov(ecx, Immediate(kSmiTagMask));
-    __ and_(ecx, Operand(eax));
-    __ test(ecx, Operand(edx));
+    __ and_(ecx, eax);
+    __ test(ecx, edx);
     __ j(not_zero, &not_smis, Label::kNear);
     // One operand is a smi.
 
     // Check whether the non-smi is a heap number.
     STATIC_ASSERT(kSmiTagMask == 1);
     // ecx still holds eax & kSmiTagMask, which is either zero or one.
-    __ sub(Operand(ecx), Immediate(0x01));
+    __ sub(ecx, Immediate(0x01));
     __ mov(ebx, edx);
-    __ xor_(ebx, Operand(eax));
-    __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
-    __ xor_(ebx, Operand(eax));
+    __ xor_(ebx, eax);
+    __ and_(ebx, ecx);  // ebx holds either 0 or eax ^ edx.
+    __ xor_(ebx, eax);
     // if eax was smi, ebx is now edx, else eax.
 
     // Check if the non-smi operand is a heap number.
@@ -4037,9 +4413,9 @@
       // Return a result of -1, 0, or 1, based on EFLAGS.
       __ mov(eax, 0);  // equal
       __ mov(ecx, Immediate(Smi::FromInt(1)));
-      __ cmov(above, eax, Operand(ecx));
+      __ cmov(above, eax, ecx);
       __ mov(ecx, Immediate(Smi::FromInt(-1)));
-      __ cmov(below, eax, Operand(ecx));
+      __ cmov(below, eax, ecx);
       __ ret(0);
     } else {
       FloatingPointHelper::CheckFloatOperands(
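The mask/xor sequence above is a branchless select: ecx = (eax & kSmiTagMask) - 1 is all ones exactly when eax is a smi (tag bit 0), and ((edx ^ eax) & ecx) ^ eax then yields edx in that case and eax otherwise, i.e. whichever operand is the heap number. An equivalent C++ sketch (illustrative only):

#include <cstdint>

// Branchless "pick the non-smi operand": returns b when a has a clear
// smi tag bit, else a. Mirrors the sub/xor/and/xor sequence above.
uint32_t SelectIfSmi(uint32_t a, uint32_t b) {
  uint32_t mask = (a & 1u) - 1u;  // All ones iff a is a smi (low bit 0).
  return ((b ^ a) & mask) ^ a;    // mask all-ones -> b; mask zero -> a.
}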
@@ -4100,7 +4476,7 @@
   __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
                                          &check_unequal_objects);
 
-  // Inline comparison of ascii strings.
+  // Inline comparison of ASCII strings.
   if (cc_ == equal) {
     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      edx,
@@ -4198,43 +4574,89 @@
 }
 
 
+void InterruptStub::Generate(MacroAssembler* masm) {
+  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+  // Cache the called function in a global property cell.  Cache states
+  // are uninitialized, monomorphic (indicated by a JSFunction), and
+  // megamorphic.
+  // ebx : cache cell for call target
+  // edi : the function to call
+  Isolate* isolate = masm->isolate();
+  Label initialize, done;
+
+  // Load the cache state into ecx.
+  __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+
+  // A monomorphic cache hit or an already megamorphic state: invoke the
+  // function without changing the state.
+  __ cmp(ecx, edi);
+  __ j(equal, &done, Label::kNear);
+  __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+  __ j(equal, &done, Label::kNear);
+
+  // A monomorphic miss (i.e., the cache is initialized but holds a different
+  // function) goes megamorphic.
+  __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
+  __ j(equal, &initialize, Label::kNear);
+  // MegamorphicSentinel is an immortal immovable object (undefined) so no
+  // write barrier is needed.
+  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+         Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+  __ jmp(&done, Label::kNear);
+
+  // An uninitialized cache is patched with the function.
+  __ bind(&initialize);
+  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
+  // No need for a write barrier here - cells are rescanned.
+
+  __ bind(&done);
+}
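GenerateRecordCallTarget implements a three-state lattice per call site: uninitialized -> monomorphic (the cached JSFunction) -> megamorphic, never moving backwards. A hedged C++ sketch of the transitions (the cell type and sentinel objects are stand-ins for the JSGlobalPropertyCell machinery, not the real API):

// Illustrative stand-ins; distinct objects give distinct sentinel addresses.
static const int kUninitializedSentinel = 0;  // Plays "the hole".
static const int kMegamorphicSentinel = 0;    // Plays "undefined".

struct CallCell { const void* value; };

void RecordCallTarget(CallCell* cell, const void* callee) {
  const void* state = cell->value;
  // Monomorphic hit or already megamorphic: leave the cell alone.
  if (state == callee || state == &kMegamorphicSentinel) return;
  if (state == &kUninitializedSentinel) {
    cell->value = callee;                 // First target seen: monomorphic.
  } else {
    cell->value = &kMegamorphicSentinel;  // A second target: give up.
  }
}

Because megamorphic is a sink state and both sentinels are immortal, immovable objects, the stub can skip the write barrier on those stores, as the comments above note.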
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
+  // ebx : cache cell for call target
+  // edi : the function to call
+  Isolate* isolate = masm->isolate();
   Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
   // indicated by passing the hole as the receiver to the call
   // function stub.
   if (ReceiverMightBeImplicit()) {
-    Label call;
+    Label receiver_ok;
     // Get the receiver from the stack.
     // +1 ~ return address
     __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
     // Call as function is indicated with the hole.
-    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
-    __ j(not_equal, &call, Label::kNear);
+    __ cmp(eax, isolate->factory()->the_hole_value());
+    __ j(not_equal, &receiver_ok, Label::kNear);
     // Patch the receiver on the stack with the global receiver object.
-    __ mov(ebx, GlobalObjectOperand());
-    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
-    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
-    __ bind(&call);
+    __ mov(ecx, GlobalObjectOperand());
+    __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
+    __ bind(&receiver_ok);
   }
 
-  // Get the function to call from the stack.
-  // +2 ~ receiver, return address
-  __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
-
   // Check that the function really is a JavaScript function.
   __ JumpIfSmi(edi, &non_function);
   // Goto slow case if we do not have a function.
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
   __ j(not_equal, &slow);
 
+  if (RecordCallTarget()) {
+    GenerateRecordCallTarget(masm);
+  }
+
   // Fast-case: Just invoke the function.
   ParameterCount actual(argc_);
 
   if (ReceiverMightBeImplicit()) {
     Label call_as_function;
-    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+    __ cmp(eax, isolate->factory()->the_hole_value());
     __ j(equal, &call_as_function);
     __ InvokeFunction(edi,
                       actual,
@@ -4251,6 +4673,13 @@
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  if (RecordCallTarget()) {
+    // If there is a call target cache, mark it megamorphic in the
+    // non-function case.  MegamorphicSentinel is an immortal immovable
+    // object (undefined) so no write barrier is needed.
+    __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+           Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+  }
   // Check for function proxy.
   __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
   __ j(not_equal, &non_function);
@@ -4262,8 +4691,7 @@
   __ SetCallKind(ecx, CALL_AS_FUNCTION);
   __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
   {
-    Handle<Code> adaptor =
-      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
     __ jmp(adaptor, RelocInfo::CODE_TARGET);
   }
 
@@ -4275,19 +4703,86 @@
   __ Set(ebx, Immediate(0));
   __ SetCallKind(ecx, CALL_AS_METHOD);
   __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
-  Handle<Code> adaptor =
-      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+  Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
   __ jmp(adaptor, RelocInfo::CODE_TARGET);
 }
 
 
+void CallConstructStub::Generate(MacroAssembler* masm) {
+  // eax : number of arguments
+  // ebx : cache cell for call target
+  // edi : constructor function
+  Label slow, non_function_call;
+
+  // Check that function is not a smi.
+  __ JumpIfSmi(edi, &non_function_call);
+  // Check that function is a JSFunction.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &slow);
+
+  if (RecordCallTarget()) {
+    GenerateRecordCallTarget(masm);
+  }
+
+  // Jump to the function-specific construct stub.
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
+  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ jmp(ebx);
+
+  // edi: called object
+  // eax: number of arguments
+  // ecx: object map
+  Label do_call;
+  __ bind(&slow);
+  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+  __ j(not_equal, &non_function_call);
+  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
+  __ bind(&non_function_call);
+  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
+  // Set expected number of arguments to zero (not changing eax).
+  __ Set(ebx, Immediate(0));
+  Handle<Code> arguments_adaptor =
+      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+  __ SetCallKind(ecx, CALL_AS_METHOD);
+  __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
+}
+
+
 bool CEntryStub::NeedsImmovableCode() {
   return false;
 }
 
 
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  __ Throw(eax);
+bool CEntryStub::IsPregenerated() {
+  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+          result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+  CEntryStub::GenerateAheadOfTime();
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+  // It is important that the store buffer overflow stubs are generated first.
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  Handle<Code> code = save_doubles.GetCode();
+  code->set_is_pregenerated(true);
+  code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+  CEntryStub stub(1, kDontSaveFPRegs);
+  Handle<Code> code = stub.GetCode();
+  code->set_is_pregenerated(true);
 }
 
 
@@ -4332,7 +4827,7 @@
   __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
   __ mov(Operand(esp, 2 * kPointerSize),
          Immediate(ExternalReference::isolate_address()));
-  __ call(Operand(ebx));
+  __ call(ebx);
   // Result is in eax or edx:eax - do not destroy these registers!
 
   if (always_allocate_scope) {
@@ -4364,8 +4859,7 @@
   // should have returned some failure value.
   if (FLAG_debug_code) {
     __ push(edx);
-    __ mov(edx, Operand::StaticVariable(
-        ExternalReference::the_hole_value_location(masm->isolate())));
+    __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
     Label okay;
     __ cmp(edx, Operand::StaticVariable(pending_exception_address));
     // Cannot use check here as it attempts to generate call into runtime.
@@ -4376,7 +4870,7 @@
   }
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame(save_doubles_);
+  __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
   __ ret(0);
 
   // Handling of failure.
@@ -4393,10 +4887,8 @@
   __ j(equal, throw_out_of_memory_exception);
 
   // Retrieve the pending exception and clear the variable.
-  ExternalReference the_hole_location =
-      ExternalReference::the_hole_value_location(masm->isolate());
   __ mov(eax, Operand::StaticVariable(pending_exception_address));
-  __ mov(edx, Operand::StaticVariable(the_hole_location));
+  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
   __ mov(Operand::StaticVariable(pending_exception_address), edx);
 
   // Special handling of termination exceptions which are uncatchable
@@ -4412,12 +4904,6 @@
 }
 
 
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
-                                          UncatchableExceptionType type) {
-  __ ThrowUncatchable(type, eax);
-}
-
-
 void CEntryStub::Generate(MacroAssembler* masm) {
   // eax: number of arguments including receiver
   // ebx: pointer to C function  (C callee-saved)
@@ -4431,7 +4917,7 @@
   // a garbage collection and retrying the builtin (twice).
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(save_doubles_);
+  __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
 
   // eax: result parameter for PerformGC, if any (set up below)
   // ebx: pointer to builtin function  (C callee-saved)
@@ -4471,23 +4957,34 @@
                true);
 
   __ bind(&throw_out_of_memory_exception);
-  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+  // Set external caught exception to false.
+  Isolate* isolate = masm->isolate();
+  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+                                    isolate);
+  __ mov(Operand::StaticVariable(external_caught), Immediate(false));
+
+  // Set pending exception and eax to out of memory exception.
+  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+                                      isolate);
+  __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+  __ mov(Operand::StaticVariable(pending_exception), eax);
+  // Fall through to the next label.
 
   __ bind(&throw_termination_exception);
-  GenerateThrowUncatchable(masm, TERMINATION);
+  __ ThrowUncatchable(eax);
 
   __ bind(&throw_normal_exception);
-  GenerateThrowTOS(masm);
+  __ Throw(eax);
 }
 
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  Label invoke, exit;
+  Label invoke, handler_entry, exit;
   Label not_outermost_js, not_outermost_js_2;
 
-  // Setup frame.
+  // Set up frame.
   __ push(ebp);
-  __ mov(ebp, Operand(esp));
+  __ mov(ebp, esp);
 
   // Push marker in two places.
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
@@ -4515,38 +5012,38 @@
   __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
   __ bind(&cont);
 
-  // Call a faked try-block that does the invoke.
-  __ call(&invoke);
-
-  // Caught exception: Store result (exception) in the pending
-  // exception field in the JSEnv and return a failure sentinel.
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ jmp(&invoke);
+  __ bind(&handler_entry);
+  handler_offset_ = handler_entry.pos();
+  // Caught exception: Store result (exception) in the pending exception
+  // field in the JSEnv and return a failure sentinel.
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       masm->isolate());
   __ mov(Operand::StaticVariable(pending_exception), eax);
   __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
   __ jmp(&exit);
 
-  // Invoke: Link this frame into the handler chain.
+  // Invoke: Link this frame into the handler chain.  There's only one
+  // handler block in this code object, so its index is 0.
   __ bind(&invoke);
-  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
 
   // Clear any pending exceptions.
-  ExternalReference the_hole_location =
-      ExternalReference::the_hole_value_location(masm->isolate());
-  __ mov(edx, Operand::StaticVariable(the_hole_location));
+  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
   __ mov(Operand::StaticVariable(pending_exception), edx);
 
   // Fake a receiver (NULL).
   __ push(Immediate(0));  // receiver
 
-  // Invoke the function by calling through JS entry trampoline
-  // builtin and pop the faked function when we return. Notice that we
-  // cannot store a reference to the trampoline code directly in this
-  // stub, because the builtin stubs may not have been generated yet.
+  // Invoke the function by calling through JS entry trampoline builtin and
+  // pop the faked function when we return. Notice that we cannot store a
+  // reference to the trampoline code directly in this stub, because the
+  // builtin stubs may not have been generated yet.
   if (is_construct) {
-    ExternalReference construct_entry(
-        Builtins::kJSConstructEntryTrampoline,
-        masm->isolate());
+    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+                                      masm->isolate());
     __ mov(edx, Immediate(construct_entry));
   } else {
     ExternalReference entry(Builtins::kJSEntryTrampoline,
@@ -4555,7 +5052,7 @@
   }
   __ mov(edx, Operand(edx, 0));  // deref address
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
-  __ call(Operand(edx));
+  __ call(edx);
 
   // Unlink this frame from the handler chain.
   __ PopTryHandler();
@@ -4563,8 +5060,7 @@
   __ bind(&exit);
   // Check if the current stack frame is marked as the outermost JS frame.
   __ pop(ebx);
-  __ cmp(Operand(ebx),
-         Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   __ j(not_equal, &not_outermost_js_2);
   __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
   __ bind(&not_outermost_js_2);
@@ -4578,7 +5074,7 @@
   __ pop(ebx);
   __ pop(esi);
   __ pop(edi);
-  __ add(Operand(esp), Immediate(2 * kPointerSize));  // remove markers
+  __ add(esp, Immediate(2 * kPointerSize));  // remove markers
 
   // Restore frame pointer and return.
   __ pop(ebp);
@@ -4617,12 +5113,12 @@
   static const int kDeltaToCmpImmediate = 2;
   static const int kDeltaToMov = 8;
   static const int kDeltaToMovImmediate = 9;
-  static const int8_t kCmpEdiImmediateByte1 = BitCast<int8_t, uint8_t>(0x81);
-  static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
+  static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
+  static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
   static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
 
-  ExternalReference roots_address =
-      ExternalReference::roots_address(masm->isolate());
+  ExternalReference roots_array_start =
+      ExternalReference::roots_array_start(masm->isolate());
 
   ASSERT_EQ(object.code(), InstanceofStub::left().code());
   ASSERT_EQ(function.code(), InstanceofStub::right().code());
@@ -4644,22 +5140,23 @@
     // Look up the function and the map in the instanceof cache.
     Label miss;
     __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
-    __ cmp(function,
-           Operand::StaticArray(scratch, times_pointer_size, roots_address));
+    __ cmp(function, Operand::StaticArray(scratch,
+                                          times_pointer_size,
+                                          roots_array_start));
     __ j(not_equal, &miss, Label::kNear);
     __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
     __ cmp(map, Operand::StaticArray(
-        scratch, times_pointer_size, roots_address));
+        scratch, times_pointer_size, roots_array_start));
     __ j(not_equal, &miss, Label::kNear);
     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
     __ mov(eax, Operand::StaticArray(
-        scratch, times_pointer_size, roots_address));
+        scratch, times_pointer_size, roots_array_start));
     __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
     __ bind(&miss);
   }
 
   // Get the prototype of the function.
-  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(prototype, &slow);
@@ -4669,9 +5166,10 @@
   // map and function. The cached answer will be set when it is known below.
   if (!HasCallSiteInlineCheck()) {
   __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
-  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
+         map);
   __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
-  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
          function);
   } else {
     // The constants for the code patching are based on no push instructions
@@ -4681,12 +5179,13 @@
     __ mov(scratch, Operand(esp, 0 * kPointerSize));
     __ sub(scratch, Operand(esp, 1 * kPointerSize));
     if (FLAG_debug_code) {
-      __ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1);
+      __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
       __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
-      __ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2);
+      __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
       __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
     }
-    __ mov(Operand(scratch, kDeltaToCmpImmediate), map);
+    __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
+    __ mov(Operand(scratch, 0), map);
   }
 
   // Loop through the prototype chain of the object looking for the function
@@ -4694,10 +5193,10 @@
   __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
   Label loop, is_instance, is_not_instance;
   __ bind(&loop);
-  __ cmp(scratch, Operand(prototype));
+  __ cmp(scratch, prototype);
   __ j(equal, &is_instance, Label::kNear);
   Factory* factory = masm->isolate()->factory();
-  __ cmp(Operand(scratch), Immediate(factory->null_value()));
+  __ cmp(scratch, Immediate(factory->null_value()));
   __ j(equal, &is_not_instance, Label::kNear);
   __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
   __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
@@ -4708,7 +5207,7 @@
     __ Set(eax, Immediate(0));
     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
     __ mov(Operand::StaticArray(scratch,
-                                times_pointer_size, roots_address), eax);
+                                times_pointer_size, roots_array_start), eax);
   } else {
     // Get return address and delta to inlined map check.
     __ mov(eax, factory->true_value());
@@ -4730,7 +5229,7 @@
     __ Set(eax, Immediate(Smi::FromInt(1)));
     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
     __ mov(Operand::StaticArray(
-        scratch, times_pointer_size, roots_address), eax);
+        scratch, times_pointer_size, roots_array_start), eax);
   } else {
     // Get return address and delta to inlined map check.
     __ mov(eax, factory->false_value());
@@ -4788,13 +5287,14 @@
     __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
     // Call the builtin and convert 0/1 to true/false.
-    __ EnterInternalFrame();
-    __ push(object);
-    __ push(function);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(object);
+      __ push(function);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
     Label true_value, done;
-    __ test(eax, Operand(eax));
+    __ test(eax, eax);
     __ j(zero, &true_value, Label::kNear);
     __ mov(eax, factory->false_value());
     __ jmp(&done, Label::kNear);
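The hunk above swaps the explicit EnterInternalFrame/LeaveInternalFrame pair for a block-scoped FrameScope, so the frame-exit code is emitted on every path out of the block. A minimal sketch of the RAII shape, with a toy assembler type standing in for V8's MacroAssembler:

```cpp
#include <cstdio>

// Toy stand-in for the assembler; only the frame hooks matter here.
struct Assembler {
  void EnterInternalFrame() { std::puts("emit: enter internal frame"); }
  void LeaveInternalFrame() { std::puts("emit: leave internal frame"); }
};

// Block-scoped frame: the destructor guarantees the frame-exit code is
// emitted exactly once, which the old manual Enter/Leave pairing did not.
class FrameScope {
 public:
  explicit FrameScope(Assembler* masm) : masm_(masm) {
    masm_->EnterInternalFrame();
  }
  ~FrameScope() { masm_->LeaveInternalFrame(); }
 private:
  Assembler* masm_;
};

int main() {
  Assembler masm;
  {
    FrameScope scope(&masm);
    std::puts("emit: pushes and builtin call inside the frame");
  }  // frame-exit code emitted here, at the closing brace
}
```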
@@ -4854,11 +5354,6 @@
 // StringCharCodeAtGenerator
 
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
-  Label flat_string;
-  Label ascii_string;
-  Label got_char_code;
-  Label sliced_string;
-
   // If the receiver is a smi trigger the non-string case.
   STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfSmi(object_, receiver_not_string_);
@@ -4873,85 +5368,26 @@
   // If the index is non-smi trigger the non-smi case.
   STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfNotSmi(index_, &index_not_smi_);
-
-  // Put smi-tagged index into scratch register.
-  __ mov(scratch_, index_);
   __ bind(&got_smi_index_);
 
   // Check for index out of range.
-  __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
+  __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
   __ j(above_equal, index_out_of_range_);
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test(result_, Immediate(kStringRepresentationMask));
-  __ j(zero, &flat_string);
+  __ SmiUntag(index_);
 
-  // Handle non-flat strings.
-  __ and_(result_, kStringRepresentationMask);
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ cmp(result_, kExternalStringTag);
-  __ j(greater, &sliced_string, Label::kNear);
-  __ j(equal, &call_runtime_);
+  Factory* factory = masm->isolate()->factory();
+  StringCharLoadGenerator::Generate(
+      masm, factory, object_, index_, result_, &call_runtime_);
 
-  // ConsString.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  Label assure_seq_string;
-  __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
-         Immediate(masm->isolate()->factory()->empty_string()));
-  __ j(not_equal, &call_runtime_);
-  // Get the first of the two strings and load its instance type.
-  __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
-  __ jmp(&assure_seq_string, Label::kNear);
-
-  // SlicedString, unpack and add offset.
-  __ bind(&sliced_string);
-  __ add(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
-  __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset));
-
-  // Assure that we are dealing with a sequential string. Go to runtime if not.
-  __ bind(&assure_seq_string);
-  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
-  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test(result_, Immediate(kStringRepresentationMask));
-  __ j(not_zero, &call_runtime_);
-  __ jmp(&flat_string, Label::kNear);
-
-  // Check for 1-byte or 2-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ test(result_, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string, Label::kNear);
-
-  // 2-byte string.
-  // Load the 2-byte character code into the result register.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ movzx_w(result_, FieldOperand(object_,
-                                   scratch_, times_1,  // Scratch is smi-tagged.
-                                   SeqTwoByteString::kHeaderSize));
-  __ jmp(&got_char_code, Label::kNear);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ SmiUntag(scratch_);
-  __ movzx_b(result_, FieldOperand(object_,
-                                   scratch_, times_1,
-                                   SeqAsciiString::kHeaderSize));
-  __ bind(&got_char_code);
   __ SmiTag(result_);
   __ bind(&exit_);
 }
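GenerateFast no longer open-codes the cons/sliced/sequential dispatch; that work is delegated to StringCharLoadGenerator::Generate. The logic the removed block implemented, sketched over a toy string model (all types here are illustrative, not V8's):

```cpp
#include <cstddef>
#include <string>

// Toy model of the removed dispatch: unwrap cons and sliced strings until
// a sequential string is reached, then index it directly.
struct Str {
  enum Kind { kSeq, kCons, kSliced } kind;
  std::string chars;            // kSeq: the character payload
  const Str* first = nullptr;   // kCons: left part
  const Str* second = nullptr;  // kCons: right part (empty when flattened)
  const Str* parent = nullptr;  // kSliced: underlying string
  size_t offset = 0;            // kSliced: start index within the parent
};

// Returns the character, or -1 where the stub bails to the runtime
// (a cons string whose second part is not the empty string).
int CharAt(const Str* s, size_t index) {
  for (;;) {
    switch (s->kind) {
      case Str::kSeq:
        return static_cast<unsigned char>(s->chars[index]);
      case Str::kCons:
        if (!s->second->chars.empty()) return -1;  // not flat: runtime call
        s = s->first;
        break;
      case Str::kSliced:
        index += s->offset;  // adjust the index, then inspect the parent
        s = s->parent;
        break;
    }
  }
}
```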
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   // Index is not a smi.
@@ -4963,7 +5399,6 @@
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   __ push(object_);
-  __ push(index_);
   __ push(index_);  // Consumed by runtime conversion function.
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4972,12 +5407,11 @@
     // NumberToSmi discards numbers that are not exact integers.
     __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
-  if (!scratch_.is(eax)) {
+  if (!index_.is(eax)) {
     // Save the conversion result before the pop instructions below
     // have a chance to overwrite it.
-    __ mov(scratch_, eax);
+    __ mov(index_, eax);
   }
-  __ pop(index_);
   __ pop(object_);
   // Reload the instance type.
   __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
@@ -4985,7 +5419,7 @@
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
   STATIC_ASSERT(kSmiTag == 0);
-  __ JumpIfNotSmi(scratch_, index_out_of_range_);
+  __ JumpIfNotSmi(index_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ jmp(&got_smi_index_);
 
@@ -4995,6 +5429,7 @@
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
   __ push(object_);
+  __ SmiTag(index_);
   __ push(index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
   if (!result_.is(eax)) {
@@ -5025,7 +5460,7 @@
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize == 1);
   STATIC_ASSERT(kSmiShiftSize == 0);
-  // At this point code register contains smi tagged ascii char code.
+  // At this point code register contains smi tagged ASCII char code.
   __ mov(result_, FieldOperand(result_,
                                code_, times_half_pointer_size,
                                FixedArray::kHeaderSize));
@@ -5036,7 +5471,8 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -5063,14 +5499,15 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
 
 
 void StringAddStub::Generate(MacroAssembler* masm) {
-  Label string_add_runtime, call_builtin;
+  Label call_runtime, call_builtin;
   Builtins::JavaScript builtin_id = Builtins::ADD;
 
   // Load the two arguments.
@@ -5079,14 +5516,14 @@
 
   // Make sure that both arguments are strings if not known in advance.
   if (flags_ == NO_STRING_ADD_FLAGS) {
-    __ JumpIfSmi(eax, &string_add_runtime);
+    __ JumpIfSmi(eax, &call_runtime);
     __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
-    __ j(above_equal, &string_add_runtime);
+    __ j(above_equal, &call_runtime);
 
     // First argument is a string, test second.
-    __ JumpIfSmi(edx, &string_add_runtime);
+    __ JumpIfSmi(edx, &call_runtime);
     __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
-    __ j(above_equal, &string_add_runtime);
+    __ j(above_equal, &call_runtime);
   } else {
     // Here at least one of the arguments is definitely a string.
     // We convert the one that is not known to be a string.
@@ -5110,7 +5547,7 @@
   Label second_not_zero_length, both_not_zero_length;
   __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(not_zero, &second_not_zero_length, Label::kNear);
   // Second string is empty, result is first string which is already in eax.
   Counters* counters = masm->isolate()->counters();
@@ -5119,7 +5556,7 @@
   __ bind(&second_not_zero_length);
   __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(not_zero, &both_not_zero_length, Label::kNear);
   // First string is empty, result is second string which is in edx.
   __ mov(eax, edx);
@@ -5134,18 +5571,17 @@
   // Look at the length of the result of adding the two strings.
   Label string_add_flat_result, longer_than_two;
   __ bind(&both_not_zero_length);
-  __ add(ebx, Operand(ecx));
+  __ add(ebx, ecx);
   STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
   // Handle exceptionally long strings in the runtime system.
-  __ j(overflow, &string_add_runtime);
+  __ j(overflow, &call_runtime);
   // Use the symbol table when adding two one-character strings, as it
   // helps later optimizations to return a symbol here.
-  __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
+  __ cmp(ebx, Immediate(Smi::FromInt(2)));
   __ j(not_equal, &longer_than_two);
 
-  // Check that both strings are non-external ascii strings.
-  __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
-                                         &string_add_runtime);
+  // Check that both strings are non-external ASCII strings.
+  __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
 
   // Get the two characters forming the new string.
   __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
@@ -5170,14 +5606,10 @@
   __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
   __ bind(&make_two_character_string_no_reload);
   __ IncrementCounter(counters->string_add_make_two_char(), 1);
-  __ AllocateAsciiString(eax,  // Result.
-                         2,    // Length.
-                         edi,  // Scratch 1.
-                         edx,  // Scratch 2.
-                         &string_add_runtime);
+  __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
   // Pack both characters in ebx.
   __ shl(ecx, kBitsPerByte);
-  __ or_(ebx, Operand(ecx));
+  __ or_(ebx, ecx);
   // Set the characters in the new string.
   __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
   __ IncrementCounter(counters->string_add_native(), 1);
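The two-character fast path above packs both characters into one register (the second shifted up by kBitsPerByte) so a single 16-bit store fills the new string body. The same packing in plain C++, relying on x86's little-endian byte order:

```cpp
#include <cstdint>

// Pack two ASCII characters the way the stub does: char 1 in the low
// byte, char 2 in the high byte (kBitsPerByte == 8).
inline uint16_t PackTwoChars(uint8_t c1, uint8_t c2) {
  return static_cast<uint16_t>(c1 | (c2 << 8));
}

// PackTwoChars('a', 'b') == 0x6261; a little-endian 16-bit store of that
// value writes the bytes 'a', 'b' into the string body in order.
```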
@@ -5185,24 +5617,24 @@
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
+  __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
   __ j(below, &string_add_flat_result);
 
   // If result is not supposed to be flat allocate a cons string object. If both
-  // strings are ascii the result is an ascii cons string.
+  // strings are ASCII the result is an ASCII cons string.
   Label non_ascii, allocated, ascii_data;
   __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
   __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
-  __ and_(ecx, Operand(edi));
+  __ and_(ecx, edi);
   STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
   STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ test(ecx, Immediate(kStringEncodingMask));
   __ j(zero, &non_ascii);
   __ bind(&ascii_data);
-  // Allocate an acsii cons string.
-  __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
+  // Allocate an ASCII cons string.
+  __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
   if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
@@ -5216,77 +5648,106 @@
   __ ret(2 * kPointerSize);
   __ bind(&non_ascii);
   // At least one of the strings is two-byte. Check whether it happens
-  // to contain only ascii characters.
+  // to contain only ASCII characters.
   // ecx: first instance type AND second instance type.
   // edi: second instance type.
   __ test(ecx, Immediate(kAsciiDataHintMask));
   __ j(not_zero, &ascii_data);
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ xor_(edi, Operand(ecx));
+  __ xor_(edi, ecx);
   STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
   __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
   __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
   __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
-  __ AllocateTwoByteConsString(ecx, edi, no_reg, &string_add_runtime);
+  __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
   __ jmp(&allocated);
 
-  // Handle creating a flat result. First check that both strings are not
-  // external strings.
+  // We cannot encounter sliced strings or cons strings here since:
+  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
+  // Handle creating a flat result from either external or sequential strings.
+  // Locate the first characters' locations.
   // eax: first string
   // ebx: length of resulting flat string as a smi
   // edx: second string
+  Label first_prepared, second_prepared;
+  Label first_is_sequential, second_is_sequential;
   __ bind(&string_add_flat_result);
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ and_(ecx, kStringRepresentationMask);
-  __ cmp(ecx, kExternalStringTag);
-  __ j(equal, &string_add_runtime);
-  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ and_(ecx, kStringRepresentationMask);
-  __ cmp(ecx, kExternalStringTag);
-  __ j(equal, &string_add_runtime);
-  // We cannot encounter sliced strings here since:
-  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
-  // Now check if both strings are ascii strings.
-  // eax: first string
-  // ebx: length of resulting flat string as a smi
-  // edx: second string
-  Label non_ascii_string_add_flat_result;
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
-  __ j(zero, &non_ascii_string_add_flat_result);
-  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
-  __ j(zero, &string_add_runtime);
+  // ecx: instance type of first string
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ test_b(ecx, kStringRepresentationMask);
+  __ j(zero, &first_is_sequential, Label::kNear);
+  // Rule out short external string and load string resource.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ test_b(ecx, kShortExternalStringMask);
+  __ j(not_zero, &call_runtime);
+  __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  __ jmp(&first_prepared, Label::kNear);
+  __ bind(&first_is_sequential);
+  __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ bind(&first_prepared);
 
-  // Both strings are ascii strings.  As they are short they are both flat.
+  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+  __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
+  // Check whether both strings have same encoding.
+  // edi: instance type of second string
+  __ xor_(ecx, edi);
+  __ test_b(ecx, kStringEncodingMask);
+  __ j(not_zero, &call_runtime);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ test_b(edi, kStringRepresentationMask);
+  __ j(zero, &second_is_sequential, Label::kNear);
+  // Rule out short external string and load string resource.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ test_b(edi, kShortExternalStringMask);
+  __ j(not_zero, &call_runtime);
+  __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  __ jmp(&second_prepared, Label::kNear);
+  __ bind(&second_is_sequential);
+  __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ bind(&second_prepared);
+
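The hunk above checks that both strings share an encoding by XORing their instance types: bits where the two types agree cancel, so the encoding bit survives only when the strings differ. A sketch of the test (the mask value is illustrative, not V8's actual constant):

```cpp
#include <cstdint>

// Illustrative instance-type bit for the string encoding; the real
// kStringEncodingMask is defined in V8's objects.h.
constexpr uint8_t kStringEncodingMask = 0x4;  // assumption

// XOR cancels agreeing bits, so the masked result is nonzero exactly
// when the two strings use different encodings.
inline bool SameEncoding(uint8_t type1, uint8_t type2) {
  return ((type1 ^ type2) & kStringEncodingMask) == 0;
}
```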
+  // Push the addresses of both strings' first characters onto the stack.
+  __ push(edx);
+  __ push(eax);
+
+  Label non_ascii_string_add_flat_result, call_runtime_drop_two;
+  // edi: instance type of second string
+  // First string and second string have the same encoding.
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ test_b(edi, kStringEncodingMask);
+  __ j(zero, &non_ascii_string_add_flat_result);
+
+  // Both strings are ASCII strings.
   // ebx: length of resulting flat string as a smi
   __ SmiUntag(ebx);
-  __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+  __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
   // eax: result string
   __ mov(ecx, eax);
   // Locate first character of result.
-  __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // Load first argument and locate first character.
-  __ mov(edx, Operand(esp, 2 * kPointerSize));
+  __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Load first argument's length and first character location.  Account for
+  // values currently on the stack when fetching arguments from it.
+  __ mov(edx, Operand(esp, 4 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ pop(edx);
   // eax: result string
   // ecx: first character of result
   // edx: first char of first argument
   // edi: length of first argument
   StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
-  // Load second argument and locate first character.
-  __ mov(edx, Operand(esp, 1 * kPointerSize));
+  // Load second argument's length and first character location.  Account for
+  // values currently on the stack when fetching arguments from it.
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ pop(edx);
   // eax: result string
   // ecx: next character of result
   // edx: first char of second argument
@@ -5300,34 +5761,30 @@
   // ebx: length of resulting flat string as a smi
   // edx: second string
   __ bind(&non_ascii_string_add_flat_result);
-  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
-  __ j(not_zero, &string_add_runtime);
-  // Both strings are two byte strings. As they are short they are both
-  // flat.
+  // Both strings are two byte strings.
   __ SmiUntag(ebx);
-  __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+  __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
   // eax: result string
   __ mov(ecx, eax);
   // Locate first character of result.
-  __ add(Operand(ecx),
-         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // Load first argument and locate first character.
-  __ mov(edx, Operand(esp, 2 * kPointerSize));
+  __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Load first argument's length and first character location.  Account for
+  // values currently on the stack when fetching arguments from it.
+  __ mov(edx, Operand(esp, 4 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ add(Operand(edx),
-         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ pop(edx);
   // eax: result string
   // ecx: first character of result
   // edx: first char of first argument
   // edi: length of first argument
   StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
-  // Load second argument and locate first character.
-  __ mov(edx, Operand(esp, 1 * kPointerSize));
+  // Load second argument's length and first character location.  Account for
+  // values currently on the stack when fetching arguments from it.
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ pop(edx);
   // eax: result string
   // ecx: next character of result
   // edx: first char of second argument
@@ -5336,8 +5793,11 @@
   __ IncrementCounter(counters->string_add_native(), 1);
   __ ret(2 * kPointerSize);
 
+  // Recover stack pointer before jumping to runtime.
+  __ bind(&call_runtime_drop_two);
+  __ Drop(2);
   // Just jump to runtime to add the two strings.
-  __ bind(&string_add_runtime);
+  __ bind(&call_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 
   if (call_builtin.is_linked()) {
@@ -5403,15 +5863,15 @@
   if (ascii) {
     __ mov_b(scratch, Operand(src, 0));
     __ mov_b(Operand(dest, 0), scratch);
-    __ add(Operand(src), Immediate(1));
-    __ add(Operand(dest), Immediate(1));
+    __ add(src, Immediate(1));
+    __ add(dest, Immediate(1));
   } else {
     __ mov_w(scratch, Operand(src, 0));
     __ mov_w(Operand(dest, 0), scratch);
-    __ add(Operand(src), Immediate(2));
-    __ add(Operand(dest), Immediate(2));
+    __ add(src, Immediate(2));
+    __ add(dest, Immediate(2));
   }
-  __ sub(Operand(count), Immediate(1));
+  __ sub(count, Immediate(1));
   __ j(not_zero, &loop);
 }
 
@@ -5434,7 +5894,7 @@
 
   // Nothing to do for zero characters.
   Label done;
-  __ test(count, Operand(count));
+  __ test(count, count);
   __ j(zero, &done);
 
   // Make count the number of bytes to copy.
@@ -5459,7 +5919,7 @@
 
   // Check if there are more bytes to copy.
   __ bind(&last_bytes);
-  __ test(count, Operand(count));
+  __ test(count, count);
   __ j(zero, &done);
 
   // Copy remaining characters.
@@ -5467,9 +5927,9 @@
   __ bind(&loop);
   __ mov_b(scratch, Operand(src, 0));
   __ mov_b(Operand(dest, 0), scratch);
-  __ add(Operand(src), Immediate(1));
-  __ add(Operand(dest), Immediate(1));
-  __ sub(Operand(count), Immediate(1));
+  __ add(src, Immediate(1));
+  __ add(dest, Immediate(1));
+  __ sub(count, Immediate(1));
   __ j(not_zero, &loop);
 
   __ bind(&done);
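GenerateCopyCharactersREP moves the bulk of the data four bytes at a time (rep movs on dwords) and then finishes the 0-3 remaining bytes one by one in the &last_bytes loop above. The same shape in portable C++:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Bulk copy in 4-byte units (the `rep movsd` part), then the trailing
// bytes individually (the &last_bytes loop).
void CopyBytes(uint8_t* dest, const uint8_t* src, size_t count) {
  size_t dwords = count / 4;
  for (size_t i = 0; i < dwords; i++) {
    std::memcpy(dest, src, 4);  // unaligned-safe 4-byte move
    src += 4;
    dest += 4;
  }
  for (size_t i = 0; i < count % 4; i++) {
    *dest++ = *src++;
  }
}
```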
@@ -5491,12 +5951,12 @@
   // different hash algorithm. Don't try to look for these in the symbol table.
   Label not_array_index;
   __ mov(scratch, c1);
-  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
-  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+  __ sub(scratch, Immediate(static_cast<int>('0')));
+  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
   __ j(above, &not_array_index, Label::kNear);
   __ mov(scratch, c2);
-  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
-  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+  __ sub(scratch, Immediate(static_cast<int>('0')));
+  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
   __ j(below_equal, not_probed);
 
   __ bind(&not_array_index);
@@ -5509,24 +5969,24 @@
   // Collect the two characters in a register.
   Register chars = c1;
   __ shl(c2, kBitsPerByte);
-  __ or_(chars, Operand(c2));
+  __ or_(chars, c2);
 
   // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
   // hash:  hash of two character string.
 
   // Load the symbol table.
   Register symbol_table = c2;
-  ExternalReference roots_address =
-      ExternalReference::roots_address(masm->isolate());
+  ExternalReference roots_array_start =
+      ExternalReference::roots_array_start(masm->isolate());
   __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
   __ mov(symbol_table,
-         Operand::StaticArray(scratch, times_pointer_size, roots_address));
+         Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
 
   // Calculate capacity mask from the symbol table capacity.
   Register mask = scratch2;
   __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
   __ SmiUntag(mask);
-  __ sub(Operand(mask), Immediate(1));
+  __ sub(mask, Immediate(1));
 
   // Registers
   // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
@@ -5544,9 +6004,9 @@
     // Calculate entry in symbol table.
     __ mov(scratch, hash);
     if (i > 0) {
-      __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
+      __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
     }
-    __ and_(scratch, Operand(mask));
+    __ and_(scratch, mask);
 
     // Load the entry from the symbol table.
     STATIC_ASSERT(SymbolTable::kEntrySize == 1);
@@ -5560,7 +6020,7 @@
     Factory* factory = masm->isolate()->factory();
     __ cmp(candidate, factory->undefined_value());
     __ j(equal, not_found);
-    __ cmp(candidate, factory->null_value());
+    __ cmp(candidate, factory->the_hole_value());
     __ j(equal, &next_probe[i]);
 
     // If length is not 2 the string is not a candidate.
@@ -5573,7 +6033,7 @@
     __ push(mask);
     Register temp = mask;
 
-    // Check that the candidate is a non-external ascii string.
+    // Check that the candidate is a non-external ASCII string.
     __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
     __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
     __ JumpIfInstanceTypeIsNotSequentialAscii(
@@ -5582,7 +6042,7 @@
     // Check if the two characters match.
     __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
     __ and_(temp, 0x0000ffff);
-    __ cmp(chars, Operand(temp));
+    __ cmp(chars, temp);
     __ j(equal, &found_in_symbol_table);
     __ bind(&next_probe_pop_mask[i]);
     __ pop(mask);
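Each probe above computes its slot as (hash + GetProbeOffset(i)) & mask, with a power-of-two capacity so the mask keeps the index in range. A sketch of the probe sequence; the triangular offset formula is how V8's hash tables define GetProbeOffset, taken here as an assumption:

```cpp
#include <cstddef>
#include <cstdint>

// Triangular probe offsets: 0, 1, 3, 6, 10, ...  (assumed formula)
inline size_t GetProbeOffset(size_t n) { return (n + n * n) >> 1; }

// Capacity is a power of two, so mask = capacity - 1 wraps the index.
inline size_t ProbeSlot(uint32_t hash, size_t mask, size_t probe) {
  return (hash + GetProbeOffset(probe)) & mask;
}
```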
@@ -5608,17 +6068,17 @@
                                     Register scratch) {
   // hash = (seed + character) + ((seed + character) << 10);
   if (Serializer::enabled()) {
-    ExternalReference roots_address =
-        ExternalReference::roots_address(masm->isolate());
+    ExternalReference roots_array_start =
+        ExternalReference::roots_array_start(masm->isolate());
     __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
     __ mov(scratch, Operand::StaticArray(scratch,
                                          times_pointer_size,
-                                         roots_address));
+                                         roots_array_start));
     __ SmiUntag(scratch);
-    __ add(scratch, Operand(character));
+    __ add(scratch, character);
     __ mov(hash, scratch);
     __ shl(scratch, 10);
-    __ add(hash, Operand(scratch));
+    __ add(hash, scratch);
   } else {
     int32_t seed = masm->isolate()->heap()->HashSeed();
     __ lea(scratch, Operand(character, seed));
@@ -5628,7 +6088,7 @@
   // hash ^= hash >> 6;
   __ mov(scratch, hash);
   __ shr(scratch, 6);
-  __ xor_(hash, Operand(scratch));
+  __ xor_(hash, scratch);
 }
 
 
@@ -5637,15 +6097,15 @@
                                             Register character,
                                             Register scratch) {
   // hash += character;
-  __ add(hash, Operand(character));
+  __ add(hash, character);
   // hash += hash << 10;
   __ mov(scratch, hash);
   __ shl(scratch, 10);
-  __ add(hash, Operand(scratch));
+  __ add(hash, scratch);
   // hash ^= hash >> 6;
   __ mov(scratch, hash);
   __ shr(scratch, 6);
-  __ xor_(hash, Operand(scratch));
+  __ xor_(hash, scratch);
 }
 
 
@@ -5655,15 +6115,15 @@
   // hash += hash << 3;
   __ mov(scratch, hash);
   __ shl(scratch, 3);
-  __ add(hash, Operand(scratch));
+  __ add(hash, scratch);
   // hash ^= hash >> 11;
   __ mov(scratch, hash);
   __ shr(scratch, 11);
-  __ xor_(hash, Operand(scratch));
+  __ xor_(hash, scratch);
   // hash += hash << 15;
   __ mov(scratch, hash);
   __ shl(scratch, 15);
-  __ add(hash, Operand(scratch));
+  __ add(hash, scratch);
 
   __ and_(hash, String::kHashBitMask);
 
@@ -5695,100 +6155,70 @@
   // ebx: instance type
 
   // Calculate length of sub string using the smi values.
-  Label result_longer_than_two;
   __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
   __ JumpIfNotSmi(ecx, &runtime);
   __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
   __ JumpIfNotSmi(edx, &runtime);
-  __ sub(ecx, Operand(edx));
+  __ sub(ecx, edx);
   __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
-  Label return_eax;
-  __ j(equal, &return_eax);
-  // Special handling of sub-strings of length 1 and 2. One character strings
-  // are handled in the runtime system (looked up in the single character
-  // cache). Two character strings are looked for in the symbol cache.
-  __ SmiUntag(ecx);  // Result length is no longer smi.
-  __ cmp(ecx, 2);
-  __ j(greater, &result_longer_than_two);
-  __ j(less, &runtime);
+  Label not_original_string;
+  __ j(not_equal, &not_original_string, Label::kNear);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->sub_string_native(), 1);
+  __ ret(3 * kPointerSize);
+  __ bind(&not_original_string);
 
-  // Sub string of length 2 requested.
   // eax: string
   // ebx: instance type
-  // ecx: sub string length (value is 2)
+  // ecx: sub string length (smi)
   // edx: from index (smi)
-  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
+  // Deal with different string types: update the index if necessary
+  // and put the underlying string into edi.
+  Label underlying_unpacked, sliced_string, seq_or_external_string;
+  // If the string is not indirect, it can only be sequential or external.
+  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+  STATIC_ASSERT(kIsIndirectStringMask != 0);
+  __ test(ebx, Immediate(kIsIndirectStringMask));
+  __ j(zero, &seq_or_external_string, Label::kNear);
 
-  // Get the two characters forming the sub string.
-  __ SmiUntag(edx);  // From index is no longer smi.
-  __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
-  __ movzx_b(ecx,
-             FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
-
-  // Try to lookup two character string in symbol table.
-  Label make_two_character_string;
-  StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, ebx, ecx, eax, edx, edi,
-      &make_two_character_string, &make_two_character_string);
-  __ ret(3 * kPointerSize);
-
-  __ bind(&make_two_character_string);
-  // Setup registers for allocating the two character string.
-  __ mov(eax, Operand(esp, 3 * kPointerSize));
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  Factory* factory = masm->isolate()->factory();
+  __ test(ebx, Immediate(kSlicedNotConsMask));
+  __ j(not_zero, &sliced_string, Label::kNear);
+  // Cons string.  Check whether it is flat, then fetch first part.
+  // Flat cons strings have an empty second part.
+  __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
+         factory->empty_string());
+  __ j(not_equal, &runtime);
+  __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
+  // Update instance type.
+  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ Set(ecx, Immediate(2));
+  __ jmp(&underlying_unpacked, Label::kNear);
+
+  __ bind(&sliced_string);
+  // Sliced string.  Fetch parent and adjust start index by offset.
+  __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
+  __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
+  // Update instance type.
+  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  __ jmp(&underlying_unpacked, Label::kNear);
+
+  __ bind(&seq_or_external_string);
+  // Sequential or external string.  Just move string to the expected register.
+  __ mov(edi, eax);
+
+  __ bind(&underlying_unpacked);
 
   if (FLAG_string_slices) {
     Label copy_routine;
-    // If coming from the make_two_character_string path, the string
-    // is too short to be sliced anyways.
-    STATIC_ASSERT(2 < SlicedString::kMinLength);
-    __ jmp(&copy_routine);
-    __ bind(&result_longer_than_two);
-
-    // eax: string
-    // ebx: instance type
-    // ecx: sub string length
-    // edx: from index (smi)
-    Label allocate_slice, sliced_string, seq_string;
-    __ cmp(ecx, SlicedString::kMinLength);
+    // edi: underlying subject string
+    // ebx: instance type of underlying subject string
+    // edx: adjusted start index (smi)
+    // ecx: length (smi)
+    __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
     // Short slice.  Copy instead of slicing.
     __ j(less, &copy_routine);
-    STATIC_ASSERT(kSeqStringTag == 0);
-    __ test(ebx, Immediate(kStringRepresentationMask));
-    __ j(zero, &seq_string, Label::kNear);
-    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-    STATIC_ASSERT(kIsIndirectStringMask != 0);
-    __ test(ebx, Immediate(kIsIndirectStringMask));
-    // External string.  Jump to runtime.
-    __ j(zero, &runtime);
-
-    Factory* factory = masm->isolate()->factory();
-    __ test(ebx, Immediate(kSlicedNotConsMask));
-    __ j(not_zero, &sliced_string, Label::kNear);
-    // Cons string.  Check whether it is flat, then fetch first part.
-    __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
-           factory->empty_string());
-    __ j(not_equal, &runtime);
-    __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
-    __ jmp(&allocate_slice, Label::kNear);
-
-    __ bind(&sliced_string);
-    // Sliced string.  Fetch parent and correct start index by offset.
-    __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
-    __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
-    __ jmp(&allocate_slice, Label::kNear);
-
-    __ bind(&seq_string);
-    // Sequential string.  Just move string to the right register.
-    __ mov(edi, eax);
-
-    __ bind(&allocate_slice);
-    // edi: underlying subject string
-    // ebx: instance type of original subject string
-    // edx: offset
-    // ecx: length
     // Allocate new sliced string.  At this point we do not reload the instance
     // type including the string encoding because we simply rely on the info
     // provided by the original string.  It does not matter if the original
@@ -5804,41 +6234,62 @@
     __ bind(&two_byte_slice);
     __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
     __ bind(&set_slice_header);
-    __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
-    __ SmiTag(ecx);
     __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
-    __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
     __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
            Immediate(String::kEmptyHashField));
-    __ jmp(&return_eax);
+    __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
+    __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
+    __ IncrementCounter(counters->sub_string_native(), 1);
+    __ ret(3 * kPointerSize);
 
     __ bind(&copy_routine);
-  } else {
-    __ bind(&result_longer_than_two);
   }
 
-  // eax: string
-  // ebx: instance type
-  // ecx: result string length
-  // Check for flat ascii string
-  Label non_ascii_flat;
-  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
+  // edi: underlying subject string
+  // ebx: instance type of underlying subject string
+  // edx: adjusted start index (smi)
+  // ecx: length (smi)
+  // The subject string can only be external or sequential string of either
+  // encoding at this point.
+  Label two_byte_sequential, runtime_drop_two, sequential_string;
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ test_b(ebx, kExternalStringTag);
+  __ j(zero, &sequential_string);
 
-  // Allocate the result.
-  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
+  // Handle external string.
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ test_b(ebx, kShortExternalStringMask);
+  __ j(not_zero, &runtime);
+  __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  __ bind(&sequential_string);
+  // Stash away (adjusted) index and (underlying) string.
+  __ push(edx);
+  __ push(edi);
+  __ SmiUntag(ecx);
+  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+  __ test_b(ebx, kStringEncodingMask);
+  __ j(zero, &two_byte_sequential);
+
+  // Sequential ASCII string.  Allocate the result.
+  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
 
   // eax: result string
   // ecx: result string length
   __ mov(edx, esi);  // esi used by following code.
   // Locate first character of result.
   __ mov(edi, eax);
-  __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
-  __ mov(esi, Operand(esp, 3 * kPointerSize));
-  __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
+  __ pop(esi);
+  __ pop(ebx);
   __ SmiUntag(ebx);
-  __ add(esi, Operand(ebx));
+  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
 
   // eax: result string
   // ecx: result length
@@ -5847,38 +6298,28 @@
   // esi: character of sub string start
   StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
   __ mov(esi, edx);  // Restore esi.
-  Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(3 * kPointerSize);
 
-  __ bind(&non_ascii_flat);
-  // eax: string
-  // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
-  // ecx: result string length
-  // Check for flat two byte string
-  __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
-  __ j(not_equal, &runtime);
-
-  // Allocate the result.
-  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
+  __ bind(&two_byte_sequential);
+  // Sequential two-byte string.  Allocate the result.
+  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
 
   // eax: result string
   // ecx: result string length
   __ mov(edx, esi);  // esi used by following code.
   // Locate first character of result.
   __ mov(edi, eax);
-  __ add(Operand(edi),
+  __ add(edi,
          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
-  __ mov(esi, Operand(esp, 3 * kPointerSize));
-  __ add(Operand(esi),
-         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
+  __ pop(esi);
+  __ pop(ebx);
   // As from is a smi it is already multiplied by two, which matches the
   // size of a two-byte character.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ add(esi, Operand(ebx));
+  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
 
   // eax: result string
   // ecx: result length
@@ -5887,11 +6328,13 @@
   // esi: character of sub string start
   StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
   __ mov(esi, edx);  // Restore esi.
-
-  __ bind(&return_eax);
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(3 * kPointerSize);
 
+  // Drop pushed values on the stack before tail call.
+  __ bind(&runtime_drop_two);
+  __ Drop(2);
+
   // Just jump to runtime to create the sub string.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kSubString, 3, 1);
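The rewritten SubString first unwraps cons and sliced inputs to the underlying string with an adjusted start index, then slices only when the result is at least SlicedString::kMinLength characters; shorter results are copied because a slice's header, parent pointer and offset only pay off for longer substrings. The decision, sketched (the threshold value is an assumption, not the real constant):

```cpp
#include <cstddef>

constexpr size_t kSlicedStringMinLength = 13;  // assumed value

enum class SubStringStrategy { kCopy, kSlice };

// Copy short results; share the parent's characters for long ones.
inline SubStringStrategy Choose(size_t result_length) {
  return result_length < kSlicedStringMinLength ? SubStringStrategy::kCopy
                                                : SubStringStrategy::kSlice;
}
```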
@@ -5918,7 +6361,7 @@
   Label compare_chars;
   __ bind(&check_zero_length);
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(length, Operand(length));
+  __ test(length, length);
   __ j(not_zero, &compare_chars, Label::kNear);
   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
   __ ret(0);
@@ -5953,14 +6396,14 @@
 
   __ j(less_equal, &left_shorter, Label::kNear);
   // Right string is shorter. Change scratch1 to be length of right string.
-  __ sub(scratch1, Operand(length_delta));
+  __ sub(scratch1, length_delta);
   __ bind(&left_shorter);
 
   Register min_length = scratch1;
 
   // If either length is zero, just compare lengths.
   Label compare_lengths;
-  __ test(min_length, Operand(min_length));
+  __ test(min_length, min_length);
   __ j(zero, &compare_lengths, Label::kNear);
 
   // Compare characters.
@@ -5970,7 +6413,7 @@
 
   // Compare lengths -  strings up to min-length are equal.
   __ bind(&compare_lengths);
-  __ test(length_delta, Operand(length_delta));
+  __ test(length_delta, length_delta);
   __ j(not_zero, &result_not_equal, Label::kNear);
 
   // Result is EQUAL.
@@ -6019,7 +6462,7 @@
   __ mov_b(scratch, Operand(left, index, times_1, 0));
   __ cmpb(scratch, Operand(right, index, times_1, 0));
   __ j(not_equal, chars_not_equal, chars_not_equal_near);
-  __ add(Operand(index), Immediate(1));
+  __ inc(index);
   __ j(not_zero, &loop);
 }
 
@@ -6036,7 +6479,7 @@
   __ mov(eax, Operand(esp, 1 * kPointerSize));  // right
 
   Label not_same;
-  __ cmp(edx, Operand(eax));
+  __ cmp(edx, eax);
   __ j(not_equal, &not_same, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -6046,13 +6489,13 @@
 
   __ bind(&not_same);
 
-  // Check that both objects are sequential ascii strings.
+  // Check that both objects are sequential ASCII strings.
   __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
 
-  // Compare flat ascii strings.
+  // Compare flat ASCII strings.
   // Drop arguments from the stack.
   __ pop(ecx);
-  __ add(Operand(esp), Immediate(2 * kPointerSize));
+  __ add(esp, Immediate(2 * kPointerSize));
   __ push(ecx);
   GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
 
@@ -6066,16 +6509,16 @@
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMIS);
   Label miss;
-  __ mov(ecx, Operand(edx));
-  __ or_(ecx, Operand(eax));
+  __ mov(ecx, edx);
+  __ or_(ecx, eax);
   __ JumpIfNotSmi(ecx, &miss, Label::kNear);
 
   if (GetCondition() == equal) {
     // For equality we do not care about the sign of the result.
-    __ sub(eax, Operand(edx));
+    __ sub(eax, edx);
   } else {
     Label done;
-    __ sub(edx, Operand(eax));
+    __ sub(edx, eax);
     __ j(no_overflow, &done, Label::kNear);
     // Correct sign of result in case of overflow.
     __ not_(edx);
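GenerateSmis compares by subtraction: the sign of left - right is the comparison result, except when the subtraction overflows, in which case the computed sign is the opposite of the true one and the not_ instruction (which flips the sign bit) repairs it. A sketch of the same trick; __builtin_sub_overflow is a GCC/Clang intrinsic used here for brevity:

```cpp
#include <cstdint>

// Negative, zero or positive, like the value the stub leaves in a register.
int32_t CompareSmis(int32_t left, int32_t right) {
  int32_t result;
  if (__builtin_sub_overflow(left, right, &result)) {
    // Overflow flipped the sign; NOT flips it back (the off-by-one in
    // magnitude does not matter, only the sign and zeroness do).
    result = ~result;
  }
  return result;
}
```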
@@ -6093,16 +6536,16 @@
   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
 
   Label generic_stub;
-  Label unordered;
+  Label unordered, maybe_undefined1, maybe_undefined2;
   Label miss;
-  __ mov(ecx, Operand(edx));
-  __ and_(ecx, Operand(eax));
+  __ mov(ecx, edx);
+  __ and_(ecx, eax);
   __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
 
   __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
-  __ j(not_equal, &miss, Label::kNear);
+  __ j(not_equal, &maybe_undefined1, Label::kNear);
   __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
-  __ j(not_equal, &miss, Label::kNear);
+  __ j(not_equal, &maybe_undefined2, Label::kNear);
 
   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or SSE2 or CMOV is unsupported.
@@ -6124,18 +6567,32 @@
     // Performing mov, because xor would destroy the flag register.
     __ mov(eax, 0);  // equal
     __ mov(ecx, Immediate(Smi::FromInt(1)));
-    __ cmov(above, eax, Operand(ecx));
+    __ cmov(above, eax, ecx);
     __ mov(ecx, Immediate(Smi::FromInt(-1)));
-    __ cmov(below, eax, Operand(ecx));
+    __ cmov(below, eax, ecx);
     __ ret(0);
-
-    __ bind(&unordered);
   }
 
+  __ bind(&unordered);
   CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
   __ bind(&generic_stub);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 
+  __ bind(&maybe_undefined1);
+  if (Token::IsOrderedRelationalCompareOp(op_)) {
+    __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
+    __ j(not_equal, &miss);
+    __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
+    __ j(not_equal, &maybe_undefined2, Label::kNear);
+    __ jmp(&unordered);
+  }
+
+  __ bind(&maybe_undefined2);
+  if (Token::IsOrderedRelationalCompareOp(op_)) {
+    __ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value()));
+    __ j(equal, &unordered);
+  }
+
   __ bind(&miss);
   GenerateMiss(masm);
 }
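The new maybe_undefined paths route undefined operands of ordered relational compares to the &unordered outcome: ToNumber(undefined) is NaN, and every ordered comparison involving NaN is false. A one-line check of that semantics:

```cpp
#include <cassert>
#include <cmath>

int main() {
  double undefined_as_number = std::nan("");  // ToNumber(undefined)
  assert(!(undefined_as_number < 1.0));   // undefined < 1  is false
  assert(!(undefined_as_number >= 1.0));  // undefined >= 1 is false too
}
```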
@@ -6153,9 +6610,9 @@
 
   // Check that both operands are heap objects.
   Label miss;
-  __ mov(tmp1, Operand(left));
+  __ mov(tmp1, left);
   STATIC_ASSERT(kSmiTag == 0);
-  __ and_(tmp1, Operand(right));
+  __ and_(tmp1, right);
   __ JumpIfSmi(tmp1, &miss, Label::kNear);
 
   // Check that both operands are symbols.
@@ -6164,13 +6621,13 @@
   __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kSymbolTag != 0);
-  __ and_(tmp1, Operand(tmp2));
+  __ and_(tmp1, tmp2);
   __ test(tmp1, Immediate(kIsSymbolMask));
   __ j(zero, &miss, Label::kNear);
 
   // Symbols are compared by identity.
   Label done;
-  __ cmp(left, Operand(right));
+  __ cmp(left, right);
   // Make sure eax is non-zero. At this point input operands are
   // guaranteed to be non-zero.
   ASSERT(right.is(eax));
@@ -6188,9 +6645,10 @@
 
 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::STRINGS);
-  ASSERT(GetCondition() == equal);
   Label miss;
 
+  bool equality = Token::IsEqualityOp(op_);
+
   // Registers containing left and right operands respectively.
   Register left = edx;
   Register right = eax;
@@ -6199,9 +6657,9 @@
   Register tmp3 = edi;
 
   // Check that both operands are heap objects.
-  __ mov(tmp1, Operand(left));
+  __ mov(tmp1, left);
   STATIC_ASSERT(kSmiTag == 0);
-  __ and_(tmp1, Operand(right));
+  __ and_(tmp1, right);
   __ JumpIfSmi(tmp1, &miss);
 
   // Check that both operands are strings. This leaves the instance
@@ -6212,13 +6670,13 @@
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
   __ mov(tmp3, tmp1);
   STATIC_ASSERT(kNotStringTag != 0);
-  __ or_(tmp3, Operand(tmp2));
+  __ or_(tmp3, tmp2);
   __ test(tmp3, Immediate(kIsNotStringMask));
   __ j(not_zero, &miss);
 
   // Fast check for identical strings.
   Label not_same;
-  __ cmp(left, Operand(right));
+  __ cmp(left, right);
   __ j(not_equal, &not_same, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -6229,25 +6687,33 @@
   __ bind(&not_same);
 
   // Check that both strings are symbols. If they are, we're done
-  // because we already know they are not identical.
-  Label do_compare;
-  STATIC_ASSERT(kSymbolTag != 0);
-  __ and_(tmp1, Operand(tmp2));
-  __ test(tmp1, Immediate(kIsSymbolMask));
-  __ j(zero, &do_compare, Label::kNear);
-  // Make sure eax is non-zero. At this point input operands are
-  // guaranteed to be non-zero.
-  ASSERT(right.is(eax));
-  __ ret(0);
+  // because we already know they are not identical.  But in the case of
+  // non-equality compare, we still need to determine the order.
+  if (equality) {
+    Label do_compare;
+    STATIC_ASSERT(kSymbolTag != 0);
+    __ and_(tmp1, tmp2);
+    __ test(tmp1, Immediate(kIsSymbolMask));
+    __ j(zero, &do_compare, Label::kNear);
+    // Make sure eax is non-zero. At this point input operands are
+    // guaranteed to be non-zero.
+    ASSERT(right.is(eax));
+    __ ret(0);
+    __ bind(&do_compare);
+  }
 
   // Check that both strings are sequential ASCII.
   Label runtime;
-  __ bind(&do_compare);
   __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
 
   // Compare flat ASCII strings. Returns when done.
-  StringCompareStub::GenerateFlatAsciiStringEquals(
-      masm, left, right, tmp1, tmp2);
+  if (equality) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(
+        masm, left, right, tmp1, tmp2);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(
+        masm, left, right, tmp1, tmp2, tmp3);
+  }
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
@@ -6255,7 +6721,11 @@
   __ push(left);
   __ push(right);
   __ push(tmp1);
-  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+  if (equality) {
+    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+  } else {
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+  }
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -6265,8 +6735,8 @@
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::OBJECTS);
   Label miss;
-  __ mov(ecx, Operand(edx));
-  __ and_(ecx, Operand(eax));
+  __ mov(ecx, edx);
+  __ and_(ecx, eax);
   __ JumpIfSmi(ecx, &miss, Label::kNear);
 
   __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
@@ -6275,7 +6745,28 @@
   __ j(not_equal, &miss, Label::kNear);
 
   ASSERT(GetCondition() == equal);
-  __ sub(eax, Operand(edx));
+  __ sub(eax, edx);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+  Label miss;
+  __ mov(ecx, edx);
+  __ and_(ecx, eax);
+  __ JumpIfSmi(ecx, &miss, Label::kNear);
+
+  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ cmp(ecx, known_map_);
+  __ j(not_equal, &miss, Label::kNear);
+  __ cmp(ebx, known_map_);
+  __ j(not_equal, &miss, Label::kNear);
+
+  __ sub(eax, edx);
   __ ret(0);
 
   __ bind(&miss);
@@ -6284,33 +6775,25 @@
 
 
 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  // Save the registers.
-  __ pop(ecx);
-  __ push(edx);
-  __ push(eax);
-  __ push(ecx);
-
-  // Call the runtime system in a fresh internal frame.
-  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
-                                             masm->isolate());
-  __ EnterInternalFrame();
-  __ push(edx);
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(op_)));
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
-
-  // Compute the entry point of the rewritten stub.
-  __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
-
-  // Restore registers.
-  __ pop(ecx);
-  __ pop(eax);
-  __ pop(edx);
-  __ push(ecx);
+  {
+    // Call the runtime system in a fresh internal frame.
+    ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+                                               masm->isolate());
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(edx);  // Preserve edx and eax.
+    __ push(eax);
+    __ push(edx);  // And also use them as the arguments.
+    __ push(eax);
+    __ push(Immediate(Smi::FromInt(op_)));
+    __ CallExternalReference(miss, 3);
+    // Compute the entry point of the rewritten stub.
+    __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+    __ pop(eax);
+    __ pop(edx);
+  }
 
   // Do a tail call to the rewritten stub.
-  __ jmp(Operand(edi));
+  __ jmp(edi);
 }
 
 
@@ -6319,28 +6802,27 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register properties,
-    String* name,
-    Register r0) {
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register properties,
+                                                        Handle<String> name,
+                                                        Register r0) {
   ASSERT(name->IsSymbol());
 
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
   // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
+  // (their names are the hole value).
   for (int i = 0; i < kInlinedProbes; i++) {
     // Compute the masked index: (hash + i + i * i) & mask.
     Register index = r0;
     // Capacity is smi 2^n.
     __ mov(index, FieldOperand(properties, kCapacityOffset));
     __ dec(index);
-    __ and_(Operand(index),
-           Immediate(Smi::FromInt(name->Hash() +
+    __ and_(index,
+            Immediate(Smi::FromInt(name->Hash() +
                                    StringDictionary::GetProbeOffset(i))));
 
     // Scale the index by multiplying by the entry size.
@@ -6358,11 +6840,17 @@
     __ cmp(entity_name, Handle<String>(name));
     __ j(equal, miss);
 
+    Label the_hole;
+    // Check for the hole and skip.
+    __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
+    __ j(equal, &the_hole, Label::kNear);
+
     // Check if the entry name is not a symbol.
     __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
     __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
               kIsSymbolMask);
     __ j(zero, miss);
+    __ bind(&the_hole);
   }
 
   StringDictionaryLookupStub stub(properties,
@@ -6371,12 +6859,10 @@
                                   StringDictionaryLookupStub::NEGATIVE_LOOKUP);
   __ push(Immediate(Handle<Object>(name)));
   __ push(Immediate(name->Hash()));
-  MaybeObject* result = masm->TryCallStub(&stub);
-  if (result->IsFailure()) return result;
-  __ test(r0, Operand(r0));
+  __ CallStub(&stub);
+  __ test(r0, r0);
   __ j(not_zero, miss);
   __ jmp(done);
-  return result;
 }
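GenerateNegativeLookup proves a name absent: an undefined slot ends the search (the property cannot exist), the hole marks a deleted entry and is now skipped, a slot holding the name itself is a miss, and a non-symbol key means absence cannot be proven inline. A toy model of the loop; the probe count and offset formula mirror the stub's constants as assumptions:

```cpp
#include <cstddef>
#include <cstdint>

enum class Slot { kUndefined, kHole, kMatch, kOtherSymbol, kNonSymbol };
enum class Outcome { kAbsent, kPresent, kSlowPath };

Outcome NegativeLookup(const Slot* slots, size_t mask, uint32_t hash) {
  const int kInlinedProbes = 4;  // assumed value of the stub's constant
  for (int i = 0; i < kInlinedProbes; i++) {
    size_t index = (hash + (i + i * i) / 2) & mask;  // assumed probe offset
    switch (slots[index]) {
      case Slot::kUndefined:   return Outcome::kAbsent;    // free slot: done
      case Slot::kHole:        break;                      // deleted: skip
      case Slot::kMatch:       return Outcome::kPresent;   // jump to miss
      case Slot::kNonSymbol:   return Outcome::kSlowPath;  // jump to miss
      case Slot::kOtherSymbol: break;                      // keep probing
    }
  }
  return Outcome::kSlowPath;  // fall through to the full lookup stub
}
```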
 
 
@@ -6391,6 +6877,11 @@
                                                         Register name,
                                                         Register r0,
                                                         Register r1) {
+  ASSERT(!elements.is(r0));
+  ASSERT(!elements.is(r1));
+  ASSERT(!name.is(r0));
+  ASSERT(!name.is(r1));
+
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
 
@@ -6406,9 +6897,9 @@
     __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
     __ shr(r0, String::kHashShift);
     if (i > 0) {
-      __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
+      __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
     }
-    __ and_(r0, Operand(r1));
+    __ and_(r0, r1);
 
     // Scale the index by multiplying by the entry size.
     ASSERT(StringDictionary::kEntrySize == 3);
@@ -6432,13 +6923,15 @@
   __ push(r0);
   __ CallStub(&stub);
 
-  __ test(r1, Operand(r1));
+  __ test(r1, r1);
   __ j(zero, miss);
   __ jmp(done);
 }
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Stack frame on entry:
   //  esp[0 * kPointerSize]: return address.
   //  esp[1 * kPointerSize]: key's hash.
@@ -6469,8 +6962,7 @@
     // Compute the masked index: (hash + i + i * i) & mask.
     __ mov(scratch, Operand(esp, 2 * kPointerSize));
     if (i > 0) {
-      __ add(Operand(scratch),
-             Immediate(StringDictionary::GetProbeOffset(i)));
+      __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
     }
     __ and_(scratch, Operand(esp, 0));
 
@@ -6526,6 +7018,369 @@
 }
 
 
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
+static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // Used in RegExpExecStub.
+  { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
+  // Used in CompileArrayPushCall.
+  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
+  { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
+  // Used in CompileStoreGlobal and CallFunctionStub.
+  { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
+  // Used in StoreStubCompiler::CompileStoreField and
+  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
+  // GenerateStoreField calls the stub with two different permutations of
+  // registers.  This is the second.
+  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
+  // StoreIC::GenerateNormal via GenerateDictionaryStore
+  { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
+  // KeyedStoreIC::GenerateGeneric.
+  { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET },
+  // KeyedStoreStubCompiler::GenerateStoreFastElement.
+  { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET },
+  { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // and ElementsTransitionGenerator::GenerateDoubleToObject
+  { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET },
+  { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateDoubleToObject
+  { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET },
+  { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
+  // StoreArrayLiteralElementStub::Generate
+  { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET },
+  // Null termination.
+  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET }
+};
+
+#undef REG
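
For reference, the macro above makes each table entry an aggregate
initializer; REG(ebx) expands to

  { kRegister_ebx_Code }

so kAheadOfTime stays a plain POD array with no static constructors.
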
+
+bool RecordWriteStub::IsPregenerated() {
+  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode()->set_is_pregenerated(true);
+
+  CpuFeatures::TryForceFeatureScope scope(SSE2);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    StoreBufferOverflowStub stub2(kSaveFPRegs);
+    stub2.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+// Takes the input in 3 registers: address_, value_, and object_.  A pointer
+// to the value has just been written into the object; now this stub makes
+// sure we keep the GC informed.  The word in the object where the value has
+// been written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two instructions are generated with labels so as to get the
+  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
+  // forth between a compare instruction (a nop in this position) and the
+  // real branch when we start and stop incremental heap marking.
+  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
+  __ jmp(&skip_to_incremental_compacting, Label::kFar);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  masm->set_byte_at(0, kTwoByteNopInstruction);
+  masm->set_byte_at(2, kFiveByteNopInstruction);
+}
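
Combined with the opcode constants declared in code-stubs-ia32.h below, the
three patchable states of the stub's first seven bytes are (xx = immediate or
offset bytes):

  STORE_BUFFER_ONLY:       3c xx 3d xx xx xx xx   (cmp al / cmp eax; both act as nops)
  INCREMENTAL:             eb xx ...              (short jmp to the non-compacting path)
  INCREMENTAL_COMPACTION:  3c xx e9 xx xx xx xx   (2-byte nop, then 5-byte jmp)

GetMode() reads bytes 0 and 2 to recover the state, and Patch() rewrites them
and flushes the 7-byte region from the icache.
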
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     not_zero,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm,
+        kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
+        mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm,
+      kReturnOnNoNeedToInformIncrementalMarker,
+      mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ ret(0);
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ mov(Operand(esp, 1 * kPointerSize), regs_.address());  // Slot.
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+    __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0());  // Value.
+  }
+  __ mov(Operand(esp, 2 * kPointerSize),
+         Immediate(ExternalReference::isolate_address()));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label object_is_black, need_incremental, need_incremental_pop_object;
+
+  // Let's look at the color of the object:  If it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(),
+                 regs_.scratch0(),
+                 regs_.scratch1(),
+                 &object_is_black,
+                 Label::kNear);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&object_is_black);
+
+  // Get the value from the slot.
+  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     zero,
+                     &ensure_not_white,
+                     Label::kNear);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     not_zero,
+                     &ensure_not_white,
+                     Label::kNear);
+
+    __ jmp(&need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need an extra register for this, so we push the object register
+  // temporarily.
+  __ push(regs_.object());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    &need_incremental_pop_object,
+                    Label::kNear);
+  __ pop(regs_.object());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&need_incremental_pop_object);
+  __ pop(regs_.object());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
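
Roughly, the checks above amount to the following pseudocode (the predicate
names are descriptive stand-ins, not V8 API, and reading EnsureNotWhite as
"jump if the value still needs marking" is an assumption from its use here):

  if (!IsBlack(object)) return;                   // marker not yet interested
  value = *address;
  if (mode == INCREMENTAL_COMPACTION &&
      PageOf(value).IsEvacuationCandidate() &&
      !PageOf(object).SkipsEvacuationSlotRecording()) {
    goto need_incremental;                        // the slot must be recorded
  }
  if (IsWhite(value)) goto need_incremental;      // marker must visit value
  return;                                         // nothing to inform
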
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : element value to store
+  //  -- ebx    : array literal
+  //  -- edi    : map of array literal
+  //  -- ecx    : element index as smi
+  //  -- edx    : array literal index in function
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  Label element_done;
+  Label double_elements;
+  Label smi_element;
+  Label slow_elements;
+  Label slow_elements_from_double;
+  Label fast_elements;
+
+  __ CheckFastElements(edi, &double_elements);
+
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+  __ JumpIfSmi(eax, &smi_element);
+  __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
+
+  // Storing into the array literal requires an elements transition.  Call
+  // into the runtime.
+
+  __ bind(&slow_elements);
+  __ pop(edi);  // Pop return address and remember to put back later for tail
+                // call.
+  __ push(ebx);
+  __ push(ecx);
+  __ push(eax);
+  __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+  __ push(edx);
+  __ push(edi);  // Restore the return address so that the tail call returns
+                 // to the right place.
+  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+  __ bind(&slow_elements_from_double);
+  __ pop(edx);
+  __ jmp(&slow_elements);
+
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+  __ bind(&fast_elements);
+  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+  __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
+                           FixedArrayBase::kHeaderSize));
+  __ mov(Operand(ecx, 0), eax);
+  // Update the write barrier for the array store.
+  __ RecordWrite(ebx, ecx, eax,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ ret(0);
+
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
+  __ bind(&smi_element);
+  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+  __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
+                      FixedArrayBase::kHeaderSize), eax);
+  __ ret(0);
+
+  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+  __ bind(&double_elements);
+
+  __ push(edx);
+  __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
+  __ StoreNumberToDoubleElements(eax,
+                                 edx,
+                                 ecx,
+                                 edi,
+                                 xmm0,
+                                 &slow_elements_from_double,
+                                 false);
+  __ pop(edx);
+  __ ret(0);
+}
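
In outline, the stub above dispatches on the array literal's elements kind
(pseudocode summary of the labels):

  if (kind == FAST_DOUBLE_ELEMENTS)   StoreNumberToDoubleElements(value);
  else if (value is a smi)            elements[index] = value;             // no barrier
  else if (kind == FAST_ELEMENTS)     elements[index] = value; RecordWrite();
  else                                Runtime::kStoreArrayLiteralElement;  // transition
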
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index fa255da..803a711 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,6 +49,8 @@
                           ArgumentType argument_type)
       : type_(type), argument_type_(argument_type) {}
   void Generate(MacroAssembler* masm);
+  static void GenerateOperation(MacroAssembler* masm,
+                                TranscendentalCache::Type type);
  private:
   TranscendentalCache::Type type_;
   ArgumentType argument_type_;
@@ -56,7 +58,25 @@
   Major MajorKey() { return TranscendentalCache; }
   int MinorKey() { return type_ | argument_type_; }
   Runtime::FunctionId RuntimeFunction();
-  void GenerateOperation(MacroAssembler* masm);
+};
+
+
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+      : save_doubles_(save_fp) { }
+
+  void Generate(MacroAssembler* masm);
+
+  virtual bool IsPregenerated() { return true; }
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  SaveFPRegsMode save_doubles_;
+
+  Major MajorKey() { return StoreBufferOverflow; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
 };
 
 
@@ -128,7 +148,7 @@
     return UnaryOpIC::ToState(operand_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_unary_op_type(operand_type_);
   }
 };
@@ -215,7 +235,7 @@
     return BinaryOpIC::ToState(operands_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_binary_op_type(operands_type_);
     code->set_binary_op_result_type(result_type_);
   }
@@ -402,13 +422,12 @@
 
   void Generate(MacroAssembler* masm);
 
-  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register properties,
-      String* name,
-      Register r0);
+  static void GenerateNegativeLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register properties,
+                                     Handle<String> name,
+                                     Register r0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -418,6 +437,8 @@
                                      Register r0,
                                      Register r1);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -430,7 +451,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
+  Major MajorKey() { return StringDictionaryLookup; }
 
   int MinorKey() {
     return DictionaryBits::encode(dictionary_.code()) |
@@ -451,6 +472,265 @@
 };
 
 
+class RecordWriteStub: public CodeStub {
+ public:
+  RecordWriteStub(Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : object_(object),
+        value_(value),
+        address_(address),
+        remembered_set_action_(remembered_set_action),
+        save_fp_regs_mode_(fp_mode),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+  }
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
+  static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
+
+  static const byte kFiveByteNopInstruction = 0x3d;  // Cmpl eax, #imm32.
+  static const byte kFiveByteJumpInstruction = 0xe9;  // Jmp #imm32.
+
+  static Mode GetMode(Code* stub) {
+    byte first_instruction = stub->instruction_start()[0];
+    byte second_instruction = stub->instruction_start()[2];
+
+    if (first_instruction == kTwoByteJumpInstruction) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(first_instruction == kTwoByteNopInstruction);
+
+    if (second_instruction == kFiveByteJumpInstruction) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(second_instruction == kFiveByteNopInstruction);
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteNopInstruction;
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteJumpInstruction;
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteJumpInstruction;
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 7);
+  }
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers, where the third
+  // is always ecx (needed for shift operations).  The input is two registers
+  // that must be preserved and one scratch register provided by the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_orig_(object),
+          address_orig_(address),
+          scratch0_orig_(scratch0),
+          object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
+      if (scratch0.is(ecx)) {
+        scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
+      }
+      if (object.is(ecx)) {
+        object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
+      }
+      if (address.is(ecx)) {
+        address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
+      }
+      ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!address_orig_.is(object_));
+      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+      // We don't have to save scratch0_orig_ because it was given to us as
+      // a scratch register.  But if we had to switch to a different reg then
+      // we should save the new scratch0_.
+      if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+      if (!ecx.is(scratch0_orig_) &&
+          !ecx.is(object_orig_) &&
+          !ecx.is(address_orig_)) {
+        masm->push(ecx);
+      }
+      masm->push(scratch1_);
+      if (!address_.is(address_orig_)) {
+        masm->push(address_);
+        masm->mov(address_, address_orig_);
+      }
+      if (!object_.is(object_orig_)) {
+        masm->push(object_);
+        masm->mov(object_, object_orig_);
+      }
+    }
+
+    void Restore(MacroAssembler* masm) {
+      // These will have been preserved the entire time, so we just need to move
+      // them back.  Only in one case is the orig_ reg different from the plain
+      // one, since only one of them can alias with ecx.
+      if (!object_.is(object_orig_)) {
+        masm->mov(object_orig_, object_);
+        masm->pop(object_);
+      }
+      if (!address_.is(address_orig_)) {
+        masm->mov(address_orig_, address_);
+        masm->pop(address_);
+      }
+      masm->pop(scratch1_);
+      if (!ecx.is(scratch0_orig_) &&
+          !ecx.is(object_orig_) &&
+          !ecx.is(address_orig_)) {
+        masm->pop(ecx);
+      }
+      if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.  The caller saved
+    // registers are eax, ecx and edx.  The three scratch registers (incl. ecx)
+    // will be restored by other means so we don't bother pushing them here.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
+      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(SSE2);
+        masm->sub(esp,
+                  Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+        // Save all XMM registers except XMM0.
+        for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
+          XMMRegister reg = XMMRegister::from_code(i);
+          masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
+        }
+      }
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(SSE2);
+        // Restore all XMM registers except XMM0.
+        for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
+          XMMRegister reg = XMMRegister::from_code(i);
+          masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
+        }
+        masm->add(esp,
+                  Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+      }
+      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
+      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_orig_;
+    Register address_orig_;
+    Register scratch0_orig_;
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+    // Third scratch register is always ecx.
+
+    Register GetRegThatIsNotEcxOr(Register r1,
+                                  Register r2,
+                                  Register r3) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+        Register candidate = Register::FromAllocationIndex(i);
+        if (candidate.is(ecx)) continue;
+        if (candidate.is(r1)) continue;
+        if (candidate.is(r2)) continue;
+        if (candidate.is(r3)) continue;
+        return candidate;
+      }
+      UNREACHABLE();
+      return no_reg;
+    }
+    friend class RecordWriteStub;
+  };
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+        ValueBits::encode(value_.code()) |
+        AddressBits::encode(address_.code()) |
+        RememberedSetActionBits::encode(remembered_set_action_) |
+        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+  }
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  class ObjectBits: public BitField<int, 0, 3> {};
+  class ValueBits: public BitField<int, 3, 3> {};
+  class AddressBits: public BitField<int, 6, 3> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
+
+  Register object_;
+  Register value_;
+  Register address_;
+  RememberedSetAction remembered_set_action_;
+  SaveFPRegsMode save_fp_regs_mode_;
+  RegisterAllocation regs_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 3a657bd..ea61910 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,8 @@
 #if defined(V8_TARGET_ARCH_IA32)
 
 #include "codegen.h"
+#include "heap.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -39,17 +41,100 @@
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterInternalFrame();
+  masm->EnterFrame(StackFrame::INTERNAL);
+  ASSERT(!masm->has_frame());
+  masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveInternalFrame();
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  ASSERT(masm->has_frame());
+  masm->set_has_frame(false);
 }
 
 
 #define __ masm.
 
+
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
+  size_t actual_size;
+  // Allocate buffer in executable space.
+  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+                                                 &actual_size,
+                                                 true));
+  if (buffer == NULL) {
+    // Fallback to library function if function cannot be created.
+    switch (type) {
+      case TranscendentalCache::SIN: return &sin;
+      case TranscendentalCache::COS: return &cos;
+      case TranscendentalCache::TAN: return &tan;
+      case TranscendentalCache::LOG: return &log;
+      default: UNIMPLEMENTED();
+    }
+  }
+
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+  // esp[1 * kPointerSize]: raw double input
+  // esp[0 * kPointerSize]: return address
+  // Move double input into registers.
+
+  __ push(ebx);
+  __ push(edx);
+  __ push(edi);
+  __ fld_d(Operand(esp, 4 * kPointerSize));
+  __ mov(ebx, Operand(esp, 4 * kPointerSize));
+  __ mov(edx, Operand(esp, 5 * kPointerSize));
+  TranscendentalCacheStub::GenerateOperation(&masm, type);
+  // The return value is expected to be on ST(0) of the FPU stack.
+  __ pop(edi);
+  __ pop(edx);
+  __ pop(ebx);
+  __ Ret();
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  ASSERT(desc.reloc_size == 0);
+
+  CPU::FlushICache(buffer, actual_size);
+  OS::ProtectCode(buffer, actual_size);
+  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+  size_t actual_size;
+  // Allocate buffer in executable space.
+  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+                                                 &actual_size,
+                                                 true));
+  // If SSE2 is not available, we can use libc's implementation to ensure
+  // consistency since code by fullcodegen's calls into runtime in that case.
+  if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+  // esp[1 * kPointerSize]: raw double input
+  // esp[0 * kPointerSize]: return address
+  // Move double input into registers.
+  {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
+    __ sqrtsd(xmm0, xmm0);
+    __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
+    // Load result into floating point register as return value.
+    __ fld_d(Operand(esp, 1 * kPointerSize));
+    __ Ret();
+  }
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  ASSERT(desc.reloc_size == 0);
+
+  CPU::FlushICache(buffer, actual_size);
+  OS::ProtectCode(buffer, actual_size);
+  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
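
A hedged usage sketch of the two generators above (UnaryMathFunction is
assumed to be the double(*)(double) typedef from codegen.h):

  UnaryMathFunction fast_sin  = CreateTranscendentalFunction(TranscendentalCache::SIN);
  UnaryMathFunction fast_sqrt = CreateSqrtFunction();
  double a = fast_sin(1.0);   // generated code, or libc sin if allocation failed
  double b = fast_sqrt(2.0);  // libc sqrt when SSE2 is unavailable
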
+
+
 static void MemCopyWrapper(void* dest, const void* src, size_t size) {
   memcpy(dest, src, size);
 }
@@ -108,14 +193,14 @@
     __ mov(edx, dst);
     __ and_(edx, 0xF);
     __ neg(edx);
-    __ add(Operand(edx), Immediate(16));
-    __ add(dst, Operand(edx));
-    __ add(src, Operand(edx));
-    __ sub(Operand(count), edx);
+    __ add(edx, Immediate(16));
+    __ add(dst, edx);
+    __ add(src, edx);
+    __ sub(count, edx);
 
     // edi is now aligned. Check if esi is also aligned.
     Label unaligned_source;
-    __ test(Operand(src), Immediate(0x0F));
+    __ test(src, Immediate(0x0F));
     __ j(not_zero, &unaligned_source);
     {
       // Copy loop for aligned source and destination.
@@ -130,11 +215,11 @@
         __ prefetch(Operand(src, 0x20), 1);
         __ movdqa(xmm0, Operand(src, 0x00));
         __ movdqa(xmm1, Operand(src, 0x10));
-        __ add(Operand(src), Immediate(0x20));
+        __ add(src, Immediate(0x20));
 
         __ movdqa(Operand(dst, 0x00), xmm0);
         __ movdqa(Operand(dst, 0x10), xmm1);
-        __ add(Operand(dst), Immediate(0x20));
+        __ add(dst, Immediate(0x20));
 
         __ dec(loop_count);
         __ j(not_zero, &loop);
@@ -142,12 +227,12 @@
 
       // At most 31 bytes to copy.
       Label move_less_16;
-      __ test(Operand(count), Immediate(0x10));
+      __ test(count, Immediate(0x10));
       __ j(zero, &move_less_16);
       __ movdqa(xmm0, Operand(src, 0));
-      __ add(Operand(src), Immediate(0x10));
+      __ add(src, Immediate(0x10));
       __ movdqa(Operand(dst, 0), xmm0);
-      __ add(Operand(dst), Immediate(0x10));
+      __ add(dst, Immediate(0x10));
       __ bind(&move_less_16);
 
       // At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -176,11 +261,11 @@
         __ prefetch(Operand(src, 0x20), 1);
         __ movdqu(xmm0, Operand(src, 0x00));
         __ movdqu(xmm1, Operand(src, 0x10));
-        __ add(Operand(src), Immediate(0x20));
+        __ add(src, Immediate(0x20));
 
         __ movdqa(Operand(dst, 0x00), xmm0);
         __ movdqa(Operand(dst, 0x10), xmm1);
-        __ add(Operand(dst), Immediate(0x20));
+        __ add(dst, Immediate(0x20));
 
         __ dec(loop_count);
         __ j(not_zero, &loop);
@@ -188,12 +273,12 @@
 
       // At most 31 bytes to copy.
       Label move_less_16;
-      __ test(Operand(count), Immediate(0x10));
+      __ test(count, Immediate(0x10));
       __ j(zero, &move_less_16);
       __ movdqu(xmm0, Operand(src, 0));
-      __ add(Operand(src), Immediate(0x10));
+      __ add(src, Immediate(0x10));
       __ movdqa(Operand(dst, 0), xmm0);
-      __ add(Operand(dst), Immediate(0x10));
+      __ add(dst, Immediate(0x10));
       __ bind(&move_less_16);
 
       // At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -228,10 +313,10 @@
     __ mov(edx, dst);
     __ and_(edx, 0x03);
     __ neg(edx);
-    __ add(Operand(edx), Immediate(4));  // edx = 4 - (dst & 3)
-    __ add(dst, Operand(edx));
-    __ add(src, Operand(edx));
-    __ sub(Operand(count), edx);
+    __ add(edx, Immediate(4));  // edx = 4 - (dst & 3)
+    __ add(dst, edx);
+    __ add(src, edx);
+    __ sub(count, edx);
     // edi is now aligned, ecx holds number of remaining bytes to copy.
 
     __ mov(edx, count);
@@ -261,6 +346,401 @@
 
 #undef __
 
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ebx    : target map
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  // Set transitioned map.
+  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+  __ RecordWriteField(edx,
+                      HeapObject::kMapOffset,
+                      ebx,
+                      edi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ebx    : target map
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label loop, entry, convert_hole, gc_required, only_change_map;
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+  __ j(equal, &only_change_map);
+
+  __ push(eax);
+  __ push(ebx);
+
+  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
+
+  // Allocate new FixedDoubleArray.
+  // edx: receiver
+  // edi: length of source FixedArray (smi-tagged)
+  __ lea(esi, Operand(edi, times_4, FixedDoubleArray::kHeaderSize));
+  __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
+
+  // eax: destination FixedDoubleArray
+  // edi: number of elements
+  // edx: receiver
+  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
+  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
+  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
+  // Replace receiver's backing store with newly created FixedDoubleArray.
+  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
+  __ mov(ebx, eax);
+  __ RecordWriteField(edx,
+                      JSObject::kElementsOffset,
+                      ebx,
+                      edi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
+
+  // Prepare for conversion loop.
+  ExternalReference canonical_the_hole_nan_reference =
+      ExternalReference::address_of_the_hole_nan();
+  XMMRegister the_hole_nan = xmm1;
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(the_hole_nan,
+              Operand::StaticVariable(canonical_the_hole_nan_reference));
+  }
+  __ jmp(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  // Restore registers before jumping into runtime.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ pop(ebx);
+  __ pop(eax);
+  __ jmp(fail);
+
+  // Convert and copy elements
+  // esi: source FixedArray
+  __ bind(&loop);
+  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
+  // ebx: current element from source
+  // edi: index of current element
+  __ JumpIfNotSmi(ebx, &convert_hole);
+
+  // Normal smi, convert it to double and store.
+  __ SmiUntag(ebx);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope fscope(SSE2);
+    __ cvtsi2sd(xmm0, ebx);
+    __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+              xmm0);
+  } else {
+    __ push(ebx);
+    __ fild_s(Operand(esp, 0));
+    __ pop(ebx);
+    __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
+  }
+  __ jmp(&entry);
+
+  // Found hole, store hole_nan_as_double instead.
+  __ bind(&convert_hole);
+
+  if (FLAG_debug_code) {
+    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
+    __ Assert(equal, "object found in smi-only array");
+  }
+
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+              the_hole_nan);
+  } else {
+    __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
+    __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
+  }
+
+  __ bind(&entry);
+  __ sub(edi, Immediate(Smi::FromInt(1)));
+  __ j(not_sign, &loop);
+
+  __ pop(ebx);
+  __ pop(eax);
+
+  // Restore esi.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  __ bind(&only_change_map);
+  // eax: value
+  // ebx: target map
+  // Set transitioned map.
+  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+  __ RecordWriteField(edx,
+                      HeapObject::kMapOffset,
+                      ebx,
+                      edi,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
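
A self-contained C++ model of the conversion loop above; hole_sentinel and
hole_nan_bits stand in for the hole object and the canonical hole NaN that
the stub loads from address_of_the_hole_nan:

  #include <cstdint>
  #include <cstring>
  #include <vector>

  std::vector<uint64_t> ToDoubleBits(const std::vector<int32_t>& src,
                                     int32_t hole_sentinel,
                                     uint64_t hole_nan_bits) {
    std::vector<uint64_t> dst(src.size());
    for (size_t i = 0; i < src.size(); ++i) {
      if (src[i] == hole_sentinel) {
        dst[i] = hole_nan_bits;                  // a hole stays a hole
      } else {
        double d = static_cast<double>(src[i]);  // untagged smi -> double
        std::memcpy(&dst[i], &d, sizeof d);
      }
    }
    return dst;
  }
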
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ebx    : target map
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label loop, entry, convert_hole, gc_required, only_change_map, success;
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+  __ j(equal, &only_change_map);
+
+  __ push(eax);
+  __ push(edx);
+  __ push(ebx);
+
+  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
+
+  // Allocate new FixedArray.
+  // ebx: length of source FixedDoubleArray (smi-tagged)
+  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
+  __ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
+
+  // eax: destination FixedArray
+  // ebx: number of elements
+  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+         Immediate(masm->isolate()->factory()->fixed_array_map()));
+  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+
+  __ jmp(&entry);
+
+  // ebx: target map
+  // edx: receiver
+  // Set transitioned map.
+  __ bind(&only_change_map);
+  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+  __ RecordWriteField(edx,
+                      HeapObject::kMapOffset,
+                      ebx,
+                      edi,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&success);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ pop(ebx);
+  __ pop(edx);
+  __ pop(eax);
+  __ jmp(fail);
+
+  // Box doubles into heap numbers.
+  // edi: source FixedDoubleArray
+  // eax: destination FixedArray
+  __ bind(&loop);
+  // ebx: index of current element (smi-tagged)
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
+  __ j(equal, &convert_hole);
+
+  // Non-hole double, copy value into a heap number.
+  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
+  // edx: new heap number
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope fscope(SSE2);
+    __ movdbl(xmm0,
+              FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
+    __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
+  } else {
+    __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
+    __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
+    __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
+    __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
+  }
+  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
+  __ mov(esi, ebx);
+  __ RecordWriteArray(eax,
+                      edx,
+                      esi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&entry, Label::kNear);
+
+  // Replace the-hole NaN with the-hole pointer.
+  __ bind(&convert_hole);
+  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
+         masm->isolate()->factory()->the_hole_value());
+
+  __ bind(&entry);
+  __ sub(ebx, Immediate(Smi::FromInt(1)));
+  __ j(not_sign, &loop);
+
+  __ pop(ebx);
+  __ pop(edx);
+  // ebx: target map
+  // edx: receiver
+  // Set transitioned map.
+  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+  __ RecordWriteField(edx,
+                      HeapObject::kMapOffset,
+                      ebx,
+                      edi,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created and filled FixedArray.
+  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
+  __ RecordWriteField(edx,
+                      JSObject::kElementsOffset,
+                      eax,
+                      edi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Restore registers.
+  __ pop(eax);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  __ bind(&success);
+}
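
The hole test at the top of the loop above compares only the upper 32 bits of
each double against kHoleNanUpper32 (the operand offset skips
sizeof(kHoleNanLower32)).  In plain C++ the same test is:

  #include <cstdint>
  #include <cstring>

  bool IsHoleNan(double d, uint32_t hole_nan_upper32) {  // kHoleNanUpper32
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
  }
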
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+                                       Factory* factory,
+                                       Register string,
+                                       Register index,
+                                       Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ test(result, Immediate(kIsIndirectStringMask));
+  __ j(zero, &check_sequential, Label::kNear);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ test(result, Immediate(kSlicedNotConsMask));
+  __ j(zero, &cons_string, Label::kNear);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
+  __ SmiUntag(result);
+  __ add(index, result);
+  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded, Label::kNear);
+
+  // Handle cons strings.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
+         Immediate(factory->empty_string()));
+  __ j(not_equal, call_runtime);
+  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label seq_string;
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ test(result, Immediate(kStringRepresentationMask));
+  __ j(zero, &seq_string, Label::kNear);
+
+  // Handle external strings.
+  Label ascii_external, done;
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ test(result, Immediate(kIsIndirectStringMask));
+    __ Assert(zero, "external string expected, but not found");
+  }
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ test_b(result, kShortExternalStringMask);
+  __ j(not_zero, call_runtime);
+  // Check encoding.
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ test_b(result, kStringEncodingMask);
+  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+  __ j(not_equal, &ascii_external, Label::kNear);
+  // Two-byte string.
+  __ movzx_w(result, Operand(result, index, times_2, 0));
+  __ jmp(&done, Label::kNear);
+  __ bind(&ascii_external);
+  // Ascii string.
+  __ movzx_b(result, Operand(result, index, times_1, 0));
+  __ jmp(&done, Label::kNear);
+
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii;
+  __ bind(&seq_string);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ test(result, Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii, Label::kNear);
+
+  // Two-byte string.
+  // Load the two-byte character code into the result register.
+  __ movzx_w(result, FieldOperand(string,
+                                  index,
+                                  times_2,
+                                  SeqTwoByteString::kHeaderSize));
+  __ jmp(&done, Label::kNear);
+
+  // Ascii string.
+  // Load the byte into the result register.
+  __ bind(&ascii);
+  __ movzx_b(result, FieldOperand(string,
+                                  index,
+                                  times_1,
+                                  SeqAsciiString::kHeaderSize));
+  __ bind(&done);
+}
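
A compact model of the dispatch above, with a stand-in struct for the V8
string classes (the generated code derives the same facts from instance-type
bits):

  #include <cstdint>

  struct Str {
    enum Kind { SEQ_ONE, SEQ_TWO, EXT_ONE, EXT_TWO, CONS, SLICED } kind;
    const uint8_t* one;   // one-byte payload (sequential or external)
    const uint16_t* two;  // two-byte payload
    const Str* first;     // cons: left child (right side must be empty)
    const Str* parent;    // slice: underlying string
    int offset;           // slice: start offset into the parent
  };

  uint16_t LoadChar(const Str* s, int index) {
    // At most one level of indirection is unwrapped, as in the stub; deeper
    // nesting goes to the runtime to flatten the string first.
    if (s->kind == Str::SLICED)    { index += s->offset; s = s->parent; }
    else if (s->kind == Str::CONS) { s = s->first; }  // flat cons only
    switch (s->kind) {
      case Str::SEQ_ONE: case Str::EXT_ONE: return s->one[index];
      default:                              return s->two[index];
    }
  }
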
+
+#undef __
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index c85fa83..f4ab0b5 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -72,6 +72,22 @@
 };
 
 
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm,
+                       Factory* factory,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 57e66df..9eabb2a 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -41,7 +41,7 @@
 namespace v8 {
 namespace internal {
 
-void CPU::Setup() {
+void CPU::SetUp() {
   CpuFeatures::Probe();
 }
 
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 2389948..d13fa75 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -100,63 +100,64 @@
                                           RegList non_object_regs,
                                           bool convert_call_to_jmp) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Store the registers containing live values on the expression stack to
-  // make sure that these are correctly updated during GC. Non object values
-  // are stored as a smi causing it to be untouched by GC.
-  ASSERT((object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((object_regs & non_object_regs) == 0);
-  for (int i = 0; i < kNumJSCallerSaved; i++) {
-    int r = JSCallerSavedCode(i);
-    Register reg = { r };
-    if ((object_regs & (1 << r)) != 0) {
-      __ push(reg);
-    }
-    if ((non_object_regs & (1 << r)) != 0) {
-      if (FLAG_debug_code) {
-        __ test(reg, Immediate(0xc0000000));
-        __ Assert(zero, "Unable to encode value as smi");
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as smis, causing them to be untouched by the GC.
+    ASSERT((object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((object_regs & non_object_regs) == 0);
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((object_regs & (1 << r)) != 0) {
+        __ push(reg);
       }
-      __ SmiTag(reg);
-      __ push(reg);
+      if ((non_object_regs & (1 << r)) != 0) {
+        if (FLAG_debug_code) {
+          __ test(reg, Immediate(0xc0000000));
+          __ Assert(zero, "Unable to encode value as smi");
+        }
+        __ SmiTag(reg);
+        __ push(reg);
+      }
     }
-  }
 
 #ifdef DEBUG
-  __ RecordComment("// Calling from debug break to runtime - come in - over");
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ Set(eax, Immediate(0));  // No arguments.
-  __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
+    __ Set(eax, Immediate(0));  // No arguments.
+    __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
 
-  CEntryStub ceb(1);
-  __ CallStub(&ceb);
+    CEntryStub ceb(1);
+    __ CallStub(&ceb);
 
-  // Restore the register values containing object pointers from the expression
-  // stack.
-  for (int i = kNumJSCallerSaved; --i >= 0;) {
-    int r = JSCallerSavedCode(i);
-    Register reg = { r };
-    if (FLAG_debug_code) {
-      __ Set(reg, Immediate(kDebugZapValue));
+    // Restore the register values containing object pointers from the
+    // expression stack.
+    for (int i = kNumJSCallerSaved; --i >= 0;) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if (FLAG_debug_code) {
+        __ Set(reg, Immediate(kDebugZapValue));
+      }
+      if ((object_regs & (1 << r)) != 0) {
+        __ pop(reg);
+      }
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ pop(reg);
+        __ SmiUntag(reg);
+      }
     }
-    if ((object_regs & (1 << r)) != 0) {
-      __ pop(reg);
-    }
-    if ((non_object_regs & (1 << r)) != 0) {
-      __ pop(reg);
-      __ SmiUntag(reg);
-    }
+
+    // Get rid of the internal frame.
   }
 
-  // Get rid of the internal frame.
-  __ LeaveInternalFrame();
-
   // If this call did not replace a call but patched other code then there will
   // be an unwanted return address left on the stack. Here we get rid of that.
   if (convert_call_to_jmp) {
-    __ add(Operand(esp), Immediate(kPointerSize));
+    __ add(esp, Immediate(kPointerSize));
   }
 
   // Now that the break point has been handled, resume normal execution by
@@ -221,8 +222,36 @@
 }
 
 
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
   // Register state just before return from JS function (from codegen-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- eax: return value
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
+}
+
+
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+  // Register state for CallFunctionStub (from code-stubs-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- edi: function
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
+}
+
+
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+  // Register state for CallFunctionStub (from code-stubs-ia32.cc).
+  // ----------- S t a t e -------------
+  //  -- ebx: cache cell for call target
+  //  -- edi: function
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), 0, false);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+  // Register state for CallConstructStub (from code-stubs-ia32.cc).
   // eax is the actual number of arguments, not encoded as a smi; see the
   // comment above the IC call.
   // ----------- S t a t e -------------
@@ -234,21 +263,17 @@
 }
 
 
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
-  // Register state just before return from JS function (from codegen-ia32.cc).
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+  // Register state for CallConstructStub (from code-stubs-ia32.cc).
+  // eax is the actual number of arguments, not encoded as a smi; see the
+  // comment above the IC call.
   // ----------- S t a t e -------------
-  //  -- eax: return value
+  //  -- eax: number of arguments (not smi)
+  //  -- ebx: cache cell for call target
+  //  -- edi: constructor function
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
-}
-
-
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
-  // Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc).
-  // ----------- S t a t e -------------
-  //  No registers used on entry.
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, 0, 0, false);
+  // The number of arguments in eax is not smi encoded.
+  Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), eax.bit(), false);
 }
 
 
@@ -257,9 +282,7 @@
   Label check_codesize;
   __ bind(&check_codesize);
   __ RecordDebugBreakSlot();
-  for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
-    __ nop();
-  }
+  __ Nop(Assembler::kDebugBreakSlotLength);
   ASSERT_EQ(Assembler::kDebugBreakSlotLength,
             masm->SizeOfCodeGeneratedSince(&check_codesize));
 }
@@ -298,7 +321,7 @@
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
 
   // Re-run JSFunction, edi is function, esi is context.
-  __ jmp(Operand(edx));
+  __ jmp(edx);
 }
 
 const bool Debug::kFrameDropperSupported = true;
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 080ad64..92d7cc1 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -99,7 +99,7 @@
         new_reloc->GetDataStartAddress() + padding, 0);
     intptr_t comment_string
         = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
-    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string);
+    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
     for (int i = 0; i < additional_comments; ++i) {
 #ifdef DEBUG
       byte* pos_before = reloc_info_writer.pos();
@@ -156,7 +156,8 @@
     // We use RUNTIME_ENTRY for deoptimization bailouts.
     RelocInfo rinfo(call_address + 1,  // 1 after the call opcode.
                     RelocInfo::RUNTIME_ENTRY,
-                    reinterpret_cast<intptr_t>(deopt_entry));
+                    reinterpret_cast<intptr_t>(deopt_entry),
+                    NULL);
     reloc_info_writer.Write(&rinfo);
     ASSERT_GE(reloc_info_writer.pos(),
               reloc_info->address() + ByteArray::kHeaderSize);
@@ -188,6 +189,11 @@
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;
 
+  // We might be in the middle of incremental marking with compaction.
+  // Tell the collector to treat this code object specially and to ignore
+  // all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
 
@@ -199,12 +205,22 @@
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x13;
+static const byte kJaeInstruction = 0x73;
+static const byte kJaeOffset = 0x07;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+                                        Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
-  ASSERT(check_code->entry() ==
-         Assembler::target_address_at(call_target_address));
+  ASSERT_EQ(check_code->entry(),
+            Assembler::target_address_at(call_target_address));
   // The stack check code matches the pattern:
   //
   //     cmp esp, <limit>
@@ -221,31 +237,50 @@
   //     call <on-stack replacement>
   //     test eax, <loop nesting depth>
   // ok:
-  ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
-         *(call_target_address - 2) == 0x07 &&  // offset
-         *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x90;  // nop
-  *(call_target_address - 2) = 0x90;  // nop
+
+  if (FLAG_count_based_interrupts) {
+    ASSERT_EQ(*(call_target_address - 3), kJnsInstruction);
+    ASSERT_EQ(*(call_target_address - 2), kJnsOffset);
+  } else {
+    ASSERT_EQ(*(call_target_address - 3), kJaeInstruction);
+    ASSERT_EQ(*(call_target_address - 2), kJaeOffset);
+  }
+  ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
+  *(call_target_address - 3) = kNopByteOne;
+  *(call_target_address - 2) = kNopByteTwo;
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_target_address, replacement_code);
 }
 
 
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+                                         Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
-  ASSERT(replacement_code->entry() ==
-         Assembler::target_address_at(call_target_address));
+  ASSERT_EQ(replacement_code->entry(),
+            Assembler::target_address_at(call_target_address));
+
   // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
   // restore the conditional branch.
-  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
-         *(call_target_address - 2) == 0x90 &&  // nop
-         *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x73;  // jae
-  *(call_target_address - 2) = 0x07;  // offset
+  ASSERT_EQ(*(call_target_address - 3), kNopByteOne);
+  ASSERT_EQ(*(call_target_address - 2), kNopByteTwo);
+  ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
+  if (FLAG_count_based_interrupts) {
+    *(call_target_address - 3) = kJnsInstruction;
+    *(call_target_address - 2) = kJnsOffset;
+  } else {
+    *(call_target_address - 3) = kJaeInstruction;
+    *(call_target_address - 2) = kJaeOffset;
+  }
   Assembler::set_target_address_at(call_target_address,
                                    check_code->entry());
+
+  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_target_address, check_code);
 }
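
    For reference, the pair of functions above toggles unoptimized code
    between its normal guard and an unconditional call by rewriting the two
    bytes ahead of an existing call instruction. A minimal sketch using the
    byte constants defined above, ignoring the call-target rewrite and the
    incremental-marking notification; 'addr' plays the role of
    call_target_address (pc_after - kIntSize):

    #include <cassert>
    #include <stdint.h>

    // Byte values mirroring the constants introduced above.
    static const uint8_t kJns = 0x79, kJnsOff = 0x13;  // jns ok
    static const uint8_t kJae = 0x73, kJaeOff = 0x07;  // jae ok
    static const uint8_t kCall = 0xE8;
    static const uint8_t kNop1 = 0x66, kNop2 = 0x90;   // two-byte nop

    // Overwrite the conditional jump so the replacement call is always taken.
    void PatchGuard(uint8_t* addr, bool count_based) {
      assert(addr[-3] == (count_based ? kJns : kJae));
      assert(addr[-2] == (count_based ? kJnsOff : kJaeOff));
      assert(addr[-1] == kCall);
      addr[-3] = kNop1;
      addr[-2] = kNop2;
    }

    // Restore the conditional jump that skips the call in the common case.
    void RevertGuard(uint8_t* addr, bool count_based) {
      assert(addr[-3] == kNop1 && addr[-2] == kNop2 && addr[-1] == kCall);
      addr[-3] = count_based ? kJns : kJae;
      addr[-2] = count_based ? kJnsOff : kJaeOff;
    }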
 
 
@@ -285,12 +320,13 @@
   ASSERT(Translation::BEGIN == opcode);
   USE(opcode);
   int count = iterator.Next();
+  iterator.Next();  // Drop JS frames count.
   ASSERT(count == 1);
   USE(count);
 
   opcode = static_cast<Translation::Opcode>(iterator.Next());
   USE(opcode);
-  ASSERT(Translation::FRAME == opcode);
+  ASSERT(Translation::JS_FRAME == opcode);
   unsigned node_id = iterator.Next();
   USE(node_id);
   ASSERT(node_id == ast_id);
@@ -326,9 +362,7 @@
   output_ = new FrameDescription*[1];
   output_[0] = new(output_frame_size) FrameDescription(
       output_frame_size, function_);
-#ifdef DEBUG
-  output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif
+  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
 
   // Clear the incoming parameters in the optimized frame to avoid
   // confusing the garbage collector.
@@ -392,7 +426,7 @@
     output_[0] = input_;
     output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
   } else {
-    // Setup the frame pointer and the context pointer.
+    // Set up the frame pointer and the context pointer.
     output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
     output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
 
@@ -416,13 +450,208 @@
 }
 
 
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
-                                 int frame_index) {
-  // Read the ast node id, function, and frame height for this output frame.
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(iterator->Next());
-  USE(opcode);
-  ASSERT(Translation::FRAME == opcode);
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+                                                 int frame_index) {
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
+  }
+
+  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+  // Arguments adaptor frames cannot be topmost or bottommost.
+  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address of the frame is computed from the previous
+  // frame's top and this frame's size.
+  uint32_t top_address;
+  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = height;
+  unsigned output_offset = output_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+
+  // Read caller's PC from the previous frame.
+  output_offset -= kPointerSize;
+  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+  output_frame->SetFrameSlot(output_offset, callers_pc);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, callers_pc);
+  }
+
+  // Read caller's FP from the previous frame, and set this frame's FP.
+  output_offset -= kPointerSize;
+  intptr_t value = output_[frame_index - 1]->GetFp();
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  output_frame->SetFp(fp_value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // A marker value is used in place of the context.
+  output_offset -= kPointerSize;
+  intptr_t context = reinterpret_cast<intptr_t>(
+      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  output_frame->SetFrameSlot(output_offset, context);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
+           top_address + output_offset, output_offset, context);
+  }
+
+  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(function);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Number of incoming arguments.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+           top_address + output_offset, output_offset, value, height - 1);
+  }
+
+  ASSERT(0 == output_offset);
+
+  Builtins* builtins = isolate_->builtins();
+  Code* adaptor_trampoline =
+      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+  uint32_t pc = reinterpret_cast<uint32_t>(
+      adaptor_trampoline->instruction_start() +
+      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+  output_frame->SetPc(pc);
+}
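
    The function above materializes an arguments-adaptor frame by writing
    slots downward from the frame size until the offset reaches zero. A
    host-side sketch of the resulting layout, assuming ia32's 4-byte
    pointers and using printf traces in place of real frame writes:

    #include <stdio.h>

    // Trace the slot layout DoComputeArgumentsAdaptorFrame produces.
    // Sketch only; values are labels rather than real frame contents.
    void SketchAdaptorFrame(unsigned height) {
      const unsigned kPointerSize = 4;               // ia32
      const unsigned kFrameSize = 5 * kPointerSize;  // fixed part
      unsigned offset = height * kPointerSize + kFrameSize;
      for (unsigned i = 0; i < height; ++i) {
        offset -= kPointerSize;
        printf("[top + %u] <- parameter %u\n", offset, i);
      }
      static const char* const kFixed[] = {
          "caller's pc", "caller's fp", "ARGUMENTS_ADAPTOR sentinel",
          "function", "argc (height - 1)"};
      for (unsigned i = 0; i < 5; ++i) {
        offset -= kPointerSize;
        printf("[top + %u] <- %s\n", offset, kFixed[i]);
      }
      // offset is now 0, matching ASSERT(0 == output_offset) above.
    }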
+
+
+void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
+                                              int frame_index) {
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating construct stub => height=%d\n", height_in_bytes);
+  }
+
+  unsigned fixed_frame_size = 6 * kPointerSize;
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+  output_frame->SetFrameType(StackFrame::CONSTRUCT);
+
+  // Construct stub frames cannot be topmost or bottommost.
+  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address of the frame is computed from the previous
+  // frame's top and this frame's size.
+  uint32_t top_address;
+  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = height;
+  unsigned output_offset = output_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+
+  // Read caller's PC from the previous frame.
+  output_offset -= kPointerSize;
+  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+  output_frame->SetFrameSlot(output_offset, callers_pc);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, callers_pc);
+  }
+
+  // Read caller's FP from the previous frame, and set this frame's FP.
+  output_offset -= kPointerSize;
+  intptr_t value = output_[frame_index - 1]->GetFp();
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  output_frame->SetFp(fp_value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // The context can be read from the previous frame.
+  output_offset -= kPointerSize;
+  value = output_[frame_index - 1]->GetContext();
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // A marker value is used in place of the function.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Number of incoming arguments.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+           top_address + output_offset, output_offset, value, height - 1);
+  }
+
+  // The newly allocated object was passed as receiver in the artificial
+  // constructor stub environment created by HEnvironment::CopyForInlining().
+  output_offset -= kPointerSize;
+  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  ASSERT(0 == output_offset);
+
+  Builtins* builtins = isolate_->builtins();
+  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
+  uint32_t pc = reinterpret_cast<uint32_t>(
+      construct_stub->instruction_start() +
+      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
+  output_frame->SetPc(pc);
+}
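
    For orientation, the fixed_frame_size of 6 * kPointerSize above
    corresponds to the six slots written after the parameters: the caller's
    pc, the caller's fp, the context, the CONSTRUCT sentinel standing in
    for the function, the argument count, and the allocated receiver.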
+
+
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+                                   int frame_index) {
   int node_id = iterator->Next();
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
@@ -442,9 +671,7 @@
   // Allocate and store the output frame description.
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, function);
-#ifdef DEBUG
-  output_frame->SetKind(Code::FUNCTION);
-#endif
+  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
 
   bool is_bottommost = (0 == frame_index);
   bool is_topmost = (output_count_ - 1 == frame_index);
@@ -530,6 +757,7 @@
     value = reinterpret_cast<uint32_t>(function->context());
   }
   output_frame->SetFrameSlot(output_offset, value);
+  output_frame->SetContext(value);
   if (is_topmost) output_frame->SetRegister(esi.code(), value);
   if (FLAG_trace_deopt) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
@@ -616,7 +844,7 @@
 
   const int kDoubleRegsSize = kDoubleSize *
                               XMMRegister::kNumAllocatableRegisters;
-  __ sub(Operand(esp), Immediate(kDoubleRegsSize));
+  __ sub(esp, Immediate(kDoubleRegsSize));
   for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
     XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
     int offset = i * kDoubleSize;
@@ -640,7 +868,7 @@
     __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
     __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
   }
-  __ sub(edx, Operand(ebp));
+  __ sub(edx, ebp);
   __ neg(edx);
 
   // Allocate a new deoptimizer object.
@@ -653,7 +881,10 @@
   __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
   __ mov(Operand(esp, 5 * kPointerSize),
          Immediate(ExternalReference::isolate_address()));
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  }
 
   // Preserve deoptimizer object in register eax and get the input
   // frame descriptor pointer.
@@ -676,15 +907,15 @@
 
   // Remove the bailout id and the double registers from the stack.
   if (type() == EAGER) {
-    __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
+    __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
   } else {
-    __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
+    __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
   }
 
   // Compute a pointer to the unwinding limit in register ecx; that is
   // the first stack slot not part of the input frame.
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
-  __ add(ecx, Operand(esp));
+  __ add(ecx, esp);
 
   // Unwind the stack down to - but not including - the unwinding
   // limit and copy the contents of the activation frame to the input
@@ -693,16 +924,19 @@
   Label pop_loop;
   __ bind(&pop_loop);
   __ pop(Operand(edx, 0));
-  __ add(Operand(edx), Immediate(sizeof(uint32_t)));
-  __ cmp(ecx, Operand(esp));
+  __ add(edx, Immediate(sizeof(uint32_t)));
+  __ cmp(ecx, esp);
   __ j(not_equal, &pop_loop);
 
   // Compute the output frame in the deoptimizer.
   __ push(eax);
   __ PrepareCallCFunction(1, ebx);
   __ mov(Operand(esp, 0 * kPointerSize), eax);
-  __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 1);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate), 1);
+  }
   __ pop(eax);
 
   // Replace the current frame with the output frames.
@@ -717,12 +951,12 @@
   __ mov(ebx, Operand(eax, 0));
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
   __ bind(&inner_push_loop);
-  __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
+  __ sub(ecx, Immediate(sizeof(uint32_t)));
   __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(not_zero, &inner_push_loop);
-  __ add(Operand(eax), Immediate(kPointerSize));
-  __ cmp(eax, Operand(edx));
+  __ add(eax, Immediate(kPointerSize));
+  __ cmp(eax, edx);
   __ j(below, &outer_push_loop);
 
   // In case of OSR, we have to restore the XMM registers.
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index a936277..b5ddcca 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -55,6 +55,7 @@
 
 
 static const ByteMnemonic two_operands_instr[] = {
+  {0x01, "add", OPER_REG_OP_ORDER},
   {0x03, "add", REG_OPER_OP_ORDER},
   {0x09, "or", OPER_REG_OP_ORDER},
   {0x0B, "or", REG_OPER_OP_ORDER},
@@ -117,6 +118,19 @@
 };
 
 
+// Generally we don't want to generate these because they are subject to
+// partial register stalls.  They are included for completeness, and because
+// the cmp variant is used by the RecordWrite stub.  Since cmp does not
+// update the register, it is not subject to partial register stalls.
+static const ByteMnemonic byte_immediate_instr[] = {
+  {0x0c, "or", UNSET_OP_ORDER},
+  {0x24, "and", UNSET_OP_ORDER},
+  {0x34, "xor", UNSET_OP_ORDER},
+  {0x3c, "cmp", UNSET_OP_ORDER},
+  {-1, "", UNSET_OP_ORDER}
+};
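
    A worked example of the new BYTE_IMMEDIATE_INSTR entries, assuming the
    decode case added further down in this file: the byte pair 0x3C 0x2A
    prints as "cmp al, 0x2a"; the table supplies the mnemonic, data[1]
    supplies the immediate, and the instruction consumes two bytes
    (data += 2).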
+
+
 static const char* const jump_conditional_mnem[] = {
   /*0*/ "jo", "jno", "jc", "jnc",
   /*4*/ "jz", "jnz", "jna", "ja",
@@ -149,7 +163,8 @@
   REGISTER_INSTR,
   MOVE_REG_INSTR,
   CALL_JUMP_INSTR,
-  SHORT_IMMEDIATE_INSTR
+  SHORT_IMMEDIATE_INSTR,
+  BYTE_IMMEDIATE_INSTR
 };
 
 
@@ -164,6 +179,10 @@
  public:
   InstructionTable();
   const InstructionDesc& Get(byte x) const { return instructions_[x]; }
+  static InstructionTable* get_instance() {
+    static InstructionTable table;
+    return &table;
+  }
 
  private:
   InstructionDesc instructions_[256];
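
    get_instance() replaces the file-scope static InstructionTable removed
    below, deferring construction to first use instead of program startup.
    A minimal sketch of the pattern with a hypothetical payload type; note
    that before C++11 the function-local static is only safe if the first
    call cannot race:

    // Lazily constructed singleton, as in get_instance() above.
    class Table {
     public:
      static Table* get_instance() {
        static Table table;  // constructed once, on first use
        return &table;
      }
     private:
      Table() { /* one-time setup, e.g. the CopyTable calls */ }
    };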
@@ -198,6 +217,7 @@
   CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
   CopyTable(call_jump_instr, CALL_JUMP_INSTR);
   CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
+  CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
   AddJumpConditionalShort();
   SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
   SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
@@ -243,15 +263,13 @@
 }
 
 
-static InstructionTable instruction_table;
-
-
 // The IA32 disassembler implementation.
 class DisassemblerIA32 {
  public:
   DisassemblerIA32(const NameConverter& converter,
                    bool abort_on_unimplemented = true)
       : converter_(converter),
+        instruction_table_(InstructionTable::get_instance()),
         tmp_buffer_pos_(0),
         abort_on_unimplemented_(abort_on_unimplemented) {
     tmp_buffer_[0] = '\0';
@@ -265,11 +283,11 @@
 
  private:
   const NameConverter& converter_;
+  InstructionTable* instruction_table_;
   v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
   unsigned int tmp_buffer_pos_;
   bool abort_on_unimplemented_;
 
-
   enum {
     eax = 0,
     ecx = 1,
@@ -745,10 +763,13 @@
             case 0xEB: mnem = "fldpi"; break;
             case 0xED: mnem = "fldln2"; break;
             case 0xEE: mnem = "fldz"; break;
+            case 0xF0: mnem = "f2xm1"; break;
             case 0xF1: mnem = "fyl2x"; break;
             case 0xF5: mnem = "fprem1"; break;
             case 0xF7: mnem = "fincstp"; break;
             case 0xF8: mnem = "fprem"; break;
+            case 0xFC: mnem = "frndint"; break;
+            case 0xFD: mnem = "fscale"; break;
             case 0xFE: mnem = "fsin"; break;
             case 0xFF: mnem = "fcos"; break;
             default: UnimplementedInstruction();
@@ -770,6 +791,8 @@
         has_register = true;
       } else if (modrm_byte  == 0xE2) {
         mnem = "fclex";
+      } else if (modrm_byte == 0xE3) {
+        mnem = "fninit";
       } else {
         UnimplementedInstruction();
       }
@@ -868,7 +891,7 @@
   }
   bool processed = true;  // Will be set to false if the current instruction
                           // is not in 'instructions' table.
-  const InstructionDesc& idesc = instruction_table.Get(*data);
+  const InstructionDesc& idesc = instruction_table_->Get(*data);
   switch (idesc.type) {
     case ZERO_OPERANDS_INSTR:
       AppendToBuffer(idesc.mnem);
@@ -912,6 +935,12 @@
       break;
     }
 
+    case BYTE_IMMEDIATE_INSTR: {
+      AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
+      data += 2;
+      break;
+    }
+
     case NO_INSTR:
       processed = false;
       break;
@@ -963,7 +992,7 @@
         break;
 
       case 0x0F:
-        { byte f0byte = *(data+1);
+        { byte f0byte = data[1];
           const char* f0mnem = F0Mnem(f0byte);
           if (f0byte == 0x18) {
             int mod, regop, rm;
@@ -971,6 +1000,25 @@
             const char* suffix[] = {"nta", "1", "2", "3"};
             AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
             data += PrintRightOperand(data);
+          } else if (f0byte == 0x1F && data[2] == 0) {
+            AppendToBuffer("nop");  // 3 byte nop.
+            data += 3;
+          } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
+            AppendToBuffer("nop");  // 4 byte nop.
+            data += 4;
+          } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
+                     data[4] == 0) {
+            AppendToBuffer("nop");  // 5 byte nop.
+            data += 5;
+          } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
+                     data[4] == 0 && data[5] == 0 && data[6] == 0) {
+            AppendToBuffer("nop");  // 7 byte nop.
+            data += 7;
+          } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
+                     data[4] == 0 && data[5] == 0 && data[6] == 0 &&
+                     data[7] == 0) {
+            AppendToBuffer("nop");  // 8 byte nop.
+            data += 8;
           } else if (f0byte == 0xA2 || f0byte == 0x31) {
             AppendToBuffer("%s", f0mnem);
             data += 2;
@@ -1106,8 +1154,12 @@
         break;
 
       case 0x66:  // prefix
-        data++;
-        if (*data == 0x8B) {
+        while (*data == 0x66) data++;
+        if (*data == 0xf && data[1] == 0x1f) {
+          AppendToBuffer("nop");  // 0x66 prefix
+        } else if (*data == 0x90) {
+          AppendToBuffer("nop");  // 0x66 prefix
+        } else if (*data == 0x8B) {
           data++;
           data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
         } else if (*data == 0x89) {
@@ -1161,6 +1213,16 @@
                              NameOfXMMRegister(rm),
                              static_cast<int>(imm8));
               data += 2;
+            } else if (*data == 0x17) {
+              data++;
+              int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+              int8_t imm8 = static_cast<int8_t>(data[1]);
+              AppendToBuffer("extractps %s,%s,%d",
+                             NameOfCPURegister(regop),
+                             NameOfXMMRegister(rm),
+                             static_cast<int>(imm8));
+              data += 2;
             } else if (*data == 0x22) {
               data++;
               int mod, regop, rm;
@@ -1234,6 +1296,9 @@
                            NameOfXMMRegister(rm),
                            static_cast<int>(imm8));
             data += 2;
+          } else if (*data == 0x90) {
+            data++;
+            AppendToBuffer("nop");  // 2 byte nop.
           } else if (*data == 0xF3) {
             data++;
             int mod, regop, rm;
@@ -1346,11 +1411,6 @@
         data += 2;
         break;
 
-      case 0x2C:
-        AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
-        data += 2;
-        break;
-
       case 0xA9:
         AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
         data += 5;
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 2f1b2a9..9e51857 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,37 +34,37 @@
 
 // Register lists
 // Note that the bit values must match those used in actual instruction encoding
-static const int kNumRegs = 8;
+const int kNumRegs = 8;
 
 
 // Caller-saved registers
-static const RegList kJSCallerSaved =
+const RegList kJSCallerSaved =
   1 << 0 |  // eax
   1 << 1 |  // ecx
   1 << 2 |  // edx
   1 << 3 |  // ebx - used as a caller-saved register in JavaScript code
   1 << 7;   // edi - callee function
 
-static const int kNumJSCallerSaved = 5;
+const int kNumJSCallerSaved = 5;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
 
 // Number of registers for which space is reserved in safepoints.
-static const int kNumSafepointRegisters = 8;
+const int kNumSafepointRegisters = 8;
 
 // ----------------------------------------------------
 
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset    = 0 * kPointerSize;
-  static const int kContextOffset = 1 * kPointerSize;
-  static const int kFPOffset      = 2 * kPointerSize;
-  static const int kStateOffset   = 3 * kPointerSize;
-  static const int kPCOffset      = 4 * kPointerSize;
+  static const int kNextOffset     = 0 * kPointerSize;
+  static const int kCodeOffset     = 1 * kPointerSize;
+  static const int kStateOffset    = 2 * kPointerSize;
+  static const int kContextOffset  = 3 * kPointerSize;
+  static const int kFPOffset       = 4 * kPointerSize;
 
-  static const int kSize = kPCOffset + kPointerSize;
+  static const int kSize = kFPOffset + kPointerSize;
 };
 
 
@@ -95,9 +95,11 @@
 
 class StandardFrameConstants : public AllStatic {
  public:
+  // The fixed part of the frame consists of the return address, caller fp,
+  // context, and function.
   // StandardFrame::IterateExpressions assumes that kContextOffset is the last
   // object pointer.
-  static const int kFixedFrameSize    =  4;  // Currently unused.
+  static const int kFixedFrameSize    =  4 * kPointerSize;
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
@@ -123,6 +125,8 @@
 class ArgumentsAdaptorFrameConstants : public AllStatic {
  public:
   static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+  static const int kFrameSize =
+      StandardFrameConstants::kFixedFrameSize + kPointerSize;
 };
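
    Concretely, with ia32's 4-byte kPointerSize the fixed part above
    (return address, caller fp, context, function) is 4 * 4 = 16 bytes, so
    the adaptor frame's kFrameSize of kFixedFrameSize + kPointerSize works
    out to 20 bytes; the extra slot holds the argument count.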
 
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index ca6ce6e..62a2c2a 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,6 +34,7 @@
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
+#include "isolate-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "stub-cache.h"
@@ -44,11 +45,6 @@
 #define __ ACCESS_MASM(masm_)
 
 
-static unsigned GetPropertyId(Property* property) {
-  return property->id();
-}
-
-
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -105,23 +101,32 @@
 };
 
 
+// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
+int FullCodeGenerator::self_optimization_header_size() {
+  UNREACHABLE();
+  return 13;
+}
+
+
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right, with the
 // return address on top of them.  The actual argument count matches the
 // formal parameter count expected by the function.
 //
 // The live registers are:
-//   o edi: the JS function object being called (ie, ourselves)
+//   o edi: the JS function object being called (i.e. ourselves)
 //   o esi: our context
 //   o ebp: our caller's frame pointer
 //   o esp: stack pointer (pointing to return address)
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-ia32.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
-  ASSERT(info_ == NULL);
-  info_ = info;
-  scope_ = info->scope();
+void FullCodeGenerator::Generate() {
+  CompilationInfo* info = info_;
+  handler_table_ =
+      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+  profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -136,17 +141,26 @@
   // with undefined when called as functions (without an explicit
   // receiver object). ecx is zero for method calls and non-zero for
   // function calls.
-  if (info->is_strict_mode() || info->is_native()) {
+  if (!info->is_classic_mode() || info->is_native()) {
     Label ok;
-    __ test(ecx, Operand(ecx));
+    __ test(ecx, ecx);
     __ j(zero, &ok, Label::kNear);
     // +1 for return address.
     int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
+    __ mov(ecx, Operand(esp, receiver_offset));
+    __ JumpIfSmi(ecx, &ok);
+    __ CmpObjectType(ecx, JS_GLOBAL_PROXY_TYPE, ecx);
+    __ j(not_equal, &ok, Label::kNear);
     __ mov(Operand(esp, receiver_offset),
            Immediate(isolate()->factory()->undefined_value()));
     __ bind(&ok);
   }
 
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   __ push(ebp);  // Caller's frame pointer.
   __ mov(ebp, esp);
   __ push(esi);  // Callee's context.
@@ -164,11 +178,6 @@
     }
   }
 
-  set_stack_height(2 + scope()->num_stack_slots());
-  if (FLAG_verify_stack_height) {
-    verify_stack_height();
-  }
-
   bool function_in_register = true;
 
   // Possibly allocate a local context.
@@ -200,11 +209,12 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ mov(Operand(esi, context_offset), eax);
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have use a third register to avoid
-        // clobbering esi.
-        __ mov(ecx, esi);
-        __ RecordWrite(ecx, context_offset, eax, ebx);
+        // Update the write barrier. This clobbers eax and ebx.
+        __ RecordWriteContextSlot(esi,
+                                  context_offset,
+                                  eax,
+                                  ebx,
+                                  kDontSaveFPRegs);
       }
     }
   }
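
    RecordWriteContextSlot is the write-barrier path for stores into
    context slots; the explicit kDontSaveFPRegs tells the barrier's stub
    which registers need preserving. As a toy model of what any such
    barrier must accomplish, with invented types standing in for the real
    heap:

    #include <set>

    // Invented stand-ins for heap objects; illustration only.
    struct ToyObject {};

    struct ToyHeap {
      // Slots the collector must revisit (a remembered set).
      std::set<ToyObject**> remembered_slots;
      // Stand-in for the real predicate (e.g. "value is in new space, or
      // incremental marking is on and value is still unmarked").
      bool NeedsBarrier(ToyObject* value) { return value != 0; }
    };

    // The shape of a record-write barrier: do the store, then log the
    // slot when the collector needs to know about it.
    void WriteSlot(ToyHeap* heap, ToyObject** slot, ToyObject* value) {
      *slot = value;
      if (heap->NeedsBarrier(value)) heap->remembered_slots.insert(slot);
    }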
@@ -225,12 +235,12 @@
            Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
     __ push(edx);
     __ SafePush(Immediate(Smi::FromInt(num_parameters)));
-    // Arguments to ArgumentsAccessStub and/or New...:
+    // Arguments to ArgumentsAccessStub:
     //   function, receiver address, parameter count.
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
     ArgumentsAccessStub::Type type;
-    if (is_strict_mode()) {
+    if (!is_classic_mode()) {
       type = ArgumentsAccessStub::NEW_STRICT;
     } else if (function()->has_duplicate_parameters()) {
       type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -259,8 +269,11 @@
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        int ignored = 0;
-        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+        VariableProxy* proxy = scope()->function();
+        ASSERT(proxy->var()->mode() == CONST ||
+               proxy->var()->mode() == CONST_HARMONY);
+        ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
+        EmitDeclaration(proxy, proxy->var()->mode(), NULL);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -298,15 +311,62 @@
 }
 
 
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+  __ mov(ebx, Immediate(profiling_counter_));
+  __ sub(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+         Immediate(Smi::FromInt(delta)));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+  int reset_value = FLAG_interrupt_budget;
+  if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
+    // Self-optimization is a one-off thing: if it fails, don't try again.
+    reset_value = Smi::kMaxValue;
+  }
+  if (isolate()->IsDebuggerActive()) {
+    // Detect debug break requests as soon as possible.
+    reset_value = 10;
+  }
+  __ mov(ebx, Immediate(profiling_counter_));
+  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+         Immediate(Smi::FromInt(reset_value)));
+}
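
    The helpers above maintain a per-function interrupt budget in a
    JSGlobalPropertyCell: back edges and returns subtract a weight, the
    generated jns skips the InterruptStub while the counter stays
    non-negative, and the counter is reset afterwards. A plain C++ model of
    that protocol, simplified from the flag logic above; a sketch, not
    V8's code:

    #include <limits.h>

    struct ProfilingCounter {
      int value;

      // Back edge or return: subtract the weight. An interrupt is due once
      // the counter goes negative.
      bool Decrement(int weight) {
        value -= weight;
        return value < 0;
      }

      // Reset after servicing the interrupt. A huge value effectively makes
      // self-optimization one-off; a tiny one polls the debugger often.
      void Reset(int budget, bool self_opt_one_off, bool debugger_active) {
        value = budget;
        if (self_opt_one_off) value = INT_MAX;  // stand-in for Smi::kMaxValue
        if (debugger_active) value = 10;
      }
    };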
+
+
+static const int kMaxBackEdgeWeight = 127;
+static const int kBackEdgeDistanceDivisor = 100;
+
+
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+                                       Label* back_edge_target) {
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
-  ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(isolate());
-  __ cmp(esp, Operand::StaticVariable(stack_limit));
-  __ j(above_equal, &ok, Label::kNear);
-  StackCheckStub stub;
-  __ CallStub(&stub);
+
+  if (FLAG_count_based_interrupts) {
+    int weight = 1;
+    if (FLAG_weighted_back_edges) {
+      ASSERT(back_edge_target->is_bound());
+      int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+      weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kBackEdgeDistanceDivisor));
+    }
+    EmitProfilingCounterDecrement(weight);
+    __ j(positive, &ok, Label::kNear);
+    InterruptStub stub;
+    __ CallStub(&stub);
+  } else {
+    // Count-based interrupts happen often enough when they are enabled
+    // that the additional stack checks are not necessary (they would
+    // only check for interrupts).
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(isolate());
+    __ cmp(esp, Operand::StaticVariable(stack_limit));
+    __ j(above_equal, &ok, Label::kNear);
+    StackCheckStub stub;
+    __ CallStub(&stub);
+  }
+
   // Record a mapping of this PC offset to the OSR id.  This is used to find
   // the AST id from the unoptimized code in order to use it as a key into
   // the deoptimization input data found in the optimized code.
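
    As a worked example of the weighting: with kBackEdgeDistanceDivisor at
    100 and kMaxBackEdgeWeight at 127, a back edge spanning 250 bytes of
    code costs Min(127, Max(1, 250 / 100)) = 2 budget units per iteration,
    a 40-byte loop pays the minimum of 1, and only a loop body over 12700
    bytes would hit the cap.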
@@ -319,6 +379,10 @@
   ASSERT(loop_depth() > 0);
   __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
 
+  if (FLAG_count_based_interrupts) {
+    EmitProfilingCounterReset();
+  }
+
   __ bind(&ok);
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
@@ -339,6 +403,31 @@
       __ push(eax);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
+    if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
+      // Pretend that the exit is a backwards jump to the entry.
+      int weight = 1;
+      if (info_->ShouldSelfOptimize()) {
+        weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+      } else if (FLAG_weighted_back_edges) {
+        int distance = masm_->pc_offset();
+        weight = Min(kMaxBackEdgeWeight,
+                     Max(1, distance / kBackEdgeDistanceDivisor));
+      }
+      EmitProfilingCounterDecrement(weight);
+      Label ok;
+      __ j(positive, &ok, Label::kNear);
+      __ push(eax);
+      if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
+        __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+        __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
+      } else {
+        InterruptStub stub;
+        __ CallStub(&stub);
+      }
+      __ pop(eax);
+      EmitProfilingCounterReset();
+      __ bind(&ok);
+    }
 #ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
     Label check_exit_codesize;
@@ -363,15 +452,6 @@
 }
 
 
-void FullCodeGenerator::verify_stack_height() {
-  ASSERT(FLAG_verify_stack_height);
-  __ sub(Operand(ebp), Immediate(kPointerSize * stack_height()));
-  __ cmp(ebp, Operand(esp));
-  __ Assert(equal, "Full codegen stack height not as expected.");
-  __ add(Operand(ebp), Immediate(kPointerSize * stack_height()));
-}
-
-
 void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
   ASSERT(var->IsStackAllocated() || var->IsContextSlot());
 }
@@ -388,14 +468,13 @@
   MemOperand operand = codegen()->VarOperand(var, result_register());
   // Memory operands can be pushed directly.
   __ push(operand);
-  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -442,12 +521,11 @@
   } else {
     __ push(Immediate(lit));
   }
-  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -480,7 +558,6 @@
                                                    Register reg) const {
   ASSERT(count > 0);
   __ Drop(count);
-  codegen()->decrement_stack_height(count);
 }
 
 
@@ -490,7 +567,6 @@
   ASSERT(count > 0);
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->decrement_stack_height(count);
 }
 
 
@@ -499,7 +575,6 @@
   ASSERT(count > 0);
   if (count > 1) __ Drop(count - 1);
   __ mov(Operand(esp, 0), reg);
-  codegen()->decrement_stack_height(count - 1);
 }
 
 
@@ -509,9 +584,8 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
-  codegen()->decrement_stack_height(count);
 }
 
 
@@ -545,7 +619,6 @@
   __ bind(materialize_false);
   __ push(Immediate(isolate()->factory()->false_value()));
   __ bind(&done);
-  codegen()->increment_stack_height();
 }
 
 
@@ -573,12 +646,11 @@
       ? isolate()->factory()->true_value()
       : isolate()->factory()->false_value();
   __ push(Immediate(value));
-  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -597,7 +669,7 @@
   ToBooleanStub stub(result_register());
   __ push(result_register());
   __ CallStub(&stub, condition->test_id());
-  __ test(result_register(), Operand(result_register()));
+  __ test(result_register(), result_register());
   // The stub returns nonzero for true.
   Split(not_zero, if_true, if_false, fall_through);
 }
@@ -661,16 +733,17 @@
   ASSERT(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ mov(location, src);
+
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
     int offset = Context::SlotOffset(var->index());
     ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
-    __ RecordWrite(scratch0, offset, src, scratch1);
+    __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
   }
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -681,13 +754,7 @@
 
   Label skip;
   if (should_normalize) __ jmp(&skip, Label::kNear);
-
-  ForwardBailoutStack* current = forward_bailout_stack_;
-  while (current != NULL) {
-    PrepareForBailout(current->expr(), state);
-    current = current->parent();
-  }
-
+  PrepareForBailout(expr, TOS_REG);
   if (should_normalize) {
     __ cmp(eax, isolate()->factory()->true_value());
     Split(equal, if_true, if_false, NULL);
@@ -697,16 +764,17 @@
 
 
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        Variable::Mode mode,
-                                        FunctionLiteral* function,
-                                        int* global_count) {
+                                        VariableMode mode,
+                                        FunctionLiteral* function) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++(*global_count);
+      ++global_count_;
       break;
 
     case Variable::PARAMETER:
@@ -715,7 +783,7 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ mov(StackOperand(variable), result_register());
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         Comment cmnt(masm_, "[ Declaration");
         __ mov(StackOperand(variable),
                Immediate(isolate()->factory()->the_hole_value()));
@@ -738,11 +806,16 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ mov(ContextOperand(esi, variable->index()), result_register());
-        int offset = Context::SlotOffset(variable->index());
-        __ mov(ebx, esi);
-        __ RecordWrite(ebx, offset, result_register(), ecx);
+        // We know that we have written a function, which is not a smi.
+        __ RecordWriteContextSlot(esi,
+                                  Context::SlotOffset(variable->index()),
+                                  result_register(),
+                                  ecx,
+                                  kDontSaveFPRegs,
+                                  EMIT_REMEMBERED_SET,
+                                  OMIT_SMI_CHECK);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         Comment cmnt(masm_, "[ Declaration");
         __ mov(ContextOperand(esi, variable->index()),
                Immediate(isolate()->factory()->the_hole_value()));
@@ -755,37 +828,32 @@
       Comment cmnt(masm_, "[ Declaration");
       __ push(esi);
       __ push(Immediate(variable->name()));
-      // Declaration nodes are always introduced in one of three modes.
-      ASSERT(mode == Variable::VAR ||
-             mode == Variable::CONST ||
-             mode == Variable::LET);
-      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      // Declaration nodes are always introduced in one of four modes.
+      ASSERT(mode == VAR ||
+             mode == CONST ||
+             mode == CONST_HARMONY ||
+             mode == LET);
+      PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
+          ? READ_ONLY : NONE;
       __ push(Immediate(Smi::FromInt(attr)));
       // Push initial value, if any.
       // Note: For variables we must not push an initial value (such as
       // 'undefined') because we may have a (legal) redeclaration and we
       // must not destroy the current value.
-      increment_stack_height(3);
       if (function != NULL) {
         VisitForStackValue(function);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         __ push(Immediate(isolate()->factory()->the_hole_value()));
-        increment_stack_height();
       } else {
         __ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.
-        increment_stack_height();
       }
       __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-      decrement_stack_height(4);
       break;
     }
   }
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
-
-
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(esi);  // The context is the first argument.
@@ -801,7 +869,6 @@
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
 
-  int switch_clause_stack_height = stack_height();
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -835,10 +902,10 @@
     if (inline_smi_code) {
       Label slow_case;
       __ mov(ecx, edx);
-      __ or_(ecx, Operand(eax));
+      __ or_(ecx, eax);
       patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
 
-      __ cmp(edx, Operand(eax));
+      __ cmp(edx, eax);
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
       __ jmp(clause->body_target());
@@ -848,9 +915,9 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
     patch_site.EmitPatchInfo();
-    __ test(eax, Operand(eax));
+    __ test(eax, eax);
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
     __ jmp(clause->body_target());
@@ -866,7 +933,6 @@
     __ jmp(default_clause->body_target());
   }
 
-  set_stack_height(switch_clause_stack_height);
   // Compile all the case bodies.
   for (int i = 0; i < clauses->length(); i++) {
     Comment cmnt(masm_, "[ Case body");
@@ -898,6 +964,8 @@
   __ cmp(eax, isolate()->factory()->null_value());
   __ j(equal, &exit);
 
+  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
   // Convert the object to a JS object.
   Label convert, done_convert;
   __ JumpIfSmi(eax, &convert, Label::kNear);
@@ -908,67 +976,30 @@
   __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   __ bind(&done_convert);
   __ push(eax);
-  increment_stack_height();
+
+  // Check for proxies.
+  Label call_runtime, use_cache, fixed_array;
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+  __ j(below_equal, &call_runtime);
 
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  Label next, call_runtime;
-  __ mov(ecx, eax);
-  __ bind(&next);
+  __ CheckEnumCache(&call_runtime);
 
-  // Check that there are no elements.  Register ecx contains the
-  // current JS object we've reached through the prototype chain.
-  __ cmp(FieldOperand(ecx, JSObject::kElementsOffset),
-         isolate()->factory()->empty_fixed_array());
-  __ j(not_equal, &call_runtime);
-
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in ebx for the subsequent
-  // prototype load.
-  __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
-  __ JumpIfSmi(edx, &call_runtime);
-
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors (edx).  This is the case if the next enumeration
-  // index field does not contain a smi.
-  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
-  __ JumpIfSmi(edx, &call_runtime);
-
-  // For all objects but the receiver, check that the cache is empty.
-  Label check_prototype;
-  __ cmp(ecx, Operand(eax));
-  __ j(equal, &check_prototype, Label::kNear);
-  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ cmp(edx, isolate()->factory()->empty_fixed_array());
-  __ j(not_equal, &call_runtime);
-
-  // Load the prototype from the map and loop if non-null.
-  __ bind(&check_prototype);
-  __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
-  __ cmp(ecx, isolate()->factory()->null_value());
-  __ j(not_equal, &next);
-
-  // The enum cache is valid.  Load the map of the object being
-  // iterated over and use the cache for the iteration.
-  Label use_cache;
   __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
   __ jmp(&use_cache, Label::kNear);
 
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
-  __ push(eax);  // Duplicate the enumerable object on the stack.
+  __ push(eax);
   __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
-  // If we got a map from the runtime call, we can do a fast
-  // modification check. Otherwise, we got a fixed array, and we have
-  // to do a slow check.
-  Label fixed_array;
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          isolate()->factory()->meta_map());
-  __ j(not_equal, &fixed_array, Label::kNear);
+  __ j(not_equal, &fixed_array);
+
 
   // We got a map in register eax. Get the enumeration cache from it.
   __ bind(&use_cache);
@@ -976,7 +1007,7 @@
   __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
   __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
-  // Setup the four remaining stack slots.
+  // Set up the four remaining stack slots.
   __ push(eax);  // Map.
   __ push(edx);  // Enumeration cache.
   __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
@@ -985,16 +1016,33 @@
   __ jmp(&loop);
 
   // We got a fixed array in register eax. Iterate through that.
+  Label non_proxy;
   __ bind(&fixed_array);
-  __ push(Immediate(Smi::FromInt(0)));  // Map (0) - force slow check.
-  __ push(eax);
+
+  Handle<JSGlobalPropertyCell> cell =
+      isolate()->factory()->NewJSGlobalPropertyCell(
+          Handle<Object>(
+              Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+  RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+  __ LoadHeapObject(ebx, cell);
+  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+         Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
+
+  __ mov(ebx, Immediate(Smi::FromInt(1)));  // Smi indicates slow check
+  __ mov(ecx, Operand(esp, 0 * kPointerSize));  // Get enumerated object
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
+  __ j(above, &non_proxy);
+  __ mov(ebx, Immediate(Smi::FromInt(0)));  // Zero indicates proxy
+  __ bind(&non_proxy);
+  __ push(ebx);  // Smi
+  __ push(eax);  // Array
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
   __ push(eax);  // Fixed array length (as smi).
   __ push(Immediate(Smi::FromInt(0)));  // Initial index.
 
-  // 1 ~ The object has already been pushed.
-  increment_stack_height(ForIn::kElementCount - 1);
   // Generate code for doing the condition check.
+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
   __ mov(eax, Operand(esp, 0 * kPointerSize));  // Get the current index.
   __ cmp(eax, Operand(esp, 1 * kPointerSize));  // Compare to the array length.
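
    For orientation, a sketch of the for-in state these pushes leave on the
    stack, matching the Operand(esp, n * kPointerSize) loads used below:

    // Stack layout inside the loop, top of stack first (ia32, 4-byte slots):
    //   esp[0]  : current index (smi)
    //   esp[4]  : length of the fixed array (smi)
    //   esp[8]  : fixed array of keys
    //   esp[12] : map of the enumerable, or a smi marker in the slow case
    //   esp[16] : the enumerable object itself
    // Five slots in total, which is why the exit path below executes
    // add(esp, Immediate(5 * kPointerSize)).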
@@ -1004,26 +1052,32 @@
   __ mov(ebx, Operand(esp, 2 * kPointerSize));
   __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
 
-  // Get the expected map from the stack or a zero map in the
+  // Get the expected map from the stack or a smi in the
   // permanent slow case into register edx.
   __ mov(edx, Operand(esp, 3 * kPointerSize));
 
   // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
+  // If not, we may have to filter the key.
   Label update_each;
   __ mov(ecx, Operand(esp, 4 * kPointerSize));
   __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
   __ j(equal, &update_each, Label::kNear);
 
+  // For proxies, no filtering is done.
+  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+  ASSERT(Smi::FromInt(0) == 0);
+  __ test(edx, edx);
+  __ j(zero, &update_each);
+
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
   // just skip it.
   __ push(ecx);  // Enumerable.
   __ push(ebx);  // Current entry.
   __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(equal, loop_statement.continue_label());
-  __ mov(ebx, Operand(eax));
+  __ mov(ebx, eax);
 
   // Update the 'each' property or variable from the possibly filtered
   // entry in register ebx.
@@ -1031,7 +1085,7 @@
   __ mov(result_register(), ebx);
   // Perform the assignment as if via '='.
   { EffectContext context(this);
-    EmitAssignment(stmt->each(), stmt->AssignmentId());
+    EmitAssignment(stmt->each());
   }
 
   // Generate code for the body of the loop.
@@ -1042,15 +1096,15 @@
   __ bind(loop_statement.continue_label());
   __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
 
-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &loop);
   __ jmp(&loop);
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
-  __ add(Operand(esp), Immediate(5 * kPointerSize));
+  __ add(esp, Immediate(5 * kPointerSize));
 
-  decrement_stack_height(ForIn::kElementCount);
   // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1069,7 +1123,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(info->language_mode());
     __ push(Immediate(info));
     __ CallStub(&stub);
   } else {
@@ -1099,7 +1153,7 @@
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
                Immediate(0));
@@ -1113,7 +1167,7 @@
     // If no outer scope calls eval, we do not need to check more
     // context extensions.  If we have reached an eval scope, we check
     // all extensions from this point.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
     s = s->outer_scope();
   }
 
@@ -1146,7 +1200,7 @@
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
-  __ call(ic, mode);
+  CallIC(ic, mode);
 }
 
 
@@ -1158,7 +1212,7 @@
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
                Immediate(0));
@@ -1189,16 +1243,23 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+  if (var->mode() == DYNAMIC_GLOBAL) {
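+    // Illustrative case: in
+    //   function f() { eval('0'); return x; }
+    // 'x' is declared nowhere in the function, so unless the eval introduces
+    // it the lookup resolves globally (DYNAMIC_GLOBAL) and the
+    // check-extensions fast path below applies.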
     EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ jmp(done);
-  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+  } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == Variable::CONST) {
+    if (local->mode() == CONST ||
+        local->mode() == CONST_HARMONY ||
+        local->mode() == LET) {
       __ cmp(eax, isolate()->factory()->the_hole_value());
       __ j(not_equal, done);
-      __ mov(eax, isolate()->factory()->undefined_value());
+      if (local->mode() == CONST) {
+        __ mov(eax, isolate()->factory()->undefined_value());
+      } else {  // LET || CONST_HARMONY
+        __ push(Immediate(var->name()));
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      }
     }
     __ jmp(done);
   }
@@ -1220,7 +1281,7 @@
       __ mov(eax, GlobalObjectOperand());
       __ mov(ecx, var->name());
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+      CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(eax);
       break;
     }
@@ -1231,23 +1292,63 @@
       Comment cmnt(masm_, var->IsContextSlot()
                               ? "Context variable"
                               : "Stack variable");
-      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
-        context()->Plug(var);
-      } else {
-        // Let and const need a read barrier.
-        Label done;
-        GetVar(eax, var);
-        __ cmp(eax, isolate()->factory()->the_hole_value());
-        __ j(not_equal, &done, Label::kNear);
-        if (var->mode() == Variable::LET) {
-          __ push(Immediate(var->name()));
-          __ CallRuntime(Runtime::kThrowReferenceError, 1);
-        } else {  // Variable::CONST
-          __ mov(eax, isolate()->factory()->undefined_value());
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP
+        // always holds.
+        ASSERT(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is in
+        // the source physically located after the initializer of the variable.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        bool skip_init_check;
+        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+          skip_init_check = false;
+        } else {
+          // Check that we always have valid source position.
+          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          skip_init_check = var->mode() != CONST &&
+              var->initializer_position() < proxy->position();
         }
-        __ bind(&done);
-        context()->Plug(eax);
+
+        if (!skip_init_check) {
+          // Let and const need a read barrier.
+          Label done;
+          GetVar(eax, var);
+          __ cmp(eax, isolate()->factory()->the_hole_value());
+          __ j(not_equal, &done, Label::kNear);
+          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            __ push(Immediate(var->name()));
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+          } else {
+            // Uninitialized const bindings outside of harmony mode are unholed.
+            ASSERT(var->mode() == CONST);
+            __ mov(eax, isolate()->factory()->undefined_value());
+          }
+          __ bind(&done);
+          context()->Plug(eax);
+          break;
+        }
       }
+      context()->Plug(var);
       break;
     }
 
@@ -1323,12 +1424,22 @@
 }
 
 
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+  if (expression == NULL) {
+    __ push(Immediate(isolate()->factory()->null_value()));
+  } else {
+    VisitForStackValue(expression);
+  }
+}
+
+
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
+  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
-  __ push(Immediate(expr->constant_properties()));
+  __ push(Immediate(constant_properties));
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1336,10 +1447,15 @@
       ? ObjectLiteral::kHasFunction
       : ObjectLiteral::kNoFlags;
   __ push(Immediate(Smi::FromInt(flags)));
+  int properties_count = constant_properties->length() / 2;
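+  // constant_properties stores the (key, value) pairs flattened into a single
+  // array, hence the division by two above.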
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    __ CallStub(&stub);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1351,6 +1467,7 @@
   // marked expressions, no store code is emitted.
   expr->CalculateEmitStore();
 
+  AccessorTable accessor_table(isolate()->zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
     ObjectLiteral::Property* property = expr->properties()->at(i);
     if (property->IsCompileTimeValue()) continue;
@@ -1360,9 +1477,10 @@
     if (!result_saved) {
       __ push(eax);  // Save result on the stack
       result_saved = true;
-      increment_stack_height();
     }
     switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
         ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
         // Fall through.
@@ -1372,10 +1490,10 @@
             VisitForAccumulatorValue(value);
             __ mov(ecx, Immediate(key->handle()));
             __ mov(edx, Operand(esp, 0));
-            Handle<Code> ic = is_strict_mode()
-                ? isolate()->builtins()->StoreIC_Initialize_Strict()
-                : isolate()->builtins()->StoreIC_Initialize();
-            __ call(ic, RelocInfo::CODE_TARGET, key->id());
+            Handle<Code> ic = is_classic_mode()
+                ? isolate()->builtins()->StoreIC_Initialize()
+                : isolate()->builtins()->StoreIC_Initialize_Strict();
+            CallIC(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1385,7 +1503,6 @@
         // Fall through.
       case ObjectLiteral::Property::PROTOTYPE:
         __ push(Operand(esp, 0));  // Duplicate receiver.
-        increment_stack_height();
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
@@ -1394,25 +1511,29 @@
         } else {
           __ Drop(3);
         }
-        decrement_stack_height(3);
+        break;
+      case ObjectLiteral::Property::GETTER:
+        accessor_table.lookup(key)->second->getter = value;
         break;
       case ObjectLiteral::Property::SETTER:
-      case ObjectLiteral::Property::GETTER:
-        __ push(Operand(esp, 0));  // Duplicate receiver.
-        increment_stack_height();
-        VisitForStackValue(key);
-        __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
-                          Smi::FromInt(1) :
-                          Smi::FromInt(0)));
-        increment_stack_height();
-        VisitForStackValue(value);
-        __ CallRuntime(Runtime::kDefineAccessor, 4);
-        decrement_stack_height(4);
+        accessor_table.lookup(key)->second->setter = value;
         break;
-      default: UNREACHABLE();
     }
   }
 
+  // Emit code to define accessors, using only a single call to the runtime for
+  // each pair of corresponding getters and setters.
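+  // For example, in { get x() {}, set x(v) {} } both accessors for 'x' share
+  // one table entry and thus one runtime call.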
+  for (AccessorTable::Iterator it = accessor_table.begin();
+       it != accessor_table.end();
+       ++it) {
+    __ push(Operand(esp, 0));  // Duplicate receiver.
+    VisitForStackValue(it->first);
+    EmitAccessor(it->second->getter);
+    EmitAccessor(it->second->setter);
+    __ push(Immediate(Smi::FromInt(NONE)));
+    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+  }
+
   if (expr->has_function()) {
     ASSERT(result_saved);
     __ push(Operand(esp, 0));
@@ -1432,25 +1553,42 @@
 
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
+  Handle<FixedArray> constant_elements = expr->constant_elements();
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
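+  // E.g. a boilerplate like ['a', {}] has FAST_ELEMENTS, whereas [1, 2, 3]
+  // may stay FAST_SMI_ONLY_ELEMENTS while smi-only arrays are enabled
+  // (illustrative; other elements kinds exist).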
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(constant_elements->get(1)));
 
   __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
-  __ push(Immediate(expr->constant_elements()));
-  if (expr->constant_elements()->map() ==
-      isolate()->heap()->fixed_cow_array_map()) {
-    ASSERT(expr->depth() == 1);
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    __ CallStub(&stub);
+  __ push(Immediate(constant_elements));
+  Heap* heap = isolate()->heap();
+  if (has_constant_fast_elements &&
+      constant_elements_values->map() == heap->fixed_cow_array_map()) {
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
     __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+        length);
+    __ CallStub(&stub);
   } else if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           FLAG_smi_only_arrays);
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
+    FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
+        ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+        : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
     __ CallStub(&stub);
   }
 
@@ -1470,18 +1608,31 @@
     if (!result_saved) {
       __ push(eax);
       result_saved = true;
-      increment_stack_height();
     }
     VisitForAccumulatorValue(subexpr);
 
-    // Store the subexpression value in the array's elements.
-    __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
-    __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
-    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-    __ mov(FieldOperand(ebx, offset), result_register());
-
-    // Update the write barrier for the array store.
-    __ RecordWrite(ebx, offset, result_register(), ecx);
+    if (constant_elements_kind == FAST_ELEMENTS) {
+      // Fast-case array literals with ElementsKind FAST_ELEMENTS cannot
+      // transition, so there is no need to call the runtime stub.
+      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+      __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
+      __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+      // Store the subexpression value in the array's elements.
+      __ mov(FieldOperand(ebx, offset), result_register());
+      // Update the write barrier for the array store.
+      __ RecordWriteField(ebx, offset, result_register(), ecx,
+                          kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          INLINE_SMI_CHECK);
+    } else {
+      // Store the subexpression value in the array's elements.
+      __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
+      __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
+      __ mov(ecx, Immediate(Smi::FromInt(i)));
+      __ mov(edx, Immediate(Smi::FromInt(expr->literal_index())));
+      StoreArrayLiteralElementStub stub;
+      __ CallStub(&stub);
+    }
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
@@ -1499,9 +1650,7 @@
   // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
   // on the left-hand side.
   if (!expr->target()->IsValidLeftHandSide()) {
-    ASSERT(expr->target()->AsThrow() != NULL);
-    VisitInCurrentContext(expr->target());  // Throw does not plug the context
-    context()->Plug(eax);
+    VisitForEffect(expr->target());
     return;
   }
 
@@ -1526,7 +1675,6 @@
         // We need the receiver both on the stack and in the accumulator.
         VisitForAccumulatorValue(property->obj());
         __ push(result_register());
-        increment_stack_height();
       } else {
         VisitForStackValue(property->obj());
       }
@@ -1537,7 +1685,6 @@
         VisitForAccumulatorValue(property->key());
         __ mov(edx, Operand(esp, 0));
         __ push(eax);
-        increment_stack_height();
       } else {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
@@ -1569,7 +1716,6 @@
 
     Token::Value op = expr->binary_op();
     __ push(eax);  // Left operand goes on the stack.
-    increment_stack_height();
     VisitForAccumulatorValue(expr->value());
 
     OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
@@ -1619,14 +1765,14 @@
   ASSERT(!key->handle()->IsSmi());
   __ mov(ecx, Immediate(key->handle()));
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1639,16 +1785,15 @@
   // stack. Right operand is in eax.
   Label smi_case, done, stub_call;
   __ pop(edx);
-  decrement_stack_height();
   __ mov(ecx, eax);
-  __ or_(eax, Operand(edx));
+  __ or_(eax, edx);
   JumpPatchSite patch_site(masm_);
   patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
 
   __ bind(&stub_call);
   __ mov(eax, ecx);
   BinaryOpStub stub(op, mode);
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   __ jmp(&done, Label::kNear);
 
@@ -1691,32 +1836,32 @@
       break;
     }
     case Token::ADD:
-      __ add(eax, Operand(ecx));
+      __ add(eax, ecx);
       __ j(overflow, &stub_call);
       break;
     case Token::SUB:
-      __ sub(eax, Operand(ecx));
+      __ sub(eax, ecx);
       __ j(overflow, &stub_call);
       break;
     case Token::MUL: {
       __ SmiUntag(eax);
-      __ imul(eax, Operand(ecx));
+      __ imul(eax, ecx);
       __ j(overflow, &stub_call);
-      __ test(eax, Operand(eax));
+      __ test(eax, eax);
       __ j(not_zero, &done, Label::kNear);
       __ mov(ebx, edx);
-      __ or_(ebx, Operand(ecx));
+      __ or_(ebx, ecx);
       __ j(negative, &stub_call);
       break;
     }
     case Token::BIT_OR:
-      __ or_(eax, Operand(ecx));
+      __ or_(eax, ecx);
       break;
     case Token::BIT_AND:
-      __ and_(eax, Operand(ecx));
+      __ and_(eax, ecx);
       break;
     case Token::BIT_XOR:
-      __ xor_(eax, Operand(ecx));
+      __ xor_(eax, ecx);
       break;
     default:
       UNREACHABLE();
@@ -1731,22 +1876,19 @@
                                      Token::Value op,
                                      OverwriteMode mode) {
   __ pop(edx);
-  decrement_stack_height();
   BinaryOpStub stub(op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
-    ASSERT(expr->AsThrow() != NULL);
-    VisitInCurrentContext(expr);  // Throw does not plug the context
-    context()->Plug(eax);
+    VisitForEffect(expr);
     return;
   }
 
@@ -1770,36 +1912,30 @@
     }
     case NAMED_PROPERTY: {
       __ push(eax);  // Preserve value.
-      increment_stack_height();
       VisitForAccumulatorValue(prop->obj());
       __ mov(edx, eax);
       __ pop(eax);  // Restore value.
-      decrement_stack_height();
       __ mov(ecx, prop->key()->AsLiteral()->handle());
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
-      __ call(ic);
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
+      CallIC(ic);
       break;
     }
     case KEYED_PROPERTY: {
       __ push(eax);  // Preserve value.
-      increment_stack_height();
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ mov(ecx, eax);
       __ pop(edx);
-      decrement_stack_height();
       __ pop(eax);  // Restore value.
-      decrement_stack_height();
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
-      __ call(ic);
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      CallIC(ic);
       break;
     }
   }
-  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
   context()->Plug(eax);
 }
 
@@ -1810,10 +1946,10 @@
     // Global var, const, or let.
     __ mov(ecx, var->name());
     __ mov(edx, GlobalObjectOperand());
-    Handle<Code> ic = is_strict_mode()
-        ? isolate()->builtins()->StoreIC_Initialize_Strict()
-        : isolate()->builtins()->StoreIC_Initialize();
-    __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    Handle<Code> ic = is_classic_mode()
+        ? isolate()->builtins()->StoreIC_Initialize()
+        : isolate()->builtins()->StoreIC_Initialize_Strict();
+    CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
     // Const initializers need a write barrier.
@@ -1838,13 +1974,13 @@
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
 
-  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+  } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
     if (var->IsLookupSlot()) {
       __ push(eax);  // Value.
       __ push(esi);  // Context.
       __ push(Immediate(var->name()));
-      __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+      __ push(Immediate(Smi::FromInt(language_mode())));
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     } else {
       ASSERT(var->IsStackAllocated() || var->IsContextSlot());
@@ -1859,12 +1995,14 @@
       __ mov(location, eax);
       if (var->IsContextSlot()) {
         __ mov(edx, eax);
-        __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
+        int offset = Context::SlotOffset(var->index());
+        __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
       }
     }
 
-  } else if (var->mode() != Variable::CONST) {
-    // Assignment to var or initializing assignment to let.
+  } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+    // Assignment to var or initializing assignment to let/const
+    // in harmony mode.
     if (var->IsStackAllocated() || var->IsContextSlot()) {
       MemOperand location = VarOperand(var, ecx);
       if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1877,14 +2015,15 @@
       __ mov(location, eax);
       if (var->IsContextSlot()) {
         __ mov(edx, eax);
-        __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
+        int offset = Context::SlotOffset(var->index());
+        __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
       }
     } else {
       ASSERT(var->IsLookupSlot());
       __ push(eax);  // Value.
       __ push(esi);  // Context.
       __ push(Immediate(var->name()));
-      __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+      __ push(Immediate(Smi::FromInt(language_mode())));
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
   }
@@ -1915,12 +2054,11 @@
     __ mov(edx, Operand(esp, 0));
   } else {
     __ pop(edx);
-    decrement_stack_height();
   }
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->StoreIC_Initialize_Strict()
-      : isolate()->builtins()->StoreIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->StoreIC_Initialize()
+      : isolate()->builtins()->StoreIC_Initialize_Strict();
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1929,7 +2067,6 @@
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
     __ Drop(1);
-    decrement_stack_height();
   }
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
   context()->Plug(eax);
@@ -1951,19 +2088,17 @@
   }
 
   __ pop(ecx);
-  decrement_stack_height();
   if (expr->ends_initialization_block()) {
     __ mov(edx, Operand(esp, 0));  // Leave receiver on the stack for later.
   } else {
     __ pop(edx);
-    decrement_stack_height();
   }
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize()
+      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1972,7 +2107,6 @@
     __ push(edx);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
-    decrement_stack_height();
   }
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -1992,13 +2126,22 @@
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
     __ pop(edx);
-    decrement_stack_height();
     EmitKeyedPropertyLoad(expr);
     context()->Plug(eax);
   }
 }
 
 
+void FullCodeGenerator::CallIC(Handle<Code> code,
+                               RelocInfo::Mode rmode,
+                               unsigned ast_id) {
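+  // Every IC call site is emitted through this wrapper so the sites can be
+  // counted at code-generation time.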
+  ic_total_count_++;
+  __ call(code, rmode, ast_id);
+}
+
+
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
@@ -2015,11 +2158,10 @@
   SetSourcePosition(expr->position());
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-  __ call(ic, mode, expr->id());
+  CallIC(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  decrement_stack_height(arg_count + 1);
   context()->Plug(eax);
 }
 
@@ -2034,7 +2176,6 @@
   __ pop(ecx);
   __ push(eax);
   __ push(ecx);
-  increment_stack_height();
 
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2049,11 +2190,10 @@
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
   __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize));  // Key.
-  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  decrement_stack_height(arg_count + 1);
   context()->DropAndPlug(1, eax);  // Drop the key still on the stack.
 }
 
@@ -2069,19 +2209,30 @@
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
+
+  // Record call targets in unoptimized code, but not in the snapshot.
+  if (!Serializer::enabled()) {
+    flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+    Handle<Object> uninitialized =
+        TypeFeedbackCells::UninitializedSentinel(isolate());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+    RecordTypeFeedbackCell(expr->id(), cell);
+    __ mov(ebx, cell);
+  }
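+  // The stub records the observed call target in the cell; the assumption is
+  // that the optimizing compiler later reads this per-call-site feedback.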
+
   CallFunctionStub stub(arg_count, flags);
-  __ CallStub(&stub);
+  __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+  __ CallStub(&stub, expr->id());
+
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
-  decrement_stack_height(arg_count + 1);
   context()->DropAndPlug(1, eax);
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
-                                                      int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ push(Operand(esp, arg_count * kPointerSize));
@@ -2091,18 +2242,14 @@
 
   // Push the receiver of the enclosing function.
   __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
+  // Push the language mode.
+  __ push(Immediate(Smi::FromInt(language_mode())));
 
-  // Push the strict mode flag. In harmony mode every eval call
-  // is a strict mode eval call.
-  StrictModeFlag strict_mode = strict_mode_flag();
-  if (FLAG_harmony_block_scoping) {
-    strict_mode = kStrictMode;
-  }
-  __ push(Immediate(Smi::FromInt(strict_mode)));
+  // Push the start position of the scope the call resides in.
+  __ push(Immediate(Smi::FromInt(scope()->start_position())));
 
-  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
-                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
-                 : Runtime::kResolvePossiblyDirectEval, 4);
+  // Do the runtime call.
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
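+  // The runtime returns the function to call in eax and the receiver to use
+  // in edx; the caller patches both into the stack (see the touch-up code at
+  // the call site).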
 }
 
 
@@ -2128,33 +2275,15 @@
       VisitForStackValue(callee);
       // Reserved receiver slot.
       __ push(Immediate(isolate()->factory()->undefined_value()));
-      increment_stack_height();
       // Push the arguments.
       for (int i = 0; i < arg_count; i++) {
         VisitForStackValue(args->at(i));
       }
 
-      // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly in
-      // generated code. If we succeed, there is no need to perform a
-      // context lookup in the runtime system.
-      Label done;
-      Variable* var = proxy->var();
-      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
-        Label slow;
-        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
-        // Push the function and resolve eval.
-        __ push(eax);
-        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
-        __ jmp(&done);
-        __ bind(&slow);
-      }
-
       // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ push(Operand(esp, (arg_count + 1) * kPointerSize));
-      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      __ bind(&done);
+      EmitResolvePossiblyDirectEval(arg_count);
 
       // The runtime call returns a pair of values in eax (function) and
       // edx (receiver). Touch up the stack with the right values.
@@ -2164,17 +2293,16 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+    __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    decrement_stack_height(arg_count + 1);  // Function is left on the stack.
     context()->DropAndPlug(1, eax);
 
   } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
     // Push global object as receiver for the call IC.
     __ push(GlobalObjectOperand());
-    increment_stack_height();
     EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -2193,7 +2321,6 @@
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     __ push(eax);  // Function.
     __ push(edx);  // Receiver.
-    increment_stack_height(2);
 
     // If fast case code has been generated, emit code to push the function
     // and receiver and have the slow path jump around this code.
@@ -2201,8 +2328,7 @@
       Label call;
       __ jmp(&call, Label::kNear);
       __ bind(&done);
-      // Push function.  Stack height already incremented in slow case
-      // above.
+      // Push function.
       __ push(eax);
       // The receiver is implicitly the global receiver. Indicate this by
       // passing the hole to the call function stub.
@@ -2235,7 +2361,6 @@
     // Load global receiver object.
     __ mov(ebx, GlobalObjectOperand());
     __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
-    increment_stack_height();
     // Emit function call.
     EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
   }
@@ -2273,16 +2398,29 @@
   __ SafeSet(eax, Immediate(arg_count));
   __ mov(edi, Operand(esp, arg_count * kPointerSize));
 
-  Handle<Code> construct_builtin =
-      isolate()->builtins()->JSConstructCall();
-  __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+  // Record call targets in unoptimized code, but not in the snapshot.
+  CallFunctionFlags flags;
+  if (!Serializer::enabled()) {
+    flags = RECORD_CALL_TARGET;
+    Handle<Object> uninitialized =
+        TypeFeedbackCells::UninitializedSentinel(isolate());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+    RecordTypeFeedbackCell(expr->id(), cell);
+    __ mov(ebx, cell);
+  } else {
+    flags = NO_CALL_FUNCTION_FLAGS;
+  }
 
-  decrement_stack_height(arg_count + 1);
+  CallConstructStub stub(flags);
+  __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2294,7 +2432,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2302,7 +2440,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2314,7 +2453,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask | 0x80000000));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2322,7 +2461,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2346,14 +2486,15 @@
   __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
   __ j(below, if_false);
   __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2367,14 +2508,15 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2390,7 +2532,7 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
   __ test(ebx, Immediate(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(not_zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2398,7 +2540,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2438,9 +2581,9 @@
   STATIC_ASSERT(kPointerSize == 4);
   __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
   // Calculate location of the first key name.
-  __ add(Operand(ebx),
-           Immediate(FixedArray::kHeaderSize +
-                     DescriptorArray::kFirstIndex * kPointerSize));
+  __ add(ebx,
+         Immediate(FixedArray::kHeaderSize +
+                   DescriptorArray::kFirstIndex * kPointerSize));
   // Loop through all the keys in the descriptor array. If one of these is the
   // symbol valueOf the result is false.
   Label entry, loop;
@@ -2449,9 +2592,9 @@
   __ mov(edx, FieldOperand(ebx, 0));
   __ cmp(edx, FACTORY->value_of_symbol());
   __ j(equal, if_false);
-  __ add(Operand(ebx), Immediate(kPointerSize));
+  __ add(ebx, Immediate(kPointerSize));
   __ bind(&entry);
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(not_equal, &loop);
 
   // Reload map as register ebx was used as temporary above.
@@ -2475,12 +2618,13 @@
          Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2494,14 +2638,15 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2515,14 +2660,15 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2536,7 +2682,7 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2544,8 +2690,8 @@
 
 
 
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2568,14 +2714,15 @@
   __ bind(&check_frame_marker);
   __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
          Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2590,16 +2737,16 @@
                          &if_true, &if_false, &fall_through);
 
   __ pop(ebx);
-  decrement_stack_height();
-  __ cmp(eax, Operand(ebx));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ cmp(eax, ebx);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in edx and the formal
@@ -2613,8 +2760,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label exit;
   // Get the number of formal parameters.
@@ -2636,7 +2783,8 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2647,20 +2795,24 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, and one of them is at
+  // either end of the type range for JS object types. Saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
   // Map is now in eax.
   __ j(below, &null);
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ j(equal, &function);
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-  __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
-  __ j(above_equal, &function);
+  __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ j(equal, &function);
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
 
-  // Check if the constructor in the map is a function.
+  // Check if the constructor in the map is a JS function.
   __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
   __ j(not_equal, &non_function_constructor);
@@ -2692,7 +2844,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2700,12 +2852,12 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
     VisitForStackValue(args->at(2));
     __ CallRuntime(Runtime::kLog, 2);
-    decrement_stack_height(2);
   }
   // Finally, we're expected to leave a value on the top of the stack.
   __ mov(eax, isolate()->factory()->undefined_value());
@@ -2713,8 +2865,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
@@ -2730,9 +2882,10 @@
   __ bind(&heapnumber_allocated);
 
   __ PrepareCallCFunction(1, ebx);
-  __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()),
-                   1);
+  __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+  __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
+  __ mov(Operand(esp, 0), eax);
+  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
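+  // The C function now receives the global context rather than the isolate,
+  // presumably so that the random-number state can be kept per context.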
 
   // Convert 32 random bits in eax to 0.(32 random bits) in a double
   // by computing:
@@ -2741,8 +2894,8 @@
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope fscope(SSE2);
     __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
-    __ movd(xmm1, Operand(ebx));
-    __ movd(xmm0, Operand(eax));
+    __ movd(xmm1, ebx);
+    __ movd(xmm0, eax);
     __ cvtss2sd(xmm1, xmm1);
     __ xorps(xmm0, xmm1);
     __ subsd(xmm0, xmm1);
@@ -2763,34 +2916,35 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   __ CallStub(&stub);
-  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   VisitForStackValue(args->at(3));
   __ CallStub(&stub);
-  decrement_stack_height(4);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -2808,30 +2962,72 @@
 }
 
 
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 2);
+  ASSERT_NE(NULL, args->at(1)->AsLiteral());
+  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label runtime, done;
+  Register object = eax;
+  Register result = eax;
+  Register scratch = ecx;
+
+#ifdef DEBUG
+  __ AbortIfSmi(object);
+  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
+  __ Assert(equal, "Trying to get date field from non-date.");
+#endif
+
+  if (index->value() == 0) {
+    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ mov(scratch, Operand::StaticVariable(stamp));
+      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+      __ j(not_equal, &runtime, Label::kNear);
+      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
+                                          kPointerSize * index->value()));
+      __ jmp(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2, scratch);
+    __ mov(Operand(esp, 0), object);
+    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ bind(&done);
+  }
+  context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
   if (CpuFeatures::IsSupported(SSE2)) {
-    MathPowStub stub;
+    MathPowStub stub(MathPowStub::ON_STACK);
     __ CallStub(&stub);
   } else {
     __ CallRuntime(Runtime::kMath_pow, 2);
   }
-  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(ebx);  // eax = value. ebx = object.
-  decrement_stack_height();
 
   Label done;
   // If the object is a smi, return the value.
@@ -2843,17 +3039,19 @@
 
   // Store the value.
   __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
+
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
   __ mov(edx, eax);
-  __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
+  __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
@@ -2861,12 +3059,12 @@
 
   NumberToStringStub stub;
   __ CallStub(&stub);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2884,7 +3082,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2892,18 +3091,15 @@
 
   Register object = ebx;
   Register index = eax;
-  Register scratch = ecx;
   Register result = edx;
 
   __ pop(object);
-  decrement_stack_height();
 
   Label need_conversion;
   Label index_out_of_range;
   Label done;
   StringCharCodeAtGenerator generator(object,
                                       index,
-                                      scratch,
                                       result,
                                       &need_conversion,
                                       &need_conversion,
@@ -2932,7 +3128,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2940,20 +3137,17 @@
 
   Register object = ebx;
   Register index = eax;
-  Register scratch1 = ecx;
-  Register scratch2 = edx;
+  Register scratch = edx;
   Register result = eax;
 
   __ pop(object);
-  decrement_stack_height();
 
   Label need_conversion;
   Label index_out_of_range;
   Label done;
   StringCharAtGenerator generator(object,
                                   index,
-                                  scratch1,
-                                  scratch2,
+                                  scratch,
                                   result,
                                   &need_conversion,
                                   &need_conversion,
@@ -2982,7 +3176,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -2990,12 +3185,12 @@
 
   StringAddStub stub(NO_STRING_ADD_FLAGS);
   __ CallStub(&stub);
-  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -3003,58 +3198,70 @@
 
   StringCompareStub stub;
   __ CallStub(&stub);
-  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallStub(&stub);
+  context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
   // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3063,31 +3270,43 @@
   }
   VisitForAccumulatorValue(args->last());  // Function.
 
+  // Check for proxy.
+  Label proxy, done;
+  __ CmpObjectType(eax, JS_FUNCTION_PROXY_TYPE, ebx);
+  __ j(equal, &proxy);
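+  // A function proxy cannot be invoked directly via InvokeFunction; it is
+  // dispatched through the Runtime::kCall entry below instead.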
+
   // InvokeFunction requires the function in edi. Move it in there.
   __ mov(edi, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(edi, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  decrement_stack_height(arg_count + 1);
+  __ jmp(&done);
+
+  __ bind(&proxy);
+  __ push(eax);
+  __ CallRuntime(Runtime::kCall, args->length());
+  __ bind(&done);
+
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   __ CallStub(&stub);
-  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3119,14 +3338,14 @@
   __ mov(index_1, Operand(esp, 1 * kPointerSize));
   __ mov(index_2, Operand(esp, 0));
   __ mov(temp, index_1);
-  __ or_(temp, Operand(index_2));
+  __ or_(temp, index_2);
   __ JumpIfNotSmi(temp, &slow_case);
 
   // Check that both indices are valid.
   __ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
-  __ cmp(temp, Operand(index_1));
+  __ cmp(temp, index_1);
   __ j(below_equal, &slow_case);
-  __ cmp(temp, Operand(index_2));
+  __ cmp(temp, index_2);
   __ j(below_equal, &slow_case);
 
   // Bring addresses into index1 and index2.
@@ -3139,16 +3358,35 @@
   __ mov(Operand(index_2, 0), object);
   __ mov(Operand(index_1, 0), temp);
 
-  Label new_space;
-  __ InNewSpace(elements, temp, equal, &new_space);
+  Label no_remembered_set;
+  __ CheckPageFlag(elements,
+                   temp,
+                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                   not_zero,
+                   &no_remembered_set,
+                   Label::kNear);
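+  // Pages flagged SCAN_ON_SCAVENGE are scanned in their entirety during a
+  // scavenge, so individual slots on them need not be recorded in the
+  // remembered set.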
+  // Possible optimization: check that both values are Smis
+  // (OR them together and test against the Smi mask).
 
-  __ mov(object, elements);
-  __ RecordWriteHelper(object, index_1, temp);
-  __ RecordWriteHelper(elements, index_2, temp);
+  // We are swapping two objects in an array and the incremental marker never
+  // pauses in the middle of scanning a single object.  Therefore the
+  // incremental marker is not disturbed, so we don't need to call the
+  // RecordWrite stub that notifies the incremental marker.
+  __ RememberedSetHelper(elements,
+                         index_1,
+                         temp,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
+  __ RememberedSetHelper(elements,
+                         index_2,
+                         temp,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
 
-  __ bind(&new_space);
+  __ bind(&no_remembered_set);
+
   // We are done. Drop elements from the stack, and return undefined.
-  __ add(Operand(esp), Immediate(3 * kPointerSize));
+  __ add(esp, Immediate(3 * kPointerSize));
   __ mov(eax, isolate()->factory()->undefined_value());
   __ jmp(&done);
 
@@ -3156,12 +3394,12 @@
   __ CallRuntime(Runtime::kSwapElements, 3);
 
   __ bind(&done);
-  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3209,7 +3447,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   Register right = eax;
@@ -3221,11 +3460,11 @@
   __ pop(left);
 
   Label done, fail, ok;
-  __ cmp(left, Operand(right));
+  __ cmp(left, right);
   __ j(equal, &ok);
   // Fail if either is a non-HeapObject.
   __ mov(tmp, left);
-  __ and_(Operand(tmp), right);
+  __ and_(tmp, right);
   __ JumpIfSmi(tmp, &fail);
   __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
   __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
@@ -3242,12 +3481,12 @@
   __ mov(eax, Immediate(isolate()->factory()->true_value()));
   __ bind(&done);
 
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -3265,14 +3504,15 @@
 
   __ test(FieldOperand(eax, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3287,11 +3527,12 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
 
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   // We will leave the separator on the stack until the end of the function.
   VisitForStackValue(args->at(1));
@@ -3316,7 +3557,7 @@
   Operand separator_operand = Operand(esp, 2 * kPointerSize);
   Operand result_operand = Operand(esp, 1 * kPointerSize);
   Operand array_length_operand = Operand(esp, 0);
-  __ sub(Operand(esp), Immediate(2 * kPointerSize));
+  __ sub(esp, Immediate(2 * kPointerSize));
   __ cld();
   // Check that the array is a JSArray
   __ JumpIfSmi(array, &bailout);
@@ -3352,7 +3593,7 @@
   // Live loop registers: index, array_length, string,
   //                      scratch, string_length, elements.
   if (FLAG_debug_code) {
-    __ cmp(index, Operand(array_length));
+    __ cmp(index, array_length);
     __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
   }
   __ bind(&loop);
@@ -3370,8 +3611,8 @@
   __ add(string_length,
          FieldOperand(string, SeqAsciiString::kLengthOffset));
   __ j(overflow, &bailout);
-  __ add(Operand(index), Immediate(1));
-  __ cmp(index, Operand(array_length));
+  __ add(index, Immediate(1));
+  __ cmp(index, array_length);
   __ j(less, &loop);
 
   // If array_length is 1, return elements[0], a string.
@@ -3405,10 +3646,10 @@
   // to string_length.
   __ mov(scratch, separator_operand);
   __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
-  __ sub(string_length, Operand(scratch));  // May be negative, temporarily.
+  __ sub(string_length, scratch);  // May be negative, temporarily.
   __ imul(scratch, array_length_operand);
   __ j(overflow, &bailout);
-  __ add(string_length, Operand(scratch));
+  __ add(string_length, scratch);
   __ j(overflow, &bailout);
 
   __ shr(string_length, 1);
@@ -3449,7 +3690,7 @@
   __ lea(string,
          FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
+  __ add(index, Immediate(1));
   __ bind(&loop_1_condition);
   __ cmp(index, array_length_operand);
   __ j(less, &loop_1);  // End while (index < length).
@@ -3459,7 +3700,7 @@
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Replace separator with its ascii character value.
+  // Replace separator with its ASCII character value.
   __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ mov_b(separator_operand, scratch);
 
@@ -3490,7 +3731,7 @@
   __ lea(string,
          FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
+  __ add(index, Immediate(1));
 
   __ cmp(index, array_length_operand);
   __ j(less, &loop_2);  // End while (index < length).
@@ -3531,7 +3772,7 @@
   __ lea(string,
          FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
+  __ add(index, Immediate(1));
 
   __ cmp(index, array_length_operand);
   __ j(less, &loop_3);  // End while (index < length).
@@ -3543,10 +3784,9 @@
   __ bind(&done);
   __ mov(eax, result_operand);
   // Drop temp values from the stack, and restore context register.
-  __ add(Operand(esp), Immediate(3 * kPointerSize));
+  __ add(esp, Immediate(3 * kPointerSize));
 
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
@@ -3566,7 +3806,6 @@
     // Prepare for calling JS runtime function.
     __ mov(eax, GlobalObjectOperand());
     __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
-    increment_stack_height();
   }
 
   // Push the arguments ("left-to-right").
@@ -3581,18 +3820,13 @@
     RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
         isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-    __ call(ic, mode, expr->id());
+    CallIC(ic, mode, expr->id());
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   } else {
     // Call the C runtime function.
     __ CallRuntime(expr->function(), arg_count);
   }
-  decrement_stack_height(arg_count);
-  if (expr->is_jsruntime()) {
-    decrement_stack_height();
-  }
-
   context()->Plug(eax);
 }
 
@@ -3607,15 +3841,16 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+            ? kNonStrictMode : kStrictMode;
+        __ push(Immediate(Smi::FromInt(strict_mode_flag)));
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-        decrement_stack_height(2);
         context()->Plug(eax);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
           __ push(GlobalObjectOperand());
           __ push(Immediate(var->name()));
@@ -3657,18 +3892,41 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
       } else {
-        Label materialize_true, materialize_false;
-        Label* if_true = NULL;
-        Label* if_false = NULL;
-        Label* fall_through = NULL;
-
-        // Notice that the labels are swapped.
-        context()->PrepareTest(&materialize_true, &materialize_false,
-                               &if_false, &if_true, &fall_through);
-        if (context()->IsTest()) ForwardBailoutToChild(expr);
-        VisitForControl(expr->expression(), if_true, if_false, fall_through);
-        context()->Plug(if_false, if_true);  // Labels swapped.
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        if (context()->IsAccumulatorValue()) {
+          __ mov(eax, isolate()->factory()->true_value());
+        } else {
+          __ Push(isolate()->factory()->true_value());
+        }
+        __ jmp(&done, Label::kNear);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        if (context()->IsAccumulatorValue()) {
+          __ mov(eax, isolate()->factory()->false_value());
+        } else {
+          __ Push(isolate()->factory()->false_value());
+        }
+        __ bind(&done);
       }
       break;
     }
@@ -3679,7 +3937,6 @@
         VisitForTypeofValue(expr->expression());
       }
       __ CallRuntime(Runtime::kTypeof, 1);
-      decrement_stack_height();
       context()->Plug(eax);
       break;
     }
@@ -3721,7 +3978,7 @@
   // accumulator register eax.
   VisitForAccumulatorValue(expr->expression());
   SetSourcePosition(expr->position());
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   context()->Plug(eax);
 }
 
@@ -3733,10 +3990,7 @@
   // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
   // as the left-hand side.
   if (!expr->expression()->IsValidLeftHandSide()) {
-    ASSERT(expr->expression()->AsThrow() != NULL);
-    VisitInCurrentContext(expr->expression());
-    // Visiting Throw does not plug the context.
-    context()->Plug(eax);
+    VisitForEffect(expr->expression());
     return;
   }
 
@@ -3761,20 +4015,17 @@
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
       __ push(Immediate(Smi::FromInt(0)));
-      increment_stack_height();
     }
     if (assign_type == NAMED_PROPERTY) {
       // Put the object both on the stack and in the accumulator.
       VisitForAccumulatorValue(prop->obj());
       __ push(eax);
-      increment_stack_height();
       EmitNamedPropertyLoad(prop);
     } else {
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ mov(edx, Operand(esp, 0));
       __ push(eax);
-      increment_stack_height();
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -3805,7 +4056,6 @@
       switch (assign_type) {
         case VARIABLE:
           __ push(eax);
-          increment_stack_height();
           break;
         case NAMED_PROPERTY:
           __ mov(Operand(esp, kPointerSize), eax);
@@ -3823,9 +4073,9 @@
 
   if (ShouldInlineSmiCase(expr->op())) {
     if (expr->op() == Token::INC) {
-      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ add(eax, Immediate(Smi::FromInt(1)));
     } else {
-      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ sub(eax, Immediate(Smi::FromInt(1)));
     }
     __ j(overflow, &stub_call, Label::kNear);
     // We could eliminate this smi check if we split the code at
@@ -3835,9 +4085,9 @@
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     if (expr->op() == Token::INC) {
-      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ sub(eax, Immediate(Smi::FromInt(1)));
     } else {
-      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ add(eax, Immediate(Smi::FromInt(1)));
     }
   }
 
@@ -3848,7 +4098,7 @@
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(1)));
   BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -3879,11 +4129,10 @@
     case NAMED_PROPERTY: {
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       __ pop(edx);
-      decrement_stack_height();
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
-      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3897,12 +4146,10 @@
     case KEYED_PROPERTY: {
       __ pop(ecx);
       __ pop(edx);
-      decrement_stack_height();
-      decrement_stack_height();
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
-      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         // Result is on the stack
@@ -3930,7 +4177,7 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    __ call(ic);
+    CallIC(ic);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(eax);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -3950,20 +4197,25 @@
     context()->Plug(eax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInCurrentContext(expr);
+    VisitInDuplicateContext(expr);
   }
 }
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check,
-                                                 Label* if_true,
-                                                 Label* if_false,
-                                                 Label* fall_through) {
+                                                 Expression* sub_expr,
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(expr);
+    VisitForTypeofValue(sub_expr);
   }
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(eax, if_true);
@@ -3998,8 +4250,11 @@
     Split(not_zero, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(eax, if_false);
-    __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
-    Split(above_equal, if_true, if_false, fall_through);
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
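+    // typeof reports "function" for both ordinary functions and function
+    // proxies, so accept either instance type.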
+    __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
+    __ j(equal, if_true);
+    __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
+    Split(equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(eax, if_false);
     if (!FLAG_harmony_typeof) {
@@ -4017,18 +4272,7 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
-                                                    Label* if_true,
-                                                    Label* if_false,
-                                                    Label* fall_through) {
-  VisitForAccumulatorValue(expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ cmp(eax, isolate()->factory()->undefined_value());
-  Split(equal, if_true, if_false, fall_through);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -4036,9 +4280,12 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4046,21 +4293,13 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
-    context()->Plug(if_true, if_false);
-    return;
-  }
-
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
-  switch (expr->op()) {
+  switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      decrement_stack_height(2);
-      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
       break;
@@ -4069,9 +4308,8 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      decrement_stack_height(2);
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-      __ test(eax, Operand(eax));
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      __ test(eax, eax);
       // The stub returns 0 for true.
       Split(zero, if_true, if_false, fall_through);
       break;
@@ -4084,43 +4322,34 @@
         case Token::EQ_STRICT:
         case Token::EQ:
           cc = equal;
-          __ pop(edx);
           break;
         case Token::LT:
           cc = less;
-          __ pop(edx);
           break;
         case Token::GT:
-          // Reverse left and right sizes to obtain ECMA-262 conversion order.
-          cc = less;
-          __ mov(edx, result_register());
-          __ pop(eax);
+          cc = greater;
           break;
         case Token::LTE:
-          // Reverse left and right sizes to obtain ECMA-262 conversion order.
-          cc = greater_equal;
-          __ mov(edx, result_register());
-          __ pop(eax);
+          cc = less_equal;
           break;
         case Token::GTE:
           cc = greater_equal;
-          __ pop(edx);
           break;
         case Token::IN:
         case Token::INSTANCEOF:
         default:
           UNREACHABLE();
       }
-      decrement_stack_height();
+      __ pop(edx);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
         Label slow_case;
-        __ mov(ecx, Operand(edx));
-        __ or_(ecx, Operand(eax));
+        __ mov(ecx, edx);
+        __ or_(ecx, eax);
         patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
-        __ cmp(edx, Operand(eax));
+        __ cmp(edx, eax);
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
       }
@@ -4128,11 +4357,11 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
 
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-      __ test(eax, Operand(eax));
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      __ test(eax, eax);
       Split(cc, if_true, if_false, fall_through);
     }
   }
@@ -4143,7 +4372,9 @@
 }
 
 
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4151,15 +4382,20 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(expr->expression());
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ cmp(eax, isolate()->factory()->null_value());
-  if (expr->is_strict()) {
+  VisitForAccumulatorValue(sub_expr);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Handle<Object> nil_value = nil == kNullValue ?
+      isolate()->factory()->null_value() :
+      isolate()->factory()->undefined_value();
+  __ cmp(eax, nil_value);
+  if (expr->op() == Token::EQ_STRICT) {
     Split(equal, if_true, if_false, fall_through);
   } else {
+    Handle<Object> other_nil_value = nil == kNullValue ?
+        isolate()->factory()->undefined_value() :
+        isolate()->factory()->null_value();
     __ j(equal, if_true);
-    __ cmp(eax, isolate()->factory()->undefined_value());
+    __ cmp(eax, other_nil_value);
     __ j(equal, if_true);
     __ JumpIfSmi(eax, if_false);
     // It can be an undetectable object.
@@ -4226,7 +4462,7 @@
   // Cook return address on top of stack (smi encoded Code* delta)
   ASSERT(!result_register().is(edx));
   __ pop(edx);
-  __ sub(Operand(edx), Immediate(masm_->CodeObject()));
+  __ sub(edx, Immediate(masm_->CodeObject()));
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   STATIC_ASSERT(kSmiTag == 0);
   __ SmiTag(edx);
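+  // The return address is kept as a smi-tagged offset from the code object,
+  // so the GC never mistakes the stack slot for a raw pointer into code space.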
@@ -4242,8 +4478,8 @@
   // Uncook return address.
   __ pop(edx);
   __ SmiUntag(edx);
-  __ add(Operand(edx), Immediate(masm_->CodeObject()));
-  __ jmp(Operand(edx));
+  __ add(edx, Immediate(masm_->CodeObject()));
+  __ jmp(edx);
 }
 
 
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 9b5cc56..33f247a 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -212,7 +212,7 @@
 
   // Update write barrier. Make sure not to clobber the value.
   __ mov(r1, value);
-  __ RecordWrite(elements, r0, r1);
+  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
 }
 
 
@@ -326,7 +326,7 @@
   // Fast case: Do the load.
   STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
   __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
-  __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
+  __ cmp(scratch, Immediate(FACTORY->the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
   // to ensure the prototype chain is searched.
   __ j(equal, out_of_range);
@@ -394,8 +394,8 @@
   // Check if element is in the range of mapped arguments. If not, jump
   // to the unmapped lookup with the parameter map in scratch1.
   __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
-  __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
-  __ cmp(key, Operand(scratch2));
+  __ sub(scratch2, Immediate(Smi::FromInt(2)));
+  __ cmp(key, scratch2);
   __ j(greater_equal, unmapped_case);
 
   // Load element index and check whether it is the hole.
@@ -432,7 +432,7 @@
   Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
   __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
   __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(scratch));
+  __ cmp(key, scratch);
   __ j(greater_equal, slow_case);
   return FieldOperand(backing_store,
                       key,
@@ -473,7 +473,6 @@
   Counters* counters = isolate->counters();
   __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
   __ ret(0);
-
   __ bind(&check_number_dictionary);
   __ mov(ebx, eax);
   __ SmiUntag(ebx);
@@ -534,18 +533,38 @@
   __ shr(ecx, KeyedLookupCache::kMapHashShift);
   __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
   __ shr(edi, String::kHashShift);
-  __ xor_(ecx, Operand(edi));
-  __ and_(ecx, KeyedLookupCache::kCapacityMask);
+  __ xor_(ecx, edi);
+  __ and_(ecx, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
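+  // Masking with kHashMask as well aligns the index to the start of a bucket
+  // (a multiple of kEntriesPerBucket).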
 
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
   ExternalReference cache_keys =
       ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-  __ mov(edi, ecx);
+
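+  // Each cache entry holds a (map, symbol) pair and is therefore two pointers
+  // wide; the bucket index is scaled by kPointerSizeLog2 + 1 below.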
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    __ mov(edi, ecx);
+    __ shl(edi, kPointerSizeLog2 + 1);
+    if (i != 0) {
+      __ add(edi, Immediate(kPointerSize * i * 2));
+    }
+    __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+    __ j(not_equal, &try_next_entry);
+    __ add(edi, Immediate(kPointerSize));
+    __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
+    __ j(equal, &hit_on_nth_entry[i]);
+    __ bind(&try_next_entry);
+  }
+
+  __ lea(edi, Operand(ecx, 1));
   __ shl(edi, kPointerSizeLog2 + 1);
+  __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
   __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
-  __ add(Operand(edi), Immediate(kPointerSize));
+  __ add(edi, Immediate(kPointerSize));
   __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
 
@@ -556,15 +575,27 @@
   // ecx     : lookup cache index
   ExternalReference cache_field_offsets =
       ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-  __ mov(edi,
-         Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
-  __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
-  __ sub(edi, Operand(ecx));
-  __ j(above_equal, &property_array_property);
+
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    if (i != 0) {
+      __ add(ecx, Immediate(i));
+    }
+    __ mov(edi,
+           Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
+    __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+    __ sub(edi, ecx);
+    __ j(above_equal, &property_array_property);
+    if (i != 0) {
+      __ jmp(&load_in_object_property);
+    }
+  }
 
   // Load in-object property.
+  __ bind(&load_in_object_property);
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
-  __ add(ecx, Operand(edi));
+  __ add(ecx, edi);
   __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
   __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
   __ ret(0);
@@ -606,14 +637,12 @@
 
   Register receiver = edx;
   Register index = eax;
-  Register scratch1 = ebx;
-  Register scratch2 = ecx;
+  Register scratch = ecx;
   Register result = eax;
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
-                                          scratch1,
-                                          scratch2,
+                                          scratch,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -651,8 +680,8 @@
   // Check that it has indexed interceptor and access checks
   // are not enabled for this object.
   __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
-  __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
-  __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
+  __ and_(ecx, Immediate(kSlowCaseBitFieldMask));
+  __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
   __ j(not_zero, &slow);
 
   // Everything is fine, call runtime.
@@ -710,7 +739,7 @@
   __ mov(mapped_location, eax);
   __ lea(ecx, mapped_location);
   __ mov(edx, eax);
-  __ RecordWrite(ebx, ecx, edx);
+  __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in ebx.
@@ -719,7 +748,7 @@
   __ mov(unmapped_location, eax);
   __ lea(edi, unmapped_location);
   __ mov(edx, eax);
-  __ RecordWrite(ebx, edi, edx);
+  __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
@@ -734,7 +763,10 @@
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label slow, fast, array, extra;
+  Label slow, fast_object_with_map_check, fast_object_without_map_check;
+  Label fast_double_with_map_check, fast_double_without_map_check;
+  Label check_if_double_array, array, extra, transition_smi_elements;
+  Label finish_object_store, non_double_value, transition_double_elements;
 
   // Check that the object isn't a smi.
   __ JumpIfSmi(edx, &slow);
@@ -750,22 +782,18 @@
   __ CmpInstanceType(edi, JS_ARRAY_TYPE);
   __ j(equal, &array);
   // Check that the object is some kind of JSObject.
-  __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
+  __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
   __ j(below, &slow);
-  __ CmpInstanceType(edi, JS_PROXY_TYPE);
-  __ j(equal, &slow);
-  __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
-  __ j(equal, &slow);
 
   // Object case: Check key against length in the elements array.
   // eax: value
   // edx: JSObject
   // ecx: key (a smi)
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  // Check that the object is in fast mode and writable.
-  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
-  __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-  __ j(below, &fast);
+  // edi: receiver map
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+  __ j(below, &fast_object_with_map_check);
 
   // Slow case: call runtime.
   __ bind(&slow);
@@ -778,16 +806,28 @@
   // eax: value
   // edx: receiver, a JSArray
   // ecx: key, a smi.
-  // edi: receiver->elements, a FixedArray
+  // ebx: receiver->elements, a FixedArray
+  // edi: receiver map
   // flags: compare (ecx, edx.length())
   // do not leave holes in the array:
   __ j(not_equal, &slow);
-  __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
-  // Add 1 to receiver->length, and go to fast array write.
+  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+  __ j(not_equal, &check_if_double_array);
+  // Add 1 to receiver->length, and go to common element store code for Objects.
   __ add(FieldOperand(edx, JSArray::kLengthOffset),
          Immediate(Smi::FromInt(1)));
-  __ jmp(&fast);
+  __ jmp(&fast_object_without_map_check);
+
+  __ bind(&check_if_double_array);
+  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+  __ j(not_equal, &slow);
+  // Add 1 to receiver->length, and go to common element store code for doubles.
+  __ add(FieldOperand(edx, JSArray::kLengthOffset),
+         Immediate(Smi::FromInt(1)));
+  __ jmp(&fast_double_without_map_check);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
@@ -796,34 +836,111 @@
   // eax: value
   // edx: receiver, a JSArray
   // ecx: key, a smi.
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
+  // edi: receiver map
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
 
-  // Check the key against the length in the array, compute the
-  // address to store into and fall through to fast case.
+  // Check the key against the length in the array and fall through to the
+  // common store code.
   __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
   __ j(above_equal, &extra);
 
-  // Fast case: Do the store.
-  __ bind(&fast);
+  // Fast case: Do the store, could either Object or double.
+  __ bind(&fast_object_with_map_check);
   // eax: value
   // ecx: key (a smi)
   // edx: receiver
-  // edi: FixedArray receiver->elements
-  __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
-  // Update write barrier for the elements array address.
-  __ mov(edx, Operand(eax));
-  __ RecordWrite(edi, 0, edx, ecx);
+  // ebx: FixedArray receiver->elements
+  // edi: receiver map
+  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+  __ j(not_equal, &fast_double_with_map_check);
+  __ bind(&fast_object_without_map_check);
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(eax, &non_smi_value);
+  // It's irrelevant whether array is smi-only or not when writing a smi.
+  __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
   __ ret(0);
+
+  __ bind(&non_smi_value);
+  // Escape to elements kind transition case.
+  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+  __ CheckFastObjectElements(edi, &transition_smi_elements);
+
+  // Fast elements array, store the value to the elements backing store.
+  __ bind(&finish_object_store);
+  __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+  // Update write barrier for the elements array address.
+  __ mov(edx, eax);  // Preserve the value which is returned.
+  __ RecordWriteArray(
+      ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ ret(0);
+
+  __ bind(&fast_double_with_map_check);
+  // Check for fast double array case. If this fails, call through to the
+  // runtime.
+  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+  __ j(not_equal, &slow);
+  __ bind(&fast_double_without_map_check);
+  // If the value is a number, store it as a double in the FastDoubleElements
+  // array.
+  __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0,
+                                 &transition_double_elements, false);
+  __ ret(0);
+
+  __ bind(&transition_smi_elements);
+  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+
+  // Transition the array appropriately depending on the value type.
+  __ CheckMap(eax,
+              masm->isolate()->factory()->heap_number_map(),
+              &non_double_value,
+              DONT_DO_SMI_CHECK);
+
+  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_DOUBLE_ELEMENTS,
+                                         ebx,
+                                         edi,
+                                         &slow);
+  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ jmp(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         ebx,
+                                         edi,
+                                         &slow);
+  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that is
+  // not a HeapNumber. Make sure the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         ebx,
+                                         edi,
+                                         &slow);
+  ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
 }
 
 
 // The generated code does not accept smi keys.
 // The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                          int argc,
-                                          Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                               int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- edx                 : receiver
@@ -833,11 +950,11 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          MONOMORPHIC,
-                                         extra_ic_state,
+                                         extra_state,
                                          NORMAL,
                                          argc);
-  Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
-                                                  eax);
+  Isolate* isolate = masm->isolate();
+  isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, eax);
 
   // If the stub cache probing failed, the receiver might be a value.
   // For value objects, we use the map of the prototype objects for
@@ -863,9 +980,9 @@
 
   // Check for boolean.
   __ bind(&non_string);
-  __ cmp(edx, FACTORY->true_value());
+  __ cmp(edx, isolate->factory()->true_value());
   __ j(equal, &boolean);
-  __ cmp(edx, FACTORY->false_value());
+  __ cmp(edx, isolate->factory()->false_value());
   __ j(not_equal, &miss);
   __ bind(&boolean);
   StubCompiler::GenerateLoadGlobalFunctionPrototype(
@@ -873,8 +990,7 @@
 
   // Probe the stub cache for the value object.
   __ bind(&probe);
-  Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
-                                                  no_reg);
+  isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
   __ bind(&miss);
 }
 
@@ -904,8 +1020,9 @@
                     NullCallWrapper(), CALL_AS_METHOD);
 }
 
+
 // The generated code falls through if the call should be handled by runtime.
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -929,10 +1046,10 @@
 }
 
 
-static void GenerateCallMiss(MacroAssembler* masm,
-                             int argc,
-                             IC::UtilityId id,
-                             Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+                              int argc,
+                              IC::UtilityId id,
+                              Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -951,22 +1068,22 @@
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
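+    // The FrameScope replaces the explicit EnterInternalFrame/
+    // LeaveInternalFrame pair; the frame is torn down automatically when the
+    // scope ends.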
 
-  // Push the receiver and the name of the function.
-  __ push(edx);
-  __ push(ecx);
+    // Push the receiver and the name of the function.
+    __ push(edx);
+    __ push(ecx);
 
-  // Call the entry.
-  CEntryStub stub(1);
-  __ mov(eax, Immediate(2));
-  __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
-  __ CallStub(&stub);
+    // Call the entry.
+    CEntryStub stub(1);
+    __ mov(eax, Immediate(2));
+    __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
+    __ CallStub(&stub);
 
-  // Move result to edi and exit the internal frame.
-  __ mov(edi, eax);
-  __ LeaveInternalFrame();
+    // Move result to edi and exit the internal frame.
+    __ mov(edi, eax);
+  }
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
@@ -989,7 +1106,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   ParameterCount actual(argc);
@@ -1003,7 +1120,7 @@
 
 void CallIC::GenerateMegamorphic(MacroAssembler* masm,
                                  int argc,
-                                 Code::ExtraICState extra_ic_state) {
+                                 Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1014,38 +1131,10 @@
 
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+  CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC,
+                                            extra_state);
 
-  GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  //  -- ecx                 : name
-  //  -- esp[0]              : return address
-  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
-  //  -- ...
-  //  -- esp[(argc + 1) * 4] : receiver
-  // -----------------------------------
-
-  GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm,
-                          int argc,
-                          Code::ExtraICState extra_ic_state) {
-  // ----------- S t a t e -------------
-  //  -- ecx                 : name
-  //  -- esp[0]              : return address
-  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
-  //  -- ...
-  //  -- esp[(argc + 1) * 4] : receiver
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
+  GenerateMiss(masm, argc, extra_state);
 }
 
 
@@ -1111,13 +1200,17 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-  __ EnterInternalFrame();
-  __ push(ecx);  // save the key
-  __ push(edx);  // pass the receiver
-  __ push(ecx);  // pass the key
-  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-  __ pop(ecx);  // restore the key
-  __ LeaveInternalFrame();
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(ecx);  // save the key
+    __ push(edx);  // pass the receiver
+    __ push(ecx);  // pass the key
+    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+    __ pop(ecx);  // restore the key
+    // Leave the internal frame.
+  }
+
   __ mov(edi, eax);
   __ jmp(&do_call);
 
@@ -1143,10 +1236,8 @@
 
   __ bind(&lookup_monomorphic_cache);
   __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
-  GenerateMonomorphicCacheProbe(masm,
-                                argc,
-                                Code::KEYED_CALL_IC,
-                                Code::kNoExtraICState);
+  CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC,
+                                            Code::kNoExtraICState);
   // Fall through on miss.
 
   __ bind(&slow_call);
@@ -1209,25 +1300,12 @@
   __ JumpIfSmi(ecx, &miss);
   Condition cond = masm->IsObjectStringType(ecx, eax, eax);
   __ j(NegateCondition(cond), &miss);
-  GenerateCallNormal(masm, argc);
+  CallICBase::GenerateNormal(masm, argc);
   __ bind(&miss);
   GenerateMiss(masm, argc);
 }
 
 
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  //  -- ecx                 : name
-  //  -- esp[0]              : return address
-  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
-  //  -- ...
-  //  -- esp[(argc + 1) * 4] : receiver
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
@@ -1375,10 +1453,10 @@
   //  -- esp[0] : return address
   // -----------------------------------
   //
-  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // This accepts as a receiver anything JSArray::SetElementsLength accepts
   // (currently anything except for external arrays which means anything with
-  // elements of FixedArray type.), but currently is restricted to JSArray.
-  // Value must be a number, but only smis are accepted as the most common case.
+  // elements of FixedArray type).  Value must be a number, but only smis are
+  // accepted as the most common case.
 
   Label miss;
 
@@ -1400,6 +1478,13 @@
   __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
   __ j(not_equal, &miss);
 
+  // Check that the array has fast properties, otherwise the length
+  // property might have been redefined.
+  __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+  __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
+                 Heap::kHashTableMapRootIndex);
+  __ j(equal, &miss);
+
   // Check that value is a smi.
   __ JumpIfNotSmi(value, &miss);
 
@@ -1536,6 +1621,57 @@
 }
 
 
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ebx    : target map
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  // Must return the modified receiver in eax.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+    __ mov(eax, edx);
+    __ Ret();
+    __ bind(&fail);
+  }
+
+  __ pop(ebx);
+  __ push(edx);
+  __ push(ebx);  // return address
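+  // The receiver is now the single argument of the tail-called runtime
+  // function.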
+  // Leave the code managed by the register allocator and return to the
+  // convention of using esi as the context register.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ebx    : target map
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  // Must return the modified receiver in eax.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+    __ mov(eax, edx);
+    __ Ret();
+    __ bind(&fail);
+  }
+
+  __ pop(ebx);
+  __ push(edx);
+  __ push(ebx);  // return address
+  // Leave the code managed by the register allocator and return to the
+  // convention of using esi as the context register.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
 #undef __
 
 
@@ -1547,11 +1683,9 @@
     case Token::LT:
       return less;
     case Token::GT:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return less;
+      return greater;
     case Token::LTE:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return greater_equal;
+      return less_equal;
     case Token::GTE:
       return greater_equal;
     default:
@@ -1583,6 +1717,9 @@
     rewritten = stub.GetCode();
   } else {
     ICCompareStub stub(op_, state);
+    if (state == KNOWN_OBJECTS) {
+      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+    }
     rewritten = stub.GetCode();
   }
   set_target(*rewritten);
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index d5a4fe6..8fb4c79 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,6 +33,7 @@
 #include "code-stubs.h"
 #include "deoptimizer.h"
 #include "stub-cache.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -66,10 +67,18 @@
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
-  HPhase phase("Code generation", chunk());
+  HPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
   CpuFeatures::Scope scope(SSE2);
+
+  CodeStub::GenerateFPStubs();
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -133,7 +142,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). ecx is zero for method calls and non-zero for
   // function calls.
-  if (info_->is_strict_mode() || info_->is_native()) {
+  if (!info_->is_classic_mode() || info_->is_native()) {
     Label ok;
     __ test(ecx, Operand(ecx));
     __ j(zero, &ok, Label::kNear);
@@ -204,11 +213,12 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ mov(Operand(esi, context_offset), eax);
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have to use a third register to avoid
-        // clobbering esi.
-        __ mov(ecx, esi);
-        __ RecordWrite(ecx, context_offset, eax, ebx);
+        // Update the write barrier. This clobbers eax and ebx.
+        __ RecordWriteContextSlot(esi,
+                                  context_offset,
+                                  eax,
+                                  ebx,
+                                  kDontSaveFPRegs);
       }
     }
     Comment(";;; End allocate local context");
@@ -252,6 +262,9 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      Comment(";;; Deferred code @%d: %s.",
+              code->instruction_index(),
+              code->instr()->Mnemonic());
       code->Generate();
       __ jmp(code->exit());
     }
@@ -302,18 +315,21 @@
 }
 
 
-Immediate LCodeGen::ToImmediate(LOperand* op) {
-  LConstantOperand* const_op = LConstantOperand::cast(op);
-  Handle<Object> literal = chunk_->LookupLiteral(const_op);
-  Representation r = chunk_->LookupLiteralRepresentation(const_op);
-  if (r.IsInteger32()) {
-    ASSERT(literal->IsNumber());
-    return Immediate(static_cast<int32_t>(literal->Number()));
-  } else if (r.IsDouble()) {
-    Abort("unsupported double immediate");
-  }
-  ASSERT(r.IsTagged());
-  return Immediate(literal);
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  Handle<Object> literal = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+  return literal;
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  return value->Number();
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
 }
 
 
@@ -352,7 +368,19 @@
 
   WriteTranslation(environment->outer(), translation);
   int closure_id = DefineDeoptimizationLiteral(environment->closure());
-  translation->BeginFrame(environment->ast_id(), closure_id, height);
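+  // Inlined construct stubs and arguments adaptors get dedicated translation
+  // frame types below; only genuine JS frames record the ast id and height.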
+  switch (environment->frame_type()) {
+    case JS_FUNCTION:
+      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+      break;
+    case JS_CONSTRUCT:
+      translation->BeginConstructStubFrame(closure_id, translation_size);
+      break;
+    case ARGUMENTS_ADAPTOR:
+      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+      break;
+    default:
+      UNREACHABLE();
+  }
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
     // spilled_registers_ and spilled_double_registers_ are either
@@ -464,14 +492,18 @@
                                        int argc,
                                        LInstruction* instr,
                                        LOperand* context) {
-  ASSERT(context->IsRegister() || context->IsStackSlot());
   if (context->IsRegister()) {
     if (!ToRegister(context).is(esi)) {
       __ mov(esi, ToRegister(context));
     }
-  } else {
-    // Context is stack slot.
+  } else if (context->IsStackSlot()) {
     __ mov(esi, ToOperand(context));
+  } else if (context->IsConstantOperand()) {
+    Handle<Object> literal =
+        chunk_->LookupLiteral(LConstantOperand::cast(context));
+    __ LoadHeapObject(esi, Handle<Context>::cast(literal));
+  } else {
+    UNREACHABLE();
   }
 
   __ CallRuntimeSaveDoubles(id);
@@ -497,10 +529,14 @@
     // |>------------  translation_size ------------<|
 
     int frame_count = 0;
+    int jsframe_count = 0;
     for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
       ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
     }
-    Translation translation(&translations_, frame_count);
+    Translation translation(&translations_, frame_count, jsframe_count);
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
     int pc_offset = masm()->pc_offset();
@@ -517,7 +553,6 @@
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
   Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
-  ASSERT(entry != NULL);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -568,7 +603,6 @@
 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
-  ASSERT(FLAG_deopt);
   Handle<DeoptimizationInputData> data =
       factory()->NewDeoptimizationInputData(length, TENURED);
 
@@ -643,7 +677,7 @@
     int arguments,
     Safepoint::DeoptMode deopt_mode) {
   ASSERT(kind == expected_safepoint_kind_);
-  const ZoneList<LOperand*>* operands = pointers->operands();
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   Safepoint safepoint =
       safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
   for (int i = 0; i < operands->length(); i++) {
@@ -1111,7 +1145,7 @@
   ASSERT(left->Equals(instr->result()));
 
   if (right->IsConstantOperand()) {
-    __ sub(ToOperand(left), ToImmediate(right));
+    __ sub(ToOperand(left), ToInteger32Immediate(right));
   } else {
     __ sub(ToRegister(left), ToOperand(right));
   }
@@ -1167,8 +1201,13 @@
 
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
-  ASSERT(instr->result()->IsRegister());
-  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
+  Register reg = ToRegister(instr->result());
+  Handle<Object> handle = instr->value();
+  if (handle->IsHeapObject()) {
+    __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
+  } else {
+    __ Set(reg, Immediate(handle));
+  }
 }
 
 
@@ -1207,6 +1246,7 @@
   Register result = ToRegister(instr->result());
   Register map = ToRegister(instr->TempAt(0));
   ASSERT(input.is(result));
+
   Label done;
   // If the object is a smi return the object.
   __ JumpIfSmi(input, &done, Label::kNear);
@@ -1220,6 +1260,43 @@
 }
 
 
+void LCodeGen::DoDateField(LDateField* instr) {
+  Register object = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->TempAt(0));
+  Smi* index = instr->index();
+  Label runtime, done;
+  ASSERT(object.is(result));
+  ASSERT(object.is(eax));
+
+#ifdef DEBUG
+  __ AbortIfSmi(object);
+  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
+  __ Assert(equal, "Trying to get date field from non-date.");
+#endif
+
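+  // Field 0 is the date value itself and is always valid.  Cached fields are
+  // valid only while the object's cache stamp matches the isolate-wide date
+  // cache stamp; stale or uncached fields are recomputed by the C function.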
+  if (index->value() == 0) {
+    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ mov(scratch, Operand::StaticVariable(stamp));
+      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+      __ j(not_equal, &runtime, Label::kNear);
+      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
+                                          kPointerSize * index->value()));
+      __ jmp(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2, scratch);
+    __ mov(Operand(esp, 0), object);
+    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ bind(&done);
+  }
+}
+
+
 void LCodeGen::DoBitNotI(LBitNotI* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->Equals(instr->result()));
@@ -1245,7 +1322,7 @@
   ASSERT(left->Equals(instr->result()));
 
   if (right->IsConstantOperand()) {
-    __ add(ToOperand(left), ToImmediate(right));
+    __ add(ToOperand(left), ToInteger32Immediate(right));
   } else {
     __ add(ToRegister(left), ToOperand(right));
   }
@@ -1494,32 +1571,40 @@
 }
 
 
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
-  if (right->IsConstantOperand()) {
-    __ cmp(ToOperand(left), ToImmediate(right));
-  } else {
-    __ cmp(ToRegister(left), ToOperand(right));
-  }
-}
-
-
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-
-  if (instr->is_double()) {
-    // Don't base result on EFLAGS when a NaN is involved. Instead
-    // jump to the false block.
-    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
-  } else {
-    EmitCmpI(left, right);
-  }
-
   Condition cc = TokenToCondition(instr->op(), instr->is_double());
-  EmitBranch(true_block, false_block, cc);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block =
+      EvalComparison(instr->op(), left_val, right_val) ? true_block
+                                                       : false_block;
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Don't base result on EFLAGS when a NaN is involved. Instead
+      // jump to the false block.
+      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+      __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+    } else {
+      if (right->IsConstantOperand()) {
+        __ cmp(ToRegister(left), ToInteger32Immediate(right));
+      } else if (left->IsConstantOperand()) {
+        __ cmp(ToOperand(right), ToInteger32Immediate(left));
+        // We transposed the operands. Reverse the condition.
+        cc = ReverseCondition(cc);
+      } else {
+        __ cmp(ToRegister(left), ToOperand(right));
+      }
+    }
+    EmitBranch(true_block, false_block, cc);
+  }
 }
 
 
@@ -1544,23 +1629,33 @@
 }
 
 
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
   Register reg = ToRegister(instr->InputAt(0));
-
-  // TODO(fsc): If the expression is known to be a smi, then it's
-  // definitely not null. Jump to the false block.
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  __ cmp(reg, factory()->null_value());
-  if (instr->is_strict()) {
+  // If the expression is known to be untagged or a smi, then it's definitely
+  // not null, and it can't be an undetectable object.
+  if (instr->hydrogen()->representation().IsSpecialization() ||
+      instr->hydrogen()->type().IsSmi()) {
+    EmitGoto(false_block);
+    return;
+  }
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  Handle<Object> nil_value = instr->nil() == kNullValue ?
+      factory()->null_value() :
+      factory()->undefined_value();
+  __ cmp(reg, nil_value);
+  if (instr->kind() == kStrictEquality) {
     EmitBranch(true_block, false_block, equal);
   } else {
+    Handle<Object> other_nil_value = instr->nil() == kNullValue ?
+        factory()->undefined_value() :
+        factory()->null_value();
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ j(equal, true_label);
-    __ cmp(reg, factory()->undefined_value());
+    __ cmp(reg, other_nil_value);
     __ j(equal, true_label);
     __ JumpIfSmi(reg, false_label);
     // Check for undetectable objects by looking in the bit field in
@@ -1612,6 +1707,31 @@
 }
 
 
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string) {
+  __ JumpIfSmi(input, is_not_string);
+
+  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+  return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond = EmitIsString(reg, temp, false_label);
+
+  EmitBranch(true_block, false_block, true_cond);
+}
+
+
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   Operand input = ToOperand(instr->InputAt(0));
 
@@ -1639,6 +1759,41 @@
 }
 
 
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      return greater;
+    case Token::LTE:
+      return less_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = ComputeCompareCondition(op);
+  __ test(eax, Operand(eax));
+
+  EmitBranch(true_block, false_block, condition);
+}
+
+
 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -1702,7 +1857,7 @@
 
 
 // Branches to a label or falls through with the answer in the z flag.  Trashes
-// the temp registers, but not the input.  Only input and temp2 may alias.
+// the temp registers, but not the input.
 void LCodeGen::EmitClassOfTest(Label* is_true,
                                Label* is_false,
                                Handle<String> class_name,
@@ -1710,30 +1865,38 @@
                                Register temp,
                                Register temp2) {
   ASSERT(!input.is(temp));
-  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
+  ASSERT(!input.is(temp2));
+  ASSERT(!temp.is(temp2));
   __ JumpIfSmi(input, is_false);
-  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
-  __ j(below, is_false);
 
-  // Map is now in temp.
-  // Functions have class 'Function'.
-  __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    __ j(above_equal, is_true);
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+    __ j(below, is_false);
+    __ j(equal, is_true);
+    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
+    __ j(equal, is_true);
   } else {
-    __ j(above_equal, is_false);
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do a signed compare with the width of the type range.
+    __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+    __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
+    __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                                     FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ j(above, is_false);
   }
 
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   // Check if the constructor in the map is a function.
   __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
-
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
   if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -1762,12 +1925,7 @@
   Register input = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
   Register temp2 = ToRegister(instr->TempAt(1));
-  if (input.is(temp)) {
-    // Swap.
-    Register swapper = temp;
-    temp = temp2;
-    temp2 = swapper;
-  }
+
   Handle<String> class_name = instr->hydrogen()->class_name();
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1818,9 +1976,8 @@
     virtual void Generate() {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-
+    virtual LInstruction* instr() { return instr_; }
     Label* map_check() { return &map_check_; }
-
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
@@ -1843,7 +2000,9 @@
   Register map = ToRegister(instr->TempAt(0));
   __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
   __ bind(deferred->map_check());  // Label for calculating code patching.
-  __ cmp(map, factory()->the_hole_value());  // Patched to cached map.
+  Handle<JSGlobalPropertyCell> cache_cell =
+      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+  __ cmp(map, Operand::Cell(cache_cell));  // Patched to cached map.
   __ j(not_equal, &cache_miss, Label::kNear);
   __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
   __ jmp(&done);
@@ -1891,7 +2050,7 @@
   // the stub.
   Register temp = ToRegister(instr->TempAt(0));
   ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
-  __ mov(InstanceofStub::right(), Immediate(instr->function()));
+  __ LoadHeapObject(InstanceofStub::right(), instr->function());
   static const int kAdditionalDelta = 13;
   int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
   __ mov(temp, Immediate(delta));
@@ -1909,26 +2068,6 @@
 }
 
 
-static Condition ComputeCompareCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return equal;
-    case Token::LT:
-      return less;
-    case Token::GT:
-      return greater;
-    case Token::LTE:
-      return less_equal;
-    case Token::GTE:
-      return greater_equal;
-    default:
-      UNREACHABLE();
-      return no_condition;
-  }
-}
-
-
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
@@ -1936,9 +2075,6 @@
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = ComputeCompareCondition(op);
-  if (op == Token::GT || op == Token::LTE) {
-    condition = ReverseCondition(condition);
-  }
   Label true_value, done;
   __ test(eax, Operand(eax));
   __ j(condition, &true_value, Label::kNear);
@@ -1969,7 +2105,7 @@
 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
-  if (instr->hydrogen()->check_hole_value()) {
+  if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(result, factory()->the_hole_value());
     DeoptimizeIf(equal, instr->environment());
   }
@@ -1990,20 +2126,21 @@
 
 
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
-  Register value = ToRegister(instr->InputAt(0));
-  Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
+  Register value = ToRegister(instr->value());
+  Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
 
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
   // to update the property details in the property dictionary to mark
   // it as no longer deleted. We deoptimize in that case.
-  if (instr->hydrogen()->check_hole_value()) {
-    __ cmp(cell_operand, factory()->the_hole_value());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
     DeoptimizeIf(equal, instr->environment());
   }
 
   // Store the value.
-  __ mov(cell_operand, value);
+  __ mov(Operand::Cell(cell_handle), value);
+  // Cells are always rescanned, so no write barrier here.
 }
 
 
@@ -2013,7 +2150,7 @@
   ASSERT(ToRegister(instr->value()).is(eax));
 
   __ mov(ecx, instr->name());
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -2024,18 +2161,54 @@
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ mov(result, ContextOperand(context, instr->slot_index()));
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(result, factory()->the_hole_value());
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(equal, instr->environment());
+    } else {
+      Label is_not_hole;
+      __ j(not_equal, &is_not_hole, Label::kNear);
+      __ mov(result, factory()->undefined_value());
+      __ bind(&is_not_hole);
+    }
+  }
 }
 
 
 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
-  __ mov(ContextOperand(context, instr->slot_index()), value);
-  if (instr->needs_write_barrier()) {
+
+  Label skip_assignment;
+
+  Operand target = ContextOperand(context, instr->slot_index());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(target, factory()->the_hole_value());
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(equal, instr->environment());
+    } else {
+      __ j(not_equal, &skip_assignment, Label::kNear);
+    }
+  }
+
+  __ mov(target, value);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     Register temp = ToRegister(instr->TempAt(0));
     int offset = Context::SlotOffset(instr->slot_index());
-    __ RecordWrite(context, offset, value, temp);
+    __ RecordWriteContextSlot(context,
+                              offset,
+                              value,
+                              temp,
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
   }
+
+  __ bind(&skip_assignment);
 }
 
 
@@ -2055,9 +2228,9 @@
                                                Register object,
                                                Handle<Map> type,
                                                Handle<String> name) {
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsProperty() &&
+  ASSERT(lookup.IsFound() &&
          (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
   if (lookup.type() == FIELD) {
     int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2073,7 +2246,24 @@
     }
   } else {
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    LoadHeapObject(result, Handle<HeapObject>::cast(function));
+    __ LoadHeapObject(result, function);
+  }
+}
+
+
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+  ASSERT(!operand->IsDoubleRegister());
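+  // Smi constants can be pushed as immediates; heap-object constants go
+  // through PushHeapObject, which indirects through a global property cell
+  // when the object lives in new space.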
+  if (operand->IsConstantOperand()) {
+    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+    if (object->IsSmi()) {
+      __ Push(Handle<Smi>::cast(object));
+    } else {
+      __ PushHeapObject(Handle<HeapObject>::cast(object));
+    }
+  } else if (operand->IsRegister()) {
+    __ push(ToRegister(operand));
+  } else {
+    __ push(ToOperand(operand));
   }
 }
 
@@ -2251,16 +2441,14 @@
     LLoadKeyedFastDoubleElement* instr) {
   XMMRegister result = ToDoubleRegister(instr->result());
 
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
-        sizeof(kHoleNanLower32);
-    Operand hole_check_operand = BuildFastArrayOperand(
-        instr->elements(), instr->key(),
-        FAST_DOUBLE_ELEMENTS,
-        offset);
-    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr->environment());
-  }
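+  // The hole is encoded as a NaN with a distinguished upper word, so
+  // comparing the element's upper 32 bits against kHoleNanUpper32 detects
+  // it; loading the hole now always deoptimizes.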
+  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+      sizeof(kHoleNanLower32);
+  Operand hole_check_operand = BuildFastArrayOperand(
+      instr->elements(), instr->key(),
+      FAST_DOUBLE_ELEMENTS,
+      offset);
+  __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+  DeoptimizeIf(equal, instr->environment());
 
   Operand double_load_operand = BuildFastArrayOperand(
       instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2330,6 +2518,7 @@
         break;
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
@@ -2398,15 +2587,10 @@
 }
 
 
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   Register receiver = ToRegister(instr->receiver());
   Register function = ToRegister(instr->function());
-  Register length = ToRegister(instr->length());
-  Register elements = ToRegister(instr->elements());
   Register scratch = ToRegister(instr->TempAt(0));
-  ASSERT(receiver.is(eax));  // Used for parameter count.
-  ASSERT(function.is(edi));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(eax));
 
   // If the receiver is null or undefined, we have to pass the global
   // object as a receiver to normal functions. Values have to be
@@ -2448,6 +2632,17 @@
   __ mov(receiver,
          FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  ASSERT(receiver.is(eax));  // Used for parameter count.
+  ASSERT(function.is(edi));  // Required by InvokeFunction.
+  ASSERT(ToRegister(instr->result()).is(eax));
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.
@@ -2484,17 +2679,13 @@
 
 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   LOperand* argument = instr->InputAt(0);
-  if (argument->IsConstantOperand()) {
-    __ push(ToImmediate(argument));
-  } else {
-    __ push(ToOperand(argument));
-  }
+  EmitPushTaggedOperand(argument);
 }
 
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadHeapObject(result, instr->hydrogen()->closure());
 }
 
 
@@ -2512,6 +2703,15 @@
 }
 
 
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  ASSERT(ToRegister(instr->InputAt(0)).is(esi));
+  __ push(esi);  // The context is the first argument.
+  __ push(Immediate(instr->hydrogen()->pairs()));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2530,41 +2730,53 @@
                                  int arity,
                                  LInstruction* instr,
                                  CallKind call_kind) {
-  // Change context if needed.
-  bool change_context =
-      (info()->closure()->context() != function->context()) ||
-      scope()->contains_with() ||
-      (scope()->num_heap_slots() > 0);
-  if (change_context) {
-    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-  } else {
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  }
-
-  // Set eax to arguments count if adaption is not needed. Assumes that eax
-  // is available to write to at this point.
-  if (!function->NeedsArgumentsAdaption()) {
-    __ mov(eax, arity);
-  }
+  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+      function->shared()->formal_parameter_count() == arity;
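+  // Calling directly (bypassing the arguments adaptor trampoline) is safe
+  // only when no adaptation is needed or the call arity matches the declared
+  // parameter count exactly.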
 
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  // Invoke function.
-  __ SetCallKind(ecx, call_kind);
-  if (*function == *info()->closure()) {
-    __ CallSelf();
-  } else {
-    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
-  }
+  if (can_invoke_directly) {
+    __ LoadHeapObject(edi, function);
 
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    // Change context if needed.
+    bool change_context =
+        (info()->closure()->context() != function->context()) ||
+        scope()->contains_with() ||
+        (scope()->num_heap_slots() > 0);
+
+    if (change_context) {
+      __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+    } else {
+      __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    }
+
+    // Set eax to the arguments count if adaptation is not needed. Assumes
+    // that eax
+    // is available to write to at this point.
+    if (!function->NeedsArgumentsAdaption()) {
+      __ mov(eax, arity);
+    }
+
+    // Invoke function directly.
+    __ SetCallKind(ecx, call_kind);
+    if (*function == *info()->closure()) {
+      __ CallSelf();
+    } else {
+      __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+    }
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    // We need to adapt arguments.
+    SafepointGenerator generator(
+        this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+  }
 }
 
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
-  __ mov(edi, instr->function());
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
@@ -2647,6 +2859,7 @@
     virtual void Generate() {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LUnaryMathOperation* instr_;
   };
@@ -2778,72 +2991,146 @@
 }
 
 
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->value());
+  Register scratch = ToRegister(instr->temp());
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done, sqrt;
+  // Check base for -Infinity.  According to IEEE-754, single-precision
+  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
+  __ mov(scratch, 0xFF800000);
+  __ movd(xmm_scratch, scratch);
+  __ cvtss2sd(xmm_scratch, xmm_scratch);
+  __ ucomisd(input_reg, xmm_scratch);
+  // Comparing -Infinity with NaN results in "unordered", which sets the
+  // zero flag as if both were equal.  However, it also sets the carry flag.
+  __ j(not_equal, &sqrt, Label::kNear);
+  __ j(carry, &sqrt, Label::kNear);
+  // If input is -Infinity, return Infinity.
+  __ xorps(input_reg, input_reg);
+  __ subsd(input_reg, xmm_scratch);
+  __ jmp(&done, Label::kNear);
+
+  // Square root.
+  __ bind(&sqrt);
   __ xorps(xmm_scratch, xmm_scratch);
   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   __ sqrtsd(input_reg, input_reg);
+  __ bind(&done);
 }
 
 
 void LCodeGen::DoPower(LPower* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  DoubleRegister result_reg = ToDoubleRegister(instr->result());
   Representation exponent_type = instr->hydrogen()->right()->representation();
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+         ToDoubleRegister(instr->InputAt(1)).is(xmm1));
+  ASSERT(!instr->InputAt(1)->IsRegister() ||
+         ToRegister(instr->InputAt(1)).is(eax));
+  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
 
-  if (exponent_type.IsDouble()) {
-    // It is safe to use ebx directly since the instruction is marked
-    // as a call.
-    __ PrepareCallCFunction(4, ebx);
-    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
-    __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
-    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
-                     4);
-  } else if (exponent_type.IsInteger32()) {
-    // It is safe to use ebx directly since the instruction is marked
-    // as a call.
-    ASSERT(!ToRegister(right).is(ebx));
-    __ PrepareCallCFunction(4, ebx);
-    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
-    __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
-    __ CallCFunction(ExternalReference::power_double_int_function(isolate()),
-                     4);
-  } else {
-    ASSERT(exponent_type.IsTagged());
-    CpuFeatures::Scope scope(SSE2);
-    Register right_reg = ToRegister(right);
-
-    Label non_smi, call;
-    __ JumpIfNotSmi(right_reg, &non_smi);
-    __ SmiUntag(right_reg);
-    __ cvtsi2sd(result_reg, Operand(right_reg));
-    __ jmp(&call);
-
-    __ bind(&non_smi);
-    // It is safe to use ebx directly since the instruction is marked
-    // as a call.
-    ASSERT(!right_reg.is(ebx));
-    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , ebx);
+  if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(eax, &no_deopt);
+    __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
     DeoptimizeIf(not_equal, instr->environment());
-    __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
-
-    __ bind(&call);
-    __ PrepareCallCFunction(4, ebx);
-    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
-    __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
-    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
-                     4);
+    __ bind(&no_deopt);
+    MathPowStub stub(MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    ASSERT(exponent_type.IsDouble());
+    MathPowStub stub(MathPowStub::DOUBLE);
+    __ CallStub(&stub);
   }
+}
 
-  // Return value is in st(0) on ia32.
-  // Store it into the (fixed) result register.
-  __ sub(Operand(esp), Immediate(kDoubleSize));
-  __ fstp_d(Operand(esp, 0));
-  __ movdbl(result_reg, Operand(esp, 0));
-  __ add(Operand(esp), Immediate(kDoubleSize));
+
+void LCodeGen::DoRandom(LRandom* instr) {
+  class DeferredDoRandom: public LDeferredCode {
+   public:
+    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LRandom* instr_;
+  };
+
+  DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+
+  // Having marked this instruction as a call, we can use any registers.
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
+  // Assert that the register size is indeed the size of each seed.
+  static const int kSeedSize = sizeof(uint32_t);
+  STATIC_ASSERT(kPointerSize == kSeedSize);
+
+  __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
+  static const int kRandomSeedOffset =
+      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
+  __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
+  // ebx: ByteArray holding the global context's random seeds
+
+  // Load state[0].
+  __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
+  // If state[0] == 0, call runtime to initialize seeds.
+  __ test(ecx, ecx);
+  __ j(zero, deferred->entry());
+  // Load state[1].
+  __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
+  // ecx: state[0]
+  // eax: state[1]
+
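+  // The two updates below are multiply-with-carry style steps; combined they
+  // yield a 32-bit random pattern with a long period.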
+  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
+  __ movzx_w(edx, ecx);
+  __ imul(edx, edx, 18273);
+  __ shr(ecx, 16);
+  __ add(ecx, edx);
+  // Save state[0].
+  __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
+
+  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
+  __ movzx_w(edx, eax);
+  __ imul(edx, edx, 36969);
+  __ shr(eax, 16);
+  __ add(eax, edx);
+  // Save state[1].
+  __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
+
+  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
+  __ shl(ecx, 14);
+  __ and_(eax, Immediate(0x3FFFF));
+  __ add(eax, ecx);
+
+  __ bind(deferred->exit());
+  // Convert 32 random bits in eax to 0.(32 random bits) in a double
+  // by computing:
+  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
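+  // The xor merges the random bits into the low mantissa bits of 1.0 x 2^20,
+  // yielding 2^20 + r * 2^-32; subtracting 2^20 leaves r * 2^-32 in [0, 1).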
+  __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
+  __ movd(xmm2, ebx);
+  __ movd(xmm1, eax);
+  __ cvtss2sd(xmm2, xmm2);
+  __ xorps(xmm1, xmm2);
+  __ subsd(xmm1, xmm2);
+}
+
+
+void LCodeGen::DoDeferredRandom(LRandom* instr) {
+  __ PrepareCallCFunction(1, ebx);
+  __ mov(Operand(esp, 0), eax);
+  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+  // Return value is in eax.
 }
 
 
@@ -2878,6 +3165,14 @@
 }
 
 
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -2908,15 +3203,15 @@
     case kMathSqrt:
       DoMathSqrt(instr);
       break;
-    case kMathPowHalf:
-      DoMathPowHalf(instr);
-      break;
     case kMathCos:
       DoMathCos(instr);
       break;
     case kMathSin:
       DoMathSin(instr);
       break;
+    case kMathTan:
+      DoMathTan(instr);
+      break;
     case kMathLog:
       DoMathLog(instr);
       break;
@@ -2968,12 +3263,12 @@
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
+  ASSERT(ToRegister(instr->function()).is(edi));
   ASSERT(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ Drop(1);
 }
 
 
@@ -2992,7 +3287,6 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
-  __ mov(edi, instr->target());
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
@@ -3002,9 +3296,9 @@
   ASSERT(ToRegister(instr->constructor()).is(edi));
   ASSERT(ToRegister(instr->result()).is(eax));
 
-  Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
+  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   __ Set(eax, Immediate(instr->arity()));
-  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
 }
 
 
@@ -3023,21 +3317,36 @@
   }
 
   // Do the store.
+  HType type = instr->hydrogen()->value()->type();
+  SmiCheck check_needed =
+      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (instr->is_in_object()) {
     __ mov(FieldOperand(object, offset), value);
-    if (instr->needs_write_barrier()) {
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
       Register temp = ToRegister(instr->TempAt(0));
       // Update the write barrier for the object for in-object properties.
-      __ RecordWrite(object, offset, value, temp);
+      __ RecordWriteField(object,
+                          offset,
+                          value,
+                          temp,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
     }
   } else {
     Register temp = ToRegister(instr->TempAt(0));
     __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
     __ mov(FieldOperand(temp, offset), value);
-    if (instr->needs_write_barrier()) {
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
-      __ RecordWrite(temp, offset, value, object);
+      __ RecordWriteField(temp,
+                          offset,
+                          value,
+                          object,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
     }
   }
 }
@@ -3049,7 +3358,7 @@
   ASSERT(ToRegister(instr->value()).is(eax));
 
   __ mov(ecx, instr->name());
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3059,7 +3368,7 @@
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   if (instr->index()->IsConstantOperand()) {
     __ cmp(ToOperand(instr->length()),
-           ToImmediate(LConstantOperand::cast(instr->index())));
+           Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
     DeoptimizeIf(below_equal, instr->environment());
   } else {
     __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
@@ -3096,6 +3405,7 @@
         break;
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
@@ -3128,13 +3438,21 @@
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     __ lea(key,
            FieldOperand(elements,
                         key,
                         times_pointer_size,
                         FixedArray::kHeaderSize));
-    __ RecordWrite(elements, key, value);
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed);
   }
 }
 
@@ -3165,99 +3483,75 @@
   ASSERT(ToRegister(instr->key()).is(ecx));
   ASSERT(ToRegister(instr->value()).is(eax));
 
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register new_map_reg = ToRegister(instr->new_map_reg());
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = from_map->elements_kind();
+  ElementsKind to_kind = to_map->elements_kind();
+
+  Label not_applicable;
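+  // Nothing to do if the object no longer has the expected source map.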
+  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+  __ j(not_equal, &not_applicable);
+  __ mov(new_map_reg, to_map);
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+    // Write barrier.
+    ASSERT_NE(instr->temp_reg(), NULL);
+    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+                        ToRegister(instr->temp_reg()), kDontSaveFPRegs);
+  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+      to_kind == FAST_DOUBLE_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(edx));
+    ASSERT(new_map_reg.is(ebx));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+             RelocInfo::CODE_TARGET, instr);
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(edx));
+    ASSERT(new_map_reg.is(ebx));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+             RelocInfo::CODE_TARGET, instr);
+  } else {
+    UNREACHABLE();
+  }
+  __ bind(&not_applicable);
+}
+
+
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   class DeferredStringCharCodeAt: public LDeferredCode {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
 
-  Register string = ToRegister(instr->string());
-  Register index = ToRegister(instr->index());
-  Register result = ToRegister(instr->result());
-
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  // Fetch the instance type of the receiver into result register.
-  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // We need special handling for indirect strings.
-  Label check_sequential;
-  __ test(result, Immediate(kIsIndirectStringMask));
-  __ j(zero, &check_sequential, Label::kNear);
-
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ test(result, Immediate(kSlicedNotConsMask));
-  __ j(zero, &cons_string, Label::kNear);
-
-  // Handle slices.
-  Label indirect_string_loaded;
-  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
-  __ SmiUntag(result);
-  __ add(index, Operand(result));
-  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded, Label::kNear);
-
-  // Handle conses.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ bind(&cons_string);
-  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
-         Immediate(factory()->empty_string()));
-  __ j(not_equal, deferred->entry());
-  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // Check whether the string is sequential. The only non-sequential
-  // shapes we support have just been unwrapped above.
-  __ bind(&check_sequential);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test(result, Immediate(kStringRepresentationMask));
-  __ j(not_zero, deferred->entry());
-
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii_string;
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ test(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string, Label::kNear);
-
-  // Two-byte string.
-  // Load the two-byte character code into the result register.
-  Label done;
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ movzx_w(result, FieldOperand(string,
-                                  index,
-                                  times_2,
-                                  SeqTwoByteString::kHeaderSize));
-  __ jmp(&done, Label::kNear);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ movzx_b(result, FieldOperand(string,
-                                  index,
-                                  times_1,
-                                  SeqAsciiString::kHeaderSize));
-  __ bind(&done);
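+  // The cons/slice unwrapping and ASCII/two-byte dispatch that used to be
+  // inlined here is now shared through StringCharLoadGenerator; only the
+  // runtime fall-back remains in the deferred code.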
+  StringCharLoadGenerator::Generate(masm(),
+                                    factory(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
   __ bind(deferred->exit());
 }
 
@@ -3300,6 +3594,7 @@
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -3349,16 +3644,8 @@
 
 
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
-  if (instr->left()->IsConstantOperand()) {
-    __ push(ToImmediate(instr->left()));
-  } else {
-    __ push(ToOperand(instr->left()));
-  }
-  if (instr->right()->IsConstantOperand()) {
-    __ push(ToImmediate(instr->right()));
-  } else {
-    __ push(ToOperand(instr->right()));
-  }
+  EmitPushTaggedOperand(instr->left());
+  EmitPushTaggedOperand(instr->right());
   StringAddStub stub(NO_STRING_CHECK_IN_STUB);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
@@ -3379,6 +3666,7 @@
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagI* instr_;
   };
@@ -3446,6 +3734,7 @@
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -3506,8 +3795,10 @@
 
 
 void LCodeGen::EmitNumberUntagD(Register input_reg,
+                                Register temp_reg,
                                 XMMRegister result_reg,
                                 bool deoptimize_on_undefined,
+                                bool deoptimize_on_minus_zero,
                                 LEnvironment* env) {
   Label load_smi, done;
 
@@ -3536,6 +3827,15 @@
   }
   // Heap number to XMM conversion.
   __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  if (deoptimize_on_minus_zero) {
+    XMMRegister xmm_scratch = xmm0;
+    __ xorps(xmm_scratch, xmm_scratch);
+    __ ucomisd(result_reg, xmm_scratch);
+    __ j(not_zero, &done, Label::kNear);
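+    // The value compares equal to zero, so it is +0 or -0; movmskpd extracts
+    // the sign bit, and a set bit means -0.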
+    __ movmskpd(temp_reg, result_reg);
+    __ test_b(temp_reg, 1);
+    DeoptimizeIf(not_zero, env);
+  }
   __ jmp(&done, Label::kNear);
 
   // Smi to XMM conversion
@@ -3547,16 +3847,6 @@
 }
 
 
-class DeferredTaggedToI: public LDeferredCode {
- public:
-  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-      : LDeferredCode(codegen), instr_(instr) { }
-  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
-  LTaggedToI* instr_;
-};
-
-
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Label done, heap_number;
   Register input_reg = ToRegister(instr->InputAt(0));
@@ -3589,8 +3879,7 @@
       __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
       __ j(less, &convert, Label::kNear);
       // Pop FPU stack before deoptimizing.
-      __ ffree(0);
-      __ fincstp();
+      __ fstp(0);
       DeoptimizeIf(no_condition, instr->environment());
 
       // Reserve space for 64 bit answer.
@@ -3638,6 +3927,16 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
@@ -3659,14 +3958,23 @@
 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
+  LOperand* temp = instr->TempAt(0);
+  ASSERT(temp == NULL || temp->IsRegister());
   LOperand* result = instr->result();
   ASSERT(result->IsDoubleRegister());
 
   Register input_reg = ToRegister(input);
   XMMRegister result_reg = ToDoubleRegister(result);
 
-  EmitNumberUntagD(input_reg, result_reg,
+  bool deoptimize_on_minus_zero =
+      instr->hydrogen()->deoptimize_on_minus_zero();
+  Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+
+  EmitNumberUntagD(input_reg,
+                   temp_reg,
+                   result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
+                   deoptimize_on_minus_zero,
                    instr->environment());
 }
 
@@ -3840,7 +4148,7 @@
     } else {
       __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
       __ and_(temp, mask);
-      __ cmpb(Operand(temp), tag);
+      __ cmp(temp, tag);
       DeoptimizeIf(not_equal, instr->environment());
     }
   }
@@ -3848,20 +4156,37 @@
 
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  ASSERT(instr->InputAt(0)->IsRegister());
-  Operand operand = ToOperand(instr->InputAt(0));
-  __ cmp(operand, instr->hydrogen()->target());
+  Handle<JSFunction> target = instr->hydrogen()->target();
+  if (isolate()->heap()->InNewSpace(*target)) {
+    Register reg = ToRegister(instr->value());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(target);
+    __ cmp(reg, Operand::Cell(cell));
+  } else {
+    Operand operand = ToOperand(instr->value());
+    __ cmp(operand, target);
+  }
   DeoptimizeIf(not_equal, instr->environment());
 }
 
 
+void LCodeGen::DoCheckMapCommon(Register reg,
+                                Handle<Map> map,
+                                CompareMapMode mode,
+                                LEnvironment* env) {
+  Label success;
+  __ CompareMap(reg, map, &success, mode);
+  DeoptimizeIf(not_equal, env);
+  __ bind(&success);
+}
+
+
 void LCodeGen::DoCheckMap(LCheckMap* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-         instr->hydrogen()->map());
-  DeoptimizeIf(not_equal, instr->environment());
+  Handle<Map> map = instr->hydrogen()->map();
+  DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
 }
 
 
@@ -3913,17 +4238,6 @@
 }
 
 
-void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
-  if (isolate()->heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(object);
-    __ mov(result, Operand::Cell(cell));
-  } else {
-    __ mov(result, object);
-  }
-}
-
-
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   Register reg = ToRegister(instr->TempAt(0));
 
@@ -3931,33 +4245,141 @@
   Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  LoadHeapObject(reg, current_prototype);
+  __ LoadHeapObject(reg, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
-    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-           Handle<Map>(current_prototype->map()));
-    DeoptimizeIf(not_equal, instr->environment());
+    DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    LoadHeapObject(reg, current_prototype);
+    __ LoadHeapObject(reg, current_prototype);
   }
 
   // Check the holder map.
-  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-         Handle<Map>(current_prototype->map()));
-  DeoptimizeIf(not_equal, instr->environment());
+  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
+                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+}
+
+
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+  class DeferredAllocateObject: public LDeferredCode {
+   public:
+    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocateObject* instr_;
+  };
+
+  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->TempAt(0));
+  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+  Handle<Map> initial_map(constructor->initial_map());
+  int instance_size = initial_map->instance_size();
+  ASSERT(initial_map->pre_allocated_property_fields() +
+         initial_map->unused_property_fields() -
+         initial_map->inobject_properties() == 0);
+
+  // Allocate memory for the object.  The initial map might change when
+  // the constructor's prototype changes, but instance size and property
+  // counts remain unchanged (if slack tracking finished).
+  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
+  __ AllocateInNewSpace(instance_size,
+                        result,
+                        no_reg,
+                        scratch,
+                        deferred->entry(),
+                        TAG_OBJECT);
+
+  // Load the initial map.
+  Register map = scratch;
+  __ LoadHeapObject(scratch, constructor);
+  __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
+
+  if (FLAG_debug_code) {
+    __ AbortIfSmi(map);
+    __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
+            instance_size >> kPointerSizeLog2);
+    __ Assert(equal, "Unexpected instance size");
+    __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
+            initial_map->pre_allocated_property_fields());
+    __ Assert(equal, "Unexpected pre-allocated property fields count");
+    __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
+            initial_map->unused_property_fields());
+    __ Assert(equal, "Unexpected unused property fields count");
+    __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
+            initial_map->inobject_properties());
+    __ Assert(equal, "Unexpected in-object property fields count");
+  }
+
+  // Initialize map and fields of the newly allocated object.
+  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+  __ mov(FieldOperand(result, JSObject::kMapOffset), map);
+  __ mov(scratch, factory()->empty_fixed_array());
+  __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
+  __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+  if (initial_map->inobject_properties() != 0) {
+    __ mov(scratch, factory()->undefined_value());
+    for (int i = 0; i < initial_map->inobject_properties(); i++) {
+      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+      __ mov(FieldOperand(result, property_offset), scratch);
+    }
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+  Register result = ToRegister(instr->result());
+  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Set(result, Immediate(0));
+
+  PushSafepointRegistersScope scope(this);
+  __ PushHeapObject(constructor);
+  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(result, eax);
 }
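
DoAllocateObject pairs an inline fast path with out-of-line fallback code: AllocateInNewSpace either bumps the allocation top or jumps to deferred->entry(), and DoDeferredAllocateObject calls into the runtime before rejoining at deferred->exit(). A toy sketch of that fast-path/slow-path split, with made-up types standing in for V8's new space:

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    // A toy "new space": bump allocation with an out-of-line fallback,
    // mirroring the fast path / deferred path split in DoAllocateObject.
    struct NewSpace {
      char* top;
      char* limit;

      void* AllocateRaw(size_t size) {
        if (top + size <= limit) {     // Fast path: inline bump allocation.
          void* result = top;
          top += size;
          return result;
        }
        return SlowAllocate(size);     // Deferred path: "runtime" call.
      }

      void* SlowAllocate(size_t size) {
        // Stands in for CallRuntimeFromDeferred(Runtime::kNewObject, ...).
        return std::malloc(size);
      }
    };

    int main() {
      static char buffer[64];
      NewSpace space = {buffer, buffer + sizeof(buffer)};
      void* a = space.AllocateRaw(48);  // Fits: served from the bump pointer.
      void* b = space.AllocateRaw(48);  // Doesn't fit: takes the slow path.
      std::memset(a, 0, 48);
      std::free(b);                     // Only the slow-path block was malloc'd.
      return 0;
    }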
 
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
-  // Setup the parameters to the stub/runtime call.
+  Heap* heap = isolate()->heap();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate_elements_kind();
+
+  // Deopt if the array literal boilerplate ElementsKind differs from the
+  // expected one. The check isn't necessary if the boilerplate has
+  // already been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
+    __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+    // Load the map's "bit field 2". We only need the first byte,
+    // but the following masking discards the unwanted bits anyway.
+    __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ and_(ebx, Map::kElementsKindMask);
+    __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
+    DeoptimizeIf(not_equal, instr->environment());
+  }
+
+  // Set up the parameters to the stub/runtime call.
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ push(Immediate(instr->hydrogen()->constant_elements()));
+  // The boilerplate already exists, so the constant elements are never
+  // accessed; pass an empty fixed array instead.
+  __ push(Immediate(Handle<FixedArray>(heap->empty_fixed_array())));
 
   // Pick the right runtime function or stub to call.
   int length = instr->hydrogen()->length();
@@ -3973,20 +4395,150 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
 
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+                            Register result,
+                            Register source,
+                            int* offset) {
+  ASSERT(!source.is(ecx));
+  ASSERT(!result.is(ecx));
+
+  if (FLAG_debug_code) {
+    __ LoadHeapObject(ecx, object);
+    __ cmp(source, ecx);
+    __ Assert(equal, "Unexpected object literal boilerplate");
+  }
+
+  // Only elements backing stores for non-COW arrays need to be copied.
+  Handle<FixedArrayBase> elements(object->elements());
+  bool has_elements = elements->length() > 0 &&
+      elements->map() != isolate()->heap()->fixed_cow_array_map();
+
+  // Increase the offset so that subsequent objects end up right after
+  // this object and its backing store.
+  int object_offset = *offset;
+  int object_size = object->map()->instance_size();
+  int elements_offset = *offset + object_size;
+  int elements_size = has_elements ? elements->Size() : 0;
+  *offset += object_size + elements_size;
+
+  // Copy object header.
+  ASSERT(object->properties()->length() == 0);
+  int inobject_properties = object->map()->inobject_properties();
+  int header_size = object_size - inobject_properties * kPointerSize;
+  for (int i = 0; i < header_size; i += kPointerSize) {
+    if (has_elements && i == JSObject::kElementsOffset) {
+      __ lea(ecx, Operand(result, elements_offset));
+    } else {
+      __ mov(ecx, FieldOperand(source, i));
+    }
+    __ mov(FieldOperand(result, object_offset + i), ecx);
+  }
+
+  // Copy in-object properties.
+  for (int i = 0; i < inobject_properties; i++) {
+    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
+    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      __ lea(ecx, Operand(result, *offset));
+      __ mov(FieldOperand(result, total_offset), ecx);
+      __ LoadHeapObject(source, value_object);
+      EmitDeepCopy(value_object, result, source, offset);
+    } else if (value->IsHeapObject()) {
+      __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
+      __ mov(FieldOperand(result, total_offset), ecx);
+    } else {
+      __ mov(FieldOperand(result, total_offset), Immediate(value));
+    }
+  }
+
+  if (has_elements) {
+    // Copy elements backing store header.
+    __ LoadHeapObject(source, elements);
+    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
+      __ mov(ecx, FieldOperand(source, i));
+      __ mov(FieldOperand(result, elements_offset + i), ecx);
+    }
+
+    // Copy elements backing store content.
+    int elements_length = elements->length();
+    if (elements->IsFixedDoubleArray()) {
+      Handle<FixedDoubleArray> double_array =
+          Handle<FixedDoubleArray>::cast(elements);
+      for (int i = 0; i < elements_length; i++) {
+        int64_t value = double_array->get_representation(i);
+        int32_t value_low = value & 0xFFFFFFFF;
+        int32_t value_high = value >> 32;
+        int total_offset =
+            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+        __ mov(FieldOperand(result, total_offset), Immediate(value_low));
+        __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
+      }
+    } else if (elements->IsFixedArray()) {
+      for (int i = 0; i < elements_length; i++) {
+        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+        Handle<Object> value = JSObject::GetElement(object, i);
+        if (value->IsJSObject()) {
+          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+          __ lea(ecx, Operand(result, *offset));
+          __ mov(FieldOperand(result, total_offset), ecx);
+          __ LoadHeapObject(source, value_object);
+          EmitDeepCopy(value_object, result, source, offset);
+        } else if (value->IsHeapObject()) {
+          __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
+          __ mov(FieldOperand(result, total_offset), ecx);
+        } else {
+          __ mov(FieldOperand(result, total_offset), Immediate(value));
+        }
+      }
+    } else {
+      UNREACHABLE();
+    }
+  }
+}
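
Because ia32 stores at most 32 bits per mov, the FixedDoubleArray branch above writes each element as two immediates: the low word at the element's offset and the high word 4 bytes later. A standalone sketch of the same split (the reassembly check is only for illustration):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      double d = 2.5;
      uint64_t value;
      std::memcpy(&value, &d, sizeof(value));    // get_representation(i)

      // The same split as in EmitDeepCopy: low word first, high word at +4.
      uint32_t value_low  = static_cast<uint32_t>(value & 0xFFFFFFFF);
      uint32_t value_high = static_cast<uint32_t>(value >> 32);

      // Reassemble to check the two 32-bit stores reproduce the double.
      uint64_t reassembled =
          (static_cast<uint64_t>(value_high) << 32) | value_low;
      double out;
      std::memcpy(&out, &reassembled, sizeof(out));
      assert(out == d);
      return 0;
    }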
+
+
+void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
+  ASSERT(ToRegister(instr->context()).is(esi));
+  int size = instr->hydrogen()->total_size();
+
+  // Allocate all objects that are part of the literal in one big
+  // allocation. This avoids multiple limit checks.
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ push(Immediate(Smi::FromInt(size)));
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+  __ bind(&allocated);
+  int offset = 0;
+  __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
+  EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
+  ASSERT_EQ(size, offset);
+}
+
+
 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
-  // Setup the parameters to the stub/runtime call.
-  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
+  Handle<FixedArray> literals(instr->environment()->closure()->literals());
+  Handle<FixedArray> constant_properties =
+      instr->hydrogen()->constant_properties();
+
+  // Set up the parameters to the stub/runtime call.
+  __ PushHeapObject(literals);
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ push(Immediate(instr->hydrogen()->constant_properties()));
+  __ push(Immediate(constant_properties));
   int flags = instr->hydrogen()->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -3995,11 +4547,16 @@
       : ObjectLiteral::kNoFlags;
   __ push(Immediate(Smi::FromInt(flags)));
 
-  // Pick the right runtime function to call.
+  // Pick the right runtime function or stub to call.
+  int properties_count = constant_properties->length() / 2;
   if (instr->hydrogen()->depth() > 1) {
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
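
The dispatch above has three tiers: literals nested deeper than one level take the full runtime call, shallow literals that are too large or lack fast elements take the shallow runtime call, and the rest use the new stub. properties_count is half the constant_properties length because that array stores alternating keys and values. A reduced sketch of the decision, with an assumed value standing in for the stub's property limit:

    #include <cstdio>

    enum CallTarget { kCreateObjectLiteral,          // Full runtime call.
                      kCreateObjectLiteralShallow,   // Shallow runtime call.
                      kFastCloneStub };              // Specialized stub.

    // Illustrative stand-in for FastCloneShallowObjectStub's limit.
    const int kMaximumClonedProperties = 6;

    CallTarget Pick(int depth, bool fast_elements, int constant_properties_len) {
      // constant_properties holds key/value pairs, hence the division by two.
      int properties_count = constant_properties_len / 2;
      if (depth > 1) return kCreateObjectLiteral;
      if (!fast_elements || properties_count > kMaximumClonedProperties) {
        return kCreateObjectLiteralShallow;
      }
      return kFastCloneStub;
    }

    int main() {
      std::printf("%d\n", Pick(2, true, 4));   // Nested literal: full runtime.
      std::printf("%d\n", Pick(1, true, 20));  // Too many props: shallow call.
      std::printf("%d\n", Pick(1, true, 4));   // Small and fast: stub.
      return 0;
    }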
 
@@ -4072,12 +4629,11 @@
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && shared_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(shared_info->language_mode());
     __ push(Immediate(shared_info));
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
-    __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ push(esi);
     __ push(Immediate(shared_info));
     __ push(Immediate(pretenure
                       ? factory()->true_value()
@@ -4089,11 +4645,7 @@
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
   LOperand* input = instr->InputAt(1);
-  if (input->IsConstantOperand()) {
-    __ push(ToImmediate(input));
-  } else {
-    __ push(ToOperand(input));
-  }
+  EmitPushTaggedOperand(input);
   CallRuntime(Runtime::kTypeof, 1, instr);
 }
 
@@ -4105,12 +4657,11 @@
   Label* true_label = chunk_->GetAssemblyLabel(true_block);
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  Condition final_branch_condition = EmitTypeofIs(true_label,
-                                                  false_label,
-                                                  input,
-                                                  instr->type_literal());
-
-  EmitBranch(true_block, false_block, final_branch_condition);
+  Condition final_branch_condition =
+      EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+  if (final_branch_condition != no_condition) {
+    EmitBranch(true_block, false_block, final_branch_condition);
+  }
 }
 
 
@@ -4154,10 +4705,12 @@
     final_branch_condition = not_zero;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
-    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ JumpIfSmi(input, false_label);
-    __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
-    final_branch_condition = above_equal;
+    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+    __ j(equal, true_label);
+    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+    final_branch_condition = equal;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@@ -4175,11 +4728,8 @@
     final_branch_condition = zero;
 
   } else {
-    final_branch_condition = not_equal;
     __ jmp(false_label);
-    // A dead branch instruction will be generated after this point.
   }
-
   return final_branch_condition;
 }
 
@@ -4219,9 +4769,7 @@
   int patch_size = Deoptimizer::patch_size();
   if (current_pc < last_lazy_deopt_pc_ + patch_size) {
     int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
-    while (padding_size-- > 0) {
-      __ nop();
-    }
+    __ Nop(padding_size);
   }
   last_lazy_deopt_pc_ = masm()->pc_offset();
 }
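
EnsureSpaceForLazyDeopt keeps consecutive lazy-deopt sites at least patch_size() bytes apart so the patcher can later overwrite a site without clobbering its neighbor; Nop(padding_size) emits the same byte count as the removed nop loop, just with fewer, wider instructions. The padding arithmetic in isolation:

    #include <algorithm>
    #include <cstdio>

    // Returns how many padding bytes must be emitted so that
    // current_pc - last_lazy_deopt_pc >= patch_size.
    int PaddingBytes(int current_pc, int last_lazy_deopt_pc, int patch_size) {
      return std::max(0, last_lazy_deopt_pc + patch_size - current_pc);
    }

    int main() {
      // E.g. the previous deopt site ended at offset 100 and the patch
      // needs 5 bytes; at offset 102 we still have to pad 3 bytes.
      std::printf("%d\n", PaddingBytes(102, 100, 5));  // Prints 3.
      std::printf("%d\n", PaddingBytes(110, 100, 5));  // Prints 0.
      return 0;
    }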
@@ -4245,11 +4793,7 @@
   LOperand* obj = instr->object();
   LOperand* key = instr->key();
   __ push(ToOperand(obj));
-  if (key->IsConstantOperand()) {
-    __ push(ToImmediate(key));
-  } else {
-    __ push(ToOperand(key));
-  }
+  EmitPushTaggedOperand(key);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
@@ -4281,6 +4825,7 @@
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStackCheck* instr_;
   };
@@ -4345,16 +4890,8 @@
 void LCodeGen::DoIn(LIn* instr) {
   LOperand* obj = instr->object();
   LOperand* key = instr->key();
-  if (key->IsConstantOperand()) {
-    __ push(ToImmediate(key));
-  } else {
-    __ push(ToOperand(key));
-  }
-  if (obj->IsConstantOperand()) {
-    __ push(ToImmediate(obj));
-  } else {
-    __ push(ToOperand(obj));
-  }
+  EmitPushTaggedOperand(key);
+  EmitPushTaggedOperand(obj);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
@@ -4364,6 +4901,84 @@
 }
 
 
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  __ cmp(eax, isolate()->factory()->undefined_value());
+  DeoptimizeIf(equal, instr->environment());
+
+  __ cmp(eax, isolate()->factory()->null_value());
+  DeoptimizeIf(equal, instr->environment());
+
+  __ test(eax, Immediate(kSmiTagMask));
+  DeoptimizeIf(zero, instr->environment());
+
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+  DeoptimizeIf(below_equal, instr->environment());
+
+  Label use_cache, call_runtime;
+  __ CheckEnumCache(&call_runtime);
+
+  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+  __ jmp(&use_cache, Label::kNear);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(eax);
+  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+         isolate()->factory()->meta_map());
+  DeoptimizeIf(not_equal, instr->environment());
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  __ LoadInstanceDescriptors(map, result);
+  __ mov(result,
+         FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
+  __ mov(result,
+         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+  __ test(result, result);
+  DeoptimizeIf(equal, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  __ cmp(ToRegister(instr->map()),
+         FieldOperand(object, HeapObject::kMapOffset));
+  DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+
+  Label out_of_object, done;
+  __ cmp(index, Immediate(0));
+  __ j(less, &out_of_object);
+  __ mov(object, FieldOperand(object,
+                              index,
+                              times_half_pointer_size,
+                              JSObject::kHeaderSize));
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&out_of_object);
+  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
+  __ neg(index);
+  // Index is now equal to the out-of-object property index plus 1.
+  __ mov(object, FieldOperand(object,
+                              index,
+                              times_half_pointer_size,
+                              FixedArray::kHeaderSize - kPointerSize));
+  __ bind(&done);
+}
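
DoLoadFieldByIndex decodes a signed field index: non-negative values address in-object fields from the object header, while negative values address the out-of-object properties array, offset by one so that -1 maps to slot 0 (hence the -kPointerSize in the final operand). The same decoding in plain C++, with vectors standing in for the two backing stores:

    #include <cassert>
    #include <vector>

    // Toy object: a few in-object slots plus an out-of-object properties array.
    struct Object {
      std::vector<int> in_object;    // Fields stored inside the object.
      std::vector<int> properties;   // Overflow fields ("properties" array).
    };

    // Mirrors DoLoadFieldByIndex: index >= 0 hits the in-object fields;
    // index < 0 is negated, giving the out-of-object slot index plus 1.
    int LoadFieldByIndex(const Object& obj, int index) {
      if (index >= 0) return obj.in_object[index];
      return obj.properties[-index - 1];
    }

    int main() {
      Object obj = {{10, 11, 12}, {20, 21}};
      assert(LoadFieldByIndex(obj, 1) == 11);    // In-object slot 1.
      assert(LoadFieldByIndex(obj, -1) == 20);   // Properties slot 0.
      assert(LoadFieldByIndex(obj, -2) == 21);   // Properties slot 1.
      return 0;
    }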
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index d955450..52befc6 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -77,7 +77,13 @@
   Operand ToOperand(LOperand* op) const;
   Register ToRegister(LOperand* op) const;
   XMMRegister ToDoubleRegister(LOperand* op) const;
-  Immediate ToImmediate(LOperand* op);
+
+  bool IsInteger32(LConstantOperand* op) const;
+  Immediate ToInteger32Immediate(LOperand* op) const {
+    return Immediate(ToInteger32(LConstantOperand::cast(op)));
+  }
+
+  Handle<Object> ToHandle(LConstantOperand* op) const;
 
   // The operand denoting the second word (the one with a higher address) of
   // a double stack slot.
@@ -98,11 +104,16 @@
   void DoDeferredTaggedToI(LTaggedToI* instr);
   void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
   void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredRandom(LRandom* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocateObject(LAllocateObject* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
 
+  void DoCheckMapCommon(Register reg, Handle<Map> map,
+                        CompareMapMode mode, LEnvironment* env);
+
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
   void DoGap(LGap* instr);
@@ -130,8 +141,8 @@
   bool is_done() const { return status_ == DONE; }
   bool is_aborted() const { return status_ == ABORTED; }
 
-  int strict_mode_flag() const {
-    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+  StrictModeFlag strict_mode_flag() const {
+    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -202,8 +213,6 @@
                          LInstruction* instr,
                          CallKind call_kind);
 
-  void LoadHeapObject(Register result, Handle<HeapObject> object);
-
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode);
 
@@ -222,6 +231,8 @@
   Register ToRegister(int index) const;
   XMMRegister ToDoubleRegister(int index) const;
   int ToInteger32(LConstantOperand* op) const;
+
+  double ToDouble(LConstantOperand* op) const;
   Operand BuildFastArrayOperand(LOperand* elements_pointer,
                                 LOperand* key,
                                 ElementsKind elements_kind,
@@ -233,8 +244,8 @@
   void DoMathFloor(LUnaryMathOperation* instr);
   void DoMathRound(LUnaryMathOperation* instr);
   void DoMathSqrt(LUnaryMathOperation* instr);
-  void DoMathPowHalf(LUnaryMathOperation* instr);
   void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathTan(LUnaryMathOperation* instr);
   void DoMathCos(LUnaryMathOperation* instr);
   void DoMathSin(LUnaryMathOperation* instr);
 
@@ -253,17 +264,20 @@
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
-  void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
+                        Register temp,
                         XMMRegister result,
                         bool deoptimize_on_undefined,
+                        bool deoptimize_on_minus_zero,
                         LEnvironment* env);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false labels should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(Label* true_label, Label* false_label,
-                         Register input, Handle<String> type_name);
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name);
 
   // Emits optimized code for %_IsObject(x).  Preserves input register.
   // Returns the condition on which a final split to
@@ -273,6 +287,13 @@
                          Label* is_not_object,
                          Label* is_object);
 
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false labels should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string);
+
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp);
@@ -281,8 +302,20 @@
                                        Register object,
                                        Handle<Map> type,
                                        Handle<String> name);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset);
+
   void EnsureSpaceForLazyDeopt();
 
+  // Emits code for pushing either a tagged constant, a (non-double)
+  // register, or a stack slot operand.
+  void EmitPushTaggedOperand(LOperand* operand);
+
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
@@ -338,16 +371,20 @@
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen), external_exit_(NULL) {
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
     codegen->AddDeferredCode(this);
   }
 
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
 
-  void SetExit(Label *exit) { external_exit_ = exit; }
+  void SetExit(Label* exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
 
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -358,6 +395,7 @@
   Label entry_;
   Label exit_;
   Label* external_exit_;
+  int instruction_index_;
 };
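
Each LDeferredCode now records the index of the Lithium instruction that created it and exposes that instruction through the new pure-virtual instr(), which lets the codegen attribute deferred blocks back to their source instruction. A reduced sketch of the underlying idiom, self-registering out-of-line code generated in one batch (Codegen here is a stand-in, not V8's class):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    class Codegen;

    // Skeleton of the deferred-code idiom: out-of-line code registered at
    // construction, then generated in one batch after the main body.
    class DeferredCode {
     public:
      explicit DeferredCode(Codegen* codegen);
      virtual ~DeferredCode() {}
      virtual void Generate() = 0;   // Out-of-line body, emitted at the end.

     protected:
      Codegen* codegen_;
    };

    class Codegen {
     public:
      void Add(DeferredCode* code) { deferred_.push_back(code); }
      void GenerateDeferredCode() {
        for (size_t i = 0; i < deferred_.size(); ++i) deferred_[i]->Generate();
      }

     private:
      std::vector<DeferredCode*> deferred_;
    };

    DeferredCode::DeferredCode(Codegen* codegen) : codegen_(codegen) {
      codegen->Add(this);   // Self-registration, as in LDeferredCode's ctor.
    }

    class DeferredSlowPath : public DeferredCode {
     public:
      explicit DeferredSlowPath(Codegen* codegen) : DeferredCode(codegen) {}
      virtual void Generate() { std::printf("slow path emitted\n"); }
    };

    int main() {
      Codegen cg;
      DeferredSlowPath slow(&cg);   // Registered during the "fast path".
      cg.GenerateDeferredCode();    // All deferred bodies emitted afterwards.
      return 0;
    }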
 
 } }  // namespace v8::internal
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index fcf1f91..510d9f1 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -303,14 +303,24 @@
     }
 
   } else if (source->IsConstantOperand()) {
-    ASSERT(destination->IsRegister() || destination->IsStackSlot());
-    Immediate src = cgen_->ToImmediate(source);
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
     if (destination->IsRegister()) {
       Register dst = cgen_->ToRegister(destination);
-      __ Set(dst, src);
+      if (cgen_->IsInteger32(constant_source)) {
+        __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
+      } else {
+        __ LoadObject(dst, cgen_->ToHandle(constant_source));
+      }
     } else {
+      ASSERT(destination->IsStackSlot());
       Operand dst = cgen_->ToOperand(destination);
-      __ Set(dst, src);
+      if (cgen_->IsInteger32(constant_source)) {
+        __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
+      } else {
+        Register tmp = EnsureTempRegister();
+        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
+        __ mov(dst, tmp);
+      }
     }
 
   } else if (source->IsDoubleRegister()) {
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 3dc220d..186b346 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -110,22 +110,17 @@
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+void LInstruction::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  for (int i = 0; i < inputs_.length(); i++) {
+  for (int i = 0; i < InputCount(); i++) {
     if (i > 0) stream->Add(" ");
-    inputs_[i]->PrintTo(stream);
+    InputAt(i)->PrintTo(stream);
   }
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
-  for (int i = 0; i < results_.length(); i++) {
-    if (i > 0) stream->Add(" ");
-    results_[i]->PrintTo(stream);
-  }
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
 }
 
 
@@ -214,10 +209,11 @@
 }
 
 
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
@@ -229,6 +225,13 @@
 }
 
 
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   InputAt(0)->PrintTo(stream);
@@ -243,6 +246,14 @@
 }
 
 
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  InputAt(1)->PrintTo(stream);
+  InputAt(2)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -287,6 +298,12 @@
 }
 
 
+void LMathPowHalf::PrintDataTo(StringStream* stream) {
+  stream->Add("/pow_half ");
+  InputAt(0)->PrintTo(stream);
+}
+
+
 void LLoadContextSlot::PrintDataTo(StringStream* stream) {
   InputAt(0)->PrintTo(stream);
   stream->Add("[%d]", slot_index());
@@ -367,7 +384,7 @@
 
 
 void LChunk::MarkEmptyBlocks() {
-  HPhase phase("Mark empty blocks", this);
+  HPhase phase("L_Mark empty blocks", this);
   for (int i = 0; i < graph()->blocks()->length(); ++i) {
     HBasicBlock* block = graph()->blocks()->at(i);
     int first = block->first_instruction_index();
@@ -447,8 +464,14 @@
 }
 
 
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LInstructionGap* gap = new LInstructionGap(block);
+  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -523,8 +546,8 @@
 
 LChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
-  chunk_ = new LChunk(info(), graph());
-  HPhase phase("Building chunk", chunk_);
+  chunk_ = new(zone()) LChunk(info(), graph());
+  HPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
   for (int i = 0; i < blocks->length(); i++) {
@@ -553,20 +576,15 @@
 }
 
 
-LRegister* LChunkBuilder::ToOperand(Register reg) {
-  return LRegister::Create(Register::ToAllocationIndex(reg));
-}
-
-
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
-  return new LUnallocated(LUnallocated::FIXED_REGISTER,
-                          Register::ToAllocationIndex(reg));
+  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+                                  Register::ToAllocationIndex(reg));
 }
 
 
 LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
-  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-                          XMMRegister::ToAllocationIndex(reg));
+  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                                  XMMRegister::ToAllocationIndex(reg));
 }
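
The new(zone()) spelling used throughout this file is placement new against an arena: a zone-aware operator new bumps a pointer inside the zone's buffer, and everything allocated there is released wholesale when the zone dies, which is why none of these nodes are individually deleted. A toy arena with the same call shape (this Zone is illustrative, not V8's):

    #include <cstddef>
    #include <cstdio>
    #include <new>

    // Toy arena: allocations are bump-pointer; no overflow handling here.
    class Zone {
     public:
      void* Allocate(size_t size) {
        size = (size + 7) & ~size_t(7);   // Keep 8-byte alignment.
        void* result = buffer_ + used_;
        used_ += size;
        return result;
      }

     private:
      alignas(8) char buffer_[1024];
      size_t used_ = 0;
    };

    // Enables the new(zone) T(...) spelling used throughout the builder.
    void* operator new(size_t size, Zone* zone) { return zone->Allocate(size); }
    void operator delete(void*, Zone*) {}  // Only used if a ctor throws.

    struct LUnallocatedLike {
      explicit LUnallocatedLike(int policy) : policy(policy) {}
      int policy;
    };

    int main() {
      Zone zone;
      LUnallocatedLike* op = new (&zone) LUnallocatedLike(1);
      std::printf("policy=%d\n", op->policy);
      // No delete: the zone's storage goes away wholesale.
      return 0;
    }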
 
 
@@ -581,30 +599,30 @@
 
 
 LOperand* LChunkBuilder::UseRegister(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
   return Use(value,
-             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
-                              LUnallocated::USED_AT_START));
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
 }
 
 
 LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::Use(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::NONE));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
 }
 
 
 LOperand* LChunkBuilder::UseAtStart(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::NONE,
-                                     LUnallocated::USED_AT_START));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+                                             LUnallocated::USED_AT_START));
 }
 
 
@@ -639,7 +657,7 @@
 LOperand* LChunkBuilder::UseAny(HValue* value) {
   return value->IsConstant()
       ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      :  Use(value, new LUnallocated(LUnallocated::ANY));
+      :  Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
 }
 
 
@@ -648,7 +666,7 @@
     HInstruction* instr = HInstruction::cast(value);
     VisitInstruction(instr);
   }
-  allocator_->RecordUse(value, operand);
+  operand->set_virtual_register(value->id());
   return operand;
 }
 
@@ -656,22 +674,17 @@
 template<int I, int T>
 LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
                                     LUnallocated* result) {
-  allocator_->RecordDefinition(current_instruction_, result);
+  result->set_virtual_register(current_instruction_->id());
   instr->set_result(result);
   return instr;
 }
 
 
 template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::NONE));
-}
-
-
-template<int I, int T>
 LInstruction* LChunkBuilder::DefineAsRegister(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
@@ -679,14 +692,16 @@
 LInstruction* LChunkBuilder::DefineAsSpilled(
     LTemplateInstruction<1, I, T>* instr,
     int index) {
-  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
 }
 
 
 template<int I, int T>
 LInstruction* LChunkBuilder::DefineSameAsFirst(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
 }
 
 
@@ -707,7 +722,9 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  instr->set_environment(CreateEnvironment(hydrogen_env));
+  int argument_index_accumulator = 0;
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator));
   return instr;
 }
 
@@ -737,7 +754,7 @@
   instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
-  if (hinstr->HasSideEffects()) {
+  if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
     instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -749,7 +766,8 @@
   // Thus we still need to attach an environment to this call even if
   // the call sequence cannot deoptimize eagerly.
   bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
@@ -766,67 +784,48 @@
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
-  instr->set_pointer_map(new LPointerMap(position_));
+  instr->set_pointer_map(new(zone()) LPointerMap(position_));
   return instr;
 }
 
 
 LUnallocated* LChunkBuilder::TempRegister() {
-  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
-  allocator_->RecordTemporary(operand);
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  operand->set_virtual_register(allocator_->GetVirtualRegister());
+  if (!allocator_->AllocationOk()) {
+    Abort("Not enough virtual registers (temps).");
+  }
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(Register reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  allocator_->RecordTemporary(operand);
+  ASSERT(operand->HasFixedPolicy());
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  allocator_->RecordTemporary(operand);
+  ASSERT(operand->HasFixedPolicy());
   return operand;
 }
 
 
 LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
-  return new LLabel(instr->block());
+  return new(zone()) LLabel(instr->block());
 }
 
 
 LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new LDeoptimize);
+  return AssignEnvironment(new(zone()) LDeoptimize);
 }
 
 
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
-  return AssignEnvironment(new LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
-                                   HBitwiseBinaryOperation* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineSameAsFirst(new LBitI(op, left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* context = UseFixed(instr->context(), esi);
-    LOperand* left = UseFixed(instr->left(), edx);
-    LOperand* right = UseFixed(instr->right(), eax);
-    LArithmeticT* result = new LArithmeticT(op, context, left, right);
-    return MarkAsCall(DefineFixed(result, eax), instr);
-  }
+  return AssignEnvironment(new(zone()) LDeoptimize);
 }
 
 
@@ -839,7 +838,7 @@
     LOperand* context = UseFixed(instr->context(), esi);
     LOperand* left = UseFixed(instr->left(), edx);
     LOperand* right = UseFixed(instr->right(), eax);
-    LArithmeticT* result = new LArithmeticT(op, context, left, right);
+    LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
     return MarkAsCall(DefineFixed(result, eax), instr);
   }
 
@@ -873,7 +872,7 @@
   }
 
   LInstruction* result =
-      DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+      DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
   return does_deopt ? AssignEnvironment(result) : result;
 }
 
@@ -886,7 +885,7 @@
   ASSERT(op != Token::MOD);
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  LArithmeticD* result = new LArithmeticD(op, left, right);
+  LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
   return DefineSameAsFirst(result);
 }
 
@@ -906,7 +905,7 @@
   LOperand* left_operand = UseFixed(left, edx);
   LOperand* right_operand = UseFixed(right, eax);
   LArithmeticT* result =
-      new LArithmeticT(op, context, left_operand, right_operand);
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -994,20 +993,26 @@
 }
 
 
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+    HEnvironment* hydrogen_env,
+    int* argument_index_accumulator) {
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
   int ast_id = hydrogen_env->ast_id();
-  ASSERT(ast_id != AstNode::kNoNumber);
+  ASSERT(ast_id != AstNode::kNoNumber ||
+         hydrogen_env->frame_type() != JS_FUNCTION);
   int value_count = hydrogen_env->length();
-  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
-                                          ast_id,
-                                          hydrogen_env->parameter_count(),
-                                          argument_count_,
-                                          value_count,
-                                          outer);
-  int argument_index = 0;
+  LEnvironment* result =
+      new(zone()) LEnvironment(hydrogen_env->closure(),
+                               hydrogen_env->frame_type(),
+                               ast_id,
+                               hydrogen_env->parameter_count(),
+                               argument_count_,
+                               value_count,
+                               outer);
+  int argument_index = *argument_index_accumulator;
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1016,56 +1021,69 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new LArgument(argument_index++);
+      op = new(zone()) LArgument(argument_index++);
     } else {
       op = UseAny(value);
     }
     result->AddValue(op, value->representation());
   }
 
+  if (hydrogen_env->frame_type() == JS_FUNCTION) {
+    *argument_index_accumulator = argument_index;
+  }
+
   return result;
 }
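
CreateEnvironment now threads an argument-index accumulator through the recursion over outer environments, so LArgument slots keep numbering across inlined frames; only a JS_FUNCTION frame commits its final index back to the accumulator. A stripped-down sketch of the accumulator pattern with illustrative frame types:

    #include <cstdio>

    enum FrameType { JS_FUNCTION, ARGUMENTS_ADAPTOR };

    struct HEnv {
      HEnv* outer;
      FrameType frame_type;
      int push_argument_count;   // How many LArgument slots this frame adds.
    };

    // Mirrors the shape of CreateEnvironment: recurse outermost-first, hand
    // out consecutive argument indices, and let only JS_FUNCTION frames
    // commit their progress to the shared accumulator.
    void CreateEnvironment(HEnv* env, int* argument_index_accumulator) {
      if (env == NULL) return;
      CreateEnvironment(env->outer, argument_index_accumulator);

      int argument_index = *argument_index_accumulator;
      for (int i = 0; i < env->push_argument_count; ++i) {
        std::printf("frame %p: LArgument(%d)\n",
                    static_cast<void*>(env), argument_index++);
      }
      if (env->frame_type == JS_FUNCTION) {
        *argument_index_accumulator = argument_index;
      }
    }

    int main() {
      HEnv outer = {NULL, JS_FUNCTION, 2};
      HEnv inner = {&outer, ARGUMENTS_ADAPTOR, 2};
      int acc = 0;
      CreateEnvironment(&inner, &acc);   // Emits indices 0,1 then 2,3.
      return 0;
    }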
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  return new LGoto(instr->FirstSuccessor()->block_id());
+  return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
 }
 
 
 LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
-  HValue* v = instr->value();
-  if (v->EmitAtUses()) {
-    ASSERT(v->IsConstant());
-    ASSERT(!v->representation().IsDouble());
-    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+  HValue* value = instr->value();
+  if (value->EmitAtUses()) {
+    ASSERT(value->IsConstant());
+    ASSERT(!value->representation().IsDouble());
+    HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
         ? instr->FirstSuccessor()
         : instr->SecondSuccessor();
-    return new LGoto(successor->block_id());
+    return new(zone()) LGoto(successor->block_id());
   }
+
+  // Untagged integers or doubles, smis, and booleans don't require a
+  // deoptimization environment or a temp register.
+  Representation rep = value->representation();
+  HType type = value->type();
+  if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
+    return new(zone()) LBranch(UseRegister(value), NULL);
+  }
+
   ToBooleanStub::Types expected = instr->expected_input_types();
   // We need a temporary register when we have to access the map *or* we have
   // no type info yet, in which case we handle all cases (including the ones
   // involving maps).
   bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
   LOperand* temp = needs_temp ? TempRegister() : NULL;
-  return AssignEnvironment(new LBranch(UseRegister(v), temp));
+  return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
 }
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
-  return new LCmpMapAndBranch(value);
+  return new(zone()) LCmpMapAndBranch(value);
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
-  return DefineAsRegister(new LArgumentsLength(Use(length->value())));
+  return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
-  return DefineAsRegister(new LArgumentsElements);
+  return DefineAsRegister(new(zone()) LArgumentsElements);
 }
 
 
@@ -1073,7 +1091,7 @@
   LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
   LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
   LOperand* context = UseFixed(instr->context(), esi);
-  LInstanceOf* result = new LInstanceOf(context, left, right);
+  LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1081,7 +1099,7 @@
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new LInstanceOfKnownGlobal(
+      new(zone()) LInstanceOfKnownGlobal(
           UseFixed(instr->context(), esi),
           UseFixed(instr->left(), InstanceofStub::left()),
           FixedTemp(edi));
@@ -1089,17 +1107,25 @@
 }
 
 
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegister(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LOperand* temp = TempRegister();
+  LWrapReceiver* result =
+      new(zone()) LWrapReceiver(receiver, function, temp);
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
 LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
   LOperand* function = UseFixed(instr->function(), edi);
   LOperand* receiver = UseFixed(instr->receiver(), eax);
   LOperand* length = UseFixed(instr->length(), ebx);
   LOperand* elements = UseFixed(instr->elements(), ecx);
-  LOperand* temp = FixedTemp(edx);
-  LApplyArguments* result = new LApplyArguments(function,
-                                                receiver,
-                                                length,
-                                                elements,
-                                                temp);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements);
   return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1107,42 +1133,50 @@
 LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
   ++argument_count_;
   LOperand* argument = UseAny(instr->argument());
-  return new LPushArgument(argument);
+  return new(zone()) LPushArgument(argument);
 }
 
 
 LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
 }
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
 }
 
 
 LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LOuterContext(context));
+  return DefineAsRegister(new(zone()) LOuterContext(context));
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LGlobalObject(context));
+  return DefineAsRegister(new(zone()) LGlobalObject(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
   LOperand* global_object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LGlobalReceiver(global_object));
+  return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
 }
 
 
 LInstruction* LChunkBuilder::DoCallConstantFunction(
     HCallConstantFunction* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallConstantFunction, eax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
 }
 
 
@@ -1150,7 +1184,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* function = UseFixed(instr->function(), edi);
   argument_count_ -= instr->argument_count();
-  LInvokeFunction* result = new LInvokeFunction(context, function);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
   return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1162,17 +1196,25 @@
     ASSERT(instr->value()->representation().IsDouble());
     LOperand* context = UseAny(instr->context());  // Not actually used.
     LOperand* input = UseRegisterAtStart(instr->value());
-    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+                                                                  input);
     return DefineSameAsFirst(result);
-  } else if (op == kMathSin || op == kMathCos) {
+  } else if (op == kMathSin || op == kMathCos || op == kMathTan) {
     LOperand* context = UseFixed(instr->context(), esi);
     LOperand* input = UseFixedDouble(instr->value(), xmm1);
-    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+                                                                  input);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
     LOperand* context = UseAny(instr->context());  // Deferred use by MathAbs.
-    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+    if (op == kMathPowHalf) {
+      LOperand* temp = TempRegister();
+      LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
+      return DefineSameAsFirst(result);
+    }
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+                                                                  input);
     switch (op) {
       case kMathAbs:
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1182,8 +1224,6 @@
         return AssignEnvironment(DefineAsRegister(result));
       case kMathSqrt:
         return DefineSameAsFirst(result);
-      case kMathPowHalf:
-        return DefineSameAsFirst(result);
       default:
         UNREACHABLE();
         return NULL;
@@ -1197,7 +1237,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* key = UseFixed(instr->key(), ecx);
   argument_count_ -= instr->argument_count();
-  LCallKeyed* result = new LCallKeyed(context, key);
+  LCallKeyed* result = new(zone()) LCallKeyed(context, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1205,7 +1245,7 @@
 LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallNamed* result = new LCallNamed(context);
+  LCallNamed* result = new(zone()) LCallNamed(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1213,14 +1253,14 @@
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallGlobal* result = new LCallGlobal(context);
+  LCallGlobal* result = new(zone()) LCallGlobal(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallKnownGlobal, eax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
 }
 
 
@@ -1228,15 +1268,16 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* constructor = UseFixed(instr->constructor(), edi);
   argument_count_ -= instr->argument_count();
-  LCallNew* result = new LCallNew(context, constructor);
+  LCallNew* result = new(zone()) LCallNew(context, constructor);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* function = UseFixed(instr->function(), edi);
   argument_count_ -= instr->argument_count();
-  LCallFunction* result = new LCallFunction(context);
+  LCallFunction* result = new(zone()) LCallFunction(context, function);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1244,7 +1285,7 @@
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   argument_count_ -= instr->argument_count();
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LCallRuntime(context), eax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
 }
 
 
@@ -1263,8 +1304,26 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
-  return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineSameAsFirst(new(zone()) LBitI(left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* context = UseFixed(instr->context(), esi);
+    LOperand* left = UseFixed(instr->left(), edx);
+    LOperand* right = UseFixed(instr->right(), eax);
+    LArithmeticT* result =
+        new(zone()) LArithmeticT(instr->op(), context, left, right);
+    return MarkAsCall(DefineFixed(result, eax), instr);
+  }
 }
 
 
@@ -1272,21 +1331,11 @@
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
   LOperand* input = UseRegisterAtStart(instr->value());
-  LBitNotI* result = new LBitNotI(input);
+  LBitNotI* result = new(zone()) LBitNotI(input);
   return DefineSameAsFirst(result);
 }
 
 
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
-  return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
-  return DoBit(Token::BIT_XOR, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
@@ -1296,7 +1345,7 @@
     LOperand* temp = FixedTemp(edx);
     LOperand* dividend = UseFixed(instr->left(), eax);
     LOperand* divisor = UseRegister(instr->right());
-    LDivI* result = new LDivI(dividend, divisor, temp);
+    LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
     return AssignEnvironment(DefineFixed(result, eax));
   } else {
     ASSERT(instr->representation().IsTagged());
@@ -1314,7 +1363,8 @@
     if (instr->HasPowerOf2Divisor()) {
       ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
       LOperand* value = UseRegisterAtStart(instr->left());
-      LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
+      LModI* mod =
+          new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
       result = DefineSameAsFirst(mod);
     } else {
       // The temporary operand is necessary to ensure that right is
@@ -1322,7 +1372,7 @@
       LOperand* temp = FixedTemp(edx);
       LOperand* value = UseFixed(instr->left(), eax);
       LOperand* divisor = UseRegister(instr->right());
-      LModI* mod = new LModI(value, divisor, temp);
+      LModI* mod = new(zone()) LModI(value, divisor, temp);
       result = DefineFixed(mod, edx);
     }
 
@@ -1339,7 +1389,7 @@
     // TODO(fschneider): Allow any register as input registers.
     LOperand* left = UseFixedDouble(instr->left(), xmm2);
     LOperand* right = UseFixedDouble(instr->right(), xmm1);
-    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   }
 }
@@ -1355,8 +1405,12 @@
     if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
       temp = TempRegister();
     }
-    LMulI* mul = new LMulI(left, right, temp);
-    return AssignEnvironment(DefineSameAsFirst(mul));
+    LMulI* mul = new(zone()) LMulI(left, right, temp);
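+    // Attach a deopt environment only when the multiply can actually bail
+    // out: on possible overflow or a -0 result.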
+    if (instr->CheckFlag(HValue::kCanOverflow) ||
+        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      AssignEnvironment(mul);
+    }
+    return DefineSameAsFirst(mul);
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MUL, instr);
   } else {
@@ -1372,7 +1426,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    LSubI* sub = new LSubI(left, right);
+    LSubI* sub = new(zone()) LSubI(left, right);
     LInstruction* result = DefineSameAsFirst(sub);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1393,7 +1447,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
     LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    LAddI* add = new LAddI(left, right);
+    LAddI* add = new(zone()) LAddI(left, right);
     LInstruction* result = DefineSameAsFirst(add);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1414,25 +1468,32 @@
   // We need to use fixed result register for the call.
   Representation exponent_type = instr->right()->representation();
   ASSERT(instr->left()->representation().IsDouble());
-  LOperand* left = UseFixedDouble(instr->left(), xmm1);
+  LOperand* left = UseFixedDouble(instr->left(), xmm2);
   LOperand* right = exponent_type.IsDouble() ?
-      UseFixedDouble(instr->right(), xmm2) :
+      UseFixedDouble(instr->right(), xmm1) :
       UseFixed(instr->right(), eax);
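+  // The base is fixed in xmm2 and a double exponent in xmm1; the stub's
+  // result comes back in xmm3.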
-  LPower* result = new LPower(left, right);
+  LPower* result = new(zone()) LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
                     CAN_DEOPTIMIZE_EAGERLY);
 }
 
 
+LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->global_object()->representation().IsTagged());
+  LOperand* global_object = UseFixed(instr->global_object(), eax);
+  LRandom* result = new(zone()) LRandom(global_object);
+  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  Token::Value op = instr->token();
   ASSERT(instr->left()->representation().IsTagged());
   ASSERT(instr->right()->representation().IsTagged());
-  bool reversed = (op == Token::GT || op == Token::LTE);
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
-  LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
-  LCmpT* result = new LCmpT(context, left, right);
+  LOperand* left = UseFixed(instr->left(), edx);
+  LOperand* right = UseFixed(instr->right(), eax);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1443,16 +1504,23 @@
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    return new LCmpIDAndBranch(left, right);
+    return new(zone()) LCmpIDAndBranch(left, right);
   } else {
     ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
-    LOperand* left = UseRegisterAtStart(instr->left());
-    LOperand* right = UseRegisterAtStart(instr->right());
-    return new LCmpIDAndBranch(left, right);
+    LOperand* left;
+    LOperand* right;
+    if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
+      left = UseRegisterOrConstantAtStart(instr->left());
+      right = UseRegisterOrConstantAtStart(instr->right());
+    } else {
+      left = UseRegisterAtStart(instr->left());
+      right = UseRegisterAtStart(instr->right());
+    }
+    return new(zone()) LCmpIDAndBranch(left, right);
   }
 }
 
@@ -1461,49 +1529,73 @@
     HCompareObjectEqAndBranch* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseAtStart(instr->right());
-  return new LCmpObjectEqAndBranch(left, right);
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
 }
 
 
 LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
   HCompareConstantEqAndBranch* instr) {
-  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+  return new(zone()) LCmpConstantEqAndBranch(
+      UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
   // We only need a temp register for non-strict compare.
-  LOperand* temp = instr->is_strict() ? NULL : TempRegister();
-  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
+  LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
+  return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
-  return new LIsObjectAndBranch(UseRegister(instr->value()), temp);
+  return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsSmiAndBranch(Use(instr->value()));
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
-                                      TempRegister());
+  return new(zone()) LIsUndetectableAndBranch(
+      UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left = UseFixed(instr->left(), edx);
+  LOperand* right = UseFixed(instr->right(), eax);
+
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(context, left, right);
+
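+  // A branch produces no value, so the call is marked without fixing a
+  // result register.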
+  return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()),
-                                       TempRegister());
+  return new(zone()) LHasInstanceTypeAndBranch(
+      UseRegisterAtStart(instr->value()),
+      TempRegister());
 }
 
 
@@ -1512,14 +1604,14 @@
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
-  return DefineAsRegister(new LGetCachedArrayIndex(value));
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
 }
 
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LHasCachedArrayIndexAndBranch(
+  return new(zone()) LHasCachedArrayIndexAndBranch(
       UseRegisterAtStart(instr->value()));
 }
 
@@ -1527,40 +1619,48 @@
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
-                                   TempRegister(),
-                                   TempRegister());
+  return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
+                                           TempRegister(),
+                                           TempRegister());
 }
 
 
 LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LJSArrayLength(array));
+  return DefineAsRegister(new(zone()) LJSArrayLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
     HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LFixedArrayBaseLength(array));
+  return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
   LOperand* object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LElementsKind(object));
+  return DefineAsRegister(new(zone()) LElementsKind(object));
 }
 
 
 LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
   LOperand* object = UseRegister(instr->value());
-  LValueOf* result = new LValueOf(object, TempRegister());
-  return AssignEnvironment(DefineSameAsFirst(result));
+  LValueOf* result = new(zone()) LValueOf(object, TempRegister());
+  return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+  LOperand* date = UseFixed(instr->value(), eax);
+  LDateField* result =
+      new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
+  return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  return AssignEnvironment(new LBoundsCheck(
+  return AssignEnvironment(new(zone()) LBoundsCheck(
       UseRegisterOrConstantAtStart(instr->index()),
       UseAtStart(instr->length())));
 }
@@ -1576,7 +1676,7 @@
 LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* value = UseFixed(instr->value(), eax);
-  return MarkAsCall(new LThrow(context, value), instr);
+  return MarkAsCall(new(zone()) LThrow(context, value), instr);
 }
 
 
@@ -1599,7 +1699,11 @@
   if (from.IsTagged()) {
     if (to.IsDouble()) {
       LOperand* value = UseRegister(instr->value());
-      LNumberUntagD* res = new LNumberUntagD(value);
+      // Temp register only necessary for minus zero check.
+      LOperand* temp = instr->deoptimize_on_minus_zero()
+                       ? TempRegister()
+                       : NULL;
+      LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
       return AssignEnvironment(DefineAsRegister(res));
     } else {
       ASSERT(to.IsInteger32());
@@ -1611,10 +1715,10 @@
             (truncating && CpuFeatures::IsSupported(SSE3))
             ? NULL
             : FixedTemp(xmm1);
-        LTaggedToI* res = new LTaggedToI(value, xmm_temp);
+        LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
         return AssignEnvironment(DefineSameAsFirst(res));
       } else {
-        return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+        return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
       }
     }
   } else if (from.IsDouble()) {
@@ -1624,7 +1728,7 @@
 
       // Make sure that temp and result_temp are different registers.
       LUnallocated* result_temp = TempRegister();
-      LNumberTagD* result = new LNumberTagD(value, temp);
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
       return AssignPointerMap(Define(result, result_temp));
     } else {
       ASSERT(to.IsInteger32());
@@ -1633,21 +1737,23 @@
       LOperand* value = needs_temp ?
           UseTempRegister(instr->value()) : UseRegister(instr->value());
       LOperand* temp = needs_temp ? TempRegister() : NULL;
-      return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
+      return AssignEnvironment(
+          DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
     }
   } else if (from.IsInteger32()) {
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
       if (val->HasRange() && val->range()->IsInSmiRange()) {
-        return DefineSameAsFirst(new LSmiTag(value));
+        return DefineSameAsFirst(new(zone()) LSmiTag(value));
       } else {
-        LNumberTagI* result = new LNumberTagI(value);
+        LNumberTagI* result = new(zone()) LNumberTagI(value);
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
       }
     } else {
       ASSERT(to.IsDouble());
-      return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+      return DefineAsRegister(
+          new(zone()) LInteger32ToDouble(Use(instr->value())));
     }
   }
   UNREACHABLE();
@@ -1657,40 +1763,46 @@
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
   LOperand* value = UseAtStart(instr->value());
-  return AssignEnvironment(new LCheckNonSmi(value));
+  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
-  LCheckInstanceType* result = new LCheckInstanceType(value, temp);
+  LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
   LOperand* temp = TempRegister();
-  LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
+  LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
   LOperand* value = UseAtStart(instr->value());
-  return AssignEnvironment(new LCheckSmi(value));
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
-  LOperand* value = UseAtStart(instr->value());
-  return AssignEnvironment(new LCheckFunction(value));
+  // If the target is in new space, we'll emit a global cell compare and so
+  // want the value in a register.  If the target gets promoted before we
+  // emit code, we will still get the register but will do an immediate
+  // compare instead of the cell compare.  This is safe.
+  LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
+      ? UseRegisterAtStart(instr->value())
+      : UseAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckFunction(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LCheckMap* result = new LCheckMap(value);
+  LCheckMap* result = new(zone()) LCheckMap(value);
   return AssignEnvironment(result);
 }
 
@@ -1700,67 +1812,39 @@
   Representation input_rep = value->representation();
   if (input_rep.IsDouble()) {
     LOperand* reg = UseRegister(value);
-    return DefineAsRegister(new LClampDToUint8(reg));
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg));
   } else if (input_rep.IsInteger32()) {
     LOperand* reg = UseFixed(value, eax);
-    return DefineFixed(new LClampIToUint8(reg), eax);
+    return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
   } else {
     ASSERT(input_rep.IsTagged());
     LOperand* reg = UseFixed(value, eax);
     // Register allocator doesn't (yet) support allocation of double
     // temps. Reserve xmm1 explicitly.
     LOperand* temp = FixedTemp(xmm1);
-    LClampTToUint8* result = new LClampTToUint8(reg, temp);
+    LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
     return AssignEnvironment(DefineFixed(result, eax));
   }
 }
 
 
-LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
-  HValue* value = instr->value();
-  Representation input_rep = value->representation();
-
-  LInstruction* result;
-  if (input_rep.IsDouble()) {
-    LOperand* reg = UseRegister(value);
-    LOperand* temp_reg =
-        CpuFeatures::IsSupported(SSE3) ? NULL : TempRegister();
-    result = DefineAsRegister(new LDoubleToI(reg, temp_reg));
-  } else if (input_rep.IsInteger32()) {
-    // Canonicalization should already have removed the hydrogen instruction in
-    // this case, since it is a noop.
-    UNREACHABLE();
-    return NULL;
-  } else {
-    ASSERT(input_rep.IsTagged());
-    LOperand* reg = UseRegister(value);
-    // Register allocator doesn't (yet) support allocation of double
-    // temps. Reserve xmm1 explicitly.
-    LOperand* xmm_temp =
-        CpuFeatures::IsSupported(SSE3) ? NULL : FixedTemp(xmm1);
-    result = DefineSameAsFirst(new LTaggedToI(reg, xmm_temp));
-  }
-  return AssignEnvironment(result);
-}
-
-
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
-  return new LReturn(UseFixed(instr->value(), eax));
+  return new(zone()) LReturn(UseFixed(instr->value(), eax));
 }
 
 
 LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
   Representation r = instr->representation();
   if (r.IsInteger32()) {
-    return DefineAsRegister(new LConstantI);
+    return DefineAsRegister(new(zone()) LConstantI);
   } else if (r.IsDouble()) {
     double value = instr->DoubleValue();
     LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
         ? TempRegister()
         : NULL;
-    return DefineAsRegister(new LConstantD(temp));
+    return DefineAsRegister(new(zone()) LConstantD(temp));
   } else if (r.IsTagged()) {
-    return DefineAsRegister(new LConstantT);
+    return DefineAsRegister(new(zone()) LConstantT);
   } else {
     UNREACHABLE();
     return NULL;
@@ -1769,8 +1853,8 @@
 
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
-  LLoadGlobalCell* result = new LLoadGlobalCell;
-  return instr->check_hole_value()
+  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
+  return instr->RequiresHoleCheck()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@@ -1779,15 +1863,16 @@
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* global_object = UseFixed(instr->global_object(), eax);
-  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
   LStoreGlobalCell* result =
-      new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
-  return instr->check_hole_value() ? AssignEnvironment(result) : result;
+      new(zone()) LStoreGlobalCell(UseRegister(instr->value()));
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
@@ -1796,54 +1881,56 @@
   LOperand* global_object = UseFixed(instr->global_object(), edx);
   LOperand* value = UseFixed(instr->value(), eax);
   LStoreGlobalGeneric* result =
-      new LStoreGlobalGeneric(context, global_object, value);
+      new(zone()) LStoreGlobalGeneric(context, global_object, value);
   return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadContextSlot(context));
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
 LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
-  LOperand* context;
   LOperand* value;
   LOperand* temp;
+  LOperand* context = UseRegister(instr->context());
   if (instr->NeedsWriteBarrier()) {
-    context = UseTempRegister(instr->context());
     value = UseTempRegister(instr->value());
     temp = TempRegister();
   } else {
-    context = UseRegister(instr->context());
     value = UseRegister(instr->value());
     temp = NULL;
   }
-  return new LStoreContextSlot(context, value, temp);
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
   ASSERT(instr->representation().IsTagged());
   LOperand* obj = UseRegisterAtStart(instr->object());
-  return DefineAsRegister(new LLoadNamedField(obj));
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
     HLoadNamedFieldPolymorphic* instr) {
   ASSERT(instr->representation().IsTagged());
-  LOperand* context = UseFixed(instr->context(), esi);
   if (instr->need_generic()) {
+    LOperand* context = UseFixed(instr->context(), esi);
     LOperand* obj = UseFixed(instr->object(), eax);
     LLoadNamedFieldPolymorphic* result =
-        new LLoadNamedFieldPolymorphic(context, obj);
+        new(zone()) LLoadNamedFieldPolymorphic(context, obj);
     return MarkAsCall(DefineFixed(result, eax), instr);
   } else {
+    LOperand* context = UseAny(instr->context());  // Not actually used.
     LOperand* obj = UseRegisterAtStart(instr->object());
     LLoadNamedFieldPolymorphic* result =
-        new LLoadNamedFieldPolymorphic(context, obj);
+        new(zone()) LLoadNamedFieldPolymorphic(context, obj);
     return AssignEnvironment(DefineAsRegister(result));
   }
 }
@@ -1852,7 +1939,7 @@
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* object = UseFixed(instr->object(), eax);
-  LLoadNamedGeneric* result = new LLoadNamedGeneric(context, object);
+  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1860,21 +1947,21 @@
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
-      new LLoadFunctionPrototype(UseRegister(instr->function()),
-                                 TempRegister())));
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
+                                         TempRegister())));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadElements(input));
+  return DefineAsRegister(new(zone()) LLoadElements(input));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
     HLoadExternalArrayPointer* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadExternalArrayPointer(input));
+  return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
 }
 
 
@@ -1884,8 +1971,9 @@
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* obj = UseRegisterAtStart(instr->object());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
-  return AssignEnvironment(DefineAsRegister(result));
+  LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
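+  // Only loads that must check for the hole value can deoptimize and need
+  // an environment.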
+  if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+  return DefineAsRegister(result);
 }
 
 
@@ -1896,7 +1984,7 @@
   LOperand* elements = UseRegisterAtStart(instr->elements());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LLoadKeyedFastDoubleElement* result =
-      new LLoadKeyedFastDoubleElement(elements, key);
+      new(zone()) LLoadKeyedFastDoubleElement(elements, key);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
@@ -1904,19 +1992,18 @@
 LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
     HLoadKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
-  Representation representation(instr->representation());
   ASSERT(
-      (representation.IsInteger32() &&
+      (instr->representation().IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
        (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (representation.IsDouble() &&
+      (instr->representation().IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* external_pointer = UseRegister(instr->external_pointer());
   LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
-      new LLoadKeyedSpecializedArrayElement(external_pointer,
+      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer,
-                                            key);
+                                                    key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt, make sure it
@@ -1932,7 +2019,8 @@
   LOperand* object = UseFixed(instr->object(), edx);
   LOperand* key = UseFixed(instr->key(), eax);
 
-  LLoadKeyedGeneric* result = new LLoadKeyedGeneric(context, object, key);
+  LLoadKeyedGeneric* result =
+      new(zone()) LLoadKeyedGeneric(context, object, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1944,15 +2032,14 @@
   ASSERT(instr->object()->representation().IsTagged());
   ASSERT(instr->key()->representation().IsInteger32());
 
-  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* obj = UseRegister(instr->object());
   LOperand* val = needs_write_barrier
       ? UseTempRegister(instr->value())
       : UseRegisterAtStart(instr->value());
   LOperand* key = needs_write_barrier
       ? UseTempRegister(instr->key())
       : UseRegisterOrConstantAtStart(instr->key());
-
-  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+  return new(zone()) LStoreKeyedFastElement(obj, key, val);
 }
 
 
@@ -1966,19 +2053,18 @@
   LOperand* val = UseTempRegister(instr->value());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
 
-  return new LStoreKeyedFastDoubleElement(elements, key, val);
+  return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
     HStoreKeyedSpecializedArrayElement* instr) {
-  Representation representation(instr->value()->representation());
   ElementsKind elements_kind = instr->elements_kind();
     ASSERT(
-      (representation.IsInteger32() &&
+      (instr->value()->representation().IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
        (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (representation.IsDouble() &&
+      (instr->value()->representation().IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->external_pointer()->representation().IsExternal());
@@ -1996,9 +2082,9 @@
     val = UseRegister(instr->value());
   }
 
-  return new LStoreKeyedSpecializedArrayElement(external_pointer,
-                                                key,
-                                                val);
+  return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+                                                        key,
+                                                        val);
 }
 
 
@@ -2013,17 +2099,45 @@
   ASSERT(instr->value()->representation().IsTagged());
 
   LStoreKeyedGeneric* result =
-      new LStoreKeyedGeneric(context, object, key, value);
+      new(zone()) LStoreKeyedGeneric(context, object, key, value);
   return MarkAsCall(result, instr);
 }
 
 
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
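+  // A smi-only -> fast transition leaves the elements layout unchanged, so
+  // it can be done in place with temp registers; any other transition is
+  // lowered to a call with fixed registers.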
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LOperand* temp_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
+    return DefineSameAsFirst(result);
+  } else {
+    LOperand* object = UseFixed(instr->object(), eax);
+    LOperand* fixed_object_reg = FixedTemp(edx);
+    LOperand* new_map_reg = FixedTemp(ebx);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object,
+                                            new_map_reg,
+                                            fixed_object_reg);
+    return MarkAsCall(DefineFixed(result, eax), instr);
+  }
+}
+
+
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool needs_write_barrier = instr->NeedsWriteBarrier();
 
-  LOperand* obj = needs_write_barrier
-      ? UseTempRegister(instr->object())
-      : UseRegisterAtStart(instr->object());
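+  // With a write barrier, an out-of-object store clobbers the object
+  // register (presumably to hold the properties array), so it is allocated
+  // with UseTempRegister; an in-object store can reuse the register.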
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = instr->is_in_object()
+        ? UseRegister(instr->object())
+        : UseTempRegister(instr->object());
+  } else {
+    obj = UseRegisterAtStart(instr->object());
+  }
 
   LOperand* val = needs_write_barrier
       ? UseTempRegister(instr->value())
@@ -2035,7 +2149,7 @@
       ? TempRegister()
       : NULL;
 
-  return new LStoreNamedField(obj, val, temp);
+  return new(zone()) LStoreNamedField(obj, val, temp);
 }
 
 
@@ -2044,7 +2158,8 @@
   LOperand* object = UseFixed(instr->object(), edx);
   LOperand* value = UseFixed(instr->value(), eax);
 
-  LStoreNamedGeneric* result = new LStoreNamedGeneric(context, object, value);
+  LStoreNamedGeneric* result =
+      new(zone()) LStoreNamedGeneric(context, object, value);
   return MarkAsCall(result, instr);
 }
 
@@ -2053,7 +2168,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left = UseOrConstantAtStart(instr->left());
   LOperand* right = UseOrConstantAtStart(instr->right());
-  LStringAdd* string_add = new LStringAdd(context, left, right);
+  LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
   return MarkAsCall(DefineFixed(string_add, eax), instr);
 }
 
@@ -2062,7 +2177,8 @@
   LOperand* string = UseTempRegister(instr->string());
   LOperand* index = UseTempRegister(instr->index());
   LOperand* context = UseAny(instr->context());
-  LStringCharCodeAt* result = new LStringCharCodeAt(context, string, index);
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
 
@@ -2070,38 +2186,58 @@
 LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
   LOperand* char_code = UseRegister(instr->value());
   LOperand* context = UseAny(instr->context());
-  LStringCharFromCode* result = new LStringCharFromCode(context, char_code);
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
   return AssignPointerMap(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
   LOperand* string = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LStringLength(string));
+  return DefineAsRegister(new(zone()) LStringLength(string));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* temp = TempRegister();
+  LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LFastLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LArrayLiteral(context), eax), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LObjectLiteral(context), eax), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LObjectLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LRegExpLiteral(context), eax), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LFunctionLiteral(context), eax), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
 }
 
 
@@ -2109,7 +2245,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* object = UseAtStart(instr->object());
   LOperand* key = UseOrConstantAtStart(instr->key());
-  LDeleteProperty* result = new LDeleteProperty(context, object, key);
+  LDeleteProperty* result = new(zone()) LDeleteProperty(context, object, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2117,13 +2253,13 @@
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
-  return AssignEnvironment(new LOsrEntry);
+  return AssignEnvironment(new(zone()) LOsrEntry);
 }
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new LParameter, spill_index);
+  return DefineAsSpilled(new(zone()) LParameter, spill_index);
 }
 
 
@@ -2133,14 +2269,14 @@
     Abort("Too many spill slots needed for OSR");
     spill_index = 0;
   }
-  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
 }
 
 
 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallStub* result = new LCallStub(context);
+  LCallStub* result = new(zone()) LCallStub(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2158,14 +2294,15 @@
   LOperand* arguments = UseRegister(instr->arguments());
   LOperand* length = UseTempRegister(instr->length());
   LOperand* index = Use(instr->index());
-  LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+  LAccessArgumentsAt* result =
+      new(zone()) LAccessArgumentsAt(arguments, length, index);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
   LOperand* object = UseFixed(instr->value(), eax);
-  LToFastProperties* result = new LToFastProperties(object);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2173,19 +2310,19 @@
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* value = UseAtStart(instr->value());
-  LTypeof* result = new LTypeof(context, value);
+  LTypeof* result = new(zone()) LTypeof(context, value);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
-  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
     HIsConstructCallAndBranch* instr) {
-  return new LIsConstructCallAndBranch(TempRegister());
+  return new(zone()) LIsConstructCallAndBranch(TempRegister());
 }
 
 
@@ -2209,7 +2346,7 @@
   // lazy bailout instruction to capture the environment.
   if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
     ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
-    LLazyBailout* lazy_bailout = new LLazyBailout;
+    LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
     LInstruction* result = AssignEnvironment(lazy_bailout);
     instruction_pending_deoptimization_environment_->
         set_deoptimization_environment(result->environment());
@@ -2224,11 +2361,12 @@
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
   if (instr->is_function_entry()) {
     LOperand* context = UseFixed(instr->context(), esi);
-    return MarkAsCall(new LStackCheck(context), instr);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
   } else {
     ASSERT(instr->is_backwards_branch());
     LOperand* context = UseAny(instr->context());
-    return AssignEnvironment(AssignPointerMap(new LStackCheck(context)));
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
   }
 }
 
@@ -2237,9 +2375,14 @@
   HEnvironment* outer = current_block_->last_environment();
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
                                                instr->function(),
                                                undefined,
-                                               instr->call_kind());
+                                               instr->call_kind(),
+                                               instr->is_construct());
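+  // Inlined functions that mention 'arguments' get the graph's arguments
+  // object bound in the inner environment.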
+  if (instr->arguments() != NULL) {
+    inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+  }
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2247,7 +2390,8 @@
 
 
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
-  HEnvironment* outer = current_block_->last_environment()->outer();
+  HEnvironment* outer = current_block_->last_environment()->
+      DiscardInlined(false);
   current_block_->UpdateEnvironment(outer);
   return NULL;
 }
@@ -2257,11 +2401,40 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* key = UseOrConstantAtStart(instr->key());
   LOperand* object = UseOrConstantAtStart(instr->object());
-  LIn* result = new LIn(context, key, object);
+  LIn* result = new(zone()) LIn(context, key, object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object = UseFixed(instr->enumerable(), eax);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseTempRegister(instr->index());
+  return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index b0ab6b4..4ecce96 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,6 +43,7 @@
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
+  V(AllocateObject)                             \
   V(ApplyArguments)                             \
   V(ArgumentsElements)                          \
   V(ArgumentsLength)                            \
@@ -81,11 +82,13 @@
   V(ConstantI)                                  \
   V(ConstantT)                                  \
   V(Context)                                    \
+  V(DeclareGlobals)                             \
   V(DeleteProperty)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
   V(ElementsKind)                               \
+  V(FastLiteral)                                \
   V(FixedArrayBaseLength)                       \
   V(FunctionLiteral)                            \
   V(GetCachedArrayIndex)                        \
@@ -101,10 +104,12 @@
   V(Integer32ToDouble)                          \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
-  V(IsNullAndBranch)                            \
+  V(IsNilAndBranch)                             \
   V(IsObjectAndBranch)                          \
+  V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
+  V(StringCompareAndBranch)                     \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -121,6 +126,7 @@
   V(LoadNamedField)                             \
   V(LoadNamedFieldPolymorphic)                  \
   V(LoadNamedGeneric)                           \
+  V(MathPowHalf)                                \
   V(ModI)                                       \
   V(MulI)                                       \
   V(NumberTagD)                                 \
@@ -131,6 +137,7 @@
   V(OuterContext)                               \
   V(Parameter)                                  \
   V(Power)                                      \
+  V(Random)                                     \
   V(PushArgument)                               \
   V(RegExpLiteral)                              \
   V(Return)                                     \
@@ -156,11 +163,18 @@
   V(ThisFunction)                               \
   V(Throw)                                      \
   V(ToFastProperties)                           \
+  V(TransitionElementsKind)                     \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
   V(UnknownOSRValue)                            \
-  V(ValueOf)
+  V(ValueOf)                                    \
+  V(ForInPrepareMap)                            \
+  V(ForInCacheArray)                            \
+  V(CheckMapValue)                              \
+  V(LoadFieldByIndex)                           \
+  V(DateField)                                  \
+  V(WrapReceiver)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -191,8 +205,8 @@
   virtual void CompileToNative(LCodeGen* generator) = 0;
   virtual const char* Mnemonic() const = 0;
   virtual void PrintTo(StringStream* stream);
-  virtual void PrintDataTo(StringStream* stream) = 0;
-  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
 
   enum Opcode {
     // Declare a unique enum value for each instruction.
@@ -288,9 +302,6 @@
   int TempCount() { return T; }
   LOperand* TempAt(int i) { return temps_[i]; }
 
-  virtual void PrintDataTo(StringStream* stream);
-  virtual void PrintOutputOperandTo(StringStream* stream);
-
  protected:
   EmbeddedContainer<LOperand*, R> results_;
   EmbeddedContainer<LOperand*, I> inputs_;
@@ -446,18 +457,33 @@
 };
 
 
-class LApplyArguments: public LTemplateInstruction<1, 4, 1> {
+class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
+ public:
+  LWrapReceiver(LOperand* receiver,
+                LOperand* function,
+                LOperand* temp) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
  public:
   LApplyArguments(LOperand* function,
                   LOperand* receiver,
                   LOperand* length,
-                  LOperand* elements,
-                  LOperand* temp) {
+                  LOperand* elements) {
     inputs_[0] = function;
     inputs_[1] = receiver;
     inputs_[2] = length;
     inputs_[3] = elements;
-    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
@@ -581,6 +607,24 @@
 };
 
 
+class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
+ public:
+  LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
+    inputs_[1] = context;
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
  public:
   LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -605,17 +649,18 @@
 };
 
 
-class LIsNullAndBranch: public LControlInstruction<1, 1> {
+class LIsNilAndBranch: public LControlInstruction<1, 1> {
  public:
-  LIsNullAndBranch(LOperand* value, LOperand* temp) {
+  LIsNilAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
 
-  bool is_strict() const { return hydrogen()->is_strict(); }
+  EqualityKind kind() const { return hydrogen()->kind(); }
+  NilValue nil() const { return hydrogen()->nil(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -634,6 +679,19 @@
 };
 
 
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -661,6 +719,24 @@
 };
 
 
+class LStringCompareAndBranch: public LControlInstruction<3, 0> {
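+  // The LControlInstruction template arguments give the operand counts:
+  // three value inputs (context, left, right) and no temps.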
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
 class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
  public:
   LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
@@ -787,18 +863,15 @@
 
 class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
+  LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  Token::Value op() const { return op_; }
+  Token::Value op() const { return hydrogen()->op(); }
 
   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
-  Token::Value op_;
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
 };
 
 
@@ -946,6 +1019,24 @@
 };
 
 
+class LDateField: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LDateField(LOperand* date, LOperand* temp, Smi* index)
+      : index_(index) {
+    inputs_[0] = date;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+  DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+  Smi* index() const { return index_; }
+
+ private:
+  Smi* index_;
+};
+
+
 class LThrow: public LTemplateInstruction<0, 2, 0> {
  public:
   LThrow(LOperand* context, LOperand* value) {
@@ -994,6 +1085,17 @@
 };
 
 
+class LRandom: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LRandom(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Random, "random")
+  DECLARE_HYDROGEN_ACCESSOR(Random)
+};
+
+
 class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1228,6 +1330,8 @@
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
   DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+
+  LOperand* value() { return inputs_[0]; }
 };
 
 
@@ -1248,7 +1352,7 @@
   LOperand* global_object() { return InputAt(1); }
   Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() { return InputAt(2); }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 
@@ -1282,7 +1386,6 @@
   LOperand* context() { return InputAt(0); }
   LOperand* value() { return InputAt(1); }
   int slot_index() { return hydrogen()->slot_index(); }
-  int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1299,7 +1402,9 @@
 
 
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
@@ -1321,6 +1426,17 @@
 };
 
 
+class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
 class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LGlobalObject(LOperand* context) {
@@ -1412,17 +1528,19 @@
 };
 
 
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction: public LTemplateInstruction<1, 2, 0> {
  public:
-  explicit LCallFunction(LOperand* context) {
+  LCallFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
+    inputs_[1] = function;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
   DECLARE_HYDROGEN_ACCESSOR(CallFunction)
 
   LOperand* context() { return inputs_[0]; }
-  int arity() const { return hydrogen()->argument_count() - 2; }
+  LOperand* function() { return inputs_[1]; }
+  int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
@@ -1558,10 +1676,11 @@
 };
 
 
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
  public:
-  explicit LNumberUntagD(LOperand* value) {
+  LNumberUntagD(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
+    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
@@ -1604,7 +1723,6 @@
   Handle<Object> name() const { return hydrogen()->name(); }
   bool is_in_object() { return hydrogen()->is_in_object(); }
   int offset() { return hydrogen()->offset(); }
-  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
   Handle<Map> transition() const { return hydrogen()->transition(); }
 };
 
@@ -1626,7 +1744,7 @@
   LOperand* object() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
   Handle<Object> name() const { return hydrogen()->name(); }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 
@@ -1716,7 +1834,31 @@
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
   LOperand* value() { return inputs_[3]; }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* new_map_temp,
+                          LOperand* temp_reg) {
+    inputs_[0] = object;
+    temps_[0] = new_map_temp;
+    temps_[1] = temp_reg;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_reg() { return temps_[0]; }
+  LOperand* temp_reg() { return temps_[1]; }
+  Handle<Map> original_map() { return hydrogen()->original_map(); }
+  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
 };
 
 
@@ -1788,6 +1930,8 @@
     inputs_[0] = value;
   }
 
+  LOperand* value() { return inputs_[0]; }
+
   DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
   DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
 };
@@ -1887,6 +2031,33 @@
 };
 
 
+class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LAllocateObject(LOperand* context, LOperand* temp) {
+    inputs_[0] = context;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
+
+  LOperand* context() { return inputs_[0]; }
+};
+
+
+class LFastLiteral: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LFastLiteral(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
+};
+
+
 class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LArrayLiteral(LOperand* context) {
@@ -2051,6 +2222,64 @@
 };
 
 
+class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
 class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
@@ -2124,6 +2353,7 @@
       : chunk_(NULL),
         info_(info),
         graph_(graph),
+        zone_(graph->isolate()->zone()),
         status_(UNUSED),
         current_instruction_(NULL),
         current_block_(NULL),
@@ -2153,6 +2383,7 @@
   LChunk* chunk() const { return chunk_; }
   CompilationInfo* info() const { return info_; }
   HGraph* graph() const { return graph_; }
+  Zone* zone() { return zone_; }
 
   bool is_unused() const { return status_ == UNUSED; }
   bool is_building() const { return status_ == BUILDING; }
@@ -2162,7 +2393,6 @@
   void Abort(const char* format, ...);
 
   // Methods for getting operands for Use / Define / Temp.
-  LRegister* ToOperand(Register reg);
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(XMMRegister reg);
 
@@ -2213,8 +2443,6 @@
       LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
                            LUnallocated* result);
   template<int I, int T>
-      LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
-  template<int I, int T>
       LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
   template<int I, int T>
       LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
@@ -2249,12 +2477,12 @@
       LInstruction* instr, int ast_id);
   void ClearInstructionPendingDeoptimizationEnvironment();
 
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator);
 
   void VisitInstruction(HInstruction* current);
 
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
-  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
@@ -2264,6 +2492,7 @@
   LChunk* chunk_;
   CompilationInfo* info_;
   HGraph* const graph_;
+  Zone* zone_;
   Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index ce6d6a6..60e38a6 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,7 +44,8 @@
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true) {
+      allow_stub_calls_(true),
+      has_frame_(false) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -52,33 +53,75 @@
 }
 
 
-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register addr,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, not_equal, &not_in_new_space);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
+void MacroAssembler::InNewSpace(
+    Register object,
+    Register scratch,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == equal || cc == not_equal);
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    mov(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, object);
   }
+  // Check that we can use a test_b.
+  ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
+  ASSERT(MemoryChunk::IN_TO_SPACE < 8);
+  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
+           | (1 << MemoryChunk::IN_TO_SPACE);
+  // If non-zero, the page belongs to new-space.
+  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+         static_cast<uint8_t>(mask));
+  j(cc, condition_met, condition_met_distance);
+}
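
// In C++ terms the check above is (PageFlags is an assumed accessor, not
// the V8 API):
//   uintptr_t page = addr & ~Page::kPageAlignmentMask;  // page start
//   int new_space_mask = (1 << MemoryChunk::IN_FROM_SPACE)
//                      | (1 << MemoryChunk::IN_TO_SPACE);
//   bool in_new_space = (PageFlags(page) & new_space_mask) != 0;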
 
-  // Compute the page start address from the heap object pointer, and reuse
-  // the 'object' register for it.
-  and_(object, ~Page::kPageAlignmentMask);
 
-  // Compute number of region covering addr. See Page::GetRegionNumberForAddress
-  // method for more details.
-  shr(addr, Page::kRegionSizeLog2);
-  and_(addr, Page::kPageAlignmentMask >> Page::kRegionSizeLog2);
-
-  // Set dirty mark for region.
-  // Bit tests with a memory operand should be avoided on Intel processors,
-  // as they usually have long latency and multiple uops. We load the bit base
-  // operand to a register at first and store it back after bit set.
-  mov(scratch, Operand(object, Page::kDirtyFlagOffset));
-  bts(Operand(scratch), addr);
-  mov(Operand(object, Page::kDirtyFlagOffset), scratch);
+void MacroAssembler::RememberedSetHelper(
+    Register object,  // Only used for debug checks.
+    Register addr,
+    Register scratch,
+    SaveFPRegsMode save_fp,
+    MacroAssembler::RememberedSetFinalAction and_then) {
+  Label done;
+  if (FLAG_debug_code) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+  // Load store buffer top.
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  mov(scratch, Operand::StaticVariable(store_buffer));
+  // Store pointer to buffer.
+  mov(Operand(scratch, 0), addr);
+  // Increment buffer top.
+  add(scratch, Immediate(kPointerSize));
+  // Write back new top of buffer.
+  mov(Operand::StaticVariable(store_buffer), scratch);
+  // Check for the end of the buffer and call the stub on overflow.
+  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kReturnAtEnd) {
+    Label buffer_overflowed;
+    j(not_equal, &buffer_overflowed, Label::kNear);
+    ret(0);
+    bind(&buffer_overflowed);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    j(equal, &done, Label::kNear);
+  }
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(save_fp);
+  CallStub(&store_buffer_overflow);
+  if (and_then == kReturnAtEnd) {
+    ret(0);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    bind(&done);
+  }
 }
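
// The buffer append above, as a C++ sketch (StoreBufferTop and
// HandleOverflow are illustrative names, not the V8 API):
//   Address** top = StoreBufferTop();
//   **top = addr;        // store the slot address into the buffer
//   *top += 1;           // bump the buffer top
//   if (reinterpret_cast<uintptr_t>(*top) &
//       StoreBuffer::kStoreBufferOverflowBit) {
//     HandleOverflow();  // the StoreBufferOverflowStub call above
//   }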
 
 
@@ -112,100 +155,144 @@
 }
 
 
-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                Label* branch,
-                                Label::Distance branch_near) {
-  ASSERT(cc == equal || cc == not_equal);
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    mov(scratch, Operand(object));
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    and_(Operand(scratch),
-         Immediate(ExternalReference::new_space_mask(isolate())));
-    cmp(Operand(scratch),
-        Immediate(ExternalReference::new_space_start(isolate())));
-    j(cc, branch, branch_near);
-  } else {
-    int32_t new_space_start = reinterpret_cast<int32_t>(
-        ExternalReference::new_space_start(isolate()).address());
-    lea(scratch, Operand(object, -new_space_start));
-    and_(scratch, isolate()->heap()->NewSpaceMask());
-    j(cc, branch, branch_near);
+void MacroAssembler::RecordWriteArray(Register object,
+                                      Register value,
+                                      Register index,
+                                      SaveFPRegsMode save_fp,
+                                      RememberedSetAction remembered_set_action,
+                                      SmiCheck smi_check) {
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
+  Label done;
+
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    ASSERT_EQ(0, kSmiTag);
+    test(value, Immediate(kSmiTagMask));
+    j(zero, &done);
+  }
+
+  // Array access: calculate the destination address in the same manner as
+  // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
+  // into an array of words.
+  Register dst = index;
+  lea(dst, Operand(object, index, times_half_pointer_size,
+                   FixedArray::kHeaderSize - kHeapObjectTag));
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
+  bind(&done);
+
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(index, Immediate(BitCast<int32_t>(kZapValue)));
   }
 }
 
 
-void MacroAssembler::RecordWrite(Register object,
-                                 int offset,
-                                 Register value,
-                                 Register scratch) {
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
   // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis and stores into young gen.
+  // catch stores of Smis.
   Label done;
 
   // Skip barrier if writing a smi.
-  STATIC_ASSERT(kSmiTag == 0);
-  JumpIfSmi(value, &done, Label::kNear);
-
-  InNewSpace(object, value, equal, &done, Label::kNear);
-
-  // The offset is relative to a tagged or untagged HeapObject pointer,
-  // so either offset or offset + kHeapObjectTag must be a
-  // multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize) ||
-         IsAligned(offset + kHeapObjectTag, kPointerSize));
-
-  Register dst = scratch;
-  if (offset != 0) {
-    lea(dst, Operand(object, offset));
-  } else {
-    // Array access: calculate the destination address in the same manner as
-    // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
-    // into an array of words.
-    STATIC_ASSERT(kSmiTagSize == 1);
-    STATIC_ASSERT(kSmiTag == 0);
-    lea(dst, Operand(object, dst, times_half_pointer_size,
-                     FixedArray::kHeaderSize - kHeapObjectTag));
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done, Label::kNear);
   }
-  RecordWriteHelper(object, dst, value);
+
+  // Although the object register is tagged, the offset is relative to the start
+  // of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
+
+  lea(dst, FieldOperand(object, offset));
+  if (emit_debug_code()) {
+    Label ok;
+    test_b(dst, (1 << kPointerSizeLog2) - 1);
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Immediate(BitCast<int32_t>(kZapValue)));
     mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
   }
 }
 
 
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value) {
+                                 Register value,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
+  ASSERT(!object.is(value));
+  ASSERT(!object.is(address));
+  ASSERT(!value.is(address));
+  if (emit_debug_code()) {
+    AbortIfSmi(object);
+  }
+
+  if (remembered_set_action == OMIT_REMEMBERED_SET &&
+      !FLAG_incremental_marking) {
+    return;
+  }
+
+  if (FLAG_debug_code) {
+    Label ok;
+    cmp(value, Operand(address, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
   Label done;
 
-  // Skip barrier if writing a smi.
-  STATIC_ASSERT(kSmiTag == 0);
-  JumpIfSmi(value, &done, Label::kNear);
+  if (smi_check == INLINE_SMI_CHECK) {
+    // Skip barrier if writing a smi.
+    JumpIfSmi(value, &done, Label::kNear);
+  }
 
-  InNewSpace(object, value, equal, &done);
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
 
-  RecordWriteHelper(object, address, value);
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Immediate(BitCast<int32_t>(kZapValue)));
     mov(address, Immediate(BitCast<int32_t>(kZapValue)));
     mov(value, Immediate(BitCast<int32_t>(kZapValue)));
   }
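
// Summarized, the fast path above skips the stub unless both sides of the
// store are interesting to the collector (a sketch; PageFlags is an assumed
// accessor):
//   bool barrier_needed =
//       (PageFlags(value) & MemoryChunk::kPointersToHereAreInterestingMask) &&
//       (PageFlags(object) & MemoryChunk::kPointersFromHereAreInterestingMask);
//   if (barrier_needed) CallRecordWriteStub(object, address, value);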
@@ -224,7 +311,7 @@
 
 void MacroAssembler::Set(Register dst, const Immediate& x) {
   if (x.is_zero()) {
-    xor_(dst, Operand(dst));  // Shorter than mov.
+    xor_(dst, dst);  // Shorter than mov.
   } else {
     mov(dst, x);
   }
@@ -265,7 +352,15 @@
 
 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
   // see ROOT_ACCESSOR macro in factory.h
-  Handle<Object> value(&isolate()->heap()->roots_address()[index]);
+  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+  cmp(with, value);
+}
+
+
+void MacroAssembler::CompareRoot(const Operand& with,
+                                 Heap::RootListIndex index) {
+  // see ROOT_ACCESSOR macro in factory.h
+  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
   cmp(with, value);
 }
 
@@ -287,22 +382,153 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Label* fail,
                                        Label::Distance distance) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
        Map::kMaximumBitField2FastElementValue);
   j(above, fail, distance);
 }
 
 
+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Label* fail,
+                                             Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Map::kMaximumBitField2FastSmiOnlyElementValue);
+  j(below_equal, fail, distance);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Map::kMaximumBitField2FastElementValue);
+  j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Label* fail,
+                                              Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Map::kMaximumBitField2FastSmiOnlyElementValue);
+  j(above, fail, distance);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+    Register maybe_number,
+    Register elements,
+    Register key,
+    Register scratch1,
+    XMMRegister scratch2,
+    Label* fail,
+    bool specialize_for_processor) {
+  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
+  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+  CheckMap(maybe_number,
+           isolate()->factory()->heap_number_map(),
+           fail,
+           DONT_DO_SMI_CHECK);
+
+  // Double value, canonicalize NaN.
+  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+  cmp(FieldOperand(maybe_number, offset),
+      Immediate(kNaNOrInfinityLowerBoundUpper32));
+  j(greater_equal, &maybe_nan, Label::kNear);
+
+  bind(&not_nan);
+  ExternalReference canonical_nan_reference =
+      ExternalReference::address_of_canonical_non_hole_nan();
+  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+    bind(&have_double_value);
+    movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+           scratch2);
+  } else {
+    fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
+    bind(&have_double_value);
+    fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+  }
+  jmp(&done);
+
+  bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  j(greater, &is_nan, Label::kNear);
+  cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+  j(zero, &not_nan);
+  bind(&is_nan);
+  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+  } else {
+    fld_d(Operand::StaticVariable(canonical_nan_reference));
+  }
+  jmp(&have_double_value, Label::kNear);
+
+  bind(&smi_value);
+  // Value is a smi. Convert to a double and store.
+  // Preserve original value.
+  mov(scratch1, maybe_number);
+  SmiUntag(scratch1);
+  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
+    CpuFeatures::Scope fscope(SSE2);
+    cvtsi2sd(scratch2, scratch1);
+    movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+           scratch2);
+  } else {
+    push(scratch1);
+    fild_s(Operand(esp, 0));
+    pop(scratch1);
+    fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+  }
+  bind(&done);
+}
+
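// The canonicalization step above as standalone C++ (a sketch: the real code
// loads the pattern from address_of_canonical_non_hole_nan):
//   #include <cmath>
//   #include <limits>
//   double CanonicalizeNaN(double value) {
//     // Rewrite every NaN to one fixed bit pattern so the "hole" NaN used
//     // by FixedDoubleArray stays distinguishable from stored NaNs.
//     return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN()
//                              : value;
//   }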
+
+void MacroAssembler::CompareMap(Register obj,
+                                Handle<Map> map,
+                                Label* early_success,
+                                CompareMapMode mode) {
+  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
+    Map* transitioned_fast_element_map(
+        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+    ASSERT(transitioned_fast_element_map == NULL ||
+           map->elements_kind() != FAST_ELEMENTS);
+    if (transitioned_fast_element_map != NULL) {
+      j(equal, early_success, Label::kNear);
+      cmp(FieldOperand(obj, HeapObject::kMapOffset),
+          Handle<Map>(transitioned_fast_element_map));
+    }
+
+    Map* transitioned_double_map(
+        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+    ASSERT(transitioned_double_map == NULL ||
+           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+    if (transitioned_double_map != NULL) {
+      j(equal, early_success, Label::kNear);
+      cmp(FieldOperand(obj, HeapObject::kMapOffset),
+          Handle<Map>(transitioned_double_map));
+    }
+  }
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
-                              SmiCheckType smi_check_type) {
+                              SmiCheckType smi_check_type,
+                              CompareMapMode mode) {
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
-  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+
+  Label success;
+  CompareMap(obj, map, &success, mode);
   j(not_equal, fail);
+  bind(&success);
 }
 
 
@@ -345,7 +571,7 @@
                                             Register scratch,
                                             Label* fail) {
   movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
-  sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   cmp(scratch,
       LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
   j(above, fail);
@@ -355,8 +581,7 @@
 void MacroAssembler::FCmp() {
   if (CpuFeatures::IsSupported(CMOV)) {
     fucomip();
-    ffree(0);
-    fincstp();
+    fstp(0);
   } else {
     fucompp();
     push(eax);
@@ -402,7 +627,7 @@
 
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
-  mov(ebp, Operand(esp));
+  mov(ebp, esp);
   push(esi);
   push(Immediate(Smi::FromInt(type)));
   push(Immediate(CodeObject()));
@@ -424,12 +649,12 @@
 
 
 void MacroAssembler::EnterExitFramePrologue() {
-  // Setup the frame structure on the stack.
+  // Set up the frame structure on the stack.
   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
   push(ebp);
-  mov(ebp, Operand(esp));
+  mov(ebp, esp);
 
   // Reserve room for entry stack pointer and push the code object.
   ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
@@ -451,14 +676,14 @@
   if (save_doubles) {
     CpuFeatures::Scope scope(SSE2);
     int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
-    sub(Operand(esp), Immediate(space));
+    sub(esp, Immediate(space));
     const int offset = -2 * kPointerSize;
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
       movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
     }
   } else {
-    sub(Operand(esp), Immediate(argc * kPointerSize));
+    sub(esp, Immediate(argc * kPointerSize));
   }
 
   // Get the required frame alignment for the OS.
@@ -476,9 +701,9 @@
 void MacroAssembler::EnterExitFrame(bool save_doubles) {
   EnterExitFramePrologue();
 
-  // Setup argc and argv in callee-saved registers.
+  // Set up argc and argv in callee-saved registers.
   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  mov(edi, Operand(eax));
+  mov(edi, eax);
   lea(esi, Operand(ebp, eax, times_4, offset));
 
   // Reserve space for argc, argv and isolate.
@@ -532,55 +757,68 @@
 
 
 void MacroAssembler::LeaveApiExitFrame() {
-  mov(esp, Operand(ebp));
+  mov(esp, ebp);
   pop(ebp);
 
   LeaveExitFrameEpilogue();
 }
 
 
-void MacroAssembler::PushTryHandler(CodeLocation try_location,
-                                    HandlerType type) {
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+                                    int handler_index) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-  // The pc (return address) is already on TOS.
-  if (try_location == IN_JAVASCRIPT) {
-    if (type == TRY_CATCH_HANDLER) {
-      push(Immediate(StackHandler::TRY_CATCH));
-    } else {
-      push(Immediate(StackHandler::TRY_FINALLY));
-    }
-    push(ebp);
-    push(esi);
-  } else {
-    ASSERT(try_location == IN_JS_ENTRY);
-    // The frame pointer does not point to a JS frame so we save NULL
-    // for ebp. We expect the code throwing an exception to check ebp
-    // before dereferencing it to restore the context.
-    push(Immediate(StackHandler::ENTRY));
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // We will build up the handler from the bottom by pushing on the stack.
+  // First push the frame pointer and context.
+  if (kind == StackHandler::JS_ENTRY) {
+    // The frame pointer does not point to a JS frame so we save NULL for
+    // ebp. We expect the code throwing an exception to check ebp before
+    // dereferencing it to restore the context.
     push(Immediate(0));  // NULL frame pointer.
     push(Immediate(Smi::FromInt(0)));  // No context.
+  } else {
+    push(ebp);
+    push(esi);
   }
-  // Save the current handler as the next handler.
-  push(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
-                                                 isolate())));
-  // Link this handler as the new current one.
-  mov(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
-                                                isolate())),
-      esp);
+  // Push the state and the code object.
+  unsigned state =
+      StackHandler::IndexField::encode(handler_index) |
+      StackHandler::KindField::encode(kind);
+  push(Immediate(state));
+  Push(CodeObject());
+
+  // Link the current handler as the next handler.
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+  push(Operand::StaticVariable(handler_address));
+  // Set this new handler as the current one.
+  mov(Operand::StaticVariable(handler_address), esp);
 }
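
// The handler layout pinned down by the STATIC_ASSERTs above, written as a
// struct (field order from the stack top; the pushes above run bottom-up):
//   struct StackHandlerLayout {
//     StackHandlerLayout* next;     // kNextOffset    == 0
//     Code*               code;     // kCodeOffset    == 1 * kPointerSize
//     unsigned            state;    // kStateOffset   == 2 * kPointerSize
//     Object*             context;  // kContextOffset == 3 * kPointerSize
//     void*               fp;       // kFPOffset      == 4 * kPointerSize
//   };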
 
 
 void MacroAssembler::PopTryHandler() {
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  pop(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
-                                                isolate())));
-  add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+  pop(Operand::StaticVariable(handler_address));
+  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+}
+
+
+void MacroAssembler::JumpToHandlerEntry() {
+  // Compute the handler entry address and jump to it.  The handler table is
+  // a fixed array of (smi-tagged) code offsets.
+  // eax = exception, edi = code object, edx = state.
+  mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
+  shr(edx, StackHandler::kKindWidth);
+  mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
+  SmiUntag(edx);
+  lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
+  jmp(edi);
 }
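
// In C++ terms the dispatch above is (a sketch; handler_table and
// code_instruction_start stand in for the fields loaded from edi):
//   unsigned index  = state >> StackHandler::kKindWidth;    // drop kind bits
//   intptr_t offset = handler_table[index] >> kSmiTagSize;  // untag the smi
//   jump_to(code_instruction_start + offset);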
 
 
@@ -588,99 +826,83 @@
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-  // eax must hold the exception.
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // The exception is expected in eax.
   if (!value.is(eax)) {
     mov(eax, value);
   }
-
-  // Drop the sp to the top of the handler.
-  ExternalReference handler_address(Isolate::kHandlerAddress,
-                                    isolate());
+  // Drop the stack pointer to the top of the top handler.
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   mov(esp, Operand::StaticVariable(handler_address));
-
-  // Restore next handler, context, and frame pointer; discard handler state.
+  // Restore the next handler.
   pop(Operand::StaticVariable(handler_address));
+
+  // Remove the code object and state, compute the handler address in edi.
+  pop(edi);  // Code object.
+  pop(edx);  // Index and state.
+
+  // Restore the context and frame pointer.
   pop(esi);  // Context.
   pop(ebp);  // Frame pointer.
-  pop(edx);  // State.
 
   // If the handler is a JS frame, restore the context to the frame.
-  // (edx == ENTRY) == (ebp == 0) == (esi == 0), so we could test any
-  // of them.
+  // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
+  // ebp or esi.
   Label skip;
-  cmp(Operand(edx), Immediate(StackHandler::ENTRY));
-  j(equal, &skip, Label::kNear);
+  test(esi, esi);
+  j(zero, &skip, Label::kNear);
   mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
   bind(&skip);
 
-  ret(0);
+  JumpToHandlerEntry();
 }
 
 
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
-                                      Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // eax must hold the exception.
+  // The exception is expected in eax.
   if (!value.is(eax)) {
     mov(eax, value);
   }
-
-  // Drop sp to the top stack handler.
-  ExternalReference handler_address(Isolate::kHandlerAddress,
-                                    isolate());
+  // Drop the stack pointer to the top of the top stack handler.
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   mov(esp, Operand::StaticVariable(handler_address));
 
-  // Unwind the handlers until the ENTRY handler is found.
-  Label loop, done;
-  bind(&loop);
-  // Load the type of the current stack handler.
-  const int kStateOffset = StackHandlerConstants::kStateOffset;
-  cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
-  j(equal, &done, Label::kNear);
-  // Fetch the next handler in the list.
-  const int kNextOffset = StackHandlerConstants::kNextOffset;
-  mov(esp, Operand(esp, kNextOffset));
-  jmp(&loop);
-  bind(&done);
+  // Unwind the handlers until the top ENTRY handler is found.
+  Label fetch_next, check_kind;
+  jmp(&check_kind, Label::kNear);
+  bind(&fetch_next);
+  mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
 
-  // Set the top handler address to next handler past the current ENTRY handler.
+  bind(&check_kind);
+  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+  test(Operand(esp, StackHandlerConstants::kStateOffset),
+       Immediate(StackHandler::KindField::kMask));
+  j(not_zero, &fetch_next);
+
+  // Set the top handler address to next handler past the top ENTRY handler.
   pop(Operand::StaticVariable(handler_address));
 
-  if (type == OUT_OF_MEMORY) {
-    // Set external caught exception to false.
-    ExternalReference external_caught(
-        Isolate::kExternalCaughtExceptionAddress,
-        isolate());
-    mov(eax, false);
-    mov(Operand::StaticVariable(external_caught), eax);
+  // Remove the code object and state, compute the handler address in edi.
+  pop(edi);  // Code object.
+  pop(edx);  // Index and state.
 
-    // Set pending exception and eax to out of memory exception.
-    ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
-                                        isolate());
-    mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
-    mov(Operand::StaticVariable(pending_exception), eax);
-  }
-
-  // Discard the context saved in the handler and clear the context pointer.
-  pop(edx);
-  Set(esi, Immediate(0));
-
-  // Restore fp from handler and discard handler state.
+  // Clear the context pointer and frame pointer (0 was saved in the handler).
+  pop(esi);
   pop(ebp);
-  pop(edx);  // State.
 
-  ret(0);
+  JumpToHandlerEntry();
 }
 
 
@@ -696,7 +918,7 @@
 
   // When generating debug code, make sure the lexical context is set.
   if (emit_debug_code()) {
-    cmp(Operand(scratch), Immediate(0));
+    cmp(scratch, Immediate(0));
     Check(not_equal, "we should not have an empty lexical context");
   }
   // Load the global context of the current context.
@@ -759,40 +981,39 @@
 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
   // Xor original key with a seed.
   if (Serializer::enabled()) {
-    ExternalReference roots_address =
-        ExternalReference::roots_address(isolate());
+    ExternalReference roots_array_start =
+        ExternalReference::roots_array_start(isolate());
     mov(scratch, Immediate(Heap::kHashSeedRootIndex));
-    mov(scratch, Operand::StaticArray(scratch,
-                                      times_pointer_size,
-                                      roots_address));
+    mov(scratch,
+        Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
     SmiUntag(scratch);
-    xor_(r0, Operand(scratch));
+    xor_(r0, scratch);
   } else {
     int32_t seed = isolate()->heap()->HashSeed();
-    xor_(r0, seed);
+    xor_(r0, Immediate(seed));
   }
 
   // hash = ~hash + (hash << 15);
   mov(scratch, r0);
   not_(r0);
   shl(scratch, 15);
-  add(r0, Operand(scratch));
+  add(r0, scratch);
   // hash = hash ^ (hash >> 12);
   mov(scratch, r0);
   shr(scratch, 12);
-  xor_(r0, Operand(scratch));
+  xor_(r0, scratch);
   // hash = hash + (hash << 2);
   lea(r0, Operand(r0, r0, times_4, 0));
   // hash = hash ^ (hash >> 4);
   mov(scratch, r0);
   shr(scratch, 4);
-  xor_(r0, Operand(scratch));
+  xor_(r0, scratch);
   // hash = hash * 2057;
   imul(r0, r0, 2057);
   // hash = hash ^ (hash >> 16);
   mov(scratch, r0);
   shr(scratch, 16);
-  xor_(r0, Operand(scratch));
+  xor_(r0, scratch);
 }
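
// The full mixing sequence above as standalone C++ (a sketch of the same
// integer hash; ComputeSeededHash is an illustrative name):
//   uint32_t ComputeSeededHash(uint32_t key, uint32_t seed) {
//     uint32_t hash = key ^ seed;
//     hash = ~hash + (hash << 15);
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);  // the lea(r0, Operand(r0, r0, times_4, 0))
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;
//     hash = hash ^ (hash >> 16);
//     return hash;
//   }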
 
 
@@ -836,9 +1057,9 @@
     mov(r2, r0);
     // Compute the masked index: (hash + i + i * i) & mask.
     if (i > 0) {
-      add(Operand(r2), Immediate(SeededNumberDictionary::GetProbeOffset(i)));
+      add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
     }
-    and_(r2, Operand(r1));
+    and_(r2, r1);
 
     // Scale the index by multiplying by the entry size.
     ASSERT(SeededNumberDictionary::kEntrySize == 3);
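
// Per probe this computes, in C++ terms, before scaling by the 3-word
// entry size:
//   uint32_t entry = (hash + SeededNumberDictionary::GetProbeOffset(i)) & mask;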
@@ -894,7 +1115,7 @@
   if (scratch.is(no_reg)) {
     mov(result, Operand::StaticVariable(new_space_allocation_top));
   } else {
-    mov(Operand(scratch), Immediate(new_space_allocation_top));
+    mov(scratch, Immediate(new_space_allocation_top));
     mov(result, Operand(scratch, 0));
   }
 }
@@ -953,7 +1174,7 @@
   if (!top_reg.is(result)) {
     mov(top_reg, result);
   }
-  add(Operand(top_reg), Immediate(object_size));
+  add(top_reg, Immediate(object_size));
   j(carry, gc_required);
   cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
@@ -964,12 +1185,12 @@
   // Tag result if requested.
   if (top_reg.is(result)) {
     if ((flags & TAG_OBJECT) != 0) {
-      sub(Operand(result), Immediate(object_size - kHeapObjectTag));
+      sub(result, Immediate(object_size - kHeapObjectTag));
     } else {
-      sub(Operand(result), Immediate(object_size));
+      sub(result, Immediate(object_size));
     }
   } else if ((flags & TAG_OBJECT) != 0) {
-    add(Operand(result), Immediate(kHeapObjectTag));
+    add(result, Immediate(kHeapObjectTag));
   }
 }
 
@@ -1007,7 +1228,7 @@
   // We assume that element_count*element_size + header_size does not
   // overflow.
   lea(result_end, Operand(element_count, element_size, header_size));
-  add(result_end, Operand(result));
+  add(result_end, result);
   j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
@@ -1052,7 +1273,7 @@
   if (!object_size.is(result_end)) {
     mov(result_end, object_size);
   }
-  add(result_end, Operand(result));
+  add(result_end, result);
   j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
@@ -1072,7 +1293,7 @@
       ExternalReference::new_space_allocation_top_address(isolate());
 
   // Make sure the object has no tag before resetting top.
-  and_(Operand(object), Immediate(~kHeapObjectTagMask));
+  and_(object, Immediate(~kHeapObjectTagMask));
 #ifdef DEBUG
   cmp(object, Operand::StaticVariable(new_space_allocation_top));
   Check(below, "Undo allocation of non allocated memory");
@@ -1111,7 +1332,7 @@
   ASSERT(kShortSize == 2);
   // scratch1 = length * 2 + kObjectAlignmentMask.
   lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
-  and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+  and_(scratch1, Immediate(~kObjectAlignmentMask));
 
   // Allocate two byte string in new space.
   AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
@@ -1145,10 +1366,10 @@
   ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
   mov(scratch1, length);
   ASSERT(kCharSize == 1);
-  add(Operand(scratch1), Immediate(kObjectAlignmentMask));
-  and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+  add(scratch1, Immediate(kObjectAlignmentMask));
+  and_(scratch1, Immediate(~kObjectAlignmentMask));
 
-  // Allocate ascii string in new space.
+  // Allocate ASCII string in new space.
   AllocateInNewSpace(SeqAsciiString::kHeaderSize,
                      times_1,
                      scratch1,
@@ -1176,7 +1397,7 @@
                                          Label* gc_required) {
   ASSERT(length > 0);
 
-  // Allocate ascii string in new space.
+  // Allocate ASCII string in new space.
   AllocateInNewSpace(SeqAsciiString::SizeFor(length),
                      result,
                      scratch1,
@@ -1280,7 +1501,7 @@
                                Register scratch) {
   Label loop, done, short_string, short_loop;
   // Experimentation shows that the short string loop is faster if length < 10.
-  cmp(Operand(length), Immediate(10));
+  cmp(length, Immediate(10));
   j(less_equal, &short_string);
 
   ASSERT(source.is(esi));
@@ -1295,12 +1516,12 @@
   mov(scratch, ecx);
   shr(ecx, 2);
   rep_movs();
-  and_(Operand(scratch), Immediate(0x3));
-  add(destination, Operand(scratch));
+  and_(scratch, Immediate(0x3));
+  add(destination, scratch);
   jmp(&done);
 
   bind(&short_string);
-  test(length, Operand(length));
+  test(length, length);
   j(zero, &done);
 
   bind(&short_loop);
@@ -1315,13 +1536,40 @@
 }
 
 
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
+  jmp(&entry);
+  bind(&loop);
+  mov(Operand(start_offset, 0), filler);
+  add(start_offset, Immediate(kPointerSize));
+  bind(&entry);
+  cmp(start_offset, end_offset);
+  j(less, &loop);
+}
+
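// Equivalent C++ for the loop above: store filler into every pointer-sized
// slot of the half-open range [start_offset, end_offset):
//   for (Address p = start_offset; p < end_offset; p += kPointerSize) {
//     *reinterpret_cast<Object**>(p) = filler;
//   }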
+
+void MacroAssembler::BooleanBitTest(Register object,
+                                    int field_offset,
+                                    int bit_index) {
+  bit_index += kSmiTagSize + kSmiShiftSize;
+  ASSERT(IsPowerOf2(kBitsPerByte));
+  int byte_index = bit_index / kBitsPerByte;
+  int byte_bit_index = bit_index & (kBitsPerByte - 1);
+  test_b(FieldOperand(object, field_offset + byte_index),
+         static_cast<byte>(1 << byte_bit_index));
+}
+
+
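// Worked example of the index arithmetic above, assuming ia32's
// kSmiTagSize == 1 and kSmiShiftSize == 0: testing flag bit 3 of a
// smi-tagged hints field gives bit_index = 4, byte_index = 0 and
// byte_bit_index = 4, so test_b checks mask 0x10 in the field's first byte.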
+
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
   Label ok;
-  test(result, Operand(result));
+  test(result, result);
   j(not_zero, &ok);
-  test(op, Operand(op));
+  test(op, op);
   j(sign, then_label);
   bind(&ok);
 }
@@ -1333,10 +1581,10 @@
                                       Register scratch,
                                       Label* then_label) {
   Label ok;
-  test(result, Operand(result));
+  test(result, result);
   j(not_zero, &ok);
-  mov(scratch, Operand(op1));
-  or_(scratch, Operand(op2));
+  mov(scratch, op1);
+  or_(scratch, op2);
   j(sign, then_label);
   bind(&ok);
 }
@@ -1345,7 +1593,8 @@
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
-                                             Label* miss) {
+                                             Label* miss,
+                                             bool miss_on_bound_function) {
   // Check that the receiver isn't a smi.
   JumpIfSmi(function, miss);
 
@@ -1353,6 +1602,15 @@
   CmpObjectType(function, JS_FUNCTION_TYPE, result);
   j(not_equal, miss);
 
+  if (miss_on_bound_function) {
+    // If a bound function, go to miss label.
+    mov(scratch,
+        FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
+                   SharedFunctionInfo::kBoundFunction);
+    j(not_zero, miss);
+  }
+
   // Make sure that the function has an instance prototype.
   Label non_instance;
   movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
@@ -1366,7 +1624,7 @@
   // If the prototype or initial map is the hole, don't return it and
   // simply miss the cache instead. This will allow us to allocate a
   // prototype object on-demand in the runtime system.
-  cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value()));
+  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
   j(equal, miss);
 
   // If the function does not have an initial map, we're done.
@@ -1389,48 +1647,32 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
   call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
-  return result;
-}
-
-
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
-  return result;
-}
-
-
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
 }
 
 
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
-    add(Operand(esp), Immediate(num_arguments * kPointerSize));
+    add(esp, Immediate(num_arguments * kPointerSize));
   }
   mov(eax, Immediate(isolate()->factory()->undefined_value()));
 }
@@ -1464,18 +1706,11 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(eax, Immediate(function->nargs));
   mov(ebx, Immediate(ExternalReference(function, isolate())));
-  CEntryStub ces(1);
-  ces.SaveDoubles();
+  CEntryStub ces(1, kSaveFPRegs);
   CallStub(&ces);
 }
 
 
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
-                                            int num_arguments) {
-  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments) {
   // If the expected number of arguments of the runtime function is
@@ -1497,26 +1732,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
-                                            int num_arguments) {
-  if (f->nargs >= 0 && f->nargs != num_arguments) {
-    IllegalOperation(num_arguments);
-    // Since we did not call the stub, there was no allocation failure.
-    // Return some non-failure object.
-    return isolate()->heap()->undefined_value();
-  }
-
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Set(eax, Immediate(num_arguments));
-  mov(ebx, Immediate(ExternalReference(f, isolate())));
-  CEntryStub ces(1);
-  return TryCallStub(&ces);
-}
-
-
 void MacroAssembler::CallExternalReference(ExternalReference ref,
                                            int num_arguments) {
   mov(eax, Immediate(num_arguments));
@@ -1539,17 +1754,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
-    const ExternalReference& ext, int num_arguments, int result_size) {
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Set(eax, Immediate(num_arguments));
-  return TryJumpToExternalReference(ext);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -1559,14 +1763,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
-                                                int num_arguments,
-                                                int result_size) {
-  return TryTailCallExternalReference(
-      ExternalReference(fid, isolate()), num_arguments, result_size);
-}
-
-
 // If true, a Handle<T> returned by value from a function with cdecl calling
 // convention will be returned directly as the value of the location_ field
 // in register eax.
@@ -1615,8 +1811,8 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
-                                                         int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+                                              int stack_space) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   ExternalReference limit_address =
@@ -1629,8 +1825,8 @@
   mov(edi, Operand::StaticVariable(limit_address));
   add(Operand::StaticVariable(level_address), Immediate(1));
 
-  // Call the api function!
-  call(function->address(), RelocInfo::RUNTIME_ENTRY);
+  // Call the api function.
+  call(function_address, RelocInfo::RUNTIME_ENTRY);
 
   if (!kReturnHandlesDirectly) {
     // PrepareCallApiFunction saved pointer to the output slot into
@@ -1645,7 +1841,7 @@
   Label leave_exit_frame;
 
   // Check if the result handle holds 0.
-  test(eax, Operand(eax));
+  test(eax, eax);
   j(zero, &empty_handle);
   // It was non-zero.  Dereference to get the result value.
   mov(eax, Operand(eax, 0));
@@ -1668,11 +1864,8 @@
   LeaveApiExitFrame();
   ret(stack_space * kPointerSize);
   bind(&promote_scheduled_exception);
-  MaybeObject* result =
-      TryTailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
-  if (result->IsFailure()) {
-    return result;
-  }
+  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+
   bind(&empty_handle);
   // It was zero; the result is undefined.
   mov(eax, isolate()->factory()->undefined_value());
@@ -1686,11 +1879,9 @@
   mov(edi, eax);
   mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
   mov(eax, Immediate(delete_extensions));
-  call(Operand(eax));
+  call(eax);
   mov(eax, edi);
   jmp(&leave_exit_frame);
-
-  return result;
 }
 
 
@@ -1702,15 +1893,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
-    const ExternalReference& ext) {
-  // Set the entry point and jump to the C entry runtime stub.
-  mov(ebx, Immediate(ext));
-  CEntryStub ces(1);
-  return TryTailCallStub(&ces);
-}
-
-
 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
   // This macro takes the dst register to make the code more readable
   // at the call sites. However, the dst register has to be ecx to
@@ -1720,10 +1902,10 @@
   if (call_kind == CALL_AS_FUNCTION) {
     // Set to some non-zero smi by updating the least significant
     // byte.
-    mov_b(Operand(dst), 1 << kSmiTagSize);
+    mov_b(dst, 1 << kSmiTagSize);
   } else {
     // Set to smi zero by clearing the register.
-    xor_(dst, Operand(dst));
+    xor_(dst, dst);
   }
 }
 
@@ -1733,11 +1915,13 @@
                                     Handle<Code> code_constant,
                                     const Operand& code_operand,
                                     Label* done,
+                                    bool* definitely_mismatches,
                                     InvokeFlag flag,
                                     Label::Distance done_near,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
   bool definitely_matches = false;
+  *definitely_mismatches = false;
   Label invoke;
   if (expected.is_immediate()) {
     ASSERT(actual.is_immediate());
@@ -1753,6 +1937,7 @@
         // arguments.
         definitely_matches = true;
       } else {
+        *definitely_mismatches = true;
         mov(ebx, expected.immediate());
       }
     }
@@ -1768,7 +1953,7 @@
     } else if (!expected.reg().is(actual.reg())) {
       // Both expected and actual are in (different) registers. This
       // is the case when we invoke functions using call and apply.
-      cmp(expected.reg(), Operand(actual.reg()));
+      cmp(expected.reg(), actual.reg());
       j(equal, &invoke);
       ASSERT(actual.reg().is(eax));
       ASSERT(expected.reg().is(ebx));
@@ -1780,7 +1965,7 @@
         isolate()->builtins()->ArgumentsAdaptorTrampoline();
     if (!code_constant.is_null()) {
       mov(edx, Immediate(code_constant));
-      add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+      add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
     } else if (!code_operand.is_reg(edx)) {
       mov(edx, code_operand);
     }
@@ -1790,7 +1975,9 @@
       SetCallKind(ecx, call_kind);
       call(adaptor, RelocInfo::CODE_TARGET);
       call_wrapper.AfterCall();
-      jmp(done, done_near);
+      if (!*definitely_mismatches) {
+        jmp(done, done_near);
+      }
     } else {
       SetCallKind(ecx, call_kind);
       jmp(adaptor, RelocInfo::CODE_TARGET);
@@ -1806,21 +1993,27 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
+  bool definitely_mismatches = false;
   InvokePrologue(expected, actual, Handle<Code>::null(), code,
-                 &done, flag, Label::kNear, call_wrapper,
-                 call_kind);
-  if (flag == CALL_FUNCTION) {
-    call_wrapper.BeforeCall(CallSize(code));
-    SetCallKind(ecx, call_kind);
-    call(code);
-    call_wrapper.AfterCall();
-  } else {
-    ASSERT(flag == JUMP_FUNCTION);
-    SetCallKind(ecx, call_kind);
-    jmp(code);
+                 &done, &definitely_mismatches, flag, Label::kNear,
+                 call_wrapper, call_kind);
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      SetCallKind(ecx, call_kind);
+      call(code);
+      call_wrapper.AfterCall();
+    } else {
+      ASSERT(flag == JUMP_FUNCTION);
+      SetCallKind(ecx, call_kind);
+      jmp(code);
+    }
+    bind(&done);
   }
-  bind(&done);
 }
 
 
@@ -1831,21 +2024,27 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
-  Operand dummy(eax);
-  InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
-                 call_wrapper, call_kind);
-  if (flag == CALL_FUNCTION) {
-    call_wrapper.BeforeCall(CallSize(code, rmode));
-    SetCallKind(ecx, call_kind);
-    call(code, rmode);
-    call_wrapper.AfterCall();
-  } else {
-    ASSERT(flag == JUMP_FUNCTION);
-    SetCallKind(ecx, call_kind);
-    jmp(code, rmode);
+  Operand dummy(eax, 0);
+  bool definitely_mismatches = false;
+  InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
+                 flag, Label::kNear, call_wrapper, call_kind);
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code, rmode));
+      SetCallKind(ecx, call_kind);
+      call(code, rmode);
+      call_wrapper.AfterCall();
+    } else {
+      ASSERT(flag == JUMP_FUNCTION);
+      SetCallKind(ecx, call_kind);
+      jmp(code, rmode);
+    }
+    bind(&done);
   }
-  bind(&done);
 }
 
 
@@ -1854,6 +2053,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   ASSERT(fun.is(edi));
   mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -1866,36 +2068,32 @@
 }
 
 
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  ASSERT(function->is_compiled());
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   // Get the function and setup the context.
-  mov(edi, Immediate(Handle<JSFunction>(function)));
+  LoadHeapObject(edi, function);
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   ParameterCount expected(function->shared()->formal_parameter_count());
-  if (V8::UseCrankshaft()) {
-    // TODO(kasperl): For now, we always call indirectly through the
-    // code field in the function to allow recompilation to take effect
-    // without changing any of the call sites.
-    InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-               expected, actual, flag, call_wrapper, call_kind);
-  } else {
-    Handle<Code> code(function->code());
-    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET,
-               flag, call_wrapper, call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+             expected, actual, flag, call_wrapper, call_kind);
 }
 
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
-  // Calls are not allowed in some stubs.
-  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Rely on the assertion to check that the number of provided
   // arguments match the expected number of arguments. Fake a
@@ -1906,6 +2104,7 @@
              expected, expected, flag, call_wrapper, CALL_AS_METHOD);
 }
 
+
 void MacroAssembler::GetBuiltinFunction(Register target,
                                         Builtins::JavaScript id) {
   // Load the JavaScript builtin function from the builtins object.
@@ -1915,6 +2114,7 @@
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
 }
 
+
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
   ASSERT(!target.is(edi));
   // Load the JavaScript builtin function from the builtins object.
@@ -1950,6 +2150,46 @@
 }
 
 
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+    ElementsKind expected_kind,
+    ElementsKind transitioned_kind,
+    Register map_in_out,
+    Register scratch,
+    Label* no_map_match) {
+  // Load the global or builtins object from the current context.
+  mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+  // Check that the function's map is the same as the expected cached map.
+  int expected_index =
+      Context::GetContextMapIndexFromElementsKind(expected_kind);
+  cmp(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
+  j(not_equal, no_map_match);
+
+  // Use the transitioned cached map.
+  int trans_index =
+      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+  mov(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
+}
+
+
+void MacroAssembler::LoadInitialArrayMap(
+    Register function_in, Register scratch, Register map_out) {
+  ASSERT(!function_in.is(map_out));
+  Label done;
+  mov(map_out, FieldOperand(function_in,
+                            JSFunction::kPrototypeOrInitialMapOffset));
+  if (!FLAG_smi_only_arrays) {
+    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                        FAST_ELEMENTS,
+                                        map_out,
+                                        scratch,
+                                        &done);
+  }
+  bind(&done);
+}
+
+
 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
   // Load the global or builtins object from the current context.
   mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -2006,6 +2246,29 @@
 }
 
 
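+// Objects in new space may move during GC, so a new-space object's address
+// cannot be embedded in code directly; it is loaded indirectly through a
+// JSGlobalPropertyCell instead.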
+void MacroAssembler::LoadHeapObject(Register result,
+                                    Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    mov(result, Operand::Cell(cell));
+  } else {
+    mov(result, object);
+  }
+}
+
+
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    push(Operand::Cell(cell));
+  } else {
+    Push(object);
+  }
+}
+
+
 void MacroAssembler::Ret() {
   ret(0);
 }
@@ -2016,7 +2279,7 @@
     ret(bytes_dropped);
   } else {
     pop(scratch);
-    add(Operand(esp), Immediate(bytes_dropped));
+    add(esp, Immediate(bytes_dropped));
     push(scratch);
     ret(0);
   }
@@ -2025,7 +2288,7 @@
 
 void MacroAssembler::Drop(int stack_elements) {
   if (stack_elements > 0) {
-    add(Operand(esp), Immediate(stack_elements * kPointerSize));
+    add(esp, Immediate(stack_elements * kPointerSize));
   }
 }
 
@@ -2037,11 +2300,6 @@
 }
 
 
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
-  mov(dst, value);
-}
-
-
 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   if (FLAG_native_code_counters && counter->Enabled()) {
     mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
@@ -2168,13 +2426,19 @@
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
 
   push(eax);
   push(Immediate(p0));
   push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
-  CallRuntime(Runtime::kAbort, 2);
+  // Disable stub call restrictions to always allow calls to abort.
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
   // will not return here
   int3();
 }
@@ -2197,7 +2461,7 @@
   ASSERT(is_uintn(power + HeapNumber::kExponentBias,
                   HeapNumber::kExponentBits));
   mov(scratch, Immediate(power + HeapNumber::kExponentBias));
-  movd(dst, Operand(scratch));
+  movd(dst, scratch);
   psllq(dst, HeapNumber::kMantissaBits);
 }
 
@@ -2223,8 +2487,8 @@
                                                          Label* failure) {
   // Check that both objects are not smis.
   STATIC_ASSERT(kSmiTag == 0);
-  mov(scratch1, Operand(object1));
-  and_(scratch1, Operand(object2));
+  mov(scratch1, object1);
+  and_(scratch1, object2);
   JumpIfSmi(scratch1, failure);
 
   // Load instance type for both strings.
@@ -2233,7 +2497,7 @@
   movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
 
-  // Check that both are flat ascii strings.
+  // Check that both are flat ASCII strings.
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
@@ -2253,12 +2517,12 @@
     // Make stack end at alignment and make room for num_arguments words
     // and the original value of esp.
     mov(scratch, esp);
-    sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
+    sub(esp, Immediate((num_arguments + 1) * kPointerSize));
     ASSERT(IsPowerOf2(frame_alignment));
     and_(esp, -frame_alignment);
     mov(Operand(esp, num_arguments * kPointerSize), scratch);
   } else {
-    sub(Operand(esp), Immediate(num_arguments * kPointerSize));
+    sub(esp, Immediate(num_arguments * kPointerSize));
   }
 }
 
@@ -2266,27 +2530,39 @@
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
   // Trashing eax is ok as it will be the return value.
-  mov(Operand(eax), Immediate(function));
+  mov(eax, Immediate(function));
   CallCFunction(eax, num_arguments);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
                                    int num_arguments) {
+  ASSERT(has_frame());
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
   }
 
-  call(Operand(function));
+  call(function);
   if (OS::ActivationFrameAlignment() != 0) {
     mov(esp, Operand(esp, num_arguments * kPointerSize));
   } else {
-    add(Operand(esp), Immediate(num_arguments * kPointerSize));
+    add(esp, Immediate(num_arguments * kPointerSize));
   }
 }
 
 
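+// Returns true if any two of the four given registers are the same register.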
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}
+
+
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
@@ -2308,6 +2584,238 @@
 }
 
 
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == zero || cc == not_zero);
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    mov(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, object);
+  }
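+  // If the mask fits in one byte, test only the low byte of the flags word.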
+  if (mask < (1 << kBitsPerByte)) {
+    test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+           static_cast<uint8_t>(mask));
+  } else {
+    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+  }
+  j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register scratch0,
+                                 Register scratch1,
+                                 Label* on_black,
+                                 Label::Distance on_black_near) {
+  HasColor(object, scratch0, scratch1,
+           on_black, on_black_near,
+           1, 0);  // kBlackBitPattern.
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+                              Register bitmap_scratch,
+                              Register mask_scratch,
+                              Label* has_color,
+                              Label::Distance has_color_distance,
+                              int first_bit,
+                              int second_bit) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
+
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
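+  // An object's color is encoded in two consecutive mark bits: white is 00,
+  // black is 10 and grey is 11 (see the bit pattern asserts in
+  // EnsureNotWhite).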
+  Label other_color, word_boundary;
+  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
+  add(mask_scratch, mask_scratch);  // Shift left 1 by adding.
+  j(zero, &word_boundary, Label::kNear);
+  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+  jmp(&other_color, Label::kNear);
+
+  bind(&word_boundary);
+  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
+
+  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+  bind(&other_color);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
+  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+  and_(bitmap_reg, addr_reg);
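+  // bitmap_reg now holds the start address of the page containing addr_reg.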
+  mov(ecx, addr_reg);
+  int shift =
+      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+  shr(ecx, shift);
+  and_(ecx,
+       (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
+
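+  // ecx now holds the cell-aligned byte offset of addr_reg's mark-bit cell,
+  // so bitmap_reg + ecx addresses that cell.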
+  add(bitmap_reg, ecx);
+  mov(ecx, addr_reg);
+  shr(ecx, kPointerSizeLog2);
+  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
+  mov(mask_reg, Immediate(1));
+  shl_cl(mask_reg);
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Label* value_is_white_and_not_data,
+    Label::Distance distance) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there we only need to check one bit.
+  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  j(not_zero, &done, Label::kNear);
+
+  if (FLAG_debug_code) {
+    // Check for impossible bit pattern.
+    Label ok;
+    push(mask_scratch);
+    // shl.  May overflow making the check conservative.
+    add(mask_scratch, mask_scratch);
+    test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+    pop(mask_scratch);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = ecx;  // Holds map while checking type.
+  Register length = ecx;  // Holds length of object after checking type.
+  Label not_heap_number;
+  Label is_data_object;
+
+  // Check for a heap number.
+  mov(map, FieldOperand(value, HeapObject::kMapOffset));
+  cmp(map, FACTORY->heap_number_map());
+  j(not_equal, &not_heap_number, Label::kNear);
+  mov(length, Immediate(HeapNumber::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_heap_number);
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = ecx;
+  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
+  j(not_zero, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  Label not_external;
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  test_b(instance_type, kExternalStringTag);
+  j(zero, &not_external, Label::kNear);
+  mov(length, Immediate(ExternalString::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_external);
+  // Sequential string, either ASCII or UC16.
+  ASSERT(kAsciiStringTag == 0x04);
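+  // The encoding bit (0x04) is set for ASCII strings: the and/xor pair below
+  // turns it into 0 for ASCII and 0x04 for UC16, so the add yields 4 or 8.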
+  and_(length, Immediate(kStringEncodingMask));
+  xor_(length, Immediate(kStringEncodingMask));
+  add(length, Immediate(0x04));
+  // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
+  // by 2. If we multiply the string length as smi by this, it still
+  // won't overflow a 32-bit value.
+  ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
+  ASSERT(SeqAsciiString::kMaxSize <=
+         static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
+  imul(length, FieldOperand(value, String::kLengthOffset));
+  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
+  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+  and_(length, Immediate(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
+      length);
+  if (FLAG_debug_code) {
+    mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+    cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
+    Check(less_equal, "Live Bytes Count overflow chunk size");
+  }
+
+  bind(&done);
+}
+
+
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+  Label next;
+  mov(ecx, eax);
+  bind(&next);
+
+  // Check that there are no elements.  Register ecx contains the
+  // current JS object we've reached through the prototype chain.
+  cmp(FieldOperand(ecx, JSObject::kElementsOffset),
+      isolate()->factory()->empty_fixed_array());
+  j(not_equal, call_runtime);
+
+  // Check that instance descriptors are not empty so that we can
+  // check for an enum cache.  Leave the map in ebx for the subsequent
+  // prototype load.
+  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
+  mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
+  JumpIfSmi(edx, call_runtime);
+
+  // Check that there is an enum cache in the non-empty instance
+  // descriptors (edx).  This is the case if the next enumeration
+  // index field does not contain a smi.
+  mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
+  JumpIfSmi(edx, call_runtime);
+
+  // For all objects but the receiver, check that the cache is empty.
+  Label check_prototype;
+  cmp(ecx, eax);
+  j(equal, &check_prototype, Label::kNear);
+  mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+  cmp(edx, isolate()->factory()->empty_fixed_array());
+  j(not_equal, call_runtime);
+
+  // Load the prototype from the map and loop if non-null.
+  bind(&check_prototype);
+  mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+  cmp(ecx, isolate()->factory()->null_value());
+  j(not_equal, &next);
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 8c5f5e9..66d1ce7 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
 
 #include "assembler.h"
+#include "frames.h"
 #include "v8globals.h"
 
 namespace v8 {
@@ -50,6 +51,13 @@
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
 
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -61,42 +69,130 @@
 
   // ---------------------------------------------------------------------------
   // GC Support
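+
+  // Tells RememberedSetHelper whether to return to the caller or to fall
+  // through after updating the remembered set.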
+  enum RememberedSetFinalAction {
+    kReturnAtEnd,
+    kFallThroughAtEnd
+  };
 
-  // For page containing |object| mark region covering |addr| dirty.
-  // RecordWriteHelper only works if the object is not in new
-  // space.
-  void RecordWriteHelper(Register object,
-                         Register addr,
-                         Register scratch);
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr,
+                           Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
 
-  // Check if object is in new space.
-  // scratch can be object itself, but it will be clobbered.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cc,  // equal for new space, not_equal otherwise.
-                  Label* branch,
-                  Label::Distance branch_near = Label::kFar);
+  void CheckPageFlag(Register object,
+                     Register scratch,
+                     int mask,
+                     Condition cc,
+                     Label* condition_met,
+                     Label::Distance condition_met_distance = Label::kFar);
 
-  // For page containing |object| mark region covering [object+offset]
-  // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. If offset is zero, then the scratch register
-  // contains the array index into the elements array represented as a
-  // Smi. All registers are clobbered by the operation. RecordWrite
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfNotInNewSpace(Register object,
+                           Register scratch,
+                           Label* branch,
+                           Label::Distance distance = Label::kFar) {
+    InNewSpace(object, scratch, zero, branch, distance);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfInNewSpace(Register object,
+                        Register scratch,
+                        Label* branch,
+                        Label::Distance distance = Label::kFar) {
+    InNewSpace(object, scratch, not_zero, branch, distance);
+  }
+
+  // Check if an object has a given incremental marking color.  Also uses ecx!
+  void HasColor(Register object,
+                Register scratch0,
+                Register scratch1,
+                Label* has_color,
+                Label::Distance has_color_distance,
+                int first_bit,
+                int second_bit);
+
+  void JumpIfBlack(Register object,
+                   Register scratch0,
+                   Register scratch1,
+                   Label* on_black,
+                   Label::Distance on_black_distance = Label::kFar);
+
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object,
+                      Register scratch1,
+                      Register scratch2,
+                      Label* object_is_white_and_not_data,
+                      Label::Distance distance);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  The value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
+  void RecordWriteField(
+      Register object,
+      int offset,
+      Register value,
+      Register scratch,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // Operand(reg, off).
+  void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK) {
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check);
+  }
+
+  // Notify the garbage collector that we wrote a pointer into a fixed array.
+  // |array| is the array being stored into, |value| is the
+  // object being stored.  |index| is the array index represented as a
+  // Smi.  All registers are clobbered by the operation.  RecordWriteArray
   // filters out smis so it does not update the write barrier if the
   // value is a smi.
-  void RecordWrite(Register object,
-                   int offset,
-                   Register value,
-                   Register scratch);
+  void RecordWriteArray(
+      Register array,
+      Register value,
+      Register index,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
   // For page containing |object| mark region covering |address|
   // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. All registers are clobbered by the
+  // object being stored. The address and value registers are clobbered by the
   // operation. RecordWrite filters out smis so it does not update the
   // write barrier if the value is a smi.
-  void RecordWrite(Register object,
-                   Register address,
-                   Register value);
+  void RecordWrite(
+      Register object,
+      Register address,
+      Register value,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
@@ -105,15 +201,6 @@
   void DebugBreak();
 #endif
 
-  // ---------------------------------------------------------------------------
-  // Activation frames
-
-  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
-  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
-  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
-  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
   // Enter specific kind of exit frame. Expects the number of
   // arguments in register eax and sets up the number of arguments in
   // register edi and the pointer to the first argument in register
@@ -134,6 +221,22 @@
   // Find the function context up the context chain.
   void LoadContext(Register dst, int context_chain_length);
 
+  // Conditionally load the cached Array transitioned map of type
+  // transitioned_kind from the global context if the map in register
+  // map_in_out is the cached Array map in the global context of
+  // expected_kind.
+  void LoadTransitionedArrayMapConditional(
+      ElementsKind expected_kind,
+      ElementsKind transitioned_kind,
+      Register map_in_out,
+      Register scratch,
+      Label* no_map_match);
+
+  // Load the initial map for new Arrays from a JSFunction.
+  void LoadInitialArrayMap(Register function_in,
+                           Register scratch,
+                           Register map_out);
+
   // Load the global function with the given index.
   void LoadGlobalFunction(int index, Register function);
 
@@ -150,15 +253,35 @@
   void StoreToSafepointRegisterSlot(Register dst, Immediate src);
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
+  void LoadHeapObject(Register result, Handle<HeapObject> object);
+  void PushHeapObject(Handle<HeapObject> object);
+
+  void LoadObject(Register result, Handle<Object> object) {
+    if (object->IsHeapObject()) {
+      LoadHeapObject(result, Handle<HeapObject>::cast(object));
+    } else {
+      Set(result, Immediate(object));
+    }
+  }
+
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
-  // Setup call kind marking in ecx. The method takes ecx as an
+  // Set up call kind marking in ecx. The method takes ecx as an
   // explicit first parameter to make the code more readable at the
   // call sites.
   void SetCallKind(Register dst, CallKind kind);
 
   // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(Register code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  InvokeFlag flag,
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind) {
+    InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
+  }
+
   void InvokeCode(const Operand& code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
@@ -182,7 +305,7 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(JSFunction* function,
+  void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
@@ -209,8 +332,9 @@
   void SafeSet(Register dst, const Immediate& x);
   void SafePush(const Immediate& x);
 
-  // Compare a register against a known root, e.g. undefined, null, true, ...
+  // Compare against a known root, e.g. undefined, null, true, ...
   void CompareRoot(Register with, Heap::RootListIndex index);
+  void CompareRoot(const Operand& with, Heap::RootListIndex index);
 
   // Compare object type for heap object.
   // Incoming register is heap_object and outgoing register is map.
@@ -225,13 +349,47 @@
                          Label* fail,
                          Label::Distance distance = Label::kFar);
 
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map,
+                               Label* fail,
+                               Label::Distance distance = Label::kFar);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiOnlyElements(Register map,
+                                Label* fail,
+                                Label::Distance distance = Label::kFar);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by key in
+  // the FastDoubleElements array elements, otherwise jump to fail.
+  void StoreNumberToDoubleElements(Register maybe_number,
+                                   Register elements,
+                                   Register key,
+                                   Register scratch1,
+                                   XMMRegister scratch2,
+                                   Label* fail,
+                                   bool specialize_for_processor);
+
+  // Compare an object's map with the specified map and its transitioned
+  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS.  FLAGS are set
+  // with the result of the map compare.  If multiple map compares are
+  // required, the compare sequence branches to early_success.
+  void CompareMap(Register obj,
+                  Handle<Map> map,
+                  Label* early_success,
+                  CompareMapMode mode = REQUIRE_EXACT_MAP);
+
   // Check if the map of an object is equal to a specified map and branch to
   // label if not. Skip the smi check if not required (object is known to be a
-  // heap object)
+  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+  // against maps that are ElementsKind transition maps of the specified map.
   void CheckMap(Register obj,
                 Handle<Map> map,
                 Label* fail,
-                SmiCheckType smi_check_type);
+                SmiCheckType smi_check_type,
+                CompareMapMode mode = REQUIRE_EXACT_MAP);
 
   // Check if the map of an object is equal to a specified map and branch to a
   // specified target if equal. Skip the smi check if not required (object is
@@ -277,7 +435,7 @@
   void SmiTag(Register reg) {
     STATIC_ASSERT(kSmiTag == 0);
     STATIC_ASSERT(kSmiTagSize == 1);
-    add(reg, Operand(reg));
+    add(reg, reg);
   }
   void SmiUntag(Register reg) {
     sar(reg, kSmiTagSize);
@@ -332,17 +490,17 @@
   // ---------------------------------------------------------------------------
   // Exception handling
 
-  // Push a new try handler and link into try handler chain.  The return
-  // address must be pushed before calling this helper.
-  void PushTryHandler(CodeLocation try_location, HandlerType type);
+  // Push a new try handler and link it into try handler chain.
+  void PushTryHandler(StackHandler::Kind kind, int handler_index);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   void PopTryHandler();
 
-  // Activate the top handler in the try hander chain.
+  // Throw to the top handler in the try handler chain.
   void Throw(Register value);
 
-  void ThrowUncatchable(UncatchableExceptionType type, Register value);
+  // Throw past all JS frames to the top JS entry frame.
+  void ThrowUncatchable(Register value);
 
   // ---------------------------------------------------------------------------
   // Inline caching support
@@ -466,9 +624,19 @@
                  Register length,
                  Register scratch);
 
+  // Initialize fields with filler values.  Fields starting at |start_offset|
+  // up to but not including |end_offset| are overwritten with the value in
+  // |filler|.  At the end of the loop, |start_offset| takes the value of
+  // |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset,
+                                  Register end_offset,
+                                  Register filler);
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
+  // Check a boolean-bit of a Smi field.
+  void BooleanBitTest(Register object, int field_offset, int bit_index);
+
   // Check if result is zero and op is negative.
   void NegativeZeroTest(Register result, Register op, Label* then_label);
 
@@ -485,7 +653,8 @@
   void TryGetFunctionPrototype(Register function,
                                Register result,
                                Register scratch,
-                               Label* miss);
+                               Label* miss,
+                               bool miss_on_bound_function = false);
 
   // Generates code for reporting that an illegal operation has
   // occurred.
@@ -503,19 +672,9 @@
   // Call a code stub.  Generate the code if necessary.
   void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
 
-  // Call a code stub and return the code object called.  Try to generate
-  // the code if necessary.  Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
   // Tail call a code stub (jump).  Generate the code if necessary.
   void TailCallStub(CodeStub* stub);
 
-  // Tail call a code stub (jump) and return the code object called.  Try to
-  // generate the code if necessary.  Do not perform a GC but instead return
-  // a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
 
@@ -523,19 +682,9 @@
   void CallRuntime(const Runtime::Function* f, int num_arguments);
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
-  // Call a runtime function, returning the CodeStub object called.
-  // Try to generate the stub code if necessary.  Do not perform a GC
-  // but instead return a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
-                                              int num_arguments);
-
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId id, int num_arguments);
 
-  // Convenience function: Same as above, but takes the fid instead.
-  MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
-                                              int num_arguments);
-
   // Convenience function: call an external reference.
   void CallExternalReference(ExternalReference ref, int num_arguments);
 
@@ -546,23 +695,11 @@
                                  int num_arguments,
                                  int result_size);
 
-  // Tail call of a runtime routine (jump). Try to generate the code if
-  // necessary. Do not perform a GC but instead return a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
-      const ExternalReference& ext, int num_arguments, int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
-  // Convenience function: tail call a runtime routine (jump). Try to generate
-  // the code if necessary. Do not perform a GC but instead return a retry after
-  // GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
-                                                  int num_arguments,
-                                                  int result_size);
-
   // Before calling a C-function from generated code, align arguments on stack.
   // After aligning the frame, arguments must be stored in esp[0], esp[4],
   // etc., not pushed. The argument count assumes all arguments are word sized.
@@ -587,19 +724,15 @@
   // stores the pointer to the reserved slot into esi.
   void PrepareCallApiFunction(int argc);
 
-  // Calls an API function. Allocates HandleScope, extracts
-  // returned value from handle and propagates exceptions.
-  // Clobbers ebx, edi and caller-save registers. Restores context.
-  // On return removes stack_space * kPointerSize (GCed).
-  MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
-                                           int stack_space);
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Clobbers ebx, edi and
+  // caller-save registers.  Restores context.  On return removes
+  // stack_space * kPointerSize (GCed).
+  void CallApiFunctionAndReturn(Address function_address, int stack_space);
 
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& ext);
 
-  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
-
   // ---------------------------------------------------------------------------
   // Utilities
 
@@ -624,10 +757,8 @@
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
-  void Move(Register target, Handle<Object> value);
-
   // Push a handle value.
-  void Push(Handle<Object> handle) { push(handle); }
+  void Push(Handle<Object> handle) { push(Immediate(handle)); }
 
   Handle<Object> CodeObject() {
     ASSERT(!code_object_.is_null());
@@ -668,11 +799,14 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
 
   // ---------------------------------------------------------------------------
   // String utilities.
 
-  // Check whether the instance type represents a flat ascii string. Jump to the
+  // Check whether the instance type represents a flat ASCII string. Jump to the
   // label if not. If the instance type can be scratched specify same register
   // for both instance type and scratch.
   void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
@@ -691,9 +825,18 @@
     return SafepointRegisterStackIndex(reg.code());
   }
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
+  // Expects object in eax and returns map with validated enum cache
+  // in eax.  Assumes that any other register can be used as a scratch.
+  void CheckEnumCache(Label* call_runtime);
+
  private:
   bool generating_stub_;
   bool allow_stub_calls_;
+  bool has_frame_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
@@ -703,15 +846,12 @@
                       Handle<Code> code_constant,
                       const Operand& code_operand,
                       Label* done,
+                      bool* definitely_mismatches,
                       InvokeFlag flag,
-                      Label::Distance done_near = Label::kFar,
+                      Label::Distance done_distance,
                       const CallWrapper& call_wrapper = NullCallWrapper(),
                       CallKind call_kind = CALL_AS_METHOD);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
   void EnterExitFramePrologue();
   void EnterExitFrameEpilogue(int argc, bool save_doubles);
 
@@ -730,6 +870,24 @@
                                                     Register scratch,
                                                     bool gc_allowed);
 
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,
+                  Label* condition_met,
+                  Label::Distance condition_met_distance = Label::kFar);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds a mask selecting the first of those bits.  Uses ecx as
+  // scratch and leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg,
+                          Register bitmap_reg,
+                          Register mask_reg);
+
+  // Helper for throwing exceptions.  Compute a handler address and jump to
+  // it.  See the implementation for register usage.
+  void JumpToHandlerEntry();
 
   // Compute memory operands for safepoint stack slots.
   Operand SafepointRegisterSlot(Register reg);
@@ -765,26 +923,26 @@
 // Static helper functions.
 
 // Generate an Operand for loading a field from an object.
-static inline Operand FieldOperand(Register object, int offset) {
+inline Operand FieldOperand(Register object, int offset) {
   return Operand(object, offset - kHeapObjectTag);
 }
 
 
 // Generate an Operand for loading an indexed field from an object.
-static inline Operand FieldOperand(Register object,
-                                   Register index,
-                                   ScaleFactor scale,
-                                   int offset) {
+inline Operand FieldOperand(Register object,
+                            Register index,
+                            ScaleFactor scale,
+                            int offset) {
   return Operand(object, index, scale, offset - kHeapObjectTag);
 }
 
 
-static inline Operand ContextOperand(Register context, int index) {
+inline Operand ContextOperand(Register context, int index) {
   return Operand(context, Context::SlotOffset(index));
 }
 
 
-static inline Operand GlobalObjectOperand() {
+inline Operand GlobalObjectOperand() {
   return ContextOperand(esi, Context::GLOBAL_INDEX);
 }
 
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index d175d9e..04d6b62 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -134,7 +134,7 @@
 
 void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
   if (by != 0) {
-    __ add(Operand(edi), Immediate(by * char_size()));
+    __ add(edi, Immediate(by * char_size()));
   }
 }
 
@@ -152,8 +152,8 @@
   CheckPreemption();
   // Pop Code* offset from backtrack stack, add Code* and jump to location.
   Pop(ebx);
-  __ add(Operand(ebx), Immediate(masm_->CodeObject()));
-  __ jmp(Operand(ebx));
+  __ add(ebx, Immediate(masm_->CodeObject()));
+  __ jmp(ebx);
 }
 
 
@@ -210,7 +210,7 @@
                                                bool check_end_of_string) {
 #ifdef DEBUG
   // If input is ASCII, don't even bother calling here if the string to
-  // match contains a non-ascii character.
+  // match contains a non-ASCII character.
   if (mode_ == ASCII) {
     ASSERT(String::IsAscii(str.start(), str.length()));
   }
@@ -219,7 +219,7 @@
   int byte_offset = cp_offset * char_size();
   if (check_end_of_string) {
     // Check that there are at least str.length() characters left in the input.
-    __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length)));
+    __ cmp(edi, Immediate(-(byte_offset + byte_length)));
     BranchOrBacktrack(greater, on_failure);
   }
 
@@ -288,7 +288,7 @@
   Label fallthrough;
   __ cmp(edi, Operand(backtrack_stackpointer(), 0));
   __ j(not_equal, &fallthrough);
-  __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));  // Pop.
+  __ add(backtrack_stackpointer(), Immediate(kPointerSize));  // Pop.
   BranchOrBacktrack(no_condition, on_equal);
   __ bind(&fallthrough);
 }
@@ -300,7 +300,7 @@
   Label fallthrough;
   __ mov(edx, register_location(start_reg));  // Index of start of capture
   __ mov(ebx, register_location(start_reg + 1));  // Index of end of capture
-  __ sub(ebx, Operand(edx));  // Length of capture.
+  __ sub(ebx, edx);  // Length of capture.
 
   // The length of a capture should not be negative. This can only happen
   // if the end of the capture is unrecorded, or at a point earlier than
@@ -320,9 +320,9 @@
     __ push(backtrack_stackpointer());
     // After this, the eax, ecx, and edi registers are available.
 
-    __ add(edx, Operand(esi));  // Start of capture
-    __ add(edi, Operand(esi));  // Start of text to match against capture.
-    __ add(ebx, Operand(edi));  // End of text to match against capture.
+    __ add(edx, esi);  // Start of capture
+    __ add(edi, esi);  // Start of text to match against capture.
+    __ add(ebx, edi);  // End of text to match against capture.
 
     Label loop;
     __ bind(&loop);
@@ -339,15 +339,15 @@
     __ movzx_b(ecx, Operand(edx, 0));
     __ or_(ecx, 0x20);
 
-    __ cmp(eax, Operand(ecx));
+    __ cmp(eax, ecx);
     __ j(not_equal, &fail);
 
     __ bind(&loop_increment);
     // Increment pointers into match and capture strings.
-    __ add(Operand(edx), Immediate(1));
-    __ add(Operand(edi), Immediate(1));
+    __ add(edx, Immediate(1));
+    __ add(edi, Immediate(1));
     // Compare to end of match, and loop if not done.
-    __ cmp(edi, Operand(ebx));
+    __ cmp(edi, ebx);
     __ j(below, &loop);
     __ jmp(&success);
 
@@ -361,9 +361,9 @@
     // Restore original value before continuing.
     __ pop(backtrack_stackpointer());
     // Drop original value of character position.
-    __ add(Operand(esp), Immediate(kPointerSize));
+    __ add(esp, Immediate(kPointerSize));
     // Compute new value of character position after the matched part.
-    __ sub(edi, Operand(esi));
+    __ sub(edi, esi);
   } else {
     ASSERT(mode_ == UC16);
     // Save registers before calling C function.
@@ -389,16 +389,19 @@
     // Set byte_offset2.
     // Found by adding negative string-end offset of current position (edi)
     // to end of string.
-    __ add(edi, Operand(esi));
+    __ add(edi, esi);
     __ mov(Operand(esp, 1 * kPointerSize), edi);
     // Set byte_offset1.
     // Start of capture, where edx already holds string-end negative offset.
-    __ add(edx, Operand(esi));
+    __ add(edx, esi);
     __ mov(Operand(esp, 0 * kPointerSize), edx);
 
-    ExternalReference compare =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
-    __ CallCFunction(compare, argument_count);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
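+      // This external call cannot cause a GC, so no frame needs to be set
+      // up for it.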
+      ExternalReference compare =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+      __ CallCFunction(compare, argument_count);
+    }
     // Pop original values before reacting on result value.
     __ pop(ebx);
     __ pop(backtrack_stackpointer());
@@ -406,10 +409,10 @@
     __ pop(esi);
 
     // Check if function returned non-zero for success or zero for failure.
-    __ or_(eax, Operand(eax));
+    __ or_(eax, eax);
     BranchOrBacktrack(zero, on_no_match);
     // On success, increment position by length of capture.
-    __ add(edi, Operand(ebx));
+    __ add(edi, ebx);
   }
   __ bind(&fallthrough);
 }
@@ -425,7 +428,7 @@
   // Find length of back-referenced capture.
   __ mov(edx, register_location(start_reg));
   __ mov(eax, register_location(start_reg + 1));
-  __ sub(eax, Operand(edx));  // Length to check.
+  __ sub(eax, edx);  // Length to check.
   // Fail on partial or illegal capture (start of capture after end of capture).
   BranchOrBacktrack(less, on_no_match);
   // Succeed on empty capture (including no capture)
@@ -433,7 +436,7 @@
 
   // Check that there are sufficient characters left in the input.
   __ mov(ebx, edi);
-  __ add(ebx, Operand(eax));
+  __ add(ebx, eax);
   BranchOrBacktrack(greater, on_no_match);
 
   // Save register to make it available below.
@@ -441,7 +444,7 @@
 
   // Compute pointers to match string and capture string
   __ lea(ebx, Operand(esi, edi, times_1, 0));  // Start of match.
-  __ add(edx, Operand(esi));  // Start of capture.
+  __ add(edx, esi);  // Start of capture.
   __ lea(ecx, Operand(eax, ebx, times_1, 0));  // End of match
 
   Label loop;
@@ -456,10 +459,10 @@
   }
   __ j(not_equal, &fail);
   // Increment pointers into capture and match string.
-  __ add(Operand(edx), Immediate(char_size()));
-  __ add(Operand(ebx), Immediate(char_size()));
+  __ add(edx, Immediate(char_size()));
+  __ add(ebx, Immediate(char_size()));
   // Check if we have reached end of match area.
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(below, &loop);
   __ jmp(&success);
 
@@ -471,7 +474,7 @@
   __ bind(&success);
   // Move current character position to position after match.
   __ mov(edi, ecx);
-  __ sub(Operand(edi), esi);
+  __ sub(edi, esi);
   // Restore backtrack stackpointer.
   __ pop(backtrack_stackpointer());
 
@@ -520,7 +523,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  ASSERT(minus < String::kMaxUC16CharCode);
+  ASSERT(minus < String::kMaxUtf16CodeUnit);
   __ lea(eax, Operand(current_character(), -minus));
   __ and_(eax, mask);
   __ cmp(eax, c);
@@ -574,17 +577,17 @@
     return true;
   case '.': {
     // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
-    __ mov(Operand(eax), current_character());
-    __ xor_(Operand(eax), Immediate(0x01));
+    __ mov(eax, current_character());
+    __ xor_(eax, Immediate(0x01));
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
-    __ sub(Operand(eax), Immediate(0x0b));
+    __ sub(eax, Immediate(0x0b));
     __ cmp(eax, 0x0c - 0x0b);
     BranchOrBacktrack(below_equal, on_no_match);
     if (mode_ == UC16) {
       // Compare original value to 0x2028 and 0x2029, using the already
       // computed (current_char ^ 0x01 - 0x0b). I.e., check for
       // 0x201d (0x2028 - 0x0b) or 0x201e.
-      __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+      __ sub(eax, Immediate(0x2028 - 0x0b));
       __ cmp(eax, 0x2029 - 0x2028);
       BranchOrBacktrack(below_equal, on_no_match);
     }
@@ -593,7 +596,7 @@
   case 'w': {
     if (mode_ != ASCII) {
       // Table is 128 entries, so all ASCII characters can be tested.
-      __ cmp(Operand(current_character()), Immediate('z'));
+      __ cmp(current_character(), Immediate('z'));
       BranchOrBacktrack(above, on_no_match);
     }
     ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
@@ -607,7 +610,7 @@
     Label done;
     if (mode_ != ASCII) {
       // Table is 128 entries, so all ASCII characters can be tested.
-      __ cmp(Operand(current_character()), Immediate('z'));
+      __ cmp(current_character(), Immediate('z'));
       __ j(above, &done);
     }
     ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
@@ -627,10 +630,10 @@
   case 'n': {
     // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
     // The opposite of '.'.
-    __ mov(Operand(eax), current_character());
-    __ xor_(Operand(eax), Immediate(0x01));
+    __ mov(eax, current_character());
+    __ xor_(eax, Immediate(0x01));
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
-    __ sub(Operand(eax), Immediate(0x0b));
+    __ sub(eax, Immediate(0x0b));
     __ cmp(eax, 0x0c - 0x0b);
     if (mode_ == ASCII) {
       BranchOrBacktrack(above, on_no_match);
@@ -641,7 +644,7 @@
       // Compare original value to 0x2028 and 0x2029, using the already
       // computed (current_char ^ 0x01 - 0x0b). I.e., check for
       // 0x201d (0x2028 - 0x0b) or 0x201e.
-      __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+      __ sub(eax, Immediate(0x2028 - 0x0b));
       __ cmp(eax, 1);
       BranchOrBacktrack(above, on_no_match);
       __ bind(&done);
@@ -668,7 +671,12 @@
 
   // Entry code:
   __ bind(&entry_label_);
-  // Start new stack frame.
+
+  // Tell the system that we have a stack frame.  Because the type is MANUAL, no
+  // code is generated.
+  FrameScope scope(masm_, StackFrame::MANUAL);
+
+  // Actually emit code to start a new stack frame.
   __ push(ebp);
   __ mov(ebp, esp);
   // Save callee-save registers. Order here should correspond to order of
@@ -699,7 +707,7 @@
 
   __ bind(&stack_limit_hit);
   CallCheckStackGuardState(ebx);
-  __ or_(eax, Operand(eax));
+  __ or_(eax, eax);
   // If returned value is non-zero, we exit with the returned value as result.
   __ j(not_zero, &exit_label_);
 
@@ -708,13 +716,13 @@
   __ mov(ebx, Operand(ebp, kStartIndex));
 
   // Allocate space on stack for registers.
-  __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize));
+  __ sub(esp, Immediate(num_registers_ * kPointerSize));
   // Load string length.
   __ mov(esi, Operand(ebp, kInputEnd));
   // Load input position.
   __ mov(edi, Operand(ebp, kInputStart));
   // Set up edi to be negative offset from string end.
-  __ sub(edi, Operand(esi));
+  __ sub(edi, esi);
 
   // Set eax to address of char before start of the string.
   // (effectively string position -1).
@@ -736,7 +744,7 @@
     Label init_loop;
     __ bind(&init_loop);
     __ mov(Operand(ebp, ecx, times_1, +0), eax);
-    __ sub(Operand(ecx), Immediate(kPointerSize));
+    __ sub(ecx, Immediate(kPointerSize));
     __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
     __ j(greater, &init_loop);
   }
@@ -777,12 +785,12 @@
       if (mode_ == UC16) {
         __ lea(ecx, Operand(ecx, edx, times_2, 0));
       } else {
-        __ add(ecx, Operand(edx));
+        __ add(ecx, edx);
       }
       for (int i = 0; i < num_saved_registers_; i++) {
         __ mov(eax, register_location(i));
         // Convert to index from start of string, not end.
-        __ add(eax, Operand(ecx));
+        __ add(eax, ecx);
         if (mode_ == UC16) {
           __ sar(eax, 1);  // Convert byte index to character index.
         }
@@ -819,7 +827,7 @@
     __ push(edi);
 
     CallCheckStackGuardState(ebx);
-    __ or_(eax, Operand(eax));
+    __ or_(eax, eax);
     // If returning non-zero, we should end execution with the given
     // result as return value.
     __ j(not_zero, &exit_label_);
@@ -854,7 +862,7 @@
     __ CallCFunction(grow_stack, num_arguments);
     // If it returns NULL, we have failed to grow the stack, and
     // must exit with a stack-overflow exception.
-    __ or_(eax, Operand(eax));
+    __ or_(eax, eax);
     __ j(equal, &exit_with_exception);
     // Otherwise use return value as new stack pointer.
     __ mov(backtrack_stackpointer(), eax);
@@ -1077,7 +1085,7 @@
   ASSERT(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
-  MaybeObject* result = Execution::HandleStackGuardInterrupt();
+  MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
 
   if (*code_handle != re_code) {  // Return address no longer valid
     int delta = code_handle->address() - re_code->address();
@@ -1133,6 +1141,11 @@
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+    // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC. That will not change start_address but
+    // will change the pointer inside the subject handle.
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
   }
 
   return 0;
@@ -1183,8 +1196,8 @@
 
 void RegExpMacroAssemblerIA32::SafeReturn() {
   __ pop(ebx);
-  __ add(Operand(ebx), Immediate(masm_->CodeObject()));
-  __ jmp(Operand(ebx));
+  __ add(ebx, Immediate(masm_->CodeObject()));
+  __ jmp(ebx);
 }
 
 
@@ -1196,14 +1209,14 @@
 void RegExpMacroAssemblerIA32::Push(Register source) {
   ASSERT(!source.is(backtrack_stackpointer()));
   // Notice: This updates flags, unlike normal Push.
-  __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
   __ mov(Operand(backtrack_stackpointer(), 0), source);
 }
 
 
 void RegExpMacroAssemblerIA32::Push(Immediate value) {
   // Notice: This updates flags, unlike normal Push.
-  __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
   __ mov(Operand(backtrack_stackpointer(), 0), value);
 }
 
@@ -1212,7 +1225,7 @@
   ASSERT(!target.is(backtrack_stackpointer()));
   __ mov(target, Operand(backtrack_stackpointer(), 0));
   // Notice: This updates flags, unlike normal Pop.
-  __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ add(backtrack_stackpointer(), Immediate(kPointerSize));
 }
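
Push and Pop above address the backtrack stack through its own pointer
register with explicit sub/add instead of the hardware push/pop, which is why
they clobber the flags. A minimal model of that discipline (a sketch with
illustrative names, assuming 32-bit cells):

    #include <cassert>
    #include <cstdint>

    // Grow-down stack addressed through a dedicated pointer, like
    // backtrack_stackpointer() in the code above.
    struct BacktrackStack {
      uint32_t* sp;
      void Push(uint32_t v) { sp -= 1; *sp = v; }              // sub + mov
      uint32_t Pop() { uint32_t v = *sp; sp += 1; return v; }  // mov + add
    };

    int main() {
      uint32_t storage[8];
      BacktrackStack s = { storage + 8 };
      s.Push(42);
      assert(s.Pop() == 42);
      return 0;
    }
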
 
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index ab62764..fd26779 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,19 +44,30 @@
                        Code::Flags flags,
                        StubCache::Table table,
                        Register name,
+                       Register receiver,
+                       // Number of the cache entry, pointer-size scaled.
                        Register offset,
                        Register extra) {
   ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
   ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
 
   Label miss;
 
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ lea(offset, Operand(offset, offset, times_2, 0));
+
   if (extra.is_valid()) {
     // Get the code entry from the cache.
-    __ mov(extra, Operand::StaticArray(offset, times_2, value_offset));
+    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
 
     // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
+    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+    __ j(not_equal, &miss);
+
+    // Check the map matches.
+    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
     __ j(not_equal, &miss);
 
     // Check that the flags match what we're looking for.
@@ -65,9 +76,17 @@
     __ cmp(offset, flags);
     __ j(not_equal, &miss);
 
+#ifdef DEBUG
+    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+      __ jmp(&miss);
+    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+      __ jmp(&miss);
+    }
+#endif
+
     // Jump to the first instruction in the code stub.
-    __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(Operand(extra));
+    __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(extra);
 
     __ bind(&miss);
   } else {
@@ -75,11 +94,19 @@
     __ push(offset);
 
     // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
+    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
     __ j(not_equal, &miss);
 
+    // Check the map matches.
+    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+    __ j(not_equal, &miss);
+
+    // Restore offset register.
+    __ mov(offset, Operand(esp, 0));
+
     // Get the code entry from the cache.
-    __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
 
     // Check that the flags match what we're looking for.
     __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
@@ -87,13 +114,21 @@
     __ cmp(offset, flags);
     __ j(not_equal, &miss);
 
+#ifdef DEBUG
+    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+      __ jmp(&miss);
+    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+      __ jmp(&miss);
+    }
+#endif
+
     // Restore offset and re-load code entry from cache.
     __ pop(offset);
-    __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
 
     // Jump to the first instruction in the code stub.
-    __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(Operand(offset));
+    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(offset);
 
     // Pop at miss.
     __ bind(&miss);
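
Both ProbeTable paths now index a table whose entries carry three
pointer-size fields instead of two, so the pointer-scaled probe offset is
first tripled with a single lea and every StaticArray operand drops from
times_2 to times_1. The address arithmetic, checked standalone (illustrative
types, not V8's):

    #include <cassert>
    #include <cstdint>

    struct Entry { uintptr_t key, value, map; };  // 3 fields per entry

    int main() {
      const int kPointerSize = sizeof(uintptr_t);
      int index = 7;                          // hypothetical probe result
      int offset = index * kPointerSize;      // pointer-scaled, as ProbeTable expects
      int byte_offset = offset + offset * 2;  // lea offset, [offset + offset*2]
      assert(byte_offset == index * static_cast<int>(sizeof(Entry)));
      return 0;
    }
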
@@ -107,12 +142,12 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-static MaybeObject* GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                                     Label* miss_label,
-                                                     Register receiver,
-                                                     String* name,
-                                                     Register r0,
-                                                     Register r1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                             Label* miss_label,
+                                             Register receiver,
+                                             Handle<String> name,
+                                             Register r0,
+                                             Register r1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1);
@@ -142,19 +177,14 @@
   __ j(not_equal, miss_label);
 
   Label done;
-  MaybeObject* result =
-      StringDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                         miss_label,
-                                                         &done,
-                                                         properties,
-                                                         name,
-                                                         r1);
-  if (result->IsFailure()) return result;
-
+  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     properties,
+                                                     name,
+                                                     r1);
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
-
-  return result;
 }
 
 
@@ -164,56 +194,69 @@
                               Register name,
                               Register scratch,
                               Register extra,
-                              Register extra2) {
-  Isolate* isolate = Isolate::Current();
+                              Register extra2,
+                              Register extra3) {
   Label miss;
-  USE(extra2);  // The register extra2 is not used on the ia32 platform.
 
-  // Make sure that code is valid. The shifting code relies on the
-  // entry size being 8.
-  ASSERT(sizeof(Entry) == 8);
+  // Assert that code is valid.  The multiplying code relies on the entry size
+  // being 12.
+  ASSERT(sizeof(Entry) == 12);
 
-  // Make sure the flags does not name a specific type.
+  // Assert the flags do not name a specific type.
   ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
 
-  // Make sure that there are no register conflicts.
+  // Assert that there are no register conflicts.
   ASSERT(!scratch.is(receiver));
   ASSERT(!scratch.is(name));
   ASSERT(!extra.is(receiver));
   ASSERT(!extra.is(name));
   ASSERT(!extra.is(scratch));
 
-  // Check scratch and extra registers are valid, and extra2 is unused.
+  // Assert scratch and extra registers are valid, and extra2/3 are unused.
   ASSERT(!scratch.is(no_reg));
   ASSERT(extra2.is(no_reg));
+  ASSERT(extra3.is(no_reg));
+
+  Register offset = scratch;
+  scratch = no_reg;
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
 
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
 
   // Get the map of the receiver and compute the hash.
-  __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
-  __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(scratch, flags);
-  __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ mov(offset, FieldOperand(name, String::kHashFieldOffset));
+  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(offset, flags);
+  // We mask out the last two bits because they are not part of the hash and
+  // they are always 01 for maps.  Also in the two 'and' instructions below.
+  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  // ProbeTable expects the offset to be pointer scaled, which it is, because
+  // the heap object tag size is 2 and the pointer size log 2 is also 2.
+  ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra);
+  ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
 
   // Primary miss: Compute hash for secondary probe.
-  __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
-  __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(scratch, flags);
-  __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
-  __ sub(scratch, Operand(name));
-  __ add(Operand(scratch), Immediate(flags));
-  __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
+  __ mov(offset, FieldOperand(name, String::kHashFieldOffset));
+  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(offset, flags);
+  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ sub(offset, name);
+  __ add(offset, Immediate(flags));
+  __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra);
+  ProbeTable(
+      isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
   __ bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
 }
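
A standalone sketch of the primary and secondary probe offsets computed
above, relying on the asserted kHeapObjectTagSize == kPointerSizeLog2 == 2
(an approximation with illustrative names, not V8 code):

    #include <cstdint>

    const uint32_t kTagSize = 2;  // kHeapObjectTagSize == kPointerSizeLog2

    uint32_t PrimaryOffset(uint32_t hash_field, uint32_t map_word,
                           uint32_t flags, uint32_t table_size) {
      uint32_t offset = (hash_field + map_word) ^ flags;  // add, then xor
      // Mask to the table; the low two bits (always 01 for maps) drop out.
      return offset & ((table_size - 1) << kTagSize);
    }

    uint32_t SecondaryOffset(uint32_t primary, uint32_t name_word,
                             uint32_t flags, uint32_t table_size) {
      uint32_t offset = primary - name_word + flags;      // sub name, add flags
      return offset & ((table_size - 1) << kTagSize);
    }

    int main() {
      // Offsets stay pointer-scaled, so ProbeTable can use them directly.
      return PrimaryOffset(0x1234, 0x5679, 0x42, 2048) & 3;  // always 0
    }
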
 
 
@@ -228,14 +271,17 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
   // Check we're still in the same context.
   __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
          masm->isolate()->global());
   __ j(not_equal, miss);
   // Get the global function with the given index.
-  JSFunction* function =
-      JSFunction::cast(masm->isolate()->global_context()->get(index));
+  Handle<JSFunction> function(
+      JSFunction::cast(masm->isolate()->global_context()->get(index)));
   // Load its initial map. The global functions all have initial maps.
   __ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
   // Load the prototype from the initial map.
@@ -318,7 +364,7 @@
                                                  Register scratch2,
                                                  Label* miss_label) {
   __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ mov(eax, Operand(scratch1));
+  __ mov(eax, scratch1);
   __ ret(0);
 }
 
@@ -327,8 +373,10 @@
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst, Register src,
-                                            JSObject* holder, int index) {
+                                            Register dst,
+                                            Register src,
+                                            Handle<JSObject> holder,
+                                            int index) {
   // Adjust for the number of properties stored in the holder.
   index -= holder->map()->inobject_properties();
   if (index < 0) {
@@ -348,12 +396,12 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     JSObject* holder_obj) {
+                                     Handle<JSObject> holder_obj) {
   __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
   Register scratch = name;
-  __ mov(scratch, Immediate(Handle<Object>(interceptor)));
+  __ mov(scratch, Immediate(interceptor));
   __ push(scratch);
   __ push(receiver);
   __ push(holder);
@@ -361,11 +409,12 @@
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
-                                                   Register receiver,
-                                                   Register holder,
-                                                   Register name,
-                                                   JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
   __ CallExternalReference(
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
@@ -406,15 +455,15 @@
   //                                          frame.
   // -----------------------------------
   __ pop(scratch);
-  __ add(Operand(esp), Immediate(kPointerSize * kFastApiCallArguments));
+  __ add(esp, Immediate(kPointerSize * kFastApiCallArguments));
   __ push(scratch);
 }
 
 
 // Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
-                                        const CallOptimization& optimization,
-                                        int argc) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+                                const CallOptimization& optimization,
+                                int argc) {
   // ----------- S t a t e -------------
   //  -- esp[0]              : return address
   //  -- esp[4]              : object passing the type check
@@ -429,30 +478,25 @@
   //  -- esp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  JSFunction* function = optimization.constant_function();
-  __ mov(edi, Immediate(Handle<JSFunction>(function)));
+  Handle<JSFunction> function = optimization.constant_function();
+  __ LoadHeapObject(edi, function);
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Pass the additional arguments.
   __ mov(Operand(esp, 2 * kPointerSize), edi);
-  Object* call_data = optimization.api_call_info()->data();
-  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
-  if (masm->isolate()->heap()->InNewSpace(call_data)) {
-    __ mov(ecx, api_call_info_handle);
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data(api_call_info->data());
+  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+    __ mov(ecx, api_call_info);
     __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
     __ mov(Operand(esp, 3 * kPointerSize), ebx);
   } else {
-    __ mov(Operand(esp, 3 * kPointerSize),
-           Immediate(Handle<Object>(call_data)));
+    __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
   }
 
   // Prepare arguments.
   __ lea(eax, Operand(esp, 3 * kPointerSize));
 
-  Object* callback = optimization.api_call_info()->callback();
-  Address api_function_address = v8::ToCData<Address>(callback);
-  ApiFunction fun(api_function_address);
-
   const int kApiArgc = 1;  // API function gets reference to the v8::Arguments.
 
   // Allocate the v8::Arguments structure in the arguments' space since
@@ -462,7 +506,7 @@
   __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
 
   __ mov(ApiParameterOperand(1), eax);  // v8::Arguments::implicit_args_.
-  __ add(Operand(eax), Immediate(argc * kPointerSize));
+  __ add(eax, Immediate(argc * kPointerSize));
   __ mov(ApiParameterOperand(2), eax);  // v8::Arguments::values_.
   __ Set(ApiParameterOperand(3), Immediate(argc));  // v8::Arguments::length_.
   // v8::Arguments::is_construct_call_.
@@ -472,12 +516,10 @@
   __ lea(eax, ApiParameterOperand(1));
   __ mov(ApiParameterOperand(0), eax);
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
-  return masm->TryCallApiFunctionAndReturn(&fun,
-                                           argc + kFastApiCallArguments + 1);
+  // Function address is a foreign pointer outside V8's heap.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  __ CallApiFunctionAndReturn(function_address,
+                              argc + kFastApiCallArguments + 1);
 }
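
The four ApiParameterOperand stores above lay out the v8::Arguments
bookkeeping next to the argument vector. Seen as a struct (field names follow
the comments above; the struct itself is only illustrative):

    #include <cstdint>

    struct ArgumentsBlock {
      uintptr_t* implicit_args;  // eax: points at the call data slot
      uintptr_t* values;         // implicit_args + argc * kPointerSize
      int length;                // argc
      int is_construct_call;     // 0 for a regular call
    };

    int main() { return sizeof(ArgumentsBlock) != 0 ? 0 : 1; }
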
 
 
@@ -486,22 +528,22 @@
   CallInterceptorCompiler(StubCompiler* stub_compiler,
                           const ParameterCount& arguments,
                           Register name,
-                          Code::ExtraICState extra_ic_state)
+                          Code::ExtraICState extra_state)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
         name_(name),
-        extra_ic_state_(extra_ic_state) {}
+        extra_state_(extra_state) {}
 
-  MaybeObject* Compile(MacroAssembler* masm,
-                       JSObject* object,
-                       JSObject* holder,
-                       String* name,
-                       LookupResult* lookup,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       Label* miss) {
+  void Compile(MacroAssembler* masm,
+               Handle<JSObject> object,
+               Handle<JSObject> holder,
+               Handle<String> name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
@@ -509,45 +551,27 @@
     __ JumpIfSmi(receiver, miss);
 
     CallOptimization optimization(lookup);
-
     if (optimization.is_constant_call()) {
-      return CompileCacheable(masm,
-                              object,
-                              receiver,
-                              scratch1,
-                              scratch2,
-                              scratch3,
-                              holder,
-                              lookup,
-                              name,
-                              optimization,
-                              miss);
+      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+                       holder, lookup, name, optimization, miss);
     } else {
-      CompileRegular(masm,
-                     object,
-                     receiver,
-                     scratch1,
-                     scratch2,
-                     scratch3,
-                     name,
-                     holder,
-                     miss);
-      return masm->isolate()->heap()->undefined_value();  // Success.
+      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+                     name, holder, miss);
     }
   }
 
  private:
-  MaybeObject* CompileCacheable(MacroAssembler* masm,
-                                JSObject* object,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                JSObject* interceptor_holder,
-                                LookupResult* lookup,
-                                String* name,
-                                const CallOptimization& optimization,
-                                Label* miss_label) {
+  void CompileCacheable(MacroAssembler* masm,
+                        Handle<JSObject> object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        Register scratch3,
+                        Handle<JSObject> interceptor_holder,
+                        LookupResult* lookup,
+                        Handle<String> name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
@@ -556,16 +580,14 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
-      depth1 =
-          optimization.GetPrototypeDepthOfExpectedType(object,
-                                                       interceptor_holder);
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(
+          object, interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 =
-            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
-                                                         lookup->holder());
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            interceptor_holder, Handle<JSObject>(lookup->holder()));
       }
-      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
-                             (depth2 != kInvalidProtoDepth);
+      can_do_fast_api_call =
+          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
     }
 
     Counters* counters = masm->isolate()->counters();
@@ -581,9 +603,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver,
-                                        interceptor_holder, scratch1,
-                                        scratch2, scratch3, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3,
+                                        name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -596,10 +618,11 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      lookup->holder(), scratch1,
-                                      scratch2, scratch3, name, depth2, miss);
+                                      Handle<JSObject>(lookup->holder()),
+                                      scratch1, scratch2, scratch3,
+                                      name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -610,11 +633,9 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      MaybeObject* result =
-          GenerateFastApiCall(masm, optimization, arguments_.immediate());
-      if (result->IsFailure()) return result;
+      GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
-      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
           ? CALL_AS_FUNCTION
           : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
@@ -633,33 +654,27 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm, scratch1);
     }
-
-    return masm->isolate()->heap()->undefined_value();  // Success.
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      JSObject* object,
+                      Handle<JSObject> object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      String* name,
-                      JSObject* interceptor_holder,
+                      Handle<String> name,
+                      Handle<JSObject> interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3, name,
-                                        miss_label);
+                                        scratch1, scratch2, scratch3,
+                                        name, miss_label);
 
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             interceptor_holder);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
 
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -668,27 +683,30 @@
 
     // Restore the name_ register.
     __ pop(name_);
-    __ LeaveInternalFrame();
+
+    // Leave the internal frame.
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           JSObject* holder_obj,
+                           Handle<JSObject> holder_obj,
                            Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
-    __ push(holder);  // Save the holder.
-    __ push(name_);  // Save the name.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(holder);  // Save the holder.
+      __ push(name_);  // Save the name.
 
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
+      CompileCallLoadPropertyWithInterceptor(masm,
+                                             receiver,
+                                             holder,
+                                             name_,
+                                             holder_obj);
 
-    __ pop(name_);  // Restore the name.
-    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
+      __ pop(name_);  // Restore the name.
+      __ pop(receiver);  // Restore the holder.
+      // Leave the internal frame.
+    }
 
     __ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel());
     __ j(not_equal, interceptor_succeeded);
@@ -697,49 +715,39 @@
   StubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
-  Code::ExtraICState extra_ic_state_;
+  Code::ExtraICState extra_state_;
 };
 
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Code* code = NULL;
-  if (kind == Code::LOAD_IC) {
-    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
-  } else {
-    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
-  }
-
-  Handle<Code> ic(code);
-  __ jmp(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code = (kind == Code::LOAD_IC)
+      ? masm->isolate()->builtins()->LoadIC_Miss()
+      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+  __ jmp(code, RelocInfo::CODE_TARGET);
 }
 
 
 void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
-  Code* code = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  Handle<Code> ic(code);
-  __ jmp(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(code, RelocInfo::CODE_TARGET);
 }
 
 
 // Both name_reg and receiver_reg are preserved on jumps to miss_label,
 // but may be destroyed if store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      JSObject* object,
+                                      Handle<JSObject> object,
                                       int index,
-                                      Map* transition,
+                                      Handle<Map> transition,
                                       Register receiver_reg,
                                       Register name_reg,
                                       Register scratch,
                                       Label* miss_label) {
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver_reg, miss_label);
-
   // Check that the map of the object hasn't changed.
-  __ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(object->map())));
-  __ j(not_equal, miss_label);
+  __ CheckMap(receiver_reg, Handle<Map>(object->map()),
+              miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -751,12 +759,12 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   // Perform map transition for the receiver if necessary.
-  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ pop(scratch);  // Return address.
     __ push(receiver_reg);
-    __ push(Immediate(Handle<Map>(transition)));
+    __ push(Immediate(transition));
     __ push(eax);
     __ push(scratch);
     __ TailCallExternalReference(
@@ -767,11 +775,11 @@
     return;
   }
 
-  if (transition != NULL) {
+  if (!transition.is_null()) {
     // Update the map of the object; no write barrier updating is
     // needed because the map is never in new space.
     __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset),
-           Immediate(Handle<Map>(transition)));
+           Immediate(transition));
   }
 
   // Adjust for the number of properties stored in the object. Even in the
@@ -786,8 +794,12 @@
 
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
-    __ mov(name_reg, Operand(eax));
-    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+    __ mov(name_reg, eax);
+    __ RecordWriteField(receiver_reg,
+                        offset,
+                        name_reg,
+                        scratch,
+                        kDontSaveFPRegs);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -797,8 +809,12 @@
 
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
-    __ mov(name_reg, Operand(eax));
-    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+    __ mov(name_reg, eax);
+    __ RecordWriteField(scratch,
+                        offset,
+                        name_reg,
+                        receiver_reg,
+                        kDontSaveFPRegs);
   }
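
Both branches above now pair the field store with RecordWriteField, which
records the written slot so incremental marking sees the new edge;
kDontSaveFPRegs tells the barrier's slow path it need not preserve
floating-point registers. The store-plus-barrier shape in miniature (a
generic model, not V8's implementation):

    #include <cstdint>

    struct RememberedSet {
      void Record(void* slot) { /* e.g. card-mark or buffer the slot */ }
    };

    void StoreField(RememberedSet* rs, uintptr_t* object, int offset,
                    uintptr_t value) {
      uintptr_t* slot = object + offset / sizeof(uintptr_t);
      *slot = value;     // the mov into FieldOperand above
      rs->Record(slot);  // the RecordWriteField call
    }

    int main() {
      RememberedSet rs;
      uintptr_t obj[4] = { 0 };
      StoreField(&rs, obj, 2 * sizeof(uintptr_t), 7);
      return obj[2] == 7 ? 0 : 1;
    }
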
 
   // Return the value (register eax).
@@ -809,70 +825,58 @@
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
-    MacroAssembler* masm,
-    GlobalObject* global,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  Object* probe;
-  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+                                      Handle<GlobalObject> global,
+                                      Handle<String> name,
+                                      Register scratch,
+                                      Label* miss) {
+  Handle<JSGlobalPropertyCell> cell =
+      GlobalObject::EnsurePropertyCell(global, name);
   ASSERT(cell->value()->IsTheHole());
+  Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
   if (Serializer::enabled()) {
-    __ mov(scratch, Immediate(Handle<Object>(cell)));
+    __ mov(scratch, Immediate(cell));
     __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
-           Immediate(masm->isolate()->factory()->the_hole_value()));
+           Immediate(the_hole));
   } else {
-    __ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
-           Immediate(masm->isolate()->factory()->the_hole_value()));
+    __ cmp(Operand::Cell(cell), Immediate(the_hole));
   }
   __ j(not_equal, miss);
-  return cell;
 }
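
GenerateCheckPropertyCell keeps a cached negative lookup honest: the cell is
created at compile time and the generated code only re-checks that it still
holds the hole. The invariant in miniature (sentinel and types are
illustrative):

    #include <cassert>

    struct Cell { const void* value; };
    static const char kTheHoleSentinel = 0;
    static const void* const kTheHole = &kTheHoleSentinel;

    // The stub stays valid only while the global property is still absent.
    static bool NegativeCacheStillValid(const Cell& cell) {
      return cell.value == kTheHole;
    }

    int main() {
      Cell cell = { kTheHole };
      assert(NegativeCacheStillValid(cell));
      cell.value = 0;  // the property was defined later
      assert(!NegativeCacheStillValid(cell));
      return 0;
    }
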
 
 
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
-    MacroAssembler* masm,
-    JSObject* object,
-    JSObject* holder,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  JSObject* current = object;
-  while (current != holder) {
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+                                       Handle<JSObject> object,
+                                       Handle<JSObject> holder,
+                                       Handle<String> name,
+                                       Register scratch,
+                                       Label* miss) {
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
     if (current->IsGlobalObject()) {
-      // Returns a cell or a failure.
-      MaybeObject* result = GenerateCheckPropertyCell(
-          masm,
-          GlobalObject::cast(current),
-          name,
-          scratch,
-          miss);
-      if (result->IsFailure()) return result;
+      GenerateCheckPropertyCell(masm,
+                                Handle<GlobalObject>::cast(current),
+                                name,
+                                scratch,
+                                miss);
     }
-    ASSERT(current->IsJSObject());
-    current = JSObject::cast(current->GetPrototype());
+    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
   }
-  return NULL;
 }
 
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
 
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                        Register object_reg,
-                                       JSObject* holder,
+                                       Handle<JSObject> holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
-                                       String* name,
+                                       Handle<String> name,
                                        int save_at_depth,
                                        Label* miss) {
   // Make sure there's no overlap between holder and object registers.
@@ -882,7 +886,7 @@
 
   // Keep track of the current object in register reg.
   Register reg = object_reg;
-  JSObject* current = object;
+  Handle<JSObject> current = object;
   int depth = 0;
 
   if (save_at_depth == depth) {
@@ -891,79 +895,55 @@
 
   // Traverse the prototype chain and check the maps in the prototype chain for
   // fast and global objects or do negative lookup for normal objects.
-  while (current != holder) {
-    depth++;
+  while (!current.is_identical_to(holder)) {
+    ++depth;
 
     // Only global objects and objects that do not require access
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
-    ASSERT(current->GetPrototype()->IsJSObject());
-    JSObject* prototype = JSObject::cast(current->GetPrototype());
+    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
-        Object* lookup_result = NULL;  // Initialization to please compiler.
-        if (!maybe_lookup_result->ToObject(&lookup_result)) {
-          set_failure(Failure::cast(maybe_lookup_result));
-          return reg;
-        }
-        name = String::cast(lookup_result);
+        name = factory()->LookupSymbol(name);
       }
-      ASSERT(current->property_dictionary()->FindEntry(name) ==
+      ASSERT(current->property_dictionary()->FindEntry(*name) ==
              StringDictionary::kNotFound);
 
-      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
-                                                                      miss,
-                                                                      reg,
-                                                                      name,
-                                                                      scratch1,
-                                                                      scratch2);
-      if (negative_lookup->IsFailure()) {
-        set_failure(Failure::cast(negative_lookup));
-        return reg;
-      }
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+                                       scratch1, scratch2);
 
       __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-    } else if (heap()->InNewSpace(prototype)) {
-      // Get the map of the current object.
-      __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
-      // Branch on the result of the map check.
-      __ j(not_equal, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
-        // Restore scratch register to be the map of the object.
-        // We load the prototype from the map in the scratch register.
-        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
-      reg = holder_reg;  // from now the object is in holder_reg
+      reg = holder_reg;  // From now on the object will be in holder_reg.
       __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      // Check the map of the current object.
-      __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-             Immediate(Handle<Map>(current->map())));
-      // Branch on the result of the map check.
-      __ j(not_equal, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+      bool in_new_space = heap()->InNewSpace(*prototype);
+      Handle<Map> current_map(current->map());
+      if (in_new_space) {
+        // Save the map in scratch1 for later.
+        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
       }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ mov(reg, Handle<JSObject>(prototype));
+      __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
+                  ALLOW_ELEMENT_TRANSITION_MAPS);
+
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
+      }
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (in_new_space) {
+        // The prototype is in new space; we cannot store a reference to it
+        // in the code.  Load it from the map.
+        __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+      } else {
+        // The prototype is in old space; load it directly.
+        __ mov(reg, prototype);
+      }
     }
 
     if (save_at_depth == depth) {
@@ -973,54 +953,46 @@
     // Go to the next object in the prototype chain.
     current = prototype;
   }
-  ASSERT(current == holder);
+  ASSERT(current.is_identical_to(holder));
 
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
   // Check the holder map.
-  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(holder->map())));
-  __ j(not_equal, miss);
+  __ CheckMap(reg, Handle<Map>(holder->map()),
+              miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform security check for access to the global object.
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
   if (holder->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  };
+  }
 
-  // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.  We also need to check that the
-  // property cell for the property is still empty.
-  MaybeObject* result = GenerateCheckPropertyCells(masm(),
-                                                   object,
-                                                   holder,
-                                                   name,
-                                                   scratch1,
-                                                   miss);
-  if (result->IsFailure()) set_failure(Failure::cast(result));
+  // If we've skipped any global objects, it's not enough to verify that
+  // their maps haven't changed.  We also need to check that the property
+  // cell for the property is still empty.
+  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
 
   // Return the register containing the holder.
   return reg;
 }
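
For each link in the chain, the rewritten CheckPrototypes loop picks one of
three strategies: a dictionary negative lookup for slow-mode, non-global
objects, otherwise a map check followed by loading the prototype either from
the map (new space, so its address cannot be baked into code) or as an
immediate (old space). The branch structure, modelled standalone
(illustrative names):

    #include <cassert>

    enum ProtoLoad { kNegativeLookup, kMapCheckLoadFromMap, kMapCheckImmediate };

    static ProtoLoad Classify(bool fast_properties, bool global_like,
                              bool proto_in_new_space) {
      if (!fast_properties && !global_like) return kNegativeLookup;
      return proto_in_new_space ? kMapCheckLoadFromMap : kMapCheckImmediate;
    }

    int main() {
      assert(Classify(false, false, true) == kNegativeLookup);
      assert(Classify(true, false, true) == kMapCheckLoadFromMap);
      assert(Classify(true, false, false) == kMapCheckImmediate);
      return 0;
    }
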
 
 
-void StubCompiler::GenerateLoadField(JSObject* object,
-                                     JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+                                     Handle<JSObject> holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      int index,
-                                     String* name,
+                                     Handle<String> name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check the prototype chain.
-  Register reg =
-      CheckPrototypes(object, receiver, holder,
-                      scratch1, scratch2, scratch3, name, miss);
+  Register reg = CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Get the value from the properties.
   GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
@@ -1028,40 +1000,37 @@
 }
 
 
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
-                                                JSObject* holder,
-                                                Register receiver,
-                                                Register name_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                AccessorInfo* callback,
-                                                String* name,
-                                                Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Handle<AccessorInfo> callback,
+                                        Handle<String> name,
+                                        Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1,
-                      scratch2, scratch3, name, miss);
-
-  Handle<AccessorInfo> callback_handle(callback);
+  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+                                 scratch2, scratch3, name, miss);
 
   // Insert additional parameters into the stack frame above return address.
   ASSERT(!scratch3.is(reg));
   __ pop(scratch3);  // Get return address to place it below.
 
   __ push(receiver);  // receiver
-  __ mov(scratch2, Operand(esp));
+  __ mov(scratch2, esp);
   ASSERT(!scratch2.is(reg));
   __ push(reg);  // holder
   // Push data from AccessorInfo.
-  if (isolate()->heap()->InNewSpace(callback_handle->data())) {
-    __ mov(scratch1, Immediate(callback_handle));
+  if (isolate()->heap()->InNewSpace(callback->data())) {
+    __ mov(scratch1, Immediate(callback));
     __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));
   } else {
-    __ push(Immediate(Handle<Object>(callback_handle->data())));
+    __ push(Immediate(Handle<Object>(callback->data())));
   }
 
   // Save a pointer to where we pushed the arguments pointer.
@@ -1073,59 +1042,56 @@
 
   __ push(scratch3);  // Restore return address.
 
-  // Do call through the api.
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-
-  // 3 elements array for v8::Agruments::values_, handler for name and pointer
+  // 3-element array for v8::Arguments::values_, handler for name and pointer
   // to the values (it is considered a smi by the GC).
   const int kStackSpace = 5;
   const int kApiArgc = 2;
 
   __ PrepareCallApiFunction(kApiArgc);
   __ mov(ApiParameterOperand(0), ebx);  // name.
-  __ add(Operand(ebx), Immediate(kPointerSize));
+  __ add(ebx, Immediate(kPointerSize));
   __ mov(ApiParameterOperand(1), ebx);  // arguments pointer.
 
   // Emitting a stub call may try to allocate (if the code is not
   // already generated).  Do not allow the assembler to perform a
   // garbage collection but instead return the allocation failure
   // object.
-  return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  __ CallApiFunctionAndReturn(getter_address, kStackSpace);
 }
 
 
-void StubCompiler::GenerateLoadConstant(JSObject* object,
-                                        JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Object* value,
-                                        String* name,
+                                        Handle<JSFunction> value,
+                                        Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(object, receiver, holder,
-                  scratch1, scratch2, scratch3, name, miss);
+  CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ mov(eax, Handle<Object>(value));
+  __ LoadHeapObject(eax, value);
   __ ret(0);
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+                                           Handle<JSObject> interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           String* name,
+                                           Handle<String> name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1137,13 +1103,13 @@
   // and CALLBACKS, so inline only them, other cases may be added
   // later.
   bool compile_followup_inline = false;
-  if (lookup->IsProperty() && lookup->IsCacheable()) {
+  if (lookup->IsFound() && lookup->IsCacheable()) {
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo() &&
-        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
-      compile_followup_inline = true;
+               lookup->GetCallbackObject()->IsAccessorInfo()) {
+      compile_followup_inline =
+          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
     }
   }
 
@@ -1158,47 +1124,49 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    __ EnterInternalFrame();
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
 
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      // CALLBACKS case needs a receiver to be passed into C++ callback.
-      __ push(receiver);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        // CALLBACKS case needs a receiver to be passed into C++ callback.
+        __ push(receiver);
+      }
+      __ push(holder_reg);
+      __ push(name_reg);
+
+      // Invoke an interceptor.  Note: map checks from the receiver to
+      // the interceptor's holder have been compiled before (see a
+      // caller of this method).
+      CompileCallLoadPropertyWithInterceptor(masm(),
+                                             receiver,
+                                             holder_reg,
+                                             name_reg,
+                                             interceptor_holder);
+
+      // Check if the interceptor provided a value for the property.
+      // If so, return immediately.
+      Label interceptor_failed;
+      __ cmp(eax, factory()->no_interceptor_result_sentinel());
+      __ j(equal, &interceptor_failed);
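+      // (eax holds the interceptor's result here; the sentinel object
+      // means "no result", anything else is the property value.)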
+      frame_scope.GenerateLeaveFrame();
+      __ ret(0);
+
+      __ bind(&interceptor_failed);
+      __ pop(name_reg);
+      __ pop(holder_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        __ pop(receiver);
+      }
+
+      // Leave the internal frame.
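+      // (The actual leave-frame code is emitted by the FrameScope
+      // destructor when the scope closes below; GenerateLeaveFrame above
+      // only covers the early return.)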
     }
-    __ push(holder_reg);
-    __ push(name_reg);
-
-    // Invoke an interceptor.  Note: map checks from receiver to
-    // interceptor's holder has been compiled before (see a caller
-    // of this method.)
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           interceptor_holder);
-
-    // Check if interceptor provided a value for property.  If it's
-    // the case, return immediately.
-    Label interceptor_failed;
-    __ cmp(eax, factory()->no_interceptor_result_sentinel());
-    __ j(equal, &interceptor_failed);
-    __ LeaveInternalFrame();
-    __ ret(0);
-
-    __ bind(&interceptor_failed);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      __ pop(receiver);
-    }
-
-    __ LeaveInternalFrame();
 
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into holder_reg.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   lookup->holder(),
+                                   Handle<JSObject>(lookup->holder()),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1210,15 +1178,15 @@
       // We found FIELD property in prototype chain of interceptor's holder.
       // Retrieve a field from field's holder.
       GenerateFastPropertyLoad(masm(), eax, holder_reg,
-                               lookup->holder(), lookup->GetFieldIndex());
+                               Handle<JSObject>(lookup->holder()),
+                               lookup->GetFieldIndex());
       __ ret(0);
     } else {
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
-      ASSERT(callback != NULL);
+      Handle<AccessorInfo> callback(
+          AccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
@@ -1227,7 +1195,7 @@
       __ pop(scratch2);  // return address
       __ push(receiver);
       __ push(holder_reg);
-      __ mov(holder_reg, Immediate(Handle<AccessorInfo>(callback)));
+      __ mov(holder_reg, Immediate(callback));
       __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
       __ push(holder_reg);
       __ push(name_reg);
@@ -1257,17 +1225,17 @@
 }
 
 
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+    __ cmp(ecx, Immediate(name));
     __ j(not_equal, miss);
   }
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
-                                                   JSObject* holder,
-                                                   String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<String> name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1277,31 +1245,27 @@
   // Get the receiver from the stack.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  // If the object is the holder then we know that it's a global
-  // object which can only happen for contextual calls. In this case,
-  // the receiver cannot be a smi.
-  if (object != holder) {
-    __ JumpIfSmi(edx, miss);
-  }
 
   // Check that the maps haven't changed.
+  __ JumpIfSmi(edx, miss);
   CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss);
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Label* miss) {
   // Get the value from the cell.
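+  // (Operand::Cell embeds the cell's raw address, which the snapshot
+  // serializer cannot handle, so snapshot builds load the cell from a
+  // relocatable handle instead.)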
   if (Serializer::enabled()) {
-    __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(edi, Immediate(cell));
     __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
   } else {
-    __ mov(edi, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(edi, Operand::Cell(cell));
   }
 
   // Check that the cell contains the same function.
-  if (isolate()->heap()->InNewSpace(function)) {
+  if (isolate()->heap()->InNewSpace(*function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1314,31 +1278,26 @@
     // Check the shared function info. Make sure it hasn't changed.
     __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
            Immediate(Handle<SharedFunctionInfo>(function->shared())));
-    __ j(not_equal, miss);
   } else {
-    __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
-    __ j(not_equal, miss);
+    __ cmp(edi, Immediate(function));
   }
+  __ j(not_equal, miss);
 }
 
 
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+  Handle<Code> code =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                kind_,
-                                               extra_ic_state_);
-  Object* obj;
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  __ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
-  return obj;
+                                               extra_state_);
+  __ jmp(code, RelocInfo::CODE_TARGET);
 }
 
 
-MUST_USE_RESULT MaybeObject* CallStubCompiler::CompileCallField(
-    JSObject* object,
-    JSObject* holder,
-    int index,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
+                                                int index,
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1376,7 +1335,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
@@ -1384,19 +1343,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(FIELD, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1406,8 +1365,8 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) {
-    return isolate()->heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) {
+    return Handle<Code>::null();
   }
 
   Label miss;
@@ -1421,9 +1380,8 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(edx, &miss);
 
-  CheckPrototypes(JSObject::cast(object), edx,
-                  holder, ebx,
-                  eax, edi, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                  name, &miss);
 
   if (argc == 0) {
     // Noop, return the length.
@@ -1432,51 +1390,85 @@
   } else {
     Label call_builtin;
 
-    // Get the elements array of the object.
-    __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
-
-    // Check that the elements are in fast mode and writable.
-    __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
-           Immediate(factory()->fixed_array_map()));
-    __ j(not_equal, &call_builtin);
-
     if (argc == 1) {  // Otherwise fall through to call builtin.
-      Label exit, with_write_barrier, attempt_to_grow_elements;
+      Label attempt_to_grow_elements, with_write_barrier;
+
+      // Get the elements array of the object.
+      __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
+
+      // Check that the elements are in fast mode and writable.
+      __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+             Immediate(factory()->fixed_array_map()));
+      __ j(not_equal, &call_builtin);
 
       // Get the array's length into eax and calculate new length.
       __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
       STATIC_ASSERT(kSmiTagSize == 1);
       STATIC_ASSERT(kSmiTag == 0);
-      __ add(Operand(eax), Immediate(Smi::FromInt(argc)));
+      __ add(eax, Immediate(Smi::FromInt(argc)));
 
-      // Get the element's length into ecx.
-      __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+      // Get the elements' length into ecx.
+      __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
-      __ cmp(eax, Operand(ecx));
+      __ cmp(eax, ecx);
       __ j(greater, &attempt_to_grow_elements);
 
+      // Check if value is a smi.
+      __ mov(ecx, Operand(esp, argc * kPointerSize));
+      __ JumpIfNotSmi(ecx, &with_write_barrier);
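+      // (A smi is an immediate, not a heap pointer, so storing it needs
+      // no write barrier.)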
+
       // Save new length.
       __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
 
-      // Push the element.
-      __ lea(edx, FieldOperand(ebx,
-                               eax, times_half_pointer_size,
-                               FixedArray::kHeaderSize - argc * kPointerSize));
-      __ mov(ecx, Operand(esp, argc * kPointerSize));
-      __ mov(Operand(edx, 0), ecx);
+      // Store the value.
+      __ mov(FieldOperand(edi,
+                          eax,
+                          times_half_pointer_size,
+                          FixedArray::kHeaderSize - argc * kPointerSize),
+             ecx);
 
-      // Check if value is a smi.
-      __ JumpIfNotSmi(ecx, &with_write_barrier);
-
-      __ bind(&exit);
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&with_write_barrier);
 
-      __ InNewSpace(ebx, ecx, equal, &exit);
+      __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
 
-      __ RecordWriteHelper(ebx, edx, ecx);
+      if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
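+        // (The inlined transition below would not produce the
+        // --trace-elements-transitions output, so tracing builds take
+        // the builtin path instead.)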
+        Label fast_object, not_fast_object;
+        __ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
+        __ jmp(&fast_object);
+        // In the fast smi-only case, convert to fast object; otherwise bail out.
+        __ bind(&not_fast_object);
+        __ CheckFastSmiOnlyElements(ebx, &call_builtin);
+        // edi: elements array
+        // edx: receiver
+        // ebx: map
+        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                               FAST_ELEMENTS,
+                                               ebx,
+                                               edi,
+                                               &call_builtin);
+        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+        // Restore edi.
+        __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
+        __ bind(&fast_object);
+      } else {
+        __ CheckFastObjectElements(ebx, &call_builtin);
+      }
+
+      // Save new length.
+      __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+
+      // Store the value.
+      __ lea(edx, FieldOperand(edi,
+                               eax, times_half_pointer_size,
+                               FixedArray::kHeaderSize - argc * kPointerSize));
+      __ mov(Operand(edx, 0), ecx);
+
+      __ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                     OMIT_SMI_CHECK);
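+      // (OMIT_SMI_CHECK is safe here: smi values take the store above
+      // and return without ever reaching this barrier.)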
+
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&attempt_to_grow_elements);
@@ -1484,6 +1476,19 @@
         __ jmp(&call_builtin);
       }
 
+      __ mov(ebx, Operand(esp, argc * kPointerSize));
+      // Growing elements that are SMI-only requires special handling in case
+      // the new element is non-Smi. For now, delegate to the builtin.
+      Label no_fast_elements_check;
+      __ JumpIfSmi(ebx, &no_fast_elements_check);
+      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(ecx, &call_builtin, Label::kFar);
+      __ bind(&no_fast_elements_check);
+
+      // We could be lucky and the elements array could be at the top of
+      // new-space.  In this case we can just grow it in place by moving the
+      // allocation pointer up.
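+      // (Concretely: if the backing store ends exactly at
+      // new_space_allocation_top, bump the top pointer by
+      // kAllocationDelta words instead of copying the array.)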
+
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate());
       ExternalReference new_space_allocation_limit =
@@ -1494,36 +1499,46 @@
       __ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
 
       // Check if it's the end of elements.
-      __ lea(edx, FieldOperand(ebx,
+      __ lea(edx, FieldOperand(edi,
                                eax, times_half_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
-      __ cmp(edx, Operand(ecx));
+      __ cmp(edx, ecx);
       __ j(not_equal, &call_builtin);
-      __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize));
+      __ add(ecx, Immediate(kAllocationDelta * kPointerSize));
       __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
       __ j(above, &call_builtin);
 
       // We fit and could grow elements.
       __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
-      __ mov(ecx, Operand(esp, argc * kPointerSize));
 
       // Push the argument...
-      __ mov(Operand(edx, 0), ecx);
+      __ mov(Operand(edx, 0), ebx);
       // ... and fill the rest with holes.
       for (int i = 1; i < kAllocationDelta; i++) {
         __ mov(Operand(edx, i * kPointerSize),
                Immediate(factory()->the_hole_value()));
       }
 
+      // We know the elements array is in new space so we don't need the
+      // remembered set, but we just pushed a value onto it so we may have to
+      // tell the incremental marker to rescan the object that we just grew.  We
+      // don't need to worry about the holes because they are in old space and
+      // already marked black.
+      __ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
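+      // (Hence OMIT_REMEMBERED_SET: only the incremental-marking part
+      // of the write barrier is emitted.)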
+
       // Restore receiver to edx as finish sequence assumes it's here.
       __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
       // Increment element's and array's sizes.
-      __ add(FieldOperand(ebx, FixedArray::kLengthOffset),
+      __ add(FieldOperand(edi, FixedArray::kLengthOffset),
              Immediate(Smi::FromInt(kAllocationDelta)));
+
+      // NOTE: This only happens in new-space, where we don't
+      // care about the black-byte-count on pages. Otherwise we would
+      // have to update that count too if the object is black.
+
       __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
 
-      // Elements are in new space, so write barrier is not required.
       __ ret((argc + 1) * kPointerSize);
     }
 
@@ -1535,19 +1550,19 @@
   }
 
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                                   JSObject* holder,
-                                                   JSGlobalPropertyCell* cell,
-                                                   JSFunction* function,
-                                                   String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1557,8 +1572,8 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) {
-    return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) {
+    return Handle<Code>::null();
   }
 
   Label miss, return_undefined, call_builtin;
@@ -1571,9 +1586,8 @@
 
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(edx, &miss);
-  CheckPrototypes(JSObject::cast(object), edx,
-                  holder, ebx,
-                  eax, edi, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                  name, &miss);
 
   // Get the elements array of the object.
   __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1585,7 +1599,7 @@
 
   // Get the array's length into ecx and calculate new length.
   __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
-  __ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
+  __ sub(ecx, Immediate(Smi::FromInt(1)));
   __ j(negative, &return_undefined);
 
   // Get the last element.
@@ -1594,7 +1608,7 @@
   __ mov(eax, FieldOperand(ebx,
                            ecx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
-  __ cmp(Operand(eax), Immediate(factory()->the_hole_value()));
+  __ cmp(eax, Immediate(factory()->the_hole_value()));
   __ j(equal, &call_builtin);
 
   // Set the array's length.
@@ -1618,20 +1632,19 @@
       1);
 
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : function name
   //  -- esp[0]              : return address
@@ -1641,8 +1654,8 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) {
-    return isolate()->heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) {
+    return Handle<Code>::null();
   }
 
   const int argc = arguments().immediate();
@@ -1653,7 +1666,7 @@
   Label* index_out_of_range_label = &index_out_of_range;
 
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+      (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
@@ -1665,12 +1678,95 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             eax,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                  ebx, edx, edi, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  eax, holder, ebx, edx, edi, name, &miss);
 
   Register receiver = ebx;
   Register index = edi;
+  Register result = eax;
+  __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
+  if (argc > 0) {
+    __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
+  } else {
+    __ Set(index, Immediate(factory()->undefined_value()));
+  }
+
+  StringCharCodeAtGenerator generator(receiver,
+                                      index,
+                                      result,
+                                      &miss,  // When not a string.
+                                      &miss,  // When not a number.
+                                      index_out_of_range_label,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
+  __ ret((argc + 1) * kPointerSize);
+
+  StubRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ Set(eax, Immediate(factory()->nan_value()));
+    __ ret((argc + 1) * kPointerSize);
+  }
+
+  __ bind(&miss);
+  // Restore function name in ecx.
+  __ Set(ecx, Immediate(name));
+  __ bind(&name_miss);
+  GenerateMissBranch();
+
+  // Return the generated code.
+  return GetCode(function);
+}
+
+
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
+  // ----------- S t a t e -------------
+  //  -- ecx                 : function name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || !cell.is_null()) {
+    return Handle<Code>::null();
+  }
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            eax,
+                                            &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  eax, holder, ebx, edx, edi, name, &miss);
+
+  Register receiver = eax;
+  Register index = edi;
   Register scratch = edx;
   Register result = eax;
   __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
@@ -1680,107 +1776,19 @@
     __ Set(index, Immediate(factory()->undefined_value()));
   }
 
-  StringCharCodeAtGenerator char_code_at_generator(receiver,
-                                                   index,
-                                                   scratch,
-                                                   result,
-                                                   &miss,  // When not a string.
-                                                   &miss,  // When not a number.
-                                                   index_out_of_range_label,
-                                                   STRING_INDEX_IS_NUMBER);
-  char_code_at_generator.GenerateFast(masm());
+  StringCharAtGenerator generator(receiver,
+                                  index,
+                                  scratch,
+                                  result,
+                                  &miss,  // When not a string.
+                                  &miss,  // When not a number.
+                                  index_out_of_range_label,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_code_at_generator.GenerateSlow(masm(), call_helper);
-
-  if (index_out_of_range.is_linked()) {
-    __ bind(&index_out_of_range);
-    __ Set(eax, Immediate(factory()->nan_value()));
-    __ ret((argc + 1) * kPointerSize);
-  }
-
-  __ bind(&miss);
-  // Restore function name in ecx.
-  __ Set(ecx, Immediate(Handle<String>(name)));
-  __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
-
-  // Return the generated code.
-  return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
-  // ----------- S t a t e -------------
-  //  -- ecx                 : function name
-  //  -- esp[0]              : return address
-  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
-  //  -- ...
-  //  -- esp[(argc + 1) * 4] : receiver
-  // -----------------------------------
-
-  // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) {
-    return heap()->undefined_value();
-  }
-
-  const int argc = arguments().immediate();
-
-  Label miss;
-  Label name_miss;
-  Label index_out_of_range;
-  Label* index_out_of_range_label = &index_out_of_range;
-
-  if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
-       DEFAULT_STRING_STUB)) {
-    index_out_of_range_label = &miss;
-  }
-
-  GenerateNameCheck(name, &name_miss);
-
-  // Check that the maps starting from the prototype haven't changed.
-  GenerateDirectLoadGlobalFunctionPrototype(masm(),
-                                            Context::STRING_FUNCTION_INDEX,
-                                            eax,
-                                            &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                  ebx, edx, edi, name, &miss);
-
-  Register receiver = eax;
-  Register index = edi;
-  Register scratch1 = ebx;
-  Register scratch2 = edx;
-  Register result = eax;
-  __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
-  if (argc > 0) {
-    __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
-  } else {
-    __ Set(index, Immediate(factory()->undefined_value()));
-  }
-
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch1,
-                                          scratch2,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          index_out_of_range_label,
-                                          STRING_INDEX_IS_NUMBER);
-  char_at_generator.GenerateFast(masm());
-  __ ret((argc + 1) * kPointerSize);
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1790,22 +1798,21 @@
 
   __ bind(&miss);
   // Restore function name in ecx.
-  __ Set(ecx, Immediate(Handle<String>(name)));
+  __ Set(ecx, Immediate(name));
   __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : function name
   //  -- esp[0]              : return address
@@ -1819,23 +1826,22 @@
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) {
-    return isolate()->heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ mov(edx, Operand(esp, 2 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(edx, &miss);
-
-    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -1851,17 +1857,17 @@
   // Convert the smi code to uint16.
   __ and_(code, Immediate(Smi::FromInt(0xffff)));
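+  // (Smi::FromInt(0xffff) is the smi-tagged mask; and-ing keeps the low
+  // 16 payload bits and leaves the smi tag intact.)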
 
-  StringCharFromCodeGenerator char_from_code_generator(code, eax);
-  char_from_code_generator.GenerateFast(masm());
+  StringCharFromCodeGenerator generator(code, eax);
+  generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_from_code_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1869,19 +1875,19 @@
 
   __ bind(&miss);
   // ecx: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1891,7 +1897,7 @@
   // -----------------------------------
 
   if (!CpuFeatures::IsSupported(SSE2)) {
-    return isolate()->heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   CpuFeatures::Scope use_sse2(SSE2);
@@ -1901,23 +1907,24 @@
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) {
-    return isolate()->heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ mov(edx, Operand(esp, 2 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(edx, &miss);
 
-    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -1998,19 +2005,19 @@
 
   __ bind(&miss);
   // ecx: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
-                                                  JSObject* holder,
-                                                  JSGlobalPropertyCell* cell,
-                                                  JSFunction* function,
-                                                  String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2024,23 +2031,24 @@
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) {
-    return isolate()->heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ mov(edx, Operand(esp, 2 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(edx, &miss);
 
-    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2058,10 +2066,10 @@
   __ sar(ebx, kBitsPerInt - 1);
 
   // Do bitwise not or do nothing depending on ebx.
-  __ xor_(eax, Operand(ebx));
+  __ xor_(eax, ebx);
 
   // Add 1 or do nothing depending on ebx.
-  __ sub(eax, Operand(ebx));
+  __ sub(eax, ebx);
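+  // (After the arithmetic shift ebx is 0 for a non-negative smi and -1
+  // for a negative one, so (eax ^ ebx) - ebx computes |eax| without a
+  // branch.)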
 
   // If the result is still negative, go to the slow case.
   // This only happens for the most negative smi.
@@ -2102,30 +2110,29 @@
 
   __ bind(&miss);
   // ecx: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to global receiver.
-  if (object->IsGlobalObject()) return heap()->undefined_value();
-  if (cell != NULL) return heap()->undefined_value();
-  if (!object->IsJSObject()) return heap()->undefined_value();
+  if (object->IsGlobalObject()) return Handle<Code>::null();
+  if (!cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSObject()) return Handle<Code>::null();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-            JSObject::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+      Handle<JSObject>::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
 
   Label miss, miss_before_stack_reserved;
 
@@ -2144,11 +2151,11 @@
 
   // Allocate space for v8::Arguments implicit values. Must be initialized
   // before calling any runtime function.
-  __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
+  __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(JSObject::cast(object), edx, holder,
-                  ebx, eax, edi, name, depth, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                  name, depth, &miss);
 
   // Move the return address on top of the stack.
   __ mov(eax, Operand(esp, 3 * kPointerSize));
@@ -2156,27 +2163,24 @@
 
   // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
   // duplicate of return address and will be overwritten.
-  MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
-  if (result->IsFailure()) return result;
+  GenerateFastApiCall(masm(), optimization, argc);
 
   __ bind(&miss);
-  __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
+  __ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
 
   __ bind(&miss_before_stack_reserved);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(
-    Object* object,
-    JSObject* holder,
-    JSFunction* function,
-    String* name,
-    CheckType check) {
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> function,
+                                                   Handle<String> name,
+                                                   CheckType check) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2186,16 +2190,14 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, NULL, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -2210,15 +2212,13 @@
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
-  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(isolate()->counters()->call_const(), 1);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), edx, holder,
-                      ebx, eax, edi, name, &miss);
+      CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax,
+                      edi, name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2229,28 +2229,25 @@
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      } else {
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         // Check that the object is a string or a symbol.
         __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
         __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                        ebx, edx, edi, name, &miss);
-      }
-      break;
-
-    case NUMBER_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            eax, holder, ebx, edx, edi, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case NUMBER_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(edx, &fast);
@@ -2260,18 +2257,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                        ebx, edx, edi, name, &miss);
-      }
-      break;
-    }
-
-    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            eax, holder, ebx, edx, edi, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case BOOLEAN_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a boolean.
         __ cmp(edx, factory()->true_value());
@@ -2282,17 +2279,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                        ebx, edx, edi, name, &miss);
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            eax, holder, ebx, edx, edi, name, &miss);
+      } else {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
       }
       break;
-    }
-
-    default:
-      UNREACHABLE();
   }
 
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -2300,17 +2298,16 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2325,24 +2322,15 @@
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), ecx, extra_ic_state_);
-  MaybeObject* result = compiler.Compile(masm(),
-                                         object,
-                                         holder,
-                                         name,
-                                         &lookup,
-                                         edx,
-                                         ebx,
-                                         edi,
-                                         eax,
-                                         &miss);
-  if (result->IsFailure()) return result;
+  CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state_);
+  compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
+                   &miss);
 
   // Restore receiver.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -2361,7 +2349,7 @@
 
   // Invoke the function.
   __ mov(edi, eax);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
@@ -2369,20 +2357,19 @@
 
   // Handle load cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(
-    JSObject* object,
-    GlobalObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2392,23 +2379,17 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, cell, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
   // Patch the receiver on the stack with the global proxy.
@@ -2417,46 +2398,37 @@
     __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
   }
 
-  // Setup the context (function already in edi).
+  // Set up the context (function already in edi).
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1);
-  ASSERT(function->is_compiled());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  if (V8::UseCrankshaft()) {
-    // TODO(kasperl): For now, we always call indirectly through the
-    // code field in the function to allow recompilation to take effect
-    // without changing any of the call sites.
-    __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-                  expected, arguments(), JUMP_FUNCTION,
-                  NullCallWrapper(), call_kind);
-  } else {
-    Handle<Code> code(function->code());
-    __ InvokeCode(code, expected, arguments(),
-                  RelocInfo::CODE_TARGET, JUMP_FUNCTION,
-                  NullCallWrapper(), call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+                expected, arguments(), JUMP_FUNCTION,
+                NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(NORMAL, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                   int index,
-                                                  Map* transition,
-                                                  String* name) {
+                                                  Handle<Map> transition,
+                                                  Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
@@ -2466,27 +2438,23 @@
   Label miss;
 
   // Generate store field code.  Trashes the name register.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     edx, ecx, ebx,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, edx, ecx, ebx, &miss);
 
   // Handle store cache miss.
   __ bind(&miss);
-  __ mov(ecx, Immediate(Handle<String>(name)));  // restore name
+  __ mov(ecx, Immediate(name));  // restore name
   Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
-                                                     AccessorInfo* callback,
-                                                     String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+    Handle<JSObject> object,
+    Handle<AccessorInfo> callback,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
@@ -2495,13 +2463,9 @@
   // -----------------------------------
   Label miss;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(edx, &miss);
-
   // Check that the map of the object hasn't changed.
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(object->map())));
-  __ j(not_equal, &miss);
+  __ CheckMap(edx, Handle<Map>(object->map()),
+              &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
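+  // (CheckMap with DO_SMI_CHECK folds the removed smi test into the map
+  // check; ALLOW_ELEMENT_TRANSITION_MAPS also accepts maps that differ
+  // from object->map() only in their elements kind.)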
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -2514,7 +2478,7 @@
 
   __ pop(ebx);  // remove the return address
   __ push(edx);  // receiver
-  __ push(Immediate(Handle<AccessorInfo>(callback)));  // callback info
+  __ push(Immediate(callback));  // callback info
   __ push(ecx);  // name
   __ push(eax);  // value
   __ push(ebx);  // restore return address
@@ -2534,8 +2498,9 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
-                                                        String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+    Handle<JSObject> receiver,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
@@ -2544,13 +2509,9 @@
   // -----------------------------------
   Label miss;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(edx, &miss);
-
   // Check that the map of the object hasn't changed.
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(receiver->map())));
-  __ j(not_equal, &miss);
+  __ CheckMap(edx, Handle<Map>(receiver->map()),
+              &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform global security token check if needed.
   if (receiver->IsJSGlobalProxy()) {
@@ -2583,9 +2544,10 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
-                                                   JSGlobalPropertyCell* cell,
-                                                   String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+    Handle<GlobalObject> object,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
@@ -2599,13 +2561,9 @@
          Immediate(Handle<Map>(object->map())));
   __ j(not_equal, &miss);
 
-
   // Compute the cell operand to use.
-  Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
-  if (Serializer::enabled()) {
-    __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-    cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
-  }
+  __ mov(ebx, Immediate(cell));
+  Operand cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
 
   // Check that the value in the cell is not the hole. If it is, this
   // cell could have been deleted and reintroducing the global needs
@@ -2616,6 +2574,7 @@
 
   // Store the value in the cell.
   __ mov(cell_operand, eax);
+  // No write barrier here, because cells are always rescanned.
 
   // Return the value (register eax).
   Counters* counters = isolate()->counters();
@@ -2633,10 +2592,10 @@
 }
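
The new "no write barrier" comment in CompileStoreGlobal is worth unpacking. A hedged sketch of the decision it encodes, with invented names (the real barrier logic lives in the macro assembler):

```cpp
#include <cstdio>

struct HeapObject { bool in_new_space; };

enum class BarrierAction { kRecordSlot, kSkip };

// Decide whether a pointer store needs a write-barrier record. Slots the
// collector rescans unconditionally, like global property cells here, can
// always skip it.
BarrierAction OnPointerStore(const HeapObject& holder, const HeapObject& value,
                             bool slot_is_always_rescanned) {
  if (slot_is_always_rescanned) return BarrierAction::kSkip;
  // Otherwise an old-to-new pointer must enter the remembered set so the
  // scavenger can find it without scanning all of old space.
  if (!holder.in_new_space && value.in_new_space) {
    return BarrierAction::kRecordSlot;
  }
  return BarrierAction::kSkip;
}

int main() {
  HeapObject cell = {false};  // cells live in old space
  HeapObject value = {true};  // freshly allocated value
  bool skipped = OnPointerStore(cell, value, true) == BarrierAction::kSkip;
  std::printf("cell store skips the barrier: %s\n", skipped ? "yes" : "no");
}
```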
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                        int index,
-                                                       Map* transition,
-                                                       String* name) {
+                                                       Handle<Map> transition,
+                                                       Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -2649,16 +2608,11 @@
   __ IncrementCounter(counters->keyed_store_field(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+  __ cmp(ecx, Immediate(name));
   __ j(not_equal, &miss);
 
   // Generate store field code.  Trashes the name register.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     edx, ecx, ebx,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, edx, ecx, ebx, &miss);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -2667,39 +2621,37 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  MaybeObject* maybe_stub =
-      KeyedStoreElementStub(is_jsarray, elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(edx,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub =
+      KeyedStoreElementStub(is_jsarray, elements_kind, grow_mode_).GetCode();
+
+  __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
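
CompileStoreElement now fetches a ready Handle<Code> from KeyedStoreElementStub and emits a single-map dispatch. Roughly, in C++ terms (assumed semantics of DispatchMap, not the emitted assembly):

```cpp
#include <cstdio>

struct Map { int elements_kind; };
struct HeapObject { Map* map; };

const char* FastStoreStub() { return "elements-kind-specific store"; }
const char* Miss() { return "KeyedStoreIC_Miss"; }

// Assumed shape of DispatchMap: smi check, one map compare, then either
// tail-jump to the specialized stub or fall through to the miss handler.
const char* DispatchMap(bool receiver_is_smi, HeapObject* receiver,
                        Map* expected_map, const char* (*stub)()) {
  if (receiver_is_smi) return Miss();  // DO_SMI_CHECK
  if (receiver->map != expected_map) return Miss();
  return stub();
}

int main() {
  Map fast_elements = {2};
  HeapObject receiver = {&fast_elements};
  std::printf("%s\n",
              DispatchMap(false, &receiver, &fast_elements, FastStoreStub));
}
```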
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -2707,28 +2659,33 @@
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
-  __ JumpIfSmi(edx, &miss);
-
-  Register map_reg = ebx;
-  __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
-  int receiver_count = receiver_maps->length();
-  for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map(receiver_maps->at(current));
-    __ cmp(map_reg, map);
-    __ j(equal, Handle<Code>(handler_ics->at(current)));
+  __ JumpIfSmi(edx, &miss, Label::kNear);
+  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+  // edi: receiver->map().
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    __ cmp(edi, receiver_maps->at(i));
+    if (transitioned_maps->at(i).is_null()) {
+      __ j(equal, handler_stubs->at(i));
+    } else {
+      Label next_map;
+      __ j(not_equal, &next_map, Label::kNear);
+      __ mov(ebx, Immediate(transitioned_maps->at(i)));
+      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+      __ bind(&next_map);
+    }
   }
   __ bind(&miss);
   Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ jmp(miss_ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
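
The renamed CompileStorePolymorphic adds a third parallel list: for each receiver map, an optional transitioned map. A loose C++ model of the emitted dispatch (illustrative names only):

```cpp
#include <cstdio>
#include <vector>

struct Map {};
struct Handler { const char* name; };

struct PolymorphicEntry {
  Map* receiver_map;
  Handler* handler;
  Map* transitioned_map;  // null when no elements-kind transition is needed
};

// On a match with a transition, the target map is materialized first
// (the "mov ebx, Immediate(transitioned_map)" above) so the handler can
// transition the receiver and then perform the store.
const char* DispatchStore(Map* actual,
                          const std::vector<PolymorphicEntry>& entries,
                          Map** transition_out) {
  for (const PolymorphicEntry& e : entries) {
    if (actual != e.receiver_map) continue;
    *transition_out = e.transitioned_map;
    return e.handler->name;
  }
  return "KeyedStoreIC_Miss";
}

int main() {
  Map smi_only_map, fast_map;
  Handler transitioning_store = {"store with transition"};
  std::vector<PolymorphicEntry> entries = {
      {&smi_only_map, &transitioning_store, &fast_map}};
  Map* transition = nullptr;
  std::printf("%s\n", DispatchStore(&smi_only_map, entries, &transition));
}
```

Note the register juggling this implies: the transition target occupies ebx on entry to the handler stub, which is why the receiver map is held in edi here rather than ebx as in the load variant below.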
 
 
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
-                                                      JSObject* object,
-                                                      JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+                                                      Handle<JSObject> object,
+                                                      Handle<JSObject> last) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2749,15 +2706,8 @@
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
-    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
-                                                  GlobalObject::cast(last),
-                                                  name,
-                                                  edx,
-                                                  &miss);
-    if (cell->IsFailure()) {
-      miss.Unuse();
-      return cell;
-    }
+    GenerateCheckPropertyCell(
+        masm(), Handle<GlobalObject>::cast(last), name, edx, &miss);
   }
 
   // Return undefined if maps of the full prototype chain are still the
@@ -2769,14 +2719,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NONEXISTENT, isolate()->heap()->empty_string());
+  return GetCode(NONEXISTENT, factory()->empty_string());
 }
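
CompileLoadNonexistent caches a negative result. A minimal sketch of the invariant it relies on, with invented types:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

struct Map { int id; };

// Returns true when every map on the receiver's prototype chain still
// matches the maps baked into the stub at compile time.
bool ChainUnchanged(const std::vector<Map*>& chain,
                    const std::vector<Map*>& expected) {
  if (chain.size() != expected.size()) return false;
  for (std::size_t i = 0; i < chain.size(); ++i) {
    if (chain[i] != expected[i]) return false;  // map changed: miss
  }
  return true;
}

int main() {
  Map a{1}, b{2};
  std::vector<Map*> chain = {&a, &b}, expected = {&a, &b};
  bool cell_empty = true;  // analogue of GenerateCheckPropertyCell passing
  if (ChainUnchanged(chain, expected) && cell_empty) {
    std::printf("load of nonexistent property: undefined, no runtime call\n");
  }
}
```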
 
 
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2793,10 +2743,11 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2804,13 +2755,8 @@
   // -----------------------------------
   Label miss;
 
-  MaybeObject* result = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
-                                             edi, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi, callback,
+                       name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2819,10 +2765,10 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* value,
-                                                   String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> value,
+                                                   Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2839,9 +2785,9 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2849,21 +2795,13 @@
   // -----------------------------------
   Label miss;
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // TODO(368): Compile in the whole chain: all the interceptors in
   // prototypes and ultimate answer.
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          eax,
-                          ecx,
-                          edx,
-                          ebx,
-                          edi,
-                          name,
-                          &miss);
+  GenerateLoadInterceptor(receiver, holder, &lookup, eax, ecx, edx, ebx, edi,
+                          name, &miss);
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2873,11 +2811,12 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 String* name,
-                                                 bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name,
+    bool is_dont_delete) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2885,22 +2824,16 @@
   // -----------------------------------
   Label miss;
 
-  // If the object is the holder then we know that it's a global
-  // object which can only happen for contextual loads. In this case,
-  // the receiver cannot be a smi.
-  if (object != holder) {
-    __ JumpIfSmi(eax, &miss);
-  }
-
   // Check that the maps haven't changed.
+  __ JumpIfSmi(eax, &miss);
   CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
 
   // Get the value from the cell.
   if (Serializer::enabled()) {
-    __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(ebx, Immediate(cell));
     __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
   } else {
-    __ mov(ebx, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(ebx, Operand::Cell(cell));
   }
 
   // Check for deleted property if property can actually be deleted.
@@ -2926,9 +2859,9 @@
 }
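
The Serializer::enabled() branch kept here is about snapshot-safe addressing: code destined for the startup snapshot may not embed an absolute cell address in a memory operand. A toy illustration of the two access shapes (names assumed):

```cpp
#include <cstdint>
#include <cstdio>

struct Cell { std::uintptr_t value; };

// Non-snapshot code may reference the cell's value slot by absolute
// address, like Operand::Cell(cell) above.
std::uintptr_t LoadDirect(Cell* cell) {
  return cell->value;
}

// Snapshot-safe shape: first materialize the cell pointer through a
// relocatable immediate, then read its value field relative to that
// register (mov ebx, Immediate(cell); mov ebx, FieldOperand(ebx, ...)).
std::uintptr_t LoadRelocatable(Cell* cell) {
  Cell* reg = cell;  // the immediate gets fixed up at deserialization time
  return reg->value;
}

int main() {
  Cell cell{42};
  std::printf("%lu %lu\n",
              static_cast<unsigned long>(LoadDirect(&cell)),
              static_cast<unsigned long>(LoadRelocatable(&cell)));
}
```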
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
-                                                     JSObject* receiver,
-                                                     JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+                                                     Handle<JSObject> receiver,
+                                                     Handle<JSObject> holder,
                                                      int index) {
   // ----------- S t a t e -------------
   //  -- eax    : key
@@ -2941,7 +2874,7 @@
   __ IncrementCounter(counters->keyed_load_field(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
   GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
@@ -2955,11 +2888,11 @@
 }
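
Every keyed stub in this family opens with the same name guard. Modeled in plain C++ (a sketch, not the stub's actual control flow):

```cpp
#include <cstdio>
#include <cstring>

// A keyed stub is compiled against one specific property name; any other
// key falls through to the miss handler and, eventually, the generic stub.
const char* KeyedLoadFieldStub(const char* key, const char* baked_in_name,
                               int field_value) {
  static char result[32];
  if (std::strcmp(key, baked_in_name) != 0) return "miss";
  std::snprintf(result, sizeof result, "field value %d", field_value);
  return result;
}

int main() {
  std::printf("%s\n", KeyedLoadFieldStub("length", "length", 7));  // hit
  std::printf("%s\n", KeyedLoadFieldStub("width", "length", 7));   // miss
}
```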
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
-    String* name,
-    JSObject* receiver,
-    JSObject* holder,
-    AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -2971,18 +2904,13 @@
   __ IncrementCounter(counters->keyed_load_callback(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
-  MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
-                                             ecx, edi, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
+  GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi, callback,
+                       name, &miss);
 
   __ bind(&miss);
-
   __ DecrementCounter(counters->keyed_load_callback(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -2991,10 +2919,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
-                                                        JSObject* receiver,
-                                                        JSObject* holder,
-                                                        Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<JSFunction> value) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3006,11 +2935,11 @@
   __ IncrementCounter(counters->keyed_load_constant_function(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
-  GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
-                       value, name, &miss);
+  GenerateLoadConstant(
+      receiver, holder, edx, ebx, ecx, edi, value, name, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_constant_function(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3020,9 +2949,10 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                           JSObject* holder,
-                                                           String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3034,21 +2964,13 @@
   __ IncrementCounter(counters->keyed_load_interceptor(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          edx,
-                          eax,
-                          ecx,
-                          ebx,
-                          edi,
-                          name,
-                          &miss);
+  GenerateLoadInterceptor(receiver, holder, &lookup, edx, eax, ecx, ebx, edi,
+                          name, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_interceptor(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3058,7 +2980,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3070,7 +2993,7 @@
   __ IncrementCounter(counters->keyed_load_array_length(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
   GenerateLoadArrayLength(masm(), edx, ecx, &miss);
@@ -3083,7 +3006,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3095,7 +3019,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
   GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
@@ -3108,7 +3032,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3120,7 +3045,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
   GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
@@ -3133,31 +3058,29 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Code* stub;
+
   ElementsKind elements_kind = receiver_map->elements_kind();
-  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(edx,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+  __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
 
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3170,22 +3093,22 @@
   __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
   int receiver_count = receiver_maps->length();
   for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map(receiver_maps->at(current));
-    __ cmp(map_reg, map);
-    __ j(equal, Handle<Code>(handler_ics->at(current)));
+    __ cmp(map_reg, receiver_maps->at(current));
+    __ j(equal, handler_ics->at(current));
   }
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
 
 // Specialized stub for constructing objects from functions which have only
 // simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+    Handle<JSFunction> function) {
   // ----------- S t a t e -------------
   //  -- eax : argc
   //  -- edi : constructor
@@ -3224,12 +3147,8 @@
   // ebx: initial map
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
   __ shl(ecx, kPointerSizeLog2);
-  __ AllocateInNewSpace(ecx,
-                        edx,
-                        ecx,
-                        no_reg,
-                        &generic_stub_call,
-                        NO_ALLOCATION_FLAGS);
+  __ AllocateInNewSpace(ecx, edx, ecx, no_reg,
+                        &generic_stub_call, NO_ALLOCATION_FLAGS);
 
   // Allocated the JSObject, now initialize the fields and add the heap tag.
   // ebx: initial map
@@ -3260,7 +3179,7 @@
   // edi: undefined
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  SharedFunctionInfo* shared = function->shared();
+  Handle<SharedFunctionInfo> shared(function->shared());
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       // Check if the argument assigned to the property is actually passed.
@@ -3298,7 +3217,7 @@
   // Move argc to ebx and retrieve and tag the JSObject to return.
   __ mov(ebx, eax);
   __ pop(eax);
-  __ or_(Operand(eax), Immediate(kHeapObjectTag));
+  __ or_(eax, Immediate(kHeapObjectTag));
 
   // Remove caller arguments and receiver from the stack and return.
   __ pop(ecx);
@@ -3312,9 +3231,8 @@
   // Jump to the generic stub in case the specialized code cannot handle the
   // construction.
   __ bind(&generic_stub_call);
-  Handle<Code> generic_construct_stub =
-      isolate()->builtins()->JSConstructStubGeneric();
-  __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+  Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
+  __ jmp(code, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
   return GetCode();
@@ -3506,8 +3424,7 @@
   // If we fail allocation of the HeapNumber, we still have a value on
   // top of the FPU stack. Remove it.
   __ bind(&failed_allocation);
-  __ ffree();
-  __ fincstp();
+  __ fstp(0);
   // Fall through to slow case.
 
   // Slow case: Jump to runtime.
@@ -3679,10 +3596,10 @@
             // If the value is NaN or +/-infinity, the result is 0x80000000,
             // which is automatically zero when taken mod 2^n, n < 32.
             __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-            __ sub(Operand(esp), Immediate(2 * kPointerSize));
+            __ sub(esp, Immediate(2 * kPointerSize));
             __ fisttp_d(Operand(esp, 0));
             __ pop(ebx);
-            __ add(Operand(esp), Immediate(kPointerSize));
+            __ add(esp, Immediate(kPointerSize));
           } else {
             ASSERT(CpuFeatures::IsSupported(SSE2));
             CpuFeatures::Scope scope(SSE2);
@@ -3824,8 +3741,7 @@
   // A value was pushed on the floating point stack before the allocation, if
   // the allocation fails it needs to be removed.
   if (!CpuFeatures::IsSupported(SSE2)) {
-    __ ffree();
-    __ fincstp();
+    __ fstp(0);
   }
   Handle<Code> slow_ic =
       masm->isolate()->builtins()->KeyedLoadIC_Slow();
@@ -3838,15 +3754,19 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
-                                                      bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+    MacroAssembler* masm,
+    bool is_js_array,
+    ElementsKind elements_kind,
+    KeyedAccessGrowMode grow_mode) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label miss_force_generic;
+  Label miss_force_generic, grow, slow, transition_elements_kind;
+  Label check_capacity, prepare_slow, finish_store, commit_backing_store;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller to not be a smi.
@@ -3854,27 +3774,52 @@
   // Check that the key is a smi.
   __ JumpIfNotSmi(ecx, &miss_force_generic);
 
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ JumpIfNotSmi(eax, &transition_elements_kind);
+  }
+
   // Get the elements array and make sure it is a fast element array, not 'cow'.
   __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
-         Immediate(masm->isolate()->factory()->fixed_array_map()));
-  __ j(not_equal, &miss_force_generic);
-
   if (is_js_array) {
     // Check that the key is within bounds.
     __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // smis.
-    __ j(above_equal, &miss_force_generic);
+    if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+      __ j(above_equal, &grow);
+    } else {
+      __ j(above_equal, &miss_force_generic);
+    }
   } else {
     // Check that the key is within bounds.
     __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // smis.
     __ j(above_equal, &miss_force_generic);
   }
 
-  // Do the store and update the write barrier. Make sure to preserve
-  // the value in register eax.
-  __ mov(edx, Operand(eax));
-  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
-  __ RecordWrite(edi, 0, edx, ecx);
+  __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+         Immediate(masm->isolate()->factory()->fixed_array_map()));
+  __ j(not_equal, &miss_force_generic);
+
+  __ bind(&finish_store);
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    // ecx is a smi; use times_half_pointer_size instead of
+    // times_pointer_size.
+    __ mov(FieldOperand(edi,
+                        ecx,
+                        times_half_pointer_size,
+                        FixedArray::kHeaderSize), eax);
+  } else {
+    ASSERT(elements_kind == FAST_ELEMENTS);
+    // Do the store and update the write barrier.
+    // ecx is a smi; use times_half_pointer_size instead of
+    // times_pointer_size.
+    __ lea(ecx, FieldOperand(edi,
+                             ecx,
+                             times_half_pointer_size,
+                             FixedArray::kHeaderSize));
+    __ mov(Operand(ecx, 0), eax);
+    // Make sure to preserve the value in register eax.
+    __ mov(ebx, eax);
+    __ RecordWrite(edi, ecx, ebx, kDontSaveFPRegs);
+  }
 
   // Done.
   __ ret(0);
@@ -3884,20 +3829,99 @@
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+  // Handle transition to other elements kinds without using the generic stub.
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    // Handle transition requiring the array to grow.
+    __ bind(&grow);
+
+    // Make sure the array is only growing by a single element; anything else
+    // must be handled by the runtime. Flags are already set by previous
+    // compare.
+    __ j(not_equal, &miss_force_generic);
+
+    // Check for the empty array, and preallocate a small backing store if
+    // possible.
+    __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+    __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+    __ j(not_equal, &check_capacity);
+
+    int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+    __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
+    // Restore the key, which is known to be the array length.
+    __ mov(ecx, Immediate(0));
+
+    // eax: value
+    // ecx: key
+    // edx: receiver
+    // edi: elements
+    // Initialize the new FixedArray backing store: set its map and length,
+    // then fill the unused slots with the hole value.
+    __ mov(FieldOperand(edi, JSObject::kMapOffset),
+           Immediate(masm->isolate()->factory()->fixed_array_map()));
+    __ mov(FieldOperand(edi, FixedArray::kLengthOffset),
+           Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+    __ mov(ebx, Immediate(masm->isolate()->factory()->the_hole_value()));
+    for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+      __ mov(FieldOperand(edi, FixedArray::SizeFor(i)), ebx);
+    }
+
+    // Store the element at index zero.
+    __ mov(FieldOperand(edi, FixedArray::SizeFor(0)), eax);
+
+    // Install the new backing store in the JSArray.
+    __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
+    __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
+                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+    // Increment the length of the array.
+    __ mov(FieldOperand(edx, JSArray::kLengthOffset),
+           Immediate(Smi::FromInt(1)));
+    __ ret(0);
+
+    __ bind(&check_capacity);
+    __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+           Immediate(masm->isolate()->factory()->fixed_cow_array_map()));
+    __ j(equal, &miss_force_generic);
+
+    // eax: value
+    // ecx: key
+    // edx: receiver
+    // edi: elements
+    // Make sure that the backing store can hold additional elements.
+    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+    __ j(above_equal, &slow);
+
+    // Grow the array and finish the store.
+    __ add(FieldOperand(edx, JSArray::kLengthOffset),
+           Immediate(Smi::FromInt(1)));
+    __ jmp(&finish_store);
+
+    __ bind(&prepare_slow);
+    // Restore the key, which is known to be the array length.
+    __ mov(ecx, Immediate(0));
+
+    __ bind(&slow);
+    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+    __ jmp(ic_slow, RelocInfo::CODE_TARGET);
+  }
 }
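
The new grow path above deserves a compact restatement. A hedged C++ model of its policy, where kPreallocated stands in for JSArray::kPreallocatedArrayElements and -1 for the hole:

```cpp
#include <cstdio>
#include <vector>

constexpr int kPreallocated = 4;  // stands in for kPreallocatedArrayElements
constexpr int kHole = -1;         // stands in for the_hole_value

// Returns false when the store must be punted to the runtime: growth by
// more than one element, or a full backing store.
bool StoreWithGrowth(std::vector<int>* backing, int* length,
                     int key, int value) {
  if (key > *length) return false;  // growing by more than one: not handled
  if (key == *length) {
    if (backing->empty()) {
      backing->assign(kPreallocated, kHole);  // preallocate a small store
    } else if (key >= static_cast<int>(backing->size())) {
      return false;  // capacity exhausted: slow path reallocates
    }
    ++*length;  // increment the JSArray length
  }
  (*backing)[key] = value;  // the finish_store label above
  return true;
}

int main() {
  std::vector<int> backing;
  int length = 0;
  StoreWithGrowth(&backing, &length, 0, 10);  // empty array: preallocates
  StoreWithGrowth(&backing, &length, 1, 20);  // in-capacity growth
  std::printf("length=%d capacity=%zu\n", length, backing.size());
}
```

Keeping the single-element restriction lets the stub reuse the bounds-check flags that are already set, which is why the grow label is reached with a plain conditional jump.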
 
 
 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     MacroAssembler* masm,
-    bool is_js_array) {
+    bool is_js_array,
+    KeyedAccessGrowMode grow_mode) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label miss_force_generic, smi_value, is_nan, maybe_nan;
-  Label have_double_value, not_nan;
+  Label miss_force_generic, transition_elements_kind, grow, slow;
+  Label check_capacity, prepare_slow, finish_store, commit_backing_store;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller to not be a smi.
@@ -3912,65 +3936,20 @@
   if (is_js_array) {
     // Check that the key is within bounds.
     __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // smis.
+    if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+      __ j(above_equal, &grow);
+    } else {
+      __ j(above_equal, &miss_force_generic);
+    }
   } else {
     // Check that the key is within bounds.
     __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // smis.
-  }
-  __ j(above_equal, &miss_force_generic);
-
-  __ JumpIfSmi(eax, &smi_value, Label::kNear);
-
-  __ CheckMap(eax,
-              masm->isolate()->factory()->heap_number_map(),
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
-
-  // Double value, canonicalize NaN.
-  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(eax, offset), Immediate(kNaNOrInfinityLowerBoundUpper32));
-  __ j(greater_equal, &maybe_nan, Label::kNear);
-
-  __ bind(&not_nan);
-  ExternalReference canonical_nan_reference =
-      ExternalReference::address_of_canonical_non_hole_nan();
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
-    __ bind(&have_double_value);
-    __ movdbl(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize),
-              xmm0);
-    __ ret(0);
-  } else {
-    __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-    __ bind(&have_double_value);
-    __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
-    __ ret(0);
+    __ j(above_equal, &miss_force_generic);
   }
 
-  __ bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  __ j(greater, &is_nan, Label::kNear);
-  __ cmp(FieldOperand(eax, HeapNumber::kValueOffset), Immediate(0));
-  __ j(zero, &not_nan);
-  __ bind(&is_nan);
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(xmm0, Operand::StaticVariable(canonical_nan_reference));
-  } else {
-    __ fld_d(Operand::StaticVariable(canonical_nan_reference));
-  }
-  __ jmp(&have_double_value, Label::kNear);
-
-  __ bind(&smi_value);
-  // Value is a smi. convert to a double and store.
-  // Preserve original value.
-  __ mov(edx, eax);
-  __ SmiUntag(edx);
-  __ push(edx);
-  __ fild_s(Operand(esp, 0));
-  __ pop(edx);
-  __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
+  __ bind(&finish_store);
+  __ StoreNumberToDoubleElements(eax, edi, ecx, edx, xmm0,
+                                 &transition_elements_kind, true);
   __ ret(0);
 
   // Handle store cache miss, replacing the ic with the generic stub.
@@ -3978,6 +3957,84 @@
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+  // Handle transition to other elements kinds without using the generic stub.
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    // Handle transition requiring the array to grow.
+    __ bind(&grow);
+
+    // Make sure the array is only growing by a single element; anything else
+    // must be handled by the runtime. Flags are already set by previous
+    // compare.
+    __ j(not_equal, &miss_force_generic);
+
+    // Transition on values that can't be stored in a FixedDoubleArray.
+    Label value_is_smi;
+    __ JumpIfSmi(eax, &value_is_smi);
+    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+           Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
+    __ j(not_equal, &transition_elements_kind);
+    __ bind(&value_is_smi);
+
+    // Check for the empty array, and preallocate a small backing store if
+    // possible.
+    __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+    __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+    __ j(not_equal, &check_capacity);
+
+    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+    __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
+    // Restore the key, which is known to be the array length.
+    __ mov(ecx, Immediate(0));
+
+    // eax: value
+    // ecx: key
+    // edx: receiver
+    // edi: elements
+    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+    // efficiency; they are guaranteed to be initialized before use.
+    __ mov(FieldOperand(edi, JSObject::kMapOffset),
+           Immediate(masm->isolate()->factory()->fixed_double_array_map()));
+    __ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
+           Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+
+    // Install the new backing store in the JSArray.
+    __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
+    __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
+                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+    // Increment the length of the array.
+    __ add(FieldOperand(edx, JSArray::kLengthOffset),
+           Immediate(Smi::FromInt(1)));
+    __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+    __ jmp(&finish_store);
+
+    __ bind(&check_capacity);
+    // eax: value
+    // ecx: key
+    // edx: receiver
+    // edi: elements
+    // Make sure that the backing store can hold additional elements.
+    __ cmp(ecx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
+    __ j(above_equal, &slow);
+
+    // Grow the array and finish the store.
+    __ add(FieldOperand(edx, JSArray::kLengthOffset),
+           Immediate(Smi::FromInt(1)));
+    __ jmp(&finish_store);
+
+    __ bind(&prepare_slow);
+    // Restore the key, which is known to be the array length.
+    __ mov(ecx, Immediate(0));
+
+    __ bind(&slow);
+    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+    __ jmp(ic_slow, RelocInfo::CODE_TARGET);
+  }
 }
 
 
diff --git a/src/ic-inl.h b/src/ic-inl.h
index b4f789c..6a86921 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,8 @@
 #define V8_IC_INL_H_
 
 #include "ic.h"
+
+#include "compiler.h"
 #include "debug.h"
 #include "macro-assembler.h"
 
@@ -36,7 +38,7 @@
 namespace internal {
 
 
-Address IC::address() {
+Address IC::address() const {
   // Get the address of the call.
   Address result = pc() - Assembler::kCallTargetAddressOffset;
 
@@ -77,16 +79,20 @@
 
 void IC::SetTargetAtAddress(Address address, Code* target) {
   ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
+  Code* old_target = GetTargetAtAddress(address);
 #ifdef DEBUG
   // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
   // ICs as strict mode. The strict-ness of the IC must be preserved.
-  Code* old_target = GetTargetAtAddress(address);
   if (old_target->kind() == Code::STORE_IC ||
       old_target->kind() == Code::KEYED_STORE_IC) {
-    ASSERT(old_target->extra_ic_state() == target->extra_ic_state());
+    ASSERT(Code::GetStrictMode(old_target->extra_ic_state()) ==
+           Code::GetStrictMode(target->extra_ic_state()));
   }
 #endif
   Assembler::set_target_address_at(address, target->instruction_start());
+  target->GetHeap()->incremental_marking()->RecordCodeTargetPatch(address,
+                                                                  target);
+  PostPatching(address, target, old_target);
 }
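
The two lines added to SetTargetAtAddress matter for incremental marking: patching a call site writes a fresh code pointer into an object the marker may already have visited. A schematic model (invented types):

```cpp
#include <cstdio>
#include <set>

struct Code { const char* name; };

// Toy incremental marker: recording a patched code target keeps the new
// target object alive even if marking already passed over the call site.
struct IncrementalMarker {
  bool active = true;
  std::set<Code*> marked;
  void RecordCodeTargetPatch(Code* new_target) {
    if (active) marked.insert(new_target);  // re-grey the new edge
  }
};

void SetTarget(Code** call_site, Code* target, IncrementalMarker* marker) {
  *call_site = target;  // cf. Assembler::set_target_address_at
  marker->RecordCodeTargetPatch(target);
}

int main() {
  IncrementalMarker marker;
  Code miss{"miss"}, mono{"monomorphic"};
  Code* site = &miss;
  SetTarget(&site, &mono, &marker);
  std::printf("target=%s, recorded=%zu\n", site->name, marker.marked.size());
}
```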
 
 
diff --git a/src/ic.cc b/src/ic.cc
index 0f76a9a..c762127 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -40,13 +40,13 @@
 namespace internal {
 
 #ifdef DEBUG
-static char TransitionMarkFromState(IC::State state) {
+char IC::TransitionMarkFromState(IC::State state) {
   switch (state) {
     case UNINITIALIZED: return '0';
     case PREMONOMORPHIC: return 'P';
     case MONOMORPHIC: return '1';
     case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
-    case MEGAMORPHIC: return 'N';
+    case MEGAMORPHIC: return IsGeneric() ? 'G' : 'N';
 
     // We never see the debugger states here, because the state is
     // computed from the original code - not the patched code. Let
@@ -80,28 +80,34 @@
         raw_frame = it.frame();
       }
     }
-    if (raw_frame->is_java_script()) {
-      JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
-      Code* js_code = frame->unchecked_code();
-      // Find the function on the stack and both the active code for the
-      // function and the original code.
-      JSFunction* function = JSFunction::cast(frame->function());
-      function->PrintName();
-      int code_offset =
-          static_cast<int>(address() - js_code->instruction_start());
-      PrintF("+%d", code_offset);
-    } else {
-      PrintF("<unknown>");
-    }
-    PrintF(" (%c->%c)",
+    JavaScriptFrame::PrintTop(stdout, false, true);
+    bool new_can_grow =
+        Code::GetKeyedAccessGrowMode(new_target->extra_ic_state()) ==
+        ALLOW_JSARRAY_GROWTH;
+    PrintF(" (%c->%c%s)",
            TransitionMarkFromState(old_state),
-           TransitionMarkFromState(new_state));
+           TransitionMarkFromState(new_state),
+           new_can_grow ? ".GROW" : "");
     name->Print();
     PrintF("]\n");
   }
 }
-#endif
 
+#define TRACE_GENERIC_IC(type, reason)                          \
+  do {                                                          \
+    if (FLAG_trace_ic) {                                        \
+      PrintF("[%s patching generic stub in ", type);            \
+      JavaScriptFrame::PrintTop(stdout, false, true);           \
+      PrintF(" (%s)]\n", reason);                               \
+    }                                                           \
+  } while (false)
+
+#else
+#define TRACE_GENERIC_IC(type, reason)
+#endif  // DEBUG
+
+#define TRACE_IC(type, name, old_state, new_target)             \
+  ASSERT((TraceIC(type, name, old_state, new_target), true))
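
Two standard macro idioms appear here: do { ... } while (false) makes TRACE_GENERIC_IC a single statement, and the comma operator inside ASSERT lets TRACE_IC evaluate only in debug builds. A self-contained demonstration:

```cpp
#include <cassert>
#include <cstdio>

// do/while(false) makes the macro a single statement, so it composes
// safely with unbraced if/else.
#define TRACE(msg)                       \
  do {                                   \
    std::printf("[trace] %s\n", (msg));  \
  } while (false)

void TraceIC(const char* type) { std::printf("[ic] %s\n", type); }

int main() {
  if (true) TRACE("safe inside an unbraced if");
  // The comma operator makes the whole expression true, so the assert
  // never fires; when NDEBUG strips asserts, the tracing call vanishes
  // with it, which is exactly the effect TRACE_IC relies on.
  assert((TraceIC("LoadIC"), true));
}
```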
 
 IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
   ASSERT(isolate == Isolate::Current());
@@ -133,7 +139,7 @@
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-Address IC::OriginalCodeAddress() {
+Address IC::OriginalCodeAddress() const {
   HandleScope scope;
   // Compute the JavaScript frame for the frame pointer of this IC
   // structure. We need this to be able to find the function
@@ -167,7 +173,7 @@
                                              LookupResult* lookup,
                                              Object* receiver) {
   Object* end = lookup->IsProperty()
-      ? lookup->holder() : isolate->heap()->null_value();
+      ? lookup->holder() : Object::cast(isolate->heap()->null_value());
   for (Object* current = receiver;
        current != end;
        current = current->GetPrototype()) {
@@ -290,6 +296,61 @@
 }
 
 
+void IC::PostPatching(Address address, Code* target, Code* old_target) {
+  if (FLAG_type_info_threshold > 0) {
+    if (old_target->is_inline_cache_stub() &&
+        target->is_inline_cache_stub()) {
+      State old_state = old_target->ic_state();
+      State new_state = target->ic_state();
+      bool was_uninitialized =
+          old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
+      bool is_uninitialized =
+          new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
+      int delta = 0;
+      if (was_uninitialized && !is_uninitialized) {
+        delta = 1;
+      } else if (!was_uninitialized && is_uninitialized) {
+        delta = -1;
+      }
+      if (delta != 0) {
+        Code* host = target->GetHeap()->isolate()->
+            inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
+        // Not all Code objects have TypeFeedbackInfo.
+        if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
+          TypeFeedbackInfo* info =
+              TypeFeedbackInfo::cast(host->type_feedback_info());
+          info->set_ic_with_typeinfo_count(
+              info->ic_with_typeinfo_count() + delta);
+        }
+      }
+    }
+  }
+  if (FLAG_watch_ic_patching) {
+    Isolate::Current()->runtime_profiler()->NotifyICChanged();
+    // We do not want to optimize until the ICs have settled down,
+    // so when they are patched, we postpone optimization for the
+    // current function and the functions above it on the stack that
+    // might want to inline this one.
+    StackFrameIterator it;
+    if (it.done()) return;
+    it.Advance();
+    static const int kStackFramesToMark = Compiler::kMaxInliningLevels - 1;
+    for (int i = 0; i < kStackFramesToMark; ++i) {
+      if (it.done()) return;
+      StackFrame* raw_frame = it.frame();
+      if (raw_frame->is_java_script()) {
+        JSFunction* function =
+            JSFunction::cast(JavaScriptFrame::cast(raw_frame)->function());
+        if (function->IsOptimized()) continue;
+        SharedFunctionInfo* shared = function->shared();
+        shared->set_profiler_ticks(0);
+      }
+      it.Advance();
+    }
+  }
+}
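
The delta bookkeeping in PostPatching feeds the optimizer's type-info percentage. Reduced to its arithmetic (state names mirror the patch; the surrounding cache lookup is elided):

```cpp
#include <cstdio>

enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, MEGAMORPHIC };

// +1 when an IC leaves the uninitialized states (it has gathered type
// info), -1 when it is cleared back, 0 otherwise; mirrors the patch above.
int TypeInfoDelta(State old_state, State new_state) {
  bool was_uninitialized =
      old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
  bool is_uninitialized =
      new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
  if (was_uninitialized && !is_uninitialized) return 1;
  if (!was_uninitialized && is_uninitialized) return -1;
  return 0;
}

int main() {
  int ic_with_typeinfo_count = 0;
  ic_with_typeinfo_count += TypeInfoDelta(UNINITIALIZED, MONOMORPHIC);  // +1
  ic_with_typeinfo_count += TypeInfoDelta(MONOMORPHIC, MEGAMORPHIC);    //  0
  std::printf("ic_with_typeinfo_count = %d\n", ic_with_typeinfo_count);
}
```

Resetting profiler_ticks on the surrounding frames serves the same goal from the other side: it delays optimization until the ICs in hot code have stopped churning.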
+
+
 void IC::Clear(Address address) {
   Code* target = GetTargetAtAddress(address);
 
@@ -348,7 +409,7 @@
 void StoreIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
   SetTargetAtAddress(address,
-      (target->extra_ic_state() == kStrictMode)
+      (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
         ? initialize_stub_strict()
         : initialize_stub());
 }
@@ -357,7 +418,7 @@
 void KeyedStoreIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
   SetTargetAtAddress(address,
-      (target->extra_ic_state() == kStrictMode)
+      (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
         ? initialize_stub_strict()
         : initialize_stub());
 }
@@ -368,15 +429,13 @@
 }
 
 
-static void LookupForRead(Object* object,
-                          String* name,
+static void LookupForRead(Handle<Object> object,
+                          Handle<String> name,
                           LookupResult* lookup) {
-  AssertNoAllocation no_gc;  // pointers must stay valid
-
   // Skip all the objects with named interceptors, but
   // without actual getter.
   while (true) {
-    object->Lookup(name, lookup);
+    object->Lookup(*name, lookup);
     // Besides normal conditions (property not found or it's not
     // an interceptor), bail out if lookup is not cacheable: we won't
     // be able to IC it anyway and regular lookup should work fine.
@@ -386,18 +445,18 @@
       return;
     }
 
-    JSObject* holder = lookup->holder();
-    if (HasInterceptorGetter(holder)) {
+    Handle<JSObject> holder(lookup->holder());
+    if (HasInterceptorGetter(*holder)) {
       return;
     }
 
-    holder->LocalLookupRealNamedProperty(name, lookup);
+    holder->LocalLookupRealNamedProperty(*name, lookup);
     if (lookup->IsProperty()) {
       ASSERT(lookup->type() != INTERCEPTOR);
       return;
     }
 
-    Object* proto = holder->GetPrototype();
+    Handle<Object> proto(holder->GetPrototype());
     if (proto->IsNull()) {
       lookup->NotFound();
       return;
@@ -408,31 +467,32 @@
 }
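
LookupForRead, now handle-based, still implements the same loop: interceptors without a real getter are treated as transparent. A rough model:

```cpp
#include <cstdio>

struct JSObject {
  bool has_real_property;
  bool has_blocking_interceptor;  // interceptor with an actual getter
  JSObject* prototype;            // nullptr terminates the chain
};

// Walk the chain; an interceptor with no actual getter is skipped as if
// it were not there, while a real interceptor ends the cacheable lookup.
JSObject* LookupForRead(JSObject* object) {
  for (JSObject* current = object; current != nullptr;
       current = current->prototype) {
    if (current->has_real_property) return current;  // found the holder
    if (current->has_blocking_interceptor) return nullptr;  // not cacheable
    // Interceptor without a getter (or no property at all): keep walking.
  }
  return nullptr;  // fell off the chain: NotFound
}

int main() {
  JSObject proto = {true, false, nullptr};
  JSObject receiver = {false, true, &proto};
  std::printf("holder found: %s\n",
              LookupForRead(&receiver) ? "yes" : "no");
}
```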
 
 
-Object* CallICBase::TryCallAsFunction(Object* object) {
-  HandleScope scope(isolate());
-  Handle<Object> target(object, isolate());
-  Handle<Object> delegate = Execution::GetFunctionDelegate(target);
+Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
+  Handle<Object> delegate = Execution::GetFunctionDelegate(object);
 
-  if (delegate->IsJSFunction()) {
+  if (delegate->IsJSFunction() && !object->IsJSFunctionProxy()) {
     // Patch the receiver and use the delegate as the function to
-    // invoke. This is used for invoking objects as if they were
-    // functions.
-    const int argc = this->target()->arguments_count();
+    // invoke. This is used for invoking objects as if they were functions.
+    const int argc = target()->arguments_count();
     StackFrameLocator locator;
     JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
     int index = frame->ComputeExpressionsCount() - (argc + 1);
-    frame->SetExpression(index, *target);
+    frame->SetExpression(index, *object);
   }
 
-  return *delegate;
+  return delegate;
 }
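
TryCallAsFunction now returns the delegate as a handle instead of a raw object. The mechanism it supports, sketched loosely (frame patching elided; names invented):

```cpp
#include <cstdio>
#include <functional>

struct Value {
  bool is_function;
  std::function<void()> fn;
};

// Stand-in for Execution::GetFunctionDelegate: a real engine would consult
// the object's [[Call]] behavior; here we always hand back a function.
Value GetFunctionDelegate(const Value& /*object*/) {
  return Value{true, [] { std::printf("delegate invoked for callable\n"); }};
}

void CallAsFunction(const Value& callee) {
  if (callee.is_function) { callee.fn(); return; }
  Value delegate = GetFunctionDelegate(callee);  // cf. TryCallAsFunction
  if (delegate.is_function) delegate.fn();
  // A miss here would surface as the "property_not_function" TypeError.
}

int main() {
  Value not_a_function{false, nullptr};
  CallAsFunction(not_a_function);
}
```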
 
 
 void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
                                             Handle<Object> object) {
+  while (callee->IsJSFunctionProxy()) {
+    callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap());
+  }
+
   if (callee->IsJSFunction()) {
     Handle<JSFunction> function = Handle<JSFunction>::cast(callee);
-    if (function->shared()->strict_mode() || function->IsBuiltin()) {
+    if (!function->shared()->is_classic_mode() || function->IsBuiltin()) {
       // Do not wrap receiver for strict mode functions or for builtins.
       return;
     }
@@ -464,31 +524,27 @@
   // the element if so.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
-    Object* result;
-    { MaybeObject* maybe_result = object->GetElement(index);
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-
-    if (result->IsJSFunction()) return result;
+    Handle<Object> result = Object::GetElement(object, index);
+    RETURN_IF_EMPTY_HANDLE(isolate(), result);
+    if (result->IsJSFunction()) return *result;
 
     // Try to find a suitable function delegate for the object at hand.
     result = TryCallAsFunction(result);
-    if (result->IsJSFunction()) return result;
+    if (result->IsJSFunction()) return *result;
 
     // Otherwise, it will fail in the lookup step.
   }
 
   // Lookup the property in the object.
-  LookupResult lookup;
-  LookupForRead(*object, *name, &lookup);
+  LookupResult lookup(isolate());
+  LookupForRead(object, name, &lookup);
 
   if (!lookup.IsProperty()) {
     // If the object does not have the requested property, check which
     // exception we need to throw.
-    if (IsContextual(object)) {
-      return ReferenceError("not_defined", name);
-    }
-    return TypeError("undefined_method", object, name);
+    return IsContextual(object)
+        ? ReferenceError("not_defined", name)
+        : TypeError("undefined_method", object, name);
   }
 
   // Lookup is valid: Update inline cache and stub cache.
@@ -498,53 +554,42 @@
 
   // Get the property.
   PropertyAttributes attr;
-  Object* result;
-  { MaybeObject* maybe_result =
-        object->GetProperty(*object, &lookup, *name, &attr);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  Handle<Object> result =
+      Object::GetProperty(object, object, &lookup, name, &attr);
+  RETURN_IF_EMPTY_HANDLE(isolate(), result);
 
-  if (lookup.type() == INTERCEPTOR) {
+  if (lookup.type() == INTERCEPTOR && attr == ABSENT) {
     // If the object does not have the requested property, check which
     // exception we need to throw.
-    if (attr == ABSENT) {
-      if (IsContextual(object)) {
-        return ReferenceError("not_defined", name);
-      }
-      return TypeError("undefined_method", object, name);
-    }
+    return IsContextual(object)
+        ? ReferenceError("not_defined", name)
+        : TypeError("undefined_method", object, name);
   }
 
   ASSERT(!result->IsTheHole());
 
-  HandleScope scope(isolate());
-  // Wrap result in a handle because ReceiverToObjectIfRequired may allocate
-  // new object and cause GC.
-  Handle<Object> result_handle(result);
   // Make receiver an object if the callee requires it. Strict mode or builtin
   // functions do not wrap the receiver, non-strict functions and objects
   // called as functions do.
-  ReceiverToObjectIfRequired(result_handle, object);
+  ReceiverToObjectIfRequired(result, object);
 
-  if (result_handle->IsJSFunction()) {
+  if (result->IsJSFunction()) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(result);
 #ifdef ENABLE_DEBUGGER_SUPPORT
     // Handle stepping into a function if step into is active.
     Debug* debug = isolate()->debug();
     if (debug->StepInActive()) {
       // Protect the result in a handle as the debugger can allocate and might
       // cause GC.
-      Handle<JSFunction> function(JSFunction::cast(*result_handle), isolate());
       debug->HandleStepIn(function, object, fp(), false);
-      return *function;
     }
 #endif
-
-    return *result_handle;
+    return *function;
   }
 
   // Try to find a suitable function delegate for the object at hand.
-  result_handle = Handle<Object>(TryCallAsFunction(*result_handle));
-  if (result_handle->IsJSFunction()) return *result_handle;
+  result = TryCallAsFunction(result);
+  if (result->IsJSFunction()) return *result;
 
   return TypeError("property_not_function", object, name);
 }
@@ -594,89 +639,57 @@
 }
 
 
-MaybeObject* CallICBase::ComputeMonomorphicStub(
-    LookupResult* lookup,
-    State state,
-    Code::ExtraICState extra_ic_state,
-    Handle<Object> object,
-    Handle<String> name) {
+Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
+                                                State state,
+                                                Code::ExtraICState extra_state,
+                                                Handle<Object> object,
+                                                Handle<String> name) {
   int argc = target()->arguments_count();
-  MaybeObject* maybe_code = NULL;
+  Handle<JSObject> holder(lookup->holder());
   switch (lookup->type()) {
     case FIELD: {
       int index = lookup->GetFieldIndex();
-      maybe_code = isolate()->stub_cache()->ComputeCallField(argc,
-                                                             kind_,
-                                                             extra_ic_state,
-                                                             *name,
-                                                             *object,
-                                                             lookup->holder(),
-                                                             index);
-      break;
+      return isolate()->stub_cache()->ComputeCallField(
+          argc, kind_, extra_state, name, object, holder, index);
     }
     case CONSTANT_FUNCTION: {
       // Get the constant function and compute the code stub for this
       // call; used for rewriting to monomorphic state and making sure
       // that the code stub is in the stub cache.
-      JSFunction* function = lookup->GetConstantFunction();
-      maybe_code =
-          isolate()->stub_cache()->ComputeCallConstant(argc,
-                                                       kind_,
-                                                       extra_ic_state,
-                                                       *name,
-                                                       *object,
-                                                       lookup->holder(),
-                                                       function);
-      break;
+      Handle<JSFunction> function(lookup->GetConstantFunction());
+      return isolate()->stub_cache()->ComputeCallConstant(
+          argc, kind_, extra_state, name, object, holder, function);
     }
     case NORMAL: {
-      if (!object->IsJSObject()) return NULL;
+      // If we return a null handle, the IC will not be patched.
+      if (!object->IsJSObject()) return Handle<Code>::null();
       Handle<JSObject> receiver = Handle<JSObject>::cast(object);
 
-      if (lookup->holder()->IsGlobalObject()) {
-        GlobalObject* global = GlobalObject::cast(lookup->holder());
-        JSGlobalPropertyCell* cell =
-            JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
-        if (!cell->value()->IsJSFunction()) return NULL;
-        JSFunction* function = JSFunction::cast(cell->value());
-        maybe_code = isolate()->stub_cache()->ComputeCallGlobal(argc,
-                                                                kind_,
-                                                                extra_ic_state,
-                                                                *name,
-                                                                *receiver,
-                                                                global,
-                                                                cell,
-                                                                function);
+      if (holder->IsGlobalObject()) {
+        Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
+        Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+        if (!cell->value()->IsJSFunction()) return Handle<Code>::null();
+        Handle<JSFunction> function(JSFunction::cast(cell->value()));
+        return isolate()->stub_cache()->ComputeCallGlobal(
+            argc, kind_, extra_state, name, receiver, global, cell, function);
       } else {
         // There is only one shared stub for calling normalized
         // properties. It does not traverse the prototype chain, so the
         // property must be found in the receiver for the stub to be
         // applicable.
-        if (lookup->holder() != *receiver) return NULL;
-        maybe_code = isolate()->stub_cache()->ComputeCallNormal(argc,
-                                                                kind_,
-                                                                extra_ic_state,
-                                                                *name,
-                                                                *receiver);
+        if (!holder.is_identical_to(receiver)) return Handle<Code>::null();
+        return isolate()->stub_cache()->ComputeCallNormal(
+            argc, kind_, extra_state);
       }
       break;
     }
-    case INTERCEPTOR: {
-      ASSERT(HasInterceptorGetter(lookup->holder()));
-      maybe_code = isolate()->stub_cache()->ComputeCallInterceptor(
-          argc,
-          kind_,
-          extra_ic_state,
-          *name,
-          *object,
-          lookup->holder());
-      break;
-    }
+    case INTERCEPTOR:
+      ASSERT(HasInterceptorGetter(*holder));
+      return isolate()->stub_cache()->ComputeCallInterceptor(
+          argc, kind_, extra_state, name, object, holder);
     default:
-      maybe_code = NULL;
-      break;
+      return Handle<Code>::null();
   }
-  return maybe_code;
 }
 
 
@@ -698,75 +711,57 @@
 
   // Compute the number of arguments.
   int argc = target()->arguments_count();
-  MaybeObject* maybe_code = NULL;
   bool had_proto_failure = false;
+  Handle<Code> code;
   if (state == UNINITIALIZED) {
     // This is the first time we execute this inline cache.
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
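+    // (This extra step keeps the IC from specializing in code that only
+    // runs once, e.g. top-level script setup.)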
-    maybe_code =
-        isolate()->stub_cache()->ComputeCallPreMonomorphic(argc,
-                                                           kind_,
-                                                           extra_ic_state);
+    code = isolate()->stub_cache()->ComputeCallPreMonomorphic(
+        argc, kind_, extra_ic_state);
   } else if (state == MONOMORPHIC) {
     if (kind_ == Code::CALL_IC &&
         TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
-      maybe_code = ComputeMonomorphicStub(lookup,
-                                          state,
-                                          extra_ic_state,
-                                          object,
-                                          name);
+      code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
+                                    object, name);
     } else if (kind_ == Code::CALL_IC &&
                TryRemoveInvalidPrototypeDependentStub(target(),
                                                       *object,
                                                       *name)) {
       had_proto_failure = true;
-      maybe_code = ComputeMonomorphicStub(lookup,
-                                          state,
-                                          extra_ic_state,
-                                          object,
-                                          name);
+      code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
+                                    object, name);
     } else {
-      maybe_code =
-          isolate()->stub_cache()->ComputeCallMegamorphic(argc,
-                                                          kind_,
-                                                          extra_ic_state);
+      code = isolate()->stub_cache()->ComputeCallMegamorphic(
+          argc, kind_, extra_ic_state);
     }
   } else {
-    maybe_code = ComputeMonomorphicStub(lookup,
-                                        state,
-                                        extra_ic_state,
-                                        object,
-                                        name);
+    code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
+                                  object, name);
   }
 
-  // If we're unable to compute the stub (not enough memory left), we
-  // simply avoid updating the caches.
-  Object* code;
-  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
+  // If there's no appropriate stub we simply avoid updating the caches.
+  if (code.is_null()) return;
 
   // Patch the call site depending on the state of the cache.
   if (state == UNINITIALIZED ||
       state == PREMONOMORPHIC ||
       state == MONOMORPHIC ||
       state == MONOMORPHIC_PROTOTYPE_FAILURE) {
-    set_target(Code::cast(code));
+    set_target(*code);
   } else if (state == MEGAMORPHIC) {
    // The map used as the stub cache key must be consistent with
    // GenerateMonomorphicCacheProbe; it is not the map that holds the stub.
-    Map* map = JSObject::cast(object->IsJSObject() ? *object :
-                              object->GetPrototype())->map();
-
+    Handle<JSObject> cache_object = object->IsJSObject()
+        ? Handle<JSObject>::cast(object)
+        : Handle<JSObject>(JSObject::cast(object->GetPrototype()));
     // Update the stub cache.
-    isolate()->stub_cache()->Set(*name, map, Code::cast(code));
+    isolate()->stub_cache()->Set(*name, cache_object->map(), *code);
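+    // For primitive receivers (e.g. strings) cache_object is the prototype,
+    // so the entry is keyed on the prototype's map, matching what the
+    // monomorphic cache probe looks up.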
   }
 
-  USE(had_proto_failure);
-#ifdef DEBUG
   if (had_proto_failure) state = MONOMORPHIC_PROTOTYPE_FAILURE;
-  TraceIC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
-          name, state, target());
-#endif
+  TRACE_IC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
+           name, state, target());
 }
 
 
@@ -786,34 +781,22 @@
 
   if (FLAG_use_ic && state != MEGAMORPHIC && object->IsHeapObject()) {
     int argc = target()->arguments_count();
-    Heap* heap = Handle<HeapObject>::cast(object)->GetHeap();
-    Map* map = heap->non_strict_arguments_elements_map();
+    Handle<Map> map =
+        isolate()->factory()->non_strict_arguments_elements_map();
     if (object->IsJSObject() &&
-        Handle<JSObject>::cast(object)->elements()->map() == map) {
-      MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallArguments(
+        Handle<JSObject>::cast(object)->elements()->map() == *map) {
+      Handle<Code> code = isolate()->stub_cache()->ComputeCallArguments(
           argc, Code::KEYED_CALL_IC);
-      Object* code;
-      if (maybe_code->ToObject(&code)) {
-        set_target(Code::cast(code));
-#ifdef DEBUG
-        TraceIC("KeyedCallIC", key, state, target());
-#endif
-      }
-    } else if (FLAG_use_ic && state != MEGAMORPHIC &&
-               !object->IsAccessCheckNeeded()) {
-      MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
+      set_target(*code);
+      TRACE_IC("KeyedCallIC", key, state, target());
+    } else if (!object->IsAccessCheckNeeded()) {
+      Handle<Code> code = isolate()->stub_cache()->ComputeCallMegamorphic(
           argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
-      Object* code;
-      if (maybe_code->ToObject(&code)) {
-        set_target(Code::cast(code));
-#ifdef DEBUG
-        TraceIC("KeyedCallIC", key, state, target());
-#endif
-      }
+      set_target(*code);
+      TRACE_IC("KeyedCallIC", key, state, target());
     }
   }
 
-  HandleScope scope(isolate());
   Handle<Object> result = GetProperty(object, key);
   RETURN_IF_EMPTY_HANDLE(isolate(), result);
 
@@ -821,9 +804,9 @@
   // functions do not wrap the receiver, non-strict functions and objects
   // called as functions do.
   ReceiverToObjectIfRequired(result, object);
-
   if (result->IsJSFunction()) return *result;
-  result = Handle<Object>(TryCallAsFunction(*result));
+
+  result = TryCallAsFunction(result);
   if (result->IsJSFunction()) return *result;
 
   return TypeError("property_not_function", object, key);
@@ -846,53 +829,44 @@
     // the underlying string value.  See ECMA-262 15.5.5.1.
     if ((object->IsString() || object->IsStringWrapper()) &&
         name->Equals(isolate()->heap()->length_symbol())) {
-      AssertNoAllocation no_allocation;
-      Code* stub = NULL;
+      Handle<Code> stub;
       if (state == UNINITIALIZED) {
         stub = pre_monomorphic_stub();
       } else if (state == PREMONOMORPHIC) {
-        if (object->IsString()) {
-          stub = isolate()->builtins()->builtin(
-              Builtins::kLoadIC_StringLength);
-        } else {
-          stub = isolate()->builtins()->builtin(
-              Builtins::kLoadIC_StringWrapperLength);
-        }
+        stub = object->IsString()
+            ? isolate()->builtins()->LoadIC_StringLength()
+            : isolate()->builtins()->LoadIC_StringWrapperLength();
       } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
-        stub = isolate()->builtins()->builtin(
-            Builtins::kLoadIC_StringWrapperLength);
+        stub = isolate()->builtins()->LoadIC_StringWrapperLength();
       } else if (state != MEGAMORPHIC) {
         stub = megamorphic_stub();
       }
-      if (stub != NULL) {
-        set_target(stub);
+      if (!stub.is_null()) {
+        set_target(*stub);
 #ifdef DEBUG
         if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
 #endif
       }
       // Get the string if we have a string wrapper object.
-      if (object->IsJSValue()) {
-        return Smi::FromInt(
-            String::cast(Handle<JSValue>::cast(object)->value())->length());
-      }
-      return Smi::FromInt(String::cast(*object)->length());
+      Handle<Object> string = object->IsJSValue()
+          ? Handle<Object>(Handle<JSValue>::cast(object)->value())
+          : object;
+      return Smi::FromInt(String::cast(*string)->length());
     }
 
     // Use specialized code for getting the length of arrays.
     if (object->IsJSArray() &&
         name->Equals(isolate()->heap()->length_symbol())) {
-      AssertNoAllocation no_allocation;
-      Code* stub = NULL;
+      Handle<Code> stub;
       if (state == UNINITIALIZED) {
         stub = pre_monomorphic_stub();
       } else if (state == PREMONOMORPHIC) {
-        stub = isolate()->builtins()->builtin(
-            Builtins::kLoadIC_ArrayLength);
+        stub = isolate()->builtins()->LoadIC_ArrayLength();
       } else if (state != MEGAMORPHIC) {
         stub = megamorphic_stub();
       }
-      if (stub != NULL) {
-        set_target(stub);
+      if (!stub.is_null()) {
+        set_target(*stub);
 #ifdef DEBUG
         if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
 #endif
@@ -903,23 +877,20 @@
     // Use specialized code for getting prototype of functions.
     if (object->IsJSFunction() &&
         name->Equals(isolate()->heap()->prototype_symbol()) &&
-        JSFunction::cast(*object)->should_have_prototype()) {
-      { AssertNoAllocation no_allocation;
-        Code* stub = NULL;
-        if (state == UNINITIALIZED) {
-          stub = pre_monomorphic_stub();
-        } else if (state == PREMONOMORPHIC) {
-          stub = isolate()->builtins()->builtin(
-              Builtins::kLoadIC_FunctionPrototype);
-        } else if (state != MEGAMORPHIC) {
-          stub = megamorphic_stub();
-        }
-        if (stub != NULL) {
-          set_target(stub);
+        Handle<JSFunction>::cast(object)->should_have_prototype()) {
+      Handle<Code> stub;
+      if (state == UNINITIALIZED) {
+        stub = pre_monomorphic_stub();
+      } else if (state == PREMONOMORPHIC) {
+        stub = isolate()->builtins()->LoadIC_FunctionPrototype();
+      } else if (state != MEGAMORPHIC) {
+        stub = megamorphic_stub();
+      }
+      if (!stub.is_null()) {
+        set_target(*stub);
 #ifdef DEBUG
-          if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
+        if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
 #endif
-        }
       }
       return Accessors::FunctionGetPrototype(*object, 0);
     }
@@ -931,8 +902,8 @@
   if (name->AsArrayIndex(&index)) return object->GetElement(index);
 
   // Named lookup in the object.
-  LookupResult lookup;
-  LookupForRead(*object, *name, &lookup);
+  LookupResult lookup(isolate());
+  LookupForRead(object, name, &lookup);
 
   // If we did not find a property, check if we need to throw an exception.
   if (!lookup.IsProperty()) {
@@ -948,20 +919,18 @@
   }
 
   PropertyAttributes attr;
-  if (lookup.IsProperty() &&
+  if (lookup.IsFound() &&
       (lookup.type() == INTERCEPTOR || lookup.type() == HANDLER)) {
     // Get the property.
-    Object* result;
-    { MaybeObject* maybe_result =
-          object->GetProperty(*object, &lookup, *name, &attr);
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
+    Handle<Object> result =
+        Object::GetProperty(object, object, &lookup, name, &attr);
+    RETURN_IF_EMPTY_HANDLE(isolate(), result);
     // If the property is not present, check if we need to throw an
     // exception.
     if (attr == ABSENT && IsContextual(object)) {
       return ReferenceError("not_defined", name);
     }
-    return result;
+    return *result;
   }
 
   // Get the property.
@@ -984,120 +953,117 @@
   if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
 
   // Compute the code stub for this load.
-  MaybeObject* maybe_code = NULL;
-  Object* code;
+  Handle<Code> code;
   if (state == UNINITIALIZED) {
     // This is the first time we execute this inline cache.
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
-    maybe_code = pre_monomorphic_stub();
+    code = pre_monomorphic_stub();
   } else if (!lookup->IsProperty()) {
     // Nonexistent property. The result is undefined.
-    maybe_code = isolate()->stub_cache()->ComputeLoadNonexistent(*name,
-                                                                 *receiver);
+    code = isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver);
   } else {
     // Compute monomorphic stub.
+    Handle<JSObject> holder(lookup->holder());
     switch (lookup->type()) {
-      case FIELD: {
-        maybe_code = isolate()->stub_cache()->ComputeLoadField(
-            *name,
-            *receiver,
-            lookup->holder(),
-            lookup->GetFieldIndex());
+      case FIELD:
+        code = isolate()->stub_cache()->ComputeLoadField(
+            name, receiver, holder, lookup->GetFieldIndex());
         break;
-      }
       case CONSTANT_FUNCTION: {
-        Object* constant = lookup->GetConstantFunction();
-        maybe_code = isolate()->stub_cache()->ComputeLoadConstant(
-            *name, *receiver, lookup->holder(), constant);
+        Handle<JSFunction> constant(lookup->GetConstantFunction());
+        code = isolate()->stub_cache()->ComputeLoadConstant(
+            name, receiver, holder, constant);
         break;
       }
-      case NORMAL: {
-        if (lookup->holder()->IsGlobalObject()) {
-          GlobalObject* global = GlobalObject::cast(lookup->holder());
-          JSGlobalPropertyCell* cell =
-              JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
-          maybe_code = isolate()->stub_cache()->ComputeLoadGlobal(*name,
-                                                    *receiver,
-                                                    global,
-                                                    cell,
-                                                    lookup->IsDontDelete());
+      case NORMAL:
+        if (holder->IsGlobalObject()) {
+          Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
+          Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+          code = isolate()->stub_cache()->ComputeLoadGlobal(
+              name, receiver, global, cell, lookup->IsDontDelete());
         } else {
           // There is only one shared stub for loading normalized
           // properties. It does not traverse the prototype chain, so the
           // property must be found in the receiver for the stub to be
           // applicable.
-          if (lookup->holder() != *receiver) return;
-          maybe_code = isolate()->stub_cache()->ComputeLoadNormal();
+          if (!holder.is_identical_to(receiver)) return;
+          code = isolate()->stub_cache()->ComputeLoadNormal();
         }
         break;
-      }
       case CALLBACKS: {
-        if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
-        AccessorInfo* callback =
-            AccessorInfo::cast(lookup->GetCallbackObject());
+        Handle<Object> callback_object(lookup->GetCallbackObject());
+        if (!callback_object->IsAccessorInfo()) return;
+        Handle<AccessorInfo> callback =
+            Handle<AccessorInfo>::cast(callback_object);
         if (v8::ToCData<Address>(callback->getter()) == 0) return;
-        maybe_code = isolate()->stub_cache()->ComputeLoadCallback(
-            *name, *receiver, lookup->holder(), callback);
+        code = isolate()->stub_cache()->ComputeLoadCallback(
+            name, receiver, holder, callback);
         break;
       }
-      case INTERCEPTOR: {
-        ASSERT(HasInterceptorGetter(lookup->holder()));
-        maybe_code = isolate()->stub_cache()->ComputeLoadInterceptor(
-            *name, *receiver, lookup->holder());
+      case INTERCEPTOR:
+        ASSERT(HasInterceptorGetter(*holder));
+        code = isolate()->stub_cache()->ComputeLoadInterceptor(
+            name, receiver, holder);
         break;
-      }
       default:
         return;
     }
   }
 
-  // If we're unable to compute the stub (not enough memory left), we
-  // simply avoid updating the caches.
-  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
   // Patch the call site depending on the state of the cache.
-  if (state == UNINITIALIZED || state == PREMONOMORPHIC ||
+  if (state == UNINITIALIZED ||
+      state == PREMONOMORPHIC ||
       state == MONOMORPHIC_PROTOTYPE_FAILURE) {
-    set_target(Code::cast(code));
+    set_target(*code);
   } else if (state == MONOMORPHIC) {
-    set_target(megamorphic_stub());
+    // We are transitioning from the monomorphic to the megamorphic case.
+    // Place both the current monomorphic stub and the stub compiled for
+    // the receiver into the stub cache.
+    Map* map = target()->FindFirstMap();
+    if (map != NULL) {
+      isolate()->stub_cache()->Set(*name, map, target());
+    }
+    isolate()->stub_cache()->Set(*name, receiver->map(), *code);
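+    // Both shapes now hit directly in the stub cache: e.g. after loads from
+    // {a: 1} and then {a: 1, b: 2}, each map keeps its own fast handler even
+    // though this IC site itself goes megamorphic below.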
+
+    set_target(*megamorphic_stub());
   } else if (state == MEGAMORPHIC) {
    // The map used as the stub cache key must be consistent with
    // GenerateMonomorphicCacheProbe.
-    Map* map = JSObject::cast(object->IsJSObject() ? *object :
-                              object->GetPrototype())->map();
-
-    isolate()->stub_cache()->Set(*name, map, Code::cast(code));
+    isolate()->stub_cache()->Set(*name, receiver->map(), *code);
   }
 
-#ifdef DEBUG
-  TraceIC("LoadIC", name, state, target());
-#endif
+  TRACE_IC("LoadIC", name, state, target());
 }
 
 
-MaybeObject* KeyedLoadIC::GetElementStubWithoutMapCheck(
+Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck(
     bool is_js_array,
-    ElementsKind elements_kind) {
-  return KeyedLoadElementStub(elements_kind).TryGetCode();
+    ElementsKind elements_kind,
+    KeyedAccessGrowMode grow_mode) {
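+  // Loads never grow the backing store, so growth is always disabled here.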
+  ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH);
+  return KeyedLoadElementStub(elements_kind).GetCode();
 }
 
 
-MaybeObject* KeyedLoadIC::ConstructMegamorphicStub(
-    MapList* receiver_maps,
-    CodeList* targets,
-    StrictModeFlag strict_mode) {
-  Object* object;
-  KeyedLoadStubCompiler compiler;
-  MaybeObject* maybe_code = compiler.CompileLoadMegamorphic(receiver_maps,
-                                                            targets);
-  if (!maybe_code->ToObject(&object)) return maybe_code;
+Handle<Code> KeyedLoadIC::ComputePolymorphicStub(
+    MapHandleList* receiver_maps,
+    StrictModeFlag strict_mode,
+    KeyedAccessGrowMode grow_mode) {
+  CodeHandleList handler_ics(receiver_maps->length());
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    Handle<Map> receiver_map = receiver_maps->at(i);
+    Handle<Code> cached_stub = ComputeMonomorphicStubWithoutMapCheck(
+        receiver_map, strict_mode, grow_mode);
+    handler_ics.Add(cached_stub);
+  }
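+  // The polymorphic stub compiled below dispatches on the receiver map:
+  // it compares the map against each entry in receiver_maps and jumps to
+  // the corresponding handler IC on a match.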
+  KeyedLoadStubCompiler compiler(isolate());
+  Handle<Code> code = compiler.CompileLoadPolymorphic(
+      receiver_maps, &handler_ics);
   isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
-  PROFILE(isolate(), CodeCreateEvent(
-      Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG,
-      Code::cast(object), 0));
-  return object;
+  PROFILE(isolate(),
+          CodeCreateEvent(Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG, *code, 0));
+  return code;
 }
 
 
@@ -1107,9 +1073,8 @@
                                bool force_generic_stub) {
   // Check for values that can be converted into a symbol.
   // TODO(1295): Remove this code.
-  HandleScope scope(isolate());
   if (key->IsHeapNumber() &&
-      isnan(HeapNumber::cast(*key)->value())) {
+      isnan(Handle<HeapNumber>::cast(key)->value())) {
     key = isolate()->factory()->nan_symbol();
   } else if (key->IsUndefined()) {
     key = isolate()->factory()->undefined_symbol();
@@ -1131,16 +1096,11 @@
       if (object->IsString() &&
           name->Equals(isolate()->heap()->length_symbol())) {
         Handle<String> string = Handle<String>::cast(object);
-        Object* code = NULL;
-        { MaybeObject* maybe_code =
-              isolate()->stub_cache()->ComputeKeyedLoadStringLength(*name,
-                                                                    *string);
-          if (!maybe_code->ToObject(&code)) return maybe_code;
-        }
-        set_target(Code::cast(code));
-#ifdef DEBUG
-        TraceIC("KeyedLoadIC", name, state, target());
-#endif  // DEBUG
+        Handle<Code> code =
+            isolate()->stub_cache()->ComputeKeyedLoadStringLength(name, string);
+        ASSERT(!code.is_null());
+        set_target(*code);
+        TRACE_IC("KeyedLoadIC", name, state, target());
         return Smi::FromInt(string->length());
       }
 
@@ -1148,34 +1108,25 @@
       if (object->IsJSArray() &&
           name->Equals(isolate()->heap()->length_symbol())) {
         Handle<JSArray> array = Handle<JSArray>::cast(object);
-        Object* code;
-        { MaybeObject* maybe_code =
-              isolate()->stub_cache()->ComputeKeyedLoadArrayLength(*name,
-                                                                   *array);
-          if (!maybe_code->ToObject(&code)) return maybe_code;
-        }
-        set_target(Code::cast(code));
-#ifdef DEBUG
-        TraceIC("KeyedLoadIC", name, state, target());
-#endif  // DEBUG
-        return JSArray::cast(*object)->length();
+        Handle<Code> code =
+            isolate()->stub_cache()->ComputeKeyedLoadArrayLength(name, array);
+        ASSERT(!code.is_null());
+        set_target(*code);
+        TRACE_IC("KeyedLoadIC", name, state, target());
+        return array->length();
       }
 
       // Use specialized code for getting prototype of functions.
       if (object->IsJSFunction() &&
           name->Equals(isolate()->heap()->prototype_symbol()) &&
-        JSFunction::cast(*object)->should_have_prototype()) {
+          Handle<JSFunction>::cast(object)->should_have_prototype()) {
         Handle<JSFunction> function = Handle<JSFunction>::cast(object);
-        Object* code;
-        { MaybeObject* maybe_code =
-              isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
-                  *name, *function);
-          if (!maybe_code->ToObject(&code)) return maybe_code;
-        }
-        set_target(Code::cast(code));
-#ifdef DEBUG
-        TraceIC("KeyedLoadIC", name, state, target());
-#endif  // DEBUG
+        Handle<Code> code =
+            isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
+                name, function);
+        ASSERT(!code.is_null());
+        set_target(*code);
+        TRACE_IC("KeyedLoadIC", name, state, target());
         return Accessors::FunctionGetPrototype(*object, 0);
       }
     }
@@ -1184,15 +1135,14 @@
     // the element or char if so.
     uint32_t index = 0;
     if (name->AsArrayIndex(&index)) {
-      HandleScope scope(isolate());
       // Rewrite to the generic keyed load stub.
-      if (FLAG_use_ic) set_target(generic_stub());
+      if (FLAG_use_ic) set_target(*generic_stub());
       return Runtime::GetElementOrCharAt(isolate(), object, index);
     }
 
     // Named lookup.
-    LookupResult lookup;
-    LookupForRead(*object, *name, &lookup);
+    LookupResult lookup(isolate());
+    LookupForRead(object, name, &lookup);
 
     // If we did not find a property, check if we need to throw an exception.
     if (!lookup.IsProperty() && IsContextual(object)) {
@@ -1204,19 +1154,17 @@
     }
 
     PropertyAttributes attr;
-    if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
+    if (lookup.IsFound() && lookup.type() == INTERCEPTOR) {
       // Get the property.
-      Object* result;
-      { MaybeObject* maybe_result =
-            object->GetProperty(*object, &lookup, *name, &attr);
-        if (!maybe_result->ToObject(&result)) return maybe_result;
-      }
+      Handle<Object> result =
+          Object::GetProperty(object, object, &lookup, name, &attr);
+      RETURN_IF_EMPTY_HANDLE(isolate(), result);
       // If the property is not present, check if we need to throw an
       // exception.
       if (attr == ABSENT && IsContextual(object)) {
         return ReferenceError("not_defined", name);
       }
-      return result;
+      return *result;
     }
 
     return object->GetProperty(*object, &lookup, *name, &attr);
@@ -1227,44 +1175,40 @@
   bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
 
   if (use_ic) {
-    Code* stub = generic_stub();
+    Handle<Code> stub = generic_stub();
     if (!force_generic_stub) {
       if (object->IsString() && key->IsNumber()) {
         if (state == UNINITIALIZED) {
           stub = string_stub();
         }
       } else if (object->IsJSObject()) {
-        JSObject* receiver = JSObject::cast(*object);
-        Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
-        Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
-        if (elements_map == heap->non_strict_arguments_elements_map()) {
+        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+        if (receiver->elements()->map() ==
+            isolate()->heap()->non_strict_arguments_elements_map()) {
           stub = non_strict_arguments_stub();
         } else if (receiver->HasIndexedInterceptor()) {
           stub = indexed_interceptor_stub();
-        } else if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
-          MaybeObject* maybe_stub = ComputeStub(receiver,
-                                                false,
-                                                kNonStrictMode,
-                                                stub);
-          stub = maybe_stub->IsFailure() ?
-              NULL : Code::cast(maybe_stub->ToObjectUnchecked());
+        } else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
+          stub = ComputeStub(receiver, LOAD, kNonStrictMode, stub);
         }
       }
+    } else {
+      TRACE_GENERIC_IC("KeyedLoadIC", "force generic");
     }
-    if (stub != NULL) set_target(stub);
+    if (!stub.is_null()) set_target(*stub);
   }
 
-#ifdef DEBUG
-  TraceIC("KeyedLoadIC", key, state, target());
-#endif  // DEBUG
+  TRACE_IC("KeyedLoadIC", key, state, target());
 
   // Get the property.
   return Runtime::GetObjectProperty(isolate(), object, key);
 }
 
 
-void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
-                               Handle<Object> object, Handle<String> name) {
+void KeyedLoadIC::UpdateCaches(LookupResult* lookup,
+                               State state,
+                               Handle<Object> object,
+                               Handle<String> name) {
   // Bail out if we didn't find a result.
   if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
 
@@ -1274,97 +1218,89 @@
   if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
 
   // Compute the code stub for this load.
-  MaybeObject* maybe_code = NULL;
-  Object* code;
+  Handle<Code> code;
 
   if (state == UNINITIALIZED) {
     // This is the first time we execute this inline cache.
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
-    maybe_code = pre_monomorphic_stub();
+    code = pre_monomorphic_stub();
   } else {
     // Compute a monomorphic stub.
+    Handle<JSObject> holder(lookup->holder());
     switch (lookup->type()) {
-      case FIELD: {
-        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadField(
-            *name, *receiver, lookup->holder(), lookup->GetFieldIndex());
+      case FIELD:
+        code = isolate()->stub_cache()->ComputeKeyedLoadField(
+            name, receiver, holder, lookup->GetFieldIndex());
         break;
-      }
       case CONSTANT_FUNCTION: {
-        Object* constant = lookup->GetConstantFunction();
-        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
-            *name, *receiver, lookup->holder(), constant);
+        Handle<JSFunction> constant(lookup->GetConstantFunction());
+        code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
+            name, receiver, holder, constant);
         break;
       }
       case CALLBACKS: {
-        if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
-        AccessorInfo* callback =
-            AccessorInfo::cast(lookup->GetCallbackObject());
+        Handle<Object> callback_object(lookup->GetCallbackObject());
+        if (!callback_object->IsAccessorInfo()) return;
+        Handle<AccessorInfo> callback =
+            Handle<AccessorInfo>::cast(callback_object);
         if (v8::ToCData<Address>(callback->getter()) == 0) return;
-        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
-            *name, *receiver, lookup->holder(), callback);
+        code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
+            name, receiver, holder, callback);
         break;
       }
-      case INTERCEPTOR: {
+      case INTERCEPTOR:
         ASSERT(HasInterceptorGetter(lookup->holder()));
-        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
-            *name, *receiver, lookup->holder());
+        code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
+            name, receiver, holder);
         break;
-      }
-      default: {
+      default:
         // Always rewrite to the generic case so that we do not
         // repeatedly try to rewrite.
-        maybe_code = generic_stub();
+        code = generic_stub();
         break;
-      }
     }
   }
 
-  // If we're unable to compute the stub (not enough memory left), we
-  // simply avoid updating the caches.
-  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
   // Patch the call site depending on the state of the cache.  Make
   // sure to always rewrite from monomorphic to megamorphic.
   ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
   if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
-    set_target(Code::cast(code));
+    set_target(*code);
   } else if (state == MONOMORPHIC) {
-    set_target(megamorphic_stub());
+    set_target(*megamorphic_stub());
   }
 
-#ifdef DEBUG
-  TraceIC("KeyedLoadIC", name, state, target());
-#endif
+  TRACE_IC("KeyedLoadIC", name, state, target());
 }
 
 
 static bool StoreICableLookup(LookupResult* lookup) {
   // Bail out if we didn't find a result.
-  if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return false;
+  if (!lookup->IsFound() || lookup->type() == NULL_DESCRIPTOR) return false;
 
-  // If the property is read-only, we leave the IC in its current
-  // state.
+  // Bail out if inline caching is not allowed.
+  if (!lookup->IsCacheable()) return false;
+
+  // If the property is read-only, we leave the IC in its current state.
   if (lookup->IsReadOnly()) return false;
 
   return true;
 }
 
 
-static bool LookupForWrite(JSReceiver* receiver,
-                           String* name,
+static bool LookupForWrite(Handle<JSObject> receiver,
+                           Handle<String> name,
                            LookupResult* lookup) {
-  receiver->LocalLookup(name, lookup);
+  receiver->LocalLookup(*name, lookup);
   if (!StoreICableLookup(lookup)) {
     return false;
   }
 
-  if (lookup->type() == INTERCEPTOR) {
-    JSObject* object = JSObject::cast(receiver);
-    if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
-      object->LocalLookupRealNamedProperty(name, lookup);
-      return StoreICableLookup(lookup);
-    }
+  if (lookup->type() == INTERCEPTOR &&
+      receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
+    receiver->LocalLookupRealNamedProperty(*name, lookup);
+    return StoreICableLookup(lookup);
   }
 
   return true;
@@ -1376,65 +1312,69 @@
                             Handle<Object> object,
                             Handle<String> name,
                             Handle<Object> value) {
-  // If the object is undefined or null it's illegal to try to set any
-  // properties on it; throw a TypeError in that case.
-  if (object->IsUndefined() || object->IsNull()) {
-    return TypeError("non_object_property_store", object, name);
-  }
+  if (!object->IsJSObject()) {
+    // Handle proxies.
+    if (object->IsJSProxy()) {
+      return JSProxy::cast(*object)->
+          SetProperty(*name, *value, NONE, strict_mode);
+    }
 
-  if (!object->IsJSReceiver()) {
+    // If the object is undefined or null it's illegal to try to set any
+    // properties on it; throw a TypeError in that case.
+    if (object->IsUndefined() || object->IsNull()) {
+      return TypeError("non_object_property_store", object, name);
+    }
+
     // The length property of string values is read-only. Throw in strict mode.
     if (strict_mode == kStrictMode && object->IsString() &&
         name->Equals(isolate()->heap()->length_symbol())) {
       return TypeError("strict_read_only_property", object, name);
     }
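+    // (In sloppy mode such stores are silently ignored: "foo".length = 0 is
+    // a no-op, while strict mode throws above.)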
-    // Ignore stores where the receiver is not a JSObject.
+    // Ignore other stores where the receiver is not a JSObject.
+    // TODO(1475): Must check prototype chains of object wrappers.
     return *value;
   }
 
-  // Handle proxies.
-  if (object->IsJSProxy()) {
-    return JSReceiver::cast(*object)->
-        SetProperty(*name, *value, NONE, strict_mode);
-  }
-
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
 
   // Check if the given name is an array index.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
-    HandleScope scope(isolate());
-    Handle<Object> result = SetElement(receiver, index, value, strict_mode);
-    if (result.is_null()) return Failure::Exception();
+    Handle<Object> result =
+        JSObject::SetElement(receiver, index, value, NONE, strict_mode);
+    RETURN_IF_EMPTY_HANDLE(isolate(), result);
     return *value;
   }
 
-  // Use specialized code for setting the length of arrays.
-  if (receiver->IsJSArray()
-      && name->Equals(isolate()->heap()->length_symbol())
-      && JSArray::cast(*receiver)->AllowsSetElementsLength()) {
+  // Use specialized code for setting the length of arrays with fast
+  // properties.  Slow properties might indicate redefinition of the
+  // length property.
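+  // (Redefining "length", e.g. making it read-only via Object.defineProperty,
+  // normalizes the array's properties, so such arrays fail the
+  // HasFastProperties() check below.)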
+  if (receiver->IsJSArray() &&
+      name->Equals(isolate()->heap()->length_symbol()) &&
+      Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
+      receiver->HasFastProperties()) {
 #ifdef DEBUG
     if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
 #endif
-    Builtins::Name target = (strict_mode == kStrictMode)
-        ? Builtins::kStoreIC_ArrayLength_Strict
-        : Builtins::kStoreIC_ArrayLength;
-    set_target(isolate()->builtins()->builtin(target));
+    Handle<Code> stub = (strict_mode == kStrictMode)
+        ? isolate()->builtins()->StoreIC_ArrayLength_Strict()
+        : isolate()->builtins()->StoreIC_ArrayLength();
+    set_target(*stub);
     return receiver->SetProperty(*name, *value, NONE, strict_mode);
   }
 
   // Lookup the property locally in the receiver.
   if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
-    LookupResult lookup;
+    LookupResult lookup(isolate());
 
-    if (LookupForWrite(*receiver, *name, &lookup)) {
+    if (LookupForWrite(receiver, name, &lookup)) {
       // Generate a stub for this store.
       UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
     } else {
       // Strict mode doesn't allow setting non-existent global property
       // or an assignment to a read only property.
       if (strict_mode == kStrictMode) {
-        if (lookup.IsFound() && lookup.IsReadOnly()) {
+        if (lookup.IsProperty() && lookup.IsReadOnly()) {
           return TypeError("strict_read_only_property", object, name);
         } else if (IsContextual(object)) {
           return ReferenceError("not_defined", name);
@@ -1444,16 +1384,15 @@
   }
 
   if (receiver->IsJSGlobalProxy()) {
+    // TODO(ulan): find out why we patch this site even with --no-use-ic
     // Generate a generic stub that goes to the runtime when we see a global
     // proxy as receiver.
-    Code* stub = (strict_mode == kStrictMode)
+    Handle<Code> stub = (strict_mode == kStrictMode)
         ? global_proxy_stub_strict()
         : global_proxy_stub();
-    if (target() != stub) {
-      set_target(stub);
-#ifdef DEBUG
-      TraceIC("StoreIC", name, state, target());
-#endif
+    if (target() != *stub) {
+      set_target(*stub);
+      TRACE_IC("StoreIC", name, state, target());
     }
   }
 
@@ -1468,10 +1407,12 @@
                            Handle<JSObject> receiver,
                            Handle<String> name,
                            Handle<Object> value) {
-  // Skip JSGlobalProxy.
   ASSERT(!receiver->IsJSGlobalProxy());
-
   ASSERT(StoreICableLookup(lookup));
+  // These are not cacheable, so we never see such LookupResults here.
+  ASSERT(lookup->type() != HANDLER);
+  // We only get called for properties or transitions; see StoreICableLookup.
+  ASSERT(lookup->type() != NULL_DESCRIPTOR);
 
   // If the property has a non-field type allowing map transitions
   // where there is extra room in the object, we leave the IC in its
@@ -1481,89 +1422,87 @@
   // Compute the code stub for this store; used for rewriting to
   // monomorphic state and making sure that the code stub is in the
   // stub cache.
-  MaybeObject* maybe_code = NULL;
-  Object* code = NULL;
+  Handle<Code> code;
   switch (type) {
-    case FIELD: {
-      maybe_code = isolate()->stub_cache()->ComputeStoreField(
-          *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
+    case FIELD:
+      code = isolate()->stub_cache()->ComputeStoreField(name,
+                                                        receiver,
+                                                        lookup->GetFieldIndex(),
+                                                        Handle<Map>::null(),
+                                                        strict_mode);
       break;
-    }
     case MAP_TRANSITION: {
       if (lookup->GetAttributes() != NONE) return;
-      HandleScope scope(isolate());
-      ASSERT(type == MAP_TRANSITION);
       Handle<Map> transition(lookup->GetTransitionMap());
       int index = transition->PropertyIndexFor(*name);
-      maybe_code = isolate()->stub_cache()->ComputeStoreField(
-          *name, *receiver, index, *transition, strict_mode);
+      code = isolate()->stub_cache()->ComputeStoreField(
+          name, receiver, index, transition, strict_mode);
       break;
     }
-    case NORMAL: {
+    case NORMAL:
       if (receiver->IsGlobalObject()) {
         // The stub generated for the global object picks the value directly
         // from the property cell. So the property must be directly on the
         // global object.
         Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
-        JSGlobalPropertyCell* cell =
-            JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
-        maybe_code = isolate()->stub_cache()->ComputeStoreGlobal(
-            *name, *global, cell, strict_mode);
+        Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+        code = isolate()->stub_cache()->ComputeStoreGlobal(
+            name, global, cell, strict_mode);
       } else {
         if (lookup->holder() != *receiver) return;
-        maybe_code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
+        code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
       }
       break;
-    }
     case CALLBACKS: {
-      if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+      Handle<Object> callback_object(lookup->GetCallbackObject());
+      if (!callback_object->IsAccessorInfo()) return;
+      Handle<AccessorInfo> callback =
+          Handle<AccessorInfo>::cast(callback_object);
       if (v8::ToCData<Address>(callback->setter()) == 0) return;
-      maybe_code = isolate()->stub_cache()->ComputeStoreCallback(
-          *name, *receiver, callback, strict_mode);
+      code = isolate()->stub_cache()->ComputeStoreCallback(
+          name, receiver, callback, strict_mode);
       break;
     }
-    case INTERCEPTOR: {
+    case INTERCEPTOR:
       ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
-      maybe_code = isolate()->stub_cache()->ComputeStoreInterceptor(
-          *name, *receiver, strict_mode);
+      code = isolate()->stub_cache()->ComputeStoreInterceptor(
+          name, receiver, strict_mode);
       break;
-    }
-    default:
+    case CONSTANT_FUNCTION:
+    case CONSTANT_TRANSITION:
+    case ELEMENTS_TRANSITION:
+      return;
+    case HANDLER:
+    case NULL_DESCRIPTOR:
+      UNREACHABLE();
       return;
   }
 
-  // If we're unable to compute the stub (not enough memory left), we
-  // simply avoid updating the caches.
-  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
   // Patch the call site depending on the state of the cache.
   if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
-    set_target(Code::cast(code));
+    set_target(*code);
   } else if (state == MONOMORPHIC) {
     // Only move to megamorphic if the target changes.
-    if (target() != Code::cast(code)) {
+    if (target() != *code) {
       set_target((strict_mode == kStrictMode)
                    ? megamorphic_stub_strict()
                    : megamorphic_stub());
     }
   } else if (state == MEGAMORPHIC) {
     // Update the stub cache.
-    isolate()->stub_cache()->Set(*name,
-                                 receiver->map(),
-                                 Code::cast(code));
+    isolate()->stub_cache()->Set(*name, receiver->map(), *code);
   }
 
-#ifdef DEBUG
-  TraceIC("StoreIC", name, state, target());
-#endif
+  TRACE_IC("StoreIC", name, state, target());
 }
 
 
-static bool AddOneReceiverMapIfMissing(MapList* receiver_maps,
-                                       Map* new_receiver_map) {
+static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
+                                       Handle<Map> new_receiver_map) {
+  ASSERT(!new_receiver_map.is_null());
   for (int current = 0; current < receiver_maps->length(); ++current) {
-    if (receiver_maps->at(current) == new_receiver_map) {
+    if (!receiver_maps->at(current).is_null() &&
+        receiver_maps->at(current).is_identical_to(new_receiver_map)) {
       return false;
     }
   }
@@ -1572,159 +1511,261 @@
 }
 
 
-void KeyedIC::GetReceiverMapsForStub(Code* stub, MapList* result) {
+void KeyedIC::GetReceiverMapsForStub(Handle<Code> stub,
+                                     MapHandleList* result) {
   ASSERT(stub->is_inline_cache_stub());
-  if (stub == string_stub()) {
-    return result->Add(isolate()->heap()->string_map());
+  if (!string_stub().is_null() && stub.is_identical_to(string_stub())) {
+    return result->Add(isolate()->factory()->string_map());
   } else if (stub->is_keyed_load_stub() || stub->is_keyed_store_stub()) {
     if (stub->ic_state() == MONOMORPHIC) {
-      result->Add(Map::cast(stub->FindFirstMap()));
+      result->Add(Handle<Map>(stub->FindFirstMap()));
     } else {
       ASSERT(stub->ic_state() == MEGAMORPHIC);
       AssertNoAllocation no_allocation;
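+      // MEGAMORPHIC stubs embed the maps they check as object literals, so
+      // the maps can be recovered by walking the stub's relocation info.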
       int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-      for (RelocIterator it(stub, mask); !it.done(); it.next()) {
+      for (RelocIterator it(*stub, mask); !it.done(); it.next()) {
         RelocInfo* info = it.rinfo();
-        Object* object = info->target_object();
+        Handle<Object> object(info->target_object());
         ASSERT(object->IsMap());
-        result->Add(Map::cast(object));
+        AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object));
       }
     }
   }
 }
 
 
-MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
-                                  bool is_store,
+Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
+                                  StubKind stub_kind,
                                   StrictModeFlag strict_mode,
-                                  Code* generic_stub) {
+                                  Handle<Code> generic_stub) {
   State ic_state = target()->ic_state();
-  if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
-    Code* monomorphic_stub;
-    MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
-                                                     is_store,
-                                                     strict_mode,
-                                                     generic_stub);
-    if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
-
-    return monomorphic_stub;
-  }
-  ASSERT(target() != generic_stub);
+  KeyedAccessGrowMode grow_mode = IsGrowStubKind(stub_kind)
+      ? ALLOW_JSARRAY_GROWTH
+      : DO_NOT_ALLOW_JSARRAY_GROWTH;
 
   // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
   // via megamorphic stubs, since they don't have a map in their relocation info
   // and so the stubs can't be harvested for the object needed for a map check.
   if (target()->type() != NORMAL) {
+    TRACE_GENERIC_IC("KeyedIC", "non-NORMAL target type");
     return generic_stub;
   }
 
+  bool monomorphic = false;
+  MapHandleList target_receiver_maps;
+  if (ic_state != UNINITIALIZED && ic_state != PREMONOMORPHIC) {
+    GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
+  }
+  if (!IsTransitionStubKind(stub_kind)) {
+    if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+      monomorphic = true;
+    } else {
+      if (ic_state == MONOMORPHIC) {
+        // The first time a receiver is seen that is a transitioned version of
+        // the previous monomorphic receiver type, assume the new ElementsKind
+        // is the monomorphic type. This benefits global arrays that only
+        // transition once, and all call sites accessing them are faster if they
+        // remain monomorphic. If this optimistic assumption is not true, the IC
+        // will miss again and it will become polymorphic and support both the
+        // untransitioned and transitioned maps.
+        monomorphic = IsMoreGeneralElementsKindTransition(
+            target_receiver_maps.at(0)->elements_kind(),
+            receiver->GetElementsKind());
+      }
+    }
+  }
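+  // Example: a global array that transitioned elsewhere from
+  // FAST_SMI_ONLY_ELEMENTS to FAST_DOUBLE_ELEMENTS re-specializes this IC on
+  // the new, more general map instead of going polymorphic.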
+
+  if (monomorphic) {
+    return ComputeMonomorphicStub(
+        receiver, stub_kind, strict_mode, generic_stub);
+  }
+  ASSERT(target() != *generic_stub);
+
   // Determine the list of receiver maps that this call site has seen,
   // adding the map that was just encountered.
-  MapList target_receiver_maps;
-  GetReceiverMapsForStub(target(), &target_receiver_maps);
-  if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver->map())) {
-    // If the miss wasn't due to an unseen map, a MEGAMORPHIC stub
+  Handle<Map> receiver_map(receiver->map());
+  bool map_added =
+      AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
+  if (IsTransitionStubKind(stub_kind)) {
+    Handle<Map> new_map = ComputeTransitionedMap(receiver, stub_kind);
+    map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps, new_map);
+  }
+  if (!map_added) {
+    // If the miss wasn't due to an unseen map, a polymorphic stub
    // won't help; use the generic stub.
+    TRACE_GENERIC_IC("KeyedIC", "same map added twice");
     return generic_stub;
   }
 
   // If the maximum number of receiver maps has been exceeded, use the generic
   // version of the IC.
   if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+    TRACE_GENERIC_IC("KeyedIC", "max polymorph exceeded");
     return generic_stub;
   }
 
-  PolymorphicCodeCache* cache = isolate()->heap()->polymorphic_code_cache();
-  Code::Flags flags = Code::ComputeFlags(this->kind(),
-                                         MEGAMORPHIC,
-                                         strict_mode);
-  Object* maybe_cached_stub = cache->Lookup(&target_receiver_maps, flags);
-  // If there is a cached stub, use it.
-  if (!maybe_cached_stub->IsUndefined()) {
-    ASSERT(maybe_cached_stub->IsCode());
-    return Code::cast(maybe_cached_stub);
+  if ((Code::GetKeyedAccessGrowMode(target()->extra_ic_state()) ==
+       ALLOW_JSARRAY_GROWTH)) {
+    grow_mode = ALLOW_JSARRAY_GROWTH;
   }
-  // Collect MONOMORPHIC stubs for all target_receiver_maps.
-  CodeList handler_ics(target_receiver_maps.length());
-  for (int i = 0; i < target_receiver_maps.length(); ++i) {
-    Map* receiver_map(target_receiver_maps.at(i));
-    MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
-        receiver_map, strict_mode);
-    Code* cached_stub;
-    if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
-    handler_ics.Add(cached_stub);
-  }
-  // Build the MEGAMORPHIC stub.
-  Code* stub;
-  MaybeObject* maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
-                                                     &handler_ics,
-                                                     strict_mode);
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  MaybeObject* maybe_update = cache->Update(&target_receiver_maps, flags, stub);
-  if (maybe_update->IsFailure()) return maybe_update;
+
+  Handle<PolymorphicCodeCache> cache =
+      isolate()->factory()->polymorphic_code_cache();
+  Code::ExtraICState extra_state = Code::ComputeExtraICState(grow_mode,
+                                                             strict_mode);
+  Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, extra_state);
+  Handle<Object> probe = cache->Lookup(&target_receiver_maps, flags);
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
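+  // Cache miss: compile a new polymorphic stub and memoize it under this
+  // exact set of receiver maps so that other call sites can reuse it.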
+  Handle<Code> stub =
+      ComputePolymorphicStub(&target_receiver_maps, strict_mode, grow_mode);
+  PolymorphicCodeCache::Update(cache, &target_receiver_maps, flags, stub);
   return stub;
 }
 
 
-MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
-    Map* receiver_map,
-    StrictModeFlag strict_mode) {
+Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
+    Handle<Map> receiver_map,
+    StrictModeFlag strict_mode,
+    KeyedAccessGrowMode grow_mode) {
   if ((receiver_map->instance_type() & kNotStringTag) == 0) {
-    ASSERT(string_stub() != NULL);
+    ASSERT(!string_stub().is_null());
     return string_stub();
   } else {
     ASSERT(receiver_map->has_dictionary_elements() ||
            receiver_map->has_fast_elements() ||
+           receiver_map->has_fast_smi_only_elements() ||
            receiver_map->has_fast_double_elements() ||
            receiver_map->has_external_array_elements());
     bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
     return GetElementStubWithoutMapCheck(is_js_array,
-                                         receiver_map->elements_kind());
+                                         receiver_map->elements_kind(),
+                                         grow_mode);
   }
 }
 
 
-MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
-                                             bool is_store,
+Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<JSObject> receiver,
+                                             StubKind stub_kind,
                                              StrictModeFlag strict_mode,
-                                             Code* generic_stub) {
-  Code* result = NULL;
+                                             Handle<Code> generic_stub) {
   if (receiver->HasFastElements() ||
+      receiver->HasFastSmiOnlyElements() ||
       receiver->HasExternalArrayElements() ||
       receiver->HasFastDoubleElements() ||
       receiver->HasDictionaryElements()) {
-    MaybeObject* maybe_stub =
-        isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
-            receiver, is_store, strict_mode);
-    if (!maybe_stub->To(&result)) return maybe_stub;
+    return isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
+        receiver, stub_kind, strict_mode);
   } else {
-    result = generic_stub;
+    return generic_stub;
   }
-  return result;
 }
 
 
-MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck(
+Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
+                                            StubKind stub_kind) {
+  switch (stub_kind) {
+    case KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT:
+    case KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT:
+    case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
+    case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
+      return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
+    case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
+    case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
+      return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
+    default:
+      UNREACHABLE();
+      return Handle<Map>::null();
+  }
+}
+
+
+Handle<Code> KeyedStoreIC::GetElementStubWithoutMapCheck(
     bool is_js_array,
-    ElementsKind elements_kind) {
-  return KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
+    ElementsKind elements_kind,
+    KeyedAccessGrowMode grow_mode) {
+  return KeyedStoreElementStub(is_js_array, elements_kind, grow_mode).GetCode();
 }
 
 
-MaybeObject* KeyedStoreIC::ConstructMegamorphicStub(
-    MapList* receiver_maps,
-    CodeList* targets,
-    StrictModeFlag strict_mode) {
-  Object* object;
-  KeyedStoreStubCompiler compiler(strict_mode);
-  MaybeObject* maybe_code = compiler.CompileStoreMegamorphic(receiver_maps,
-                                                             targets);
-  if (!maybe_code->ToObject(&object)) return maybe_code;
+Handle<Code> KeyedStoreIC::ComputePolymorphicStub(
+    MapHandleList* receiver_maps,
+    StrictModeFlag strict_mode,
+    KeyedAccessGrowMode grow_mode) {
+  // Collect MONOMORPHIC stubs for all target_receiver_maps.
+  CodeHandleList handler_ics(receiver_maps->length());
+  MapHandleList transitioned_maps(receiver_maps->length());
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    Handle<Map> receiver_map(receiver_maps->at(i));
+    Handle<Code> cached_stub;
+    Handle<Map> transitioned_map =
+        receiver_map->FindTransitionedMap(receiver_maps);
+    if (!transitioned_map.is_null()) {
+      cached_stub = ElementsTransitionAndStoreStub(
+          receiver_map->elements_kind(),  // original elements_kind
+          transitioned_map->elements_kind(),
+          receiver_map->instance_type() == JS_ARRAY_TYPE,  // is_js_array
+          strict_mode, grow_mode).GetCode();
+    } else {
+      cached_stub = ComputeMonomorphicStubWithoutMapCheck(receiver_map,
+                                                          strict_mode,
+                                                          grow_mode);
+    }
+    ASSERT(!cached_stub.is_null());
+    handler_ics.Add(cached_stub);
+    transitioned_maps.Add(transitioned_map);
+  }
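+  // Maps with a transition target get a combined stub that first migrates
+  // the elements to the transitioned kind and then performs the store; the
+  // remaining maps reuse their plain monomorphic element stubs.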
+  KeyedStoreStubCompiler compiler(isolate(), strict_mode, grow_mode);
+  Handle<Code> code = compiler.CompileStorePolymorphic(
+      receiver_maps, &handler_ics, &transitioned_maps);
   isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
-  PROFILE(isolate(), CodeCreateEvent(
-      Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG,
-      Code::cast(object), 0));
-  return object;
+  PROFILE(isolate(),
+          CodeCreateEvent(Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG, *code, 0));
+  return code;
+}
+
+
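For each receiver map in the polymorphic set, ComputePolymorphicStub above
picks either a transition-and-store handler (when FindTransitionedMap locates
a more general member of the same set) or a plain monomorphic handler. A
hedged sketch of that per-map selection, with hypothetical stand-in types
rather than V8's Map/Code handles:

    #include <vector>

    // Hypothetical stand-ins: a map knows its elements kind and which member
    // of the polymorphic set it can transition to (null if none).
    struct Map { int elements_kind; const Map* transition_target; };
    struct Handler { int from_kind; int to_kind; bool is_transition; };

    Handler MakeTransitionAndStoreHandler(int from, int to) {
      return Handler{from, to, true};
    }
    Handler MakeMonomorphicHandler(const Map* m) {
      return Handler{m->elements_kind, m->elements_kind, false};
    }

    // One handler per receiver map, paired with its transition target.
    void CollectHandlers(const std::vector<const Map*>& receiver_maps,
                         std::vector<Handler>* handlers,
                         std::vector<const Map*>* transitioned_maps) {
      for (const Map* map : receiver_maps) {
        const Map* target = map->transition_target;
        handlers->push_back(target != nullptr
            ? MakeTransitionAndStoreHandler(map->elements_kind,
                                            target->elements_kind)
            : MakeMonomorphicHandler(map));
        transitioned_maps->push_back(target);
      }
    }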
+KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
+                                            Handle<Object> key,
+                                            Handle<Object> value) {
+  ASSERT(key->IsSmi());
+  int index = Smi::cast(*key)->value();
+  bool allow_growth = receiver->IsJSArray() &&
+      JSArray::cast(*receiver)->length()->IsSmi() &&
+      index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
+
+  if (allow_growth) {
+    // Handle growing array in stub if necessary.
+    if (receiver->HasFastSmiOnlyElements()) {
+      if (value->IsHeapNumber()) {
+        return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
+      }
+      if (value->IsHeapObject()) {
+        return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
+      }
+    } else if (receiver->HasFastDoubleElements()) {
+      if (!value->IsSmi() && !value->IsHeapNumber()) {
+        return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
+      }
+    }
+    return STORE_AND_GROW_NO_TRANSITION;
+  } else {
+    // Handle only in-bounds elements accesses.
+    if (receiver->HasFastSmiOnlyElements()) {
+      if (value->IsHeapNumber()) {
+        return STORE_TRANSITION_SMI_TO_DOUBLE;
+      } else if (value->IsHeapObject()) {
+        return STORE_TRANSITION_SMI_TO_OBJECT;
+      }
+    } else if (receiver->HasFastDoubleElements()) {
+      if (!value->IsSmi() && !value->IsHeapNumber()) {
+        return STORE_TRANSITION_DOUBLE_TO_OBJECT;
+      }
+    }
+    return STORE_NO_TRANSITION;
+  }
 }
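GetStubKind above answers two independent questions: does the store land past
the current length of a JSArray (so the stub must also grow it), and does the
incoming value force a more general elements kind? Restated as a stand-alone
sketch with hypothetical tags instead of heap handles; storing a heap number
past the end of a smi-only array, for example, yields
STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:

    enum ValueTag { VALUE_SMI, VALUE_HEAP_NUMBER, VALUE_OTHER_HEAP_OBJECT };
    enum Kind {
      STORE_NO_TRANSITION,
      STORE_TRANSITION_SMI_TO_OBJECT,
      STORE_TRANSITION_SMI_TO_DOUBLE,
      STORE_TRANSITION_DOUBLE_TO_OBJECT,
      STORE_AND_GROW_NO_TRANSITION,
      STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
      STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
      STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
    };

    Kind GetStubKind(bool grows, bool smi_only_elements, bool double_elements,
                     ValueTag value) {
      Kind kind = STORE_NO_TRANSITION;
      if (smi_only_elements) {
        if (value == VALUE_HEAP_NUMBER) {
          kind = STORE_TRANSITION_SMI_TO_DOUBLE;
        } else if (value == VALUE_OTHER_HEAP_OBJECT) {
          kind = STORE_TRANSITION_SMI_TO_OBJECT;
        }
      } else if (double_elements && value == VALUE_OTHER_HEAP_OBJECT) {
        kind = STORE_TRANSITION_DOUBLE_TO_OBJECT;
      }
      // Growing variants sit at a fixed enum offset (kGrowICDelta in ic.h).
      const int kGrowDelta = STORE_AND_GROW_NO_TRANSITION - STORE_NO_TRANSITION;
      return grows ? static_cast<Kind>(kind + kGrowDelta) : kind;
    }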
 
 
@@ -1737,6 +1778,12 @@
   if (key->IsSymbol()) {
     Handle<String> name = Handle<String>::cast(key);
 
+    // Handle proxies.
+    if (object->IsJSProxy()) {
+      return JSProxy::cast(*object)->SetProperty(
+          *name, *value, NONE, strict_mode);
+    }
+
     // If the object is undefined or null it's illegal to try to set any
     // properties on it; throw a TypeError in that case.
     if (object->IsUndefined() || object->IsNull()) {
@@ -1750,19 +1797,18 @@
     // Check if the given name is an array index.
     uint32_t index;
     if (name->AsArrayIndex(&index)) {
-      HandleScope scope(isolate());
-      Handle<Object> result = SetElement(receiver, index, value, strict_mode);
-      if (result.is_null()) return Failure::Exception();
+      Handle<Object> result =
+          JSObject::SetElement(receiver, index, value, NONE, strict_mode);
+      RETURN_IF_EMPTY_HANDLE(isolate(), result);
       return *value;
     }
 
-    // Lookup the property locally in the receiver.
-    LookupResult lookup;
-    receiver->LocalLookup(*name, &lookup);
-
     // Update inline cache and stub cache.
-    if (FLAG_use_ic) {
-      UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+    if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
+      LookupResult lookup(isolate());
+      if (LookupForWrite(receiver, name, &lookup)) {
+        UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+      }
     }
 
     // Set the property.
@@ -1775,33 +1821,27 @@
   ASSERT(!(use_ic && object->IsJSGlobalProxy()));
 
   if (use_ic) {
-    Code* stub = (strict_mode == kStrictMode)
+    Handle<Code> stub = (strict_mode == kStrictMode)
         ? generic_stub_strict()
         : generic_stub();
     if (object->IsJSObject()) {
-      JSObject* receiver = JSObject::cast(*object);
-      Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
-      Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
-      if (elements_map == heap->non_strict_arguments_elements_map()) {
+      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+      if (receiver->elements()->map() ==
+          isolate()->heap()->non_strict_arguments_elements_map()) {
         stub = non_strict_arguments_stub();
       } else if (!force_generic) {
-        if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
-          HandleScope scope(isolate());
-          MaybeObject* maybe_stub = ComputeStub(receiver,
-                                                true,
-                                                strict_mode,
-                                                stub);
-          stub = maybe_stub->IsFailure() ?
-              NULL : Code::cast(maybe_stub->ToObjectUnchecked());
+        if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
+          StubKind stub_kind = GetStubKind(receiver, key, value);
+          stub = ComputeStub(receiver, stub_kind, strict_mode, stub);
         }
+      } else {
+        TRACE_GENERIC_IC("KeyedStoreIC", "force generic");
       }
     }
-    if (stub != NULL) set_target(stub);
+    if (!stub.is_null()) set_target(*stub);
   }
 
-#ifdef DEBUG
-  TraceIC("KeyedStoreIC", key, state, target());
-#endif
+  TRACE_IC("KeyedStoreIC", key, state, target());
 
   // Set the property.
   return Runtime::SetObjectProperty(
@@ -1815,15 +1855,12 @@
                                 Handle<JSObject> receiver,
                                 Handle<String> name,
                                 Handle<Object> value) {
-  // Skip JSGlobalProxy.
-  if (receiver->IsJSGlobalProxy()) return;
-
-  // Bail out if we didn't find a result.
-  if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return;
-
-  // If the property is read-only, we leave the IC in its current
-  // state.
-  if (lookup->IsReadOnly()) return;
+  ASSERT(!receiver->IsJSGlobalProxy());
+  ASSERT(StoreICableLookup(lookup));
+  // These are not cacheable, so we never see such LookupResults here.
+  ASSERT(lookup->type() != HANDLER);
+  // We only get called for properties or transitions; see StoreICableLookup.
+  ASSERT(lookup->type() != NULL_DESCRIPTOR);
 
   // If the property has a non-field type allowing map transitions
   // where there is extra room in the object, we leave the IC in its
@@ -1833,75 +1870,68 @@
   // Compute the code stub for this store; used for rewriting to
   // monomorphic state and making sure that the code stub is in the
   // stub cache.
-  MaybeObject* maybe_code = NULL;
-  Object* code = NULL;
+  Handle<Code> code;
 
   switch (type) {
-    case FIELD: {
-      maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
-          *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
+    case FIELD:
+      code = isolate()->stub_cache()->ComputeKeyedStoreField(
+          name, receiver, lookup->GetFieldIndex(),
+          Handle<Map>::null(), strict_mode);
       break;
-    }
-    case MAP_TRANSITION: {
+    case MAP_TRANSITION:
       if (lookup->GetAttributes() == NONE) {
-        HandleScope scope(isolate());
-        ASSERT(type == MAP_TRANSITION);
         Handle<Map> transition(lookup->GetTransitionMap());
         int index = transition->PropertyIndexFor(*name);
-        maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
-            *name, *receiver, index, *transition, strict_mode);
+        code = isolate()->stub_cache()->ComputeKeyedStoreField(
+            name, receiver, index, transition, strict_mode);
         break;
       }
       // fall through.
-    }
-    default: {
+    case NORMAL:
+    case CONSTANT_FUNCTION:
+    case CALLBACKS:
+    case INTERCEPTOR:
+    case CONSTANT_TRANSITION:
+    case ELEMENTS_TRANSITION:
       // Always rewrite to the generic case so that we do not
       // repeatedly try to rewrite.
-      maybe_code = (strict_mode == kStrictMode)
+      code = (strict_mode == kStrictMode)
           ? generic_stub_strict()
           : generic_stub();
       break;
-    }
+    case HANDLER:
+    case NULL_DESCRIPTOR:
+      UNREACHABLE();
+      return;
   }
 
-  // If we're unable to compute the stub (not enough memory left), we
-  // simply avoid updating the caches.
-  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
+  ASSERT(!code.is_null());
 
   // Patch the call site depending on the state of the cache.  Make
   // sure to always rewrite from monomorphic to megamorphic.
   ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
   if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
-    set_target(Code::cast(code));
+    set_target(*code);
   } else if (state == MONOMORPHIC) {
     set_target((strict_mode == kStrictMode)
-                 ? megamorphic_stub_strict()
-                 : megamorphic_stub());
+                 ? *megamorphic_stub_strict()
+                 : *megamorphic_stub());
   }
 
-#ifdef DEBUG
-  TraceIC("KeyedStoreIC", name, state, target());
-#endif
+  TRACE_IC("KeyedStoreIC", name, state, target());
 }
 
 
+#undef TRACE_IC
+
+
 // ----------------------------------------------------------------------------
 // Static IC stub generators.
 //
 
-static JSFunction* CompileFunction(Isolate* isolate,
-                                   JSFunction* function) {
-  // Compile now with optimization.
-  HandleScope scope(isolate);
-  Handle<JSFunction> function_handle(function, isolate);
-  CompileLazy(function_handle, CLEAR_EXCEPTION);
-  return *function_handle;
-}
-
-
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CallIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1910,45 +1940,46 @@
                                               extra_ic_state,
                                               args.at<Object>(0),
                                               args.at<String>(1));
-  Object* result;
-  if (!maybe_result->ToObject(&result)) return maybe_result;
+  // Result could be a function or a failure.
+  JSFunction* raw_function = NULL;
+  if (!maybe_result->To(&raw_function)) return maybe_result;
 
   // The first time the inline cache is updated may be the first time the
-  // function it references gets called.  If the function was lazily compiled
+  // function it references gets called.  If the function is lazily compiled
   // then the first call will trigger a compilation.  We check for this case
   // and we do the compilation immediately, instead of waiting for the stub
-  // currently attached to the JSFunction object to trigger compilation.  We
-  // do this in the case where we know that the inline cache is inside a loop,
-  // because then we know that we want to optimize the function.
-  if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
-    return result;
-  }
-  return CompileFunction(isolate, JSFunction::cast(result));
+  // currently attached to the JSFunction object to trigger compilation.
+  if (raw_function->is_compiled()) return raw_function;
+
+  Handle<JSFunction> function(raw_function);
+  JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+  return *function;
 }
 
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   KeyedCallIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
-  Object* result;
-  { MaybeObject* maybe_result =
+  MaybeObject* maybe_result =
       ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1));
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  // Result could be a function or a failure.
+  JSFunction* raw_function = NULL;
+  if (!maybe_result->To(&raw_function)) return maybe_result;
 
-  if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
-    return result;
-  }
-  return CompileFunction(isolate, JSFunction::cast(result));
+  if (raw_function->is_compiled()) return raw_function;
+
+  Handle<JSFunction> function(raw_function);
+  JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+  return *function;
 }
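CallIC_Miss and KeyedCallIC_Miss above share one pattern: resolve the callee,
and if it is not yet compiled, compile it eagerly now instead of letting the
lazy-compile stub fire on the imminent call. A minimal sketch of the pattern
with hypothetical types (not V8's API):

    struct Function {
      bool compiled;
    };

    void CompileNow(Function* f) {  // stands in for JSFunction::CompileLazy
      f->compiled = true;           // ...code generation elided...
    }

    // Miss-handler shape: returning an uncompiled function would just bounce
    // through the lazy-compile stub, so compile before returning.
    Function* OnCallMiss(Function* resolved) {
      if (!resolved->compiled) CompileNow(resolved);
      return resolved;
    }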
 
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   LoadIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1958,7 +1989,7 @@
 
 // Used from ic-<arch>.cc
 RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   KeyedLoadIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1967,7 +1998,7 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   KeyedLoadIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1977,13 +2008,13 @@
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   StoreIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
   return ic.Store(state,
-                  static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+                  Code::GetStrictMode(extra_ic_state),
                   args.at<Object>(0),
                   args.at<String>(1),
                   args.at<Object>(2));
@@ -1994,12 +2025,19 @@
   NoHandleAllocation nha;
 
   ASSERT(args.length() == 2);
-  JSObject* receiver = JSObject::cast(args[0]);
+  JSArray* receiver = JSArray::cast(args[0]);
   Object* len = args[1];
 
   // The generated code should filter out non-Smis before we get here.
   ASSERT(len->IsSmi());
 
+#ifdef DEBUG
+  // The length property has to be a writable callback property.
+  LookupResult debug_lookup(isolate);
+  receiver->LocalLookup(isolate->heap()->length_symbol(), &debug_lookup);
+  ASSERT(debug_lookup.type() == CALLBACKS && !debug_lookup.IsReadOnly());
+#endif
+
   Object* result;
   { MaybeObject* maybe_result = receiver->SetElementsLength(len);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2046,13 +2084,13 @@
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   KeyedStoreIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
   return ic.Store(state,
-                  static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+                  Code::GetStrictMode(extra_ic_state),
                   args.at<Object>(0),
                   args.at<Object>(1),
                   args.at<Object>(2),
@@ -2068,8 +2106,7 @@
   Handle<Object> object = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
   Handle<Object> value = args.at<Object>(2);
-  StrictModeFlag strict_mode =
-      static_cast<StrictModeFlag>(extra_ic_state & kStrictMode);
+  StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
   return Runtime::SetObjectProperty(isolate,
                                     object,
                                     key,
@@ -2080,13 +2117,13 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   KeyedStoreIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
   return ic.Store(state,
-                  static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+                  Code::GetStrictMode(extra_ic_state),
                   args.at<Object>(0),
                   args.at<Object>(1),
                   args.at<Object>(2),
@@ -2402,7 +2439,7 @@
   Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
 
   bool caught_exception;
-  Object** builtin_args[] = { right.location() };
+  Handle<Object> builtin_args[] = { right };
   Handle<Object> result = Execution::Call(builtin_function,
                                           left,
                                           ARRAY_SIZE(builtin_args),
@@ -2435,6 +2472,7 @@
     case SMIS: return "SMIS";
     case HEAP_NUMBERS: return "HEAP_NUMBERS";
     case OBJECTS: return "OBJECTS";
+    case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
     case SYMBOLS: return "SYMBOLS";
     case STRINGS: return "STRINGS";
     case GENERIC: return "GENERIC";
@@ -2449,19 +2487,50 @@
                                         bool has_inlined_smi_code,
                                         Handle<Object> x,
                                         Handle<Object> y) {
-  if (!has_inlined_smi_code && state != UNINITIALIZED && state != SYMBOLS) {
-    return GENERIC;
+  switch (state) {
+    case UNINITIALIZED:
+      if (x->IsSmi() && y->IsSmi()) return SMIS;
+      if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+      if (Token::IsOrderedRelationalCompareOp(op_)) {
+        // Ordered comparisons treat undefined as NaN, so the
+        // HEAP_NUMBER stub will do the right thing.
+        if ((x->IsNumber() && y->IsUndefined()) ||
+            (y->IsNumber() && x->IsUndefined())) {
+          return HEAP_NUMBERS;
+        }
+      }
+      if (x->IsSymbol() && y->IsSymbol()) {
+        // We compare symbols as strings if we need to determine
+        // the order in a non-equality compare.
+        return Token::IsEqualityOp(op_) ? SYMBOLS : STRINGS;
+      }
+      if (x->IsString() && y->IsString()) return STRINGS;
+      if (!Token::IsEqualityOp(op_)) return GENERIC;
+      if (x->IsJSObject() && y->IsJSObject()) {
+        if (Handle<JSObject>::cast(x)->map() ==
+            Handle<JSObject>::cast(y)->map() &&
+            Token::IsEqualityOp(op_)) {
+          return KNOWN_OBJECTS;
+        } else {
+          return OBJECTS;
+        }
+      }
+      return GENERIC;
+    case SMIS:
+      return has_inlined_smi_code && x->IsNumber() && y->IsNumber()
+          ? HEAP_NUMBERS
+          : GENERIC;
+    case SYMBOLS:
+      ASSERT(Token::IsEqualityOp(op_));
+      return x->IsString() && y->IsString() ? STRINGS : GENERIC;
+    case HEAP_NUMBERS:
+    case STRINGS:
+    case OBJECTS:
+    case KNOWN_OBJECTS:
+    case GENERIC:
+      return GENERIC;
   }
-  if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
-  if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
-      x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
-  if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
-  if (state == UNINITIALIZED &&
-      x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
-  if ((state == UNINITIALIZED || state == SYMBOLS) &&
-      x->IsString() && y->IsString()) return STRINGS;
-  if (state == UNINITIALIZED &&
-      x->IsJSObject() && y->IsJSObject()) return OBJECTS;
+  UNREACHABLE();
   return GENERIC;
 }
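The rewritten TargetState above makes the state machine explicit: from
UNINITIALIZED it picks the most specific state covering both operands, and a
later miss only ever widens the state (SMIS to HEAP_NUMBERS, SYMBOLS to
STRINGS, everything else to GENERIC), so the IC never oscillates. A
stand-alone approximation of the UNINITIALIZED classification, with
hypothetical operand tags in place of the heap-object checks:

    enum Tag { TAG_SMI, TAG_HEAP_NUMBER, TAG_SYMBOL, TAG_STRING, TAG_OBJECT,
               TAG_OTHER };
    enum State { SMIS, HEAP_NUMBERS, SYMBOLS, STRINGS, OBJECTS, GENERIC };

    static bool IsNumber(Tag t) { return t == TAG_SMI || t == TAG_HEAP_NUMBER; }
    static bool IsString(Tag t) { return t == TAG_SYMBOL || t == TAG_STRING; }

    State InitialState(Tag x, Tag y, bool equality_op) {
      if (x == TAG_SMI && y == TAG_SMI) return SMIS;
      if (IsNumber(x) && IsNumber(y)) return HEAP_NUMBERS;
      if (x == TAG_SYMBOL && y == TAG_SYMBOL) {
        // Ordering compares need full string comparison even for symbols.
        return equality_op ? SYMBOLS : STRINGS;
      }
      if (IsString(x) && IsString(y)) return STRINGS;
      if (!equality_op) return GENERIC;
      return (x == TAG_OBJECT && y == TAG_OBJECT) ? OBJECTS : GENERIC;
    }

(The real code additionally returns KNOWN_OBJECTS when both operands share a
map, and treats undefined as NaN for ordered comparisons.)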
 
diff --git a/src/ic.h b/src/ic.h
index ece5be9..5662552 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,10 +91,13 @@
   // Construct the IC structure with the given number of extra
   // JavaScript frames on the stack.
   IC(FrameDepth depth, Isolate* isolate);
+  virtual ~IC() {}
 
   // Get the call-site target; used for determining the state.
-  Code* target() { return GetTargetAtAddress(address()); }
-  inline Address address();
+  Code* target() const { return GetTargetAtAddress(address()); }
+  inline Address address() const;
+
+  virtual bool IsGeneric() const { return false; }
 
   // Compute the current IC state based on the target stub, receiver and name.
   static State StateFrom(Code* target, Object* receiver, Object* name);
@@ -139,13 +142,15 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Computes the address in the original code when the code running is
   // containing break points (calls to DebugBreakXXX builtins).
-  Address OriginalCodeAddress();
+  Address OriginalCodeAddress() const;
 #endif
 
   // Set the call-site target.
   void set_target(Code* code) { SetTargetAtAddress(address(), code); }
 
 #ifdef DEBUG
+  char TransitionMarkFromState(IC::State state);
+
   void TraceIC(const char* type,
                Handle<Object> name,
                State old_state,
@@ -160,6 +165,7 @@
   // Access the target code for the given IC address.
   static inline Code* GetTargetAtAddress(Address address);
   static inline void SetTargetAtAddress(Address address, Code* target);
+  static void PostPatching(Address address, Code* target, Code* old_target);
 
  private:
   // Frame pointer for the frame that uses (calls) the IC.
@@ -198,47 +204,60 @@
   class Contextual: public BitField<bool, 0, 1> {};
   class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
 
- protected:
-  CallICBase(Code::Kind kind, Isolate* isolate)
-      : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
-
- public:
+  // Returns a JSFunction or a Failure.
   MUST_USE_RESULT MaybeObject* LoadFunction(State state,
                                             Code::ExtraICState extra_ic_state,
                                             Handle<Object> object,
                                             Handle<String> name);
 
  protected:
-  Code::Kind kind_;
+  CallICBase(Code::Kind kind, Isolate* isolate)
+      : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
 
   bool TryUpdateExtraICState(LookupResult* lookup,
                              Handle<Object> object,
                              Code::ExtraICState* extra_ic_state);
 
-  MUST_USE_RESULT MaybeObject* ComputeMonomorphicStub(
-      LookupResult* lookup,
-      State state,
-      Code::ExtraICState extra_ic_state,
-      Handle<Object> object,
-      Handle<String> name);
+  // Compute a monomorphic stub if possible, otherwise return a null handle.
+  Handle<Code> ComputeMonomorphicStub(LookupResult* lookup,
+                                      State state,
+                                      Code::ExtraICState extra_state,
+                                      Handle<Object> object,
+                                      Handle<String> name);
 
-  // Update the inline cache and the global stub cache based on the
-  // lookup result.
+  // Update the inline cache and the global stub cache based on the lookup
+  // result.
   void UpdateCaches(LookupResult* lookup,
                     State state,
                     Code::ExtraICState extra_ic_state,
                     Handle<Object> object,
                     Handle<String> name);
 
-  // Returns a JSFunction if the object can be called as a function,
-  // and patches the stack to be ready for the call.
-  // Otherwise, it returns the undefined value.
-  Object* TryCallAsFunction(Object* object);
+  // Returns a JSFunction if the object can be called as a function, and
+  // patches the stack to be ready for the call.  Otherwise, it returns the
+  // undefined value.
+  Handle<Object> TryCallAsFunction(Handle<Object> object);
 
   void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
 
   static void Clear(Address address, Code* target);
 
+  // Platform-specific code generation functions used by both call and
+  // keyed call.
+  static void GenerateMiss(MacroAssembler* masm,
+                           int argc,
+                           IC::UtilityId id,
+                           Code::ExtraICState extra_state);
+
+  static void GenerateNormal(MacroAssembler* masm, int argc);
+
+  static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                            int argc,
+                                            Code::Kind kind,
+                                            Code::ExtraICState extra_state);
+
+  Code::Kind kind_;
+
   friend class IC;
 };
 
@@ -252,16 +271,24 @@
   // Code generator routines.
   static void GenerateInitialize(MacroAssembler* masm,
                                  int argc,
-                                 Code::ExtraICState extra_ic_state) {
-    GenerateMiss(masm, argc, extra_ic_state);
+                                 Code::ExtraICState extra_state) {
+    GenerateMiss(masm, argc, extra_state);
   }
+
   static void GenerateMiss(MacroAssembler* masm,
                            int argc,
-                           Code::ExtraICState extra_ic_state);
+                           Code::ExtraICState extra_state) {
+    CallICBase::GenerateMiss(masm, argc, IC::kCallIC_Miss, extra_state);
+  }
+
   static void GenerateMegamorphic(MacroAssembler* masm,
                                   int argc,
                                   Code::ExtraICState extra_ic_state);
-  static void GenerateNormal(MacroAssembler* masm, int argc);
+
+  static void GenerateNormal(MacroAssembler* masm, int argc) {
+    CallICBase::GenerateNormal(masm, argc);
+    GenerateMiss(masm, argc, Code::kNoExtraICState);
+  }
 };
 
 
@@ -280,7 +307,12 @@
   static void GenerateInitialize(MacroAssembler* masm, int argc) {
     GenerateMiss(masm, argc);
   }
-  static void GenerateMiss(MacroAssembler* masm, int argc);
+
+  static void GenerateMiss(MacroAssembler* masm, int argc) {
+    CallICBase::GenerateMiss(masm, argc, IC::kKeyedCallIC_Miss,
+                             Code::kNoExtraICState);
+  }
+
   static void GenerateMegamorphic(MacroAssembler* masm, int argc);
   static void GenerateNormal(MacroAssembler* masm, int argc);
   static void GenerateNonStrictArguments(MacroAssembler* masm, int argc);
@@ -321,17 +353,15 @@
                     Handle<String> name);
 
   // Stub accessors.
-  Code* megamorphic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kLoadIC_Megamorphic);
+  Handle<Code> megamorphic_stub() {
+    return isolate()->builtins()->LoadIC_Megamorphic();
   }
   static Code* initialize_stub() {
     return Isolate::Current()->builtins()->builtin(
         Builtins::kLoadIC_Initialize);
   }
-  Code* pre_monomorphic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kLoadIC_PreMonomorphic);
+  Handle<Code> pre_monomorphic_stub() {
+    return isolate()->builtins()->LoadIC_PreMonomorphic();
   }
 
   static void Clear(Address address, Code* target);
@@ -342,41 +372,95 @@
 
 class KeyedIC: public IC {
  public:
+  enum StubKind {
+    LOAD,
+    STORE_NO_TRANSITION,
+    STORE_TRANSITION_SMI_TO_OBJECT,
+    STORE_TRANSITION_SMI_TO_DOUBLE,
+    STORE_TRANSITION_DOUBLE_TO_OBJECT,
+    STORE_AND_GROW_NO_TRANSITION,
+    STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
+    STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
+    STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
+  };
+
+  static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
+      STORE_NO_TRANSITION;
+  STATIC_ASSERT(kGrowICDelta ==
+                STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
+                STORE_TRANSITION_SMI_TO_OBJECT);
+  STATIC_ASSERT(kGrowICDelta ==
+                STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE -
+                STORE_TRANSITION_SMI_TO_DOUBLE);
+  STATIC_ASSERT(kGrowICDelta ==
+                STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT -
+                STORE_TRANSITION_DOUBLE_TO_OBJECT);
+
   explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
   virtual ~KeyedIC() {}
 
-  virtual MaybeObject* GetElementStubWithoutMapCheck(
+  static inline KeyedAccessGrowMode GetGrowModeFromStubKind(
+      StubKind stub_kind) {
+    return (stub_kind >= STORE_AND_GROW_NO_TRANSITION)
+        ? ALLOW_JSARRAY_GROWTH
+        : DO_NOT_ALLOW_JSARRAY_GROWTH;
+  }
+
+  static inline StubKind GetGrowStubKind(StubKind stub_kind) {
+    ASSERT(stub_kind != LOAD);
+    if (stub_kind < STORE_AND_GROW_NO_TRANSITION) {
+      stub_kind = static_cast<StubKind>(static_cast<int>(stub_kind) +
+                                        kGrowICDelta);
+    }
+    return stub_kind;
+  }
+
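The STATIC_ASSERTs above pin down the invariant that makes GetGrowStubKind a
single addition: every STORE_AND_GROW_* enumerator sits exactly kGrowICDelta
past its non-growing counterpart. The same guard in free-standing form
(modern static_assert, stand-in enum):

    enum StubKind {
      LOAD,
      STORE_NO_TRANSITION,
      STORE_TRANSITION_SMI_TO_OBJECT,
      STORE_TRANSITION_SMI_TO_DOUBLE,
      STORE_TRANSITION_DOUBLE_TO_OBJECT,
      STORE_AND_GROW_NO_TRANSITION,
      STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
      STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
      STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
    };

    const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION - STORE_NO_TRANSITION;

    // If anyone reorders the enum, these fire at compile time instead of
    // GetGrowStubKind silently computing the wrong kind.
    static_assert(STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
                  STORE_TRANSITION_SMI_TO_OBJECT == kGrowICDelta, "layout");
    static_assert(STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE -
                  STORE_TRANSITION_SMI_TO_DOUBLE == kGrowICDelta, "layout");

    StubKind GetGrowStubKind(StubKind kind) {  // precondition: kind != LOAD
      if (kind >= STORE_AND_GROW_NO_TRANSITION) return kind;  // already grows
      return static_cast<StubKind>(kind + kGrowICDelta);
    }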
+  virtual Handle<Code> GetElementStubWithoutMapCheck(
       bool is_js_array,
-      ElementsKind elements_kind) = 0;
+      ElementsKind elements_kind,
+      KeyedAccessGrowMode grow_mode) = 0;
 
  protected:
-  virtual Code* string_stub() {
-    return NULL;
+  virtual Handle<Code> string_stub() {
+    return Handle<Code>::null();
   }
 
   virtual Code::Kind kind() const = 0;
 
-  MaybeObject* ComputeStub(JSObject* receiver,
-                           bool is_store,
+  Handle<Code> ComputeStub(Handle<JSObject> receiver,
+                           StubKind stub_kind,
                            StrictModeFlag strict_mode,
-                           Code* default_stub);
+                           Handle<Code> default_stub);
 
-  virtual MaybeObject* ConstructMegamorphicStub(
-      MapList* receiver_maps,
-      CodeList* targets,
-      StrictModeFlag strict_mode) = 0;
+  virtual Handle<Code> ComputePolymorphicStub(
+      MapHandleList* receiver_maps,
+      StrictModeFlag strict_mode,
+      KeyedAccessGrowMode grow_mode) = 0;
+
+  Handle<Code> ComputeMonomorphicStubWithoutMapCheck(
+      Handle<Map> receiver_map,
+      StrictModeFlag strict_mode,
+      KeyedAccessGrowMode grow_mode);
 
  private:
-  void GetReceiverMapsForStub(Code* stub, MapList* result);
+  void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result);
 
-  MaybeObject* ComputeMonomorphicStubWithoutMapCheck(
-      Map* receiver_map,
-      StrictModeFlag strict_mode);
-
-  MaybeObject* ComputeMonomorphicStub(JSObject* receiver,
-                                      bool is_store,
+  Handle<Code> ComputeMonomorphicStub(Handle<JSObject> receiver,
+                                      StubKind stub_kind,
                                       StrictModeFlag strict_mode,
-                                      Code* default_stub);
+                                      Handle<Code> default_stub);
+
+  Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver,
+                                     StubKind stub_kind);
+
+  static bool IsTransitionStubKind(StubKind stub_kind) {
+    return stub_kind > STORE_NO_TRANSITION &&
+        stub_kind != STORE_AND_GROW_NO_TRANSITION;
+  }
+
+  static bool IsGrowStubKind(StubKind stub_kind) {
+    return stub_kind >= STORE_AND_GROW_NO_TRANSITION;
+  }
 };
 
 
@@ -412,21 +496,24 @@
   static const int kSlowCaseBitFieldMask =
       (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
 
-  virtual MaybeObject* GetElementStubWithoutMapCheck(
+  virtual Handle<Code> GetElementStubWithoutMapCheck(
       bool is_js_array,
-      ElementsKind elements_kind);
+      ElementsKind elements_kind,
+      KeyedAccessGrowMode grow_mode);
+
+  virtual bool IsGeneric() const {
+    return target() == *generic_stub();
+  }
 
  protected:
   virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
 
-  virtual MaybeObject* ConstructMegamorphicStub(
-      MapList* receiver_maps,
-      CodeList* targets,
-      StrictModeFlag strict_mode);
+  virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
+                                              StrictModeFlag strict_mode,
+                                              KeyedAccessGrowMode grow_mode);
 
-  virtual Code* string_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_String);
+  virtual Handle<Code> string_stub() {
+    return isolate()->builtins()->KeyedLoadIC_String();
   }
 
  private:
@@ -441,25 +528,20 @@
     return Isolate::Current()->builtins()->builtin(
         Builtins::kKeyedLoadIC_Initialize);
   }
-  Code* megamorphic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_Generic);
+  Handle<Code> megamorphic_stub() {
+    return isolate()->builtins()->KeyedLoadIC_Generic();
   }
-  Code* generic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_Generic);
+  Handle<Code> generic_stub() const {
+    return isolate()->builtins()->KeyedLoadIC_Generic();
   }
-  Code* pre_monomorphic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_PreMonomorphic);
+  Handle<Code> pre_monomorphic_stub() {
+    return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
   }
-  Code* indexed_interceptor_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_IndexedInterceptor);
+  Handle<Code> indexed_interceptor_stub() {
+    return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
   }
-  Code* non_strict_arguments_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_NonStrictArguments);
+  Handle<Code> non_strict_arguments_stub() {
+    return isolate()->builtins()->KeyedLoadIC_NonStrictArguments();
   }
 
   static void Clear(Address address, Code* target);
@@ -502,8 +584,8 @@
 
   void set_target(Code* code) {
     // Strict mode must be preserved across IC patching.
-    ASSERT((code->extra_ic_state() & kStrictMode) ==
-           (target()->extra_ic_state() & kStrictMode));
+    ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
+           Code::GetStrictMode(target()->extra_ic_state()));
     IC::set_target(code);
   }
 
@@ -524,13 +606,11 @@
     return Isolate::Current()->builtins()->builtin(
         Builtins::kStoreIC_Initialize_Strict);
   }
-  Code* global_proxy_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kStoreIC_GlobalProxy);
+  Handle<Code> global_proxy_stub() {
+    return isolate()->builtins()->StoreIC_GlobalProxy();
   }
-  Code* global_proxy_stub_strict() {
-    return isolate()->builtins()->builtin(
-        Builtins::kStoreIC_GlobalProxy_Strict);
+  Handle<Code> global_proxy_stub_strict() {
+    return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
   }
 
   static void Clear(Address address, Code* target);
@@ -562,18 +642,25 @@
                                          StrictModeFlag strict_mode);
   static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
   static void GenerateNonStrictArguments(MacroAssembler* masm);
+  static void GenerateTransitionElementsSmiToDouble(MacroAssembler* masm);
+  static void GenerateTransitionElementsDoubleToObject(MacroAssembler* masm);
 
-  virtual MaybeObject* GetElementStubWithoutMapCheck(
+  virtual Handle<Code> GetElementStubWithoutMapCheck(
       bool is_js_array,
-      ElementsKind elements_kind);
+      ElementsKind elements_kind,
+      KeyedAccessGrowMode grow_mode);
+
+  virtual bool IsGeneric() const {
+    return target() == *generic_stub() ||
+        target() == *generic_stub_strict();
+  }
 
  protected:
   virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
 
-  virtual MaybeObject* ConstructMegamorphicStub(
-      MapList* receiver_maps,
-      CodeList* targets,
-      StrictModeFlag strict_mode);
+  virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
+                                              StrictModeFlag strict_mode,
+                                              KeyedAccessGrowMode grow_mode);
 
   private:
   // Update the inline cache.
@@ -586,8 +673,8 @@
 
   void set_target(Code* code) {
     // Strict mode must be preserved across IC patching.
-    ASSERT((code->extra_ic_state() & kStrictMode) ==
-           (target()->extra_ic_state() & kStrictMode));
+    ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
+           Code::GetStrictMode(target()->extra_ic_state()));
     IC::set_target(code);
   }
 
@@ -596,33 +683,32 @@
     return Isolate::Current()->builtins()->builtin(
         Builtins::kKeyedStoreIC_Initialize);
   }
-  Code* megamorphic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedStoreIC_Generic);
-  }
   static Code* initialize_stub_strict() {
     return Isolate::Current()->builtins()->builtin(
         Builtins::kKeyedStoreIC_Initialize_Strict);
   }
-  Code* megamorphic_stub_strict() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedStoreIC_Generic_Strict);
+  Handle<Code> megamorphic_stub() {
+    return isolate()->builtins()->KeyedStoreIC_Generic();
   }
-  Code* generic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedStoreIC_Generic);
+  Handle<Code> megamorphic_stub_strict() {
+    return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
   }
-  Code* generic_stub_strict() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedStoreIC_Generic_Strict);
+  Handle<Code> generic_stub() const {
+    return isolate()->builtins()->KeyedStoreIC_Generic();
   }
-  Code* non_strict_arguments_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedStoreIC_NonStrictArguments);
+  Handle<Code> generic_stub_strict() const {
+    return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+  }
+  Handle<Code> non_strict_arguments_stub() {
+    return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
   }
 
   static void Clear(Address address, Code* target);
 
+  StubKind GetStubKind(Handle<JSObject> receiver,
+                       Handle<Object> key,
+                       Handle<Object> value);
+
   friend class IC;
 };
 
@@ -689,6 +775,7 @@
     SYMBOLS,
     STRINGS,
     OBJECTS,
+    KNOWN_OBJECTS,
     GENERIC
   };
 
diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
new file mode 100644
index 0000000..3e3d6c4
--- /dev/null
+++ b/src/incremental-marking-inl.h
@@ -0,0 +1,133 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_INCREMENTAL_MARKING_INL_H_
+#define V8_INCREMENTAL_MARKING_INL_H_
+
+#include "incremental-marking.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
+                                         Object** slot,
+                                         Object* value) {
+  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+  if (Marking::IsWhite(value_bit)) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
+    }
+
+    // Object is either grey or white.  It will be scanned if it survives.
+    return false;
+  }
+  return true;
+}
+
+
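BaseRecordWrite above is the heart of the incremental write barrier: storing
a still-white value into an already-black object would hide that value from
the marker, so the black object is demoted to grey and re-queued. A
self-contained sketch of the tri-color invariant (hypothetical Color/Obj
types, std::deque in place of the marking deque):

    #include <deque>

    enum Color { WHITE, GREY, BLACK };
    struct Obj { Color color; };

    std::deque<Obj*> marking_deque;  // grey objects awaiting a scan

    // Returns true when the value is already marked, meaning the caller may
    // still need to record the slot for compaction (see RecordWriteSlow).
    bool BaseRecordWrite(Obj* host, Obj* value) {
      if (value->color == WHITE) {
        if (host->color == BLACK) {
          // A black host now points at a white value: regress the host to
          // grey so the marker revisits it and discovers the new edge.
          host->color = GREY;
          marking_deque.push_front(host);  // UnshiftGrey analogue
        }
        return false;  // host is grey or white; it will be scanned anyway
      }
      return true;
    }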
+void IncrementalMarking::RecordWrite(HeapObject* obj,
+                                     Object** slot,
+                                     Object* value) {
+  if (IsMarking() && value->NonFailureIsHeapObject()) {
+    RecordWriteSlow(obj, slot, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
+                                                Object** slot,
+                                                Code* value) {
+  if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
+}
+
+
+void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
+                                             RelocInfo* rinfo,
+                                             Object* value) {
+  if (IsMarking() && value->NonFailureIsHeapObject()) {
+    RecordWriteIntoCodeSlow(obj, rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWrites(HeapObject* obj) {
+  if (IsMarking()) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
+    }
+  }
+}
+
+
+void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
+                                               MarkBit mark_bit) {
+  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+  ASSERT(obj->Size() >= 2 * kPointerSize);
+  ASSERT(IsMarking());
+  Marking::BlackToGrey(mark_bit);
+  int obj_size = obj->Size();
+  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), -obj_size);
+  bytes_scanned_ -= obj_size;
+  int64_t old_bytes_rescanned = bytes_rescanned_;
+  bytes_rescanned_ = old_bytes_rescanned + obj_size;
+  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
+    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
+      // If we have queued twice the heap size for rescanning then we are
+      // going around in circles, scanning the same objects again and again
+      // as the program mutates the heap faster than we can incrementally
+      // trace it.  In this case we switch to non-incremental marking in
+      // order to finish off this marking phase.
+      if (FLAG_trace_gc) {
+        PrintF("Hurrying incremental marking because of lack of progress\n");
+      }
+      allocation_marking_factor_ = kMaxAllocationMarkingFactor;
+    }
+  }
+
+  marking_deque_.UnshiftGrey(obj);
+}
+
+
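The megabyte-boundary test in BlackToGreyAndUnshift above
((bytes_rescanned_ >> 20) != ...) is a cheap rate limiter: the "have we
re-queued more than twice the promoted heap" comparison runs at most once per
rescanned megabyte. The pattern in isolation (hypothetical names and
constant):

    #include <cstdint>

    const int kMaxMarkingFactor = 1000;  // stand-in for the real constant
    int64_t bytes_rescanned = 0;
    int marking_factor = 1;

    void OnRescan(int64_t obj_size, int64_t promoted_space_size) {
      int64_t old_bytes = bytes_rescanned;
      bytes_rescanned += obj_size;
      // Evaluate the expensive check only when crossing a 1 MB boundary.
      if ((bytes_rescanned >> 20) != (old_bytes >> 20) &&
          bytes_rescanned > 2 * promoted_space_size) {
        // The mutator is outpacing the marker; abandon incrementality.
        marking_factor = kMaxMarkingFactor;
      }
    }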
+void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
+  WhiteToGrey(obj, mark_bit);
+  marking_deque_.PushGrey(obj);
+}
+
+
+void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
+  Marking::WhiteToGrey(mark_bit);
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_INCREMENTAL_MARKING_INL_H_
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
new file mode 100644
index 0000000..8fe89b4
--- /dev/null
+++ b/src/incremental-marking.cc
@@ -0,0 +1,937 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "incremental-marking.h"
+
+#include "code-stubs.h"
+#include "compilation-cache.h"
+#include "v8conversions.h"
+
+namespace v8 {
+namespace internal {
+
+
+IncrementalMarking::IncrementalMarking(Heap* heap)
+    : heap_(heap),
+      state_(STOPPED),
+      marking_deque_memory_(NULL),
+      marking_deque_memory_committed_(false),
+      steps_count_(0),
+      steps_took_(0),
+      longest_step_(0.0),
+      old_generation_space_available_at_start_of_incremental_(0),
+      old_generation_space_used_at_start_of_incremental_(0),
+      steps_count_since_last_gc_(0),
+      steps_took_since_last_gc_(0),
+      should_hurry_(false),
+      allocation_marking_factor_(0),
+      allocated_(0),
+      no_marking_scope_depth_(0) {
+}
+
+
+void IncrementalMarking::TearDown() {
+  delete marking_deque_memory_;
+}
+
+
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
+                                         Object** slot,
+                                         Object* value) {
+  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      // Object is not going to be rescanned, so we need to record the slot.
+      heap_->mark_compact_collector()->RecordSlot(
+          HeapObject::RawField(obj, 0), slot, value);
+    }
+  }
+}
+
+
+void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
+                                             Object* value,
+                                             Isolate* isolate) {
+  ASSERT(obj->IsHeapObject());
+
+  // Fast cases should already be covered by RecordWriteStub.
+  ASSERT(value->IsHeapObject());
+  ASSERT(!value->IsHeapNumber());
+  ASSERT(!value->IsString() ||
+         value->IsConsString() ||
+         value->IsSlicedString());
+  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));
+
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+  ASSERT(!marking->is_compacting_);
+  marking->RecordWrite(obj, NULL, value);
+}
+
+
+void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
+                                                          Object** slot,
+                                                          Isolate* isolate) {
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+  ASSERT(marking->is_compacting_);
+  marking->RecordWrite(obj, slot, *slot);
+}
+
+
+void IncrementalMarking::RecordCodeTargetPatch(Code* host,
+                                               Address pc,
+                                               HeapObject* value) {
+  if (IsMarking()) {
+    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+    RecordWriteIntoCode(host, &rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
+  if (IsMarking()) {
+    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
+        GcSafeFindCodeForInnerPointer(pc);
+    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+    RecordWriteIntoCode(host, &rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
+                                                    Object** slot,
+                                                    Code* value) {
+  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
+    ASSERT(slot != NULL);
+    heap_->mark_compact_collector()->
+        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
+                                                 RelocInfo* rinfo,
+                                                 Object* value) {
+  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+  if (Marking::IsWhite(value_bit)) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
+    }
+    // Object is either grey or white.  It will be scanned if it survives.
+    return;
+  }
+
+  if (is_compacting_) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      // Object is not going to be rescanned.  We need to record the slot.
+      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
+                                                       Code::cast(value));
+    }
+  }
+}
+
+
+class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
+ public:
+  IncrementalMarkingMarkingVisitor(Heap* heap,
+                                   IncrementalMarking* incremental_marking)
+      : heap_(heap),
+        incremental_marking_(incremental_marking) {
+  }
+
+  void VisitEmbeddedPointer(RelocInfo* rinfo) {
+    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    Object* target = rinfo->target_object();
+    if (target->NonFailureIsHeapObject()) {
+      heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+      MarkObject(target);
+    }
+  }
+
+  void VisitCodeTarget(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() &&
+        (target->ic_age() != heap_->global_ic_age())) {
+      IC::Clear(rinfo->pc());
+      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    }
+    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
+    MarkObject(target);
+  }
+
+  void VisitDebugTarget(RelocInfo* rinfo) {
+    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+            rinfo->IsPatchedReturnSequence()) ||
+           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+            rinfo->IsPatchedDebugBreakSlotSequence()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
+    MarkObject(target);
+  }
+
+  void VisitCodeEntry(Address entry_address) {
+    Object* target = Code::GetObjectFromEntryAddress(entry_address);
+    heap_->mark_compact_collector()->
+        RecordCodeEntrySlot(entry_address, Code::cast(target));
+    MarkObject(target);
+  }
+
+  void VisitPointer(Object** p) {
+    Object* obj = *p;
+    if (obj->NonFailureIsHeapObject()) {
+      heap_->mark_compact_collector()->RecordSlot(p, p, obj);
+      MarkObject(obj);
+    }
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) {
+      Object* obj = *p;
+      if (obj->NonFailureIsHeapObject()) {
+        heap_->mark_compact_collector()->RecordSlot(start, p, obj);
+        MarkObject(obj);
+      }
+    }
+  }
+
+ private:
+  // Mark object pointed to by p.
+  INLINE(void MarkObject(Object* obj)) {
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+    if (mark_bit.data_only()) {
+      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
+        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
+                                              heap_object->Size());
+      }
+    } else if (Marking::IsWhite(mark_bit)) {
+      incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+    }
+  }
+
+  Heap* heap_;
+  IncrementalMarking* incremental_marking_;
+};
+
+
+class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
+ public:
+  IncrementalMarkingRootMarkingVisitor(Heap* heap,
+                                       IncrementalMarking* incremental_marking)
+      : heap_(heap),
+        incremental_marking_(incremental_marking) {
+  }
+
+  void VisitPointer(Object** p) {
+    MarkObjectByPointer(p);
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+  }
+
+ private:
+  void MarkObjectByPointer(Object** p) {
+    Object* obj = *p;
+    if (!obj->IsHeapObject()) return;
+
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+    if (mark_bit.data_only()) {
+      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
+          MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
+                                                heap_object->Size());
+      }
+    } else {
+      if (Marking::IsWhite(mark_bit)) {
+        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+      }
+    }
+  }
+
+  Heap* heap_;
+  IncrementalMarking* incremental_marking_;
+};
+
+
+void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
+                                              bool is_marking,
+                                              bool is_compacting) {
+  if (is_marking) {
+    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+
+    // It's difficult to filter out slots recorded for large objects.
+    if (chunk->owner()->identity() == LO_SPACE &&
+        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
+        is_compacting) {
+      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    }
+  } else if (chunk->owner()->identity() == CELL_SPACE ||
+             chunk->scan_on_scavenge()) {
+    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+}
+
+
+void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
+                                              bool is_marking) {
+  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+  if (is_marking) {
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
+}
+
+
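The two flag setters above drive the write barrier per page: while marking,
pointers both into and out of a page are interesting; when idle, old-space
pages keep only the outgoing-pointer flag (feeding the store buffer), and
new-space pages always keep incoming pointers interesting for the scavenger.
A bitmask sketch with hypothetical flag values, ignoring the cell-space and
large-object special cases handled above:

    enum PageFlags : unsigned {
      POINTERS_TO_HERE_ARE_INTERESTING   = 1u << 0,
      POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 1,
      SCAN_ON_SCAVENGE                   = 1u << 2
    };

    // Old-space analogue: full barrier while marking, outgoing-only otherwise.
    unsigned OldSpaceFlags(bool is_marking) {
      return is_marking
          ? POINTERS_TO_HERE_ARE_INTERESTING | POINTERS_FROM_HERE_ARE_INTERESTING
          : POINTERS_FROM_HERE_ARE_INTERESTING;
    }

    // New-space analogue: incoming pointers are always interesting (scavenge);
    // outgoing ones only while marking.
    unsigned NewSpaceFlags(bool is_marking) {
      unsigned flags = POINTERS_TO_HERE_ARE_INTERESTING | SCAN_ON_SCAVENGE;
      if (is_marking) flags |= POINTERS_FROM_HERE_ARE_INTERESTING;
      return flags;
    }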
+void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
+    PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    SetOldSpacePageFlags(p, false, false);
+  }
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
+    NewSpace* space) {
+  NewSpacePageIterator it(space);
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    SetNewSpacePageFlags(p, false);
+  }
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
+  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
+
+  LargePage* lop = heap_->lo_space()->first_page();
+  while (lop->is_valid()) {
+    SetOldSpacePageFlags(lop, false, false);
+    lop = lop->next_page();
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    SetOldSpacePageFlags(p, true, is_compacting_);
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
+  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    SetNewSpacePageFlags(p, true);
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier() {
+  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
+  ActivateIncrementalWriteBarrier(heap_->old_data_space());
+  ActivateIncrementalWriteBarrier(heap_->cell_space());
+  ActivateIncrementalWriteBarrier(heap_->map_space());
+  ActivateIncrementalWriteBarrier(heap_->code_space());
+  ActivateIncrementalWriteBarrier(heap_->new_space());
+
+  LargePage* lop = heap_->lo_space()->first_page();
+  while (lop->is_valid()) {
+    SetOldSpacePageFlags(lop, true, is_compacting_);
+    lop = lop->next_page();
+  }
+}
+
+
+bool IncrementalMarking::WorthActivating() {
+#ifndef DEBUG
+  static const intptr_t kActivationThreshold = 8 * MB;
+#else
+  // TODO(gc) consider setting this to some low level so that some
+  // debug tests run with incremental marking and some without.
+  static const intptr_t kActivationThreshold = 0;
+#endif
+
+  return !FLAG_expose_gc &&
+      FLAG_incremental_marking &&
+      !Serializer::enabled() &&
+      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
+}
+
+
+void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
+  ASSERT(RecordWriteStub::GetMode(stub) ==
+         RecordWriteStub::STORE_BUFFER_ONLY);
+
+  if (!IsMarking()) {
+    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so we
+    // don't need to do anything if incremental marking is not active.
+  } else if (IsCompacting()) {
+    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
+  } else {
+    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
+  }
+}
+
+
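+// Walks the cached code stubs and switches every RecordWrite stub to the
+// given mode, so compiled code starts (or stops) taking the incremental
+// write-barrier path without being recompiled.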
+static void PatchIncrementalMarkingRecordWriteStubs(
+    Heap* heap, RecordWriteStub::Mode mode) {
+  UnseededNumberDictionary* stubs = heap->code_stubs();
+
+  int capacity = stubs->Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* k = stubs->KeyAt(i);
+    if (stubs->IsKey(k)) {
+      uint32_t key = NumberToUint32(k);
+
+      if (CodeStub::MajorKeyFromKey(key) ==
+          CodeStub::RecordWrite) {
+        Object* e = stubs->ValueAt(i);
+        if (e->IsCode()) {
+          RecordWriteStub::Patch(Code::cast(e), mode);
+        }
+      }
+    }
+  }
+}
+
+
+void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
+  if (marking_deque_memory_ == NULL) {
+    marking_deque_memory_ = new VirtualMemory(4 * MB);
+  }
+  if (!marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Commit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size(),
+        false);  // Not executable.
+    CHECK(success);
+    marking_deque_memory_committed_ = true;
+  }
+}
+
+
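+// Note: the deque memory is a 4 MB virtual reservation, committed lazily when
+// marking starts and uncommitted again once marking has stopped, so an idle
+// heap does not pay for the marking stack.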
+void IncrementalMarking::UncommitMarkingDeque() {
+  if (state_ == STOPPED && marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Uncommit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size());
+    CHECK(success);
+    marking_deque_memory_committed_ = false;
+  }
+}
+
+
+void IncrementalMarking::Start() {
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Start\n");
+  }
+  ASSERT(FLAG_incremental_marking);
+  ASSERT(state_ == STOPPED);
+
+  ResetStepCounters();
+
+  if (heap_->old_pointer_space()->IsSweepingComplete() &&
+      heap_->old_data_space()->IsSweepingComplete()) {
+    StartMarking(ALLOW_COMPACTION);
+  } else {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Start sweeping.\n");
+    }
+    state_ = SWEEPING;
+  }
+
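+  // Note: lowering the inline allocation limit makes new-space allocation
+  // take the slow path roughly every kAllocatedThreshold bytes, which is
+  // what gives Step() a chance to run.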
+  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
+}
+
+
+static void MarkObjectGreyDoNotEnqueue(Object* obj) {
+  if (obj->IsHeapObject()) {
+    HeapObject* heap_obj = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
+    if (Marking::IsBlack(mark_bit)) {
+      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
+                                            -heap_obj->Size());
+    }
+    Marking::AnyToGrey(mark_bit);
+  }
+}
+
+
+void IncrementalMarking::StartMarking(CompactionFlag flag) {
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Start marking\n");
+  }
+
+  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
+      heap_->mark_compact_collector()->StartCompaction(
+          MarkCompactCollector::INCREMENTAL_COMPACTION);
+
+  state_ = MARKING;
+
+  RecordWriteStub::Mode mode = is_compacting_ ?
+      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
+
+  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
+
+  EnsureMarkingDequeIsCommitted();
+
+  // Initialize the marking deque.
+  Address addr = static_cast<Address>(marking_deque_memory_->address());
+  size_t size = marking_deque_memory_->size();
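+  // For testing: an artificially tiny deque exercises the overflow handling
+  // paths on essentially every marking step.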
+  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+  marking_deque_.Initialize(addr, addr + size);
+
+  ActivateIncrementalWriteBarrier();
+
+#ifdef DEBUG
+  // Marking bits are cleared by the sweeper.
+  if (FLAG_verify_heap) {
+    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
+  }
+#endif
+
+  heap_->CompletelyClearInstanceofCache();
+  heap_->isolate()->compilation_cache()->MarkCompactPrologue();
+
+  if (FLAG_cleanup_code_caches_at_gc) {
+    // We will mark the cache black with a separate pass
+    // when we finish marking.
+    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
+  }
+
+  // Mark strong roots grey.
+  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
+  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+
+  // Ready to start incremental marking.
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Running\n");
+  }
+}
+
+
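+// The scavenger does not keep new-space mark bits consistent, so the bits of
+// all from-space pages are cleared wholesale before it runs.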
+void IncrementalMarking::PrepareForScavenge() {
+  if (!IsMarking()) return;
+  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
+                          heap_->new_space()->FromSpaceEnd());
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
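+// After a scavenge the deque may hold stale new-space pointers: survivors
+// have moved (their map word is a forwarding address) and non-survivors must
+// be dropped. Compact the deque in place, rewriting live entries to their
+// new locations.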
+void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
+  if (!IsMarking()) return;
+
+  int current = marking_deque_.bottom();
+  int mask = marking_deque_.mask();
+  int limit = marking_deque_.top();
+  HeapObject** array = marking_deque_.array();
+  int new_top = current;
+
+  Map* filler_map = heap_->one_pointer_filler_map();
+
+  while (current != limit) {
+    HeapObject* obj = array[current];
+    ASSERT(obj->IsHeapObject());
+    current = ((current + 1) & mask);
+    if (heap_->InNewSpace(obj)) {
+      MapWord map_word = obj->map_word();
+      if (map_word.IsForwardingAddress()) {
+        HeapObject* dest = map_word.ToForwardingAddress();
+        array[new_top] = dest;
+        new_top = ((new_top + 1) & mask);
+        ASSERT(new_top != marking_deque_.bottom());
+#ifdef DEBUG
+        MarkBit mark_bit = Marking::MarkBitFrom(obj);
+        ASSERT(Marking::IsGrey(mark_bit) ||
+               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
+#endif
+      }
+    } else if (obj->map() != filler_map) {
+      // Skip one-word filler objects that appear on the
+      // stack when we perform an in-place array shift.
+      array[new_top] = obj;
+      new_top = ((new_top + 1) & mask);
+      ASSERT(new_top != marking_deque_.bottom());
+#ifdef DEBUG
+      MarkBit mark_bit = Marking::MarkBitFrom(obj);
+      ASSERT(Marking::IsGrey(mark_bit) ||
+             (obj->IsFiller() && Marking::IsWhite(mark_bit)));
+#endif
+    }
+  }
+  marking_deque_.set_top(new_top);
+
+  steps_took_since_last_gc_ = 0;
+  steps_count_since_last_gc_ = 0;
+  longest_step_ = 0.0;
+}
+
+
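+// Global contexts have weak tail slots. The strong body is visited as usual;
+// the weak slots are merely recorded with the compactor so they can be
+// updated if their targets move, without being marked live here.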
+void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
+  v->VisitPointers(
+      HeapObject::RawField(
+          ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
+      HeapObject::RawField(
+          ctx, Context::MarkCompactBodyDescriptor::kEndOffset));
+
+  MarkCompactCollector* collector = heap_->mark_compact_collector();
+  for (int idx = Context::FIRST_WEAK_SLOT;
+       idx < Context::GLOBAL_CONTEXT_SLOTS;
+       ++idx) {
+    Object** slot =
+        HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
+    collector->RecordSlot(slot, slot, *slot);
+  }
+}
+
+
+void IncrementalMarking::Hurry() {
+  if (state() == MARKING) {
+    double start = 0.0;
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Hurry\n");
+      start = OS::TimeCurrentMillis();
+    }
+    // TODO(gc) Hurry could mark the objects it encounters black right away,
+    // since the mutator is stopped.
+    Map* filler_map = heap_->one_pointer_filler_map();
+    Map* global_context_map = heap_->global_context_map();
+    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
+    while (!marking_deque_.IsEmpty()) {
+      HeapObject* obj = marking_deque_.Pop();
+
+      // Explicitly skip one-word fillers. Incremental markbit patterns are
+      // correct only for objects that occupy at least two words.
+      Map* map = obj->map();
+      if (map == filler_map) {
+        continue;
+      } else if (map == global_context_map) {
+        // Global contexts have weak fields.
+        VisitGlobalContext(Context::cast(obj), &marking_visitor);
+      } else {
+        obj->Iterate(&marking_visitor);
+      }
+
+      MarkBit mark_bit = Marking::MarkBitFrom(obj);
+      ASSERT(!Marking::IsBlack(mark_bit));
+      Marking::MarkBlack(mark_bit);
+      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+    }
+    state_ = COMPLETE;
+    if (FLAG_trace_incremental_marking) {
+      double end = OS::TimeCurrentMillis();
+      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+             static_cast<int>(end - start));
+    }
+  }
+
+  if (FLAG_cleanup_code_caches_at_gc) {
+    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
+    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
+    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
+                                          PolymorphicCodeCache::kSize);
+  }
+
+  Object* context = heap_->global_contexts_list();
+  while (!context->IsUndefined()) {
+    // GC can happen when the context is not fully initialized,
+    // so the cache can be undefined.
+    HeapObject* cache = HeapObject::cast(
+        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
+    if (!cache->IsUndefined()) {
+      MarkBit mark_bit = Marking::MarkBitFrom(cache);
+      if (Marking::IsGrey(mark_bit)) {
+        Marking::GreyToBlack(mark_bit);
+        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
+      }
+    }
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
+}
+
+
+void IncrementalMarking::Abort() {
+  if (IsStopped()) return;
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Aborting.\n");
+  }
+  heap_->new_space()->LowerInlineAllocationLimit(0);
+  IncrementalMarking::set_should_hurry(false);
+  ResetStepCounters();
+  if (IsMarking()) {
+    PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                            RecordWriteStub::STORE_BUFFER_ONLY);
+    DeactivateIncrementalWriteBarrier();
+
+    if (is_compacting_) {
+      LargeObjectIterator it(heap_->lo_space());
+      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+        Page* p = Page::FromAddress(obj->address());
+        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+        }
+      }
+    }
+  }
+  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
+  state_ = STOPPED;
+  is_compacting_ = false;
+}
+
+
+void IncrementalMarking::Finalize() {
+  Hurry();
+  state_ = STOPPED;
+  is_compacting_ = false;
+  heap_->new_space()->LowerInlineAllocationLimit(0);
+  IncrementalMarking::set_should_hurry(false);
+  ResetStepCounters();
+  PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                          RecordWriteStub::STORE_BUFFER_ONLY);
+  DeactivateIncrementalWriteBarrier();
+  ASSERT(marking_deque_.IsEmpty());
+  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
+}
+
+
+void IncrementalMarking::MarkingComplete() {
+  state_ = COMPLETE;
+  // We will set the stack guard to request a GC now.  This will mean the rest
+  // of the GC gets performed as soon as possible (we can't do a GC here in a
+  // record-write context).  If a few things get allocated between now and
+  // then, that shouldn't make us do a scavenge and keep being incremental, so
+  // we set the should-hurry flag to indicate that there can't be much work
+  // left to do.
+  set_should_hurry(true);
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Complete (normal).\n");
+  }
+  if (!heap_->idle_notification_will_schedule_next_gc()) {
+    heap_->isolate()->stack_guard()->RequestGC();
+  }
+}
+
+
+void IncrementalMarking::Step(intptr_t allocated_bytes) {
+  if (heap_->gc_state() != Heap::NOT_IN_GC ||
+      !FLAG_incremental_marking ||
+      !FLAG_incremental_marking_steps ||
+      (state_ != SWEEPING && state_ != MARKING)) {
+    return;
+  }
+
+  allocated_ += allocated_bytes;
+
+  if (allocated_ < kAllocatedThreshold) return;
+
+  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
+
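+  // Note: with marking factor f this budgets f bytes of marking work per
+  // byte allocated, so the marker stays (roughly) f times ahead of the
+  // mutator.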
+  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
+  bytes_scanned_ += bytes_to_process;
+
+  double start = 0;
+
+  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+    start = OS::TimeCurrentMillis();
+  }
+
+  if (state_ == SWEEPING) {
+    if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
+      bytes_scanned_ = 0;
+      StartMarking(PREVENT_COMPACTION);
+    }
+  } else if (state_ == MARKING) {
+    Map* filler_map = heap_->one_pointer_filler_map();
+    Map* global_context_map = heap_->global_context_map();
+    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
+    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
+      HeapObject* obj = marking_deque_.Pop();
+
+      // Explicitly skip one-word fillers. Incremental markbit patterns are
+      // correct only for objects that occupy at least two words.
+      Map* map = obj->map();
+      if (map == filler_map) continue;
+
+      if (obj->IsMap()) {
+        Map* map = Map::cast(obj);
+        heap_->ClearCacheOnMap(map);
+      }
+
+      int size = obj->SizeFromMap(map);
+      bytes_to_process -= size;
+      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+      if (Marking::IsWhite(map_mark_bit)) {
+        WhiteToGreyAndPush(map, map_mark_bit);
+      }
+
+      // TODO(gc) switch to static visitor instead of normal visitor.
+      if (map == global_context_map) {
+        // Global contexts have weak fields.
+        Context* ctx = Context::cast(obj);
+
+        // We will mark cache black with a separate pass
+        // when we finish marking.
+        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
+
+        VisitGlobalContext(ctx, &marking_visitor);
+      } else {
+        obj->IterateBody(map->instance_type(), size, &marking_visitor);
+      }
+
+      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
+      SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
+                  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
+      Marking::MarkBlack(obj_mark_bit);
+      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
+    }
+    if (marking_deque_.IsEmpty()) MarkingComplete();
+  }
+
+  allocated_ = 0;
+
+  steps_count_++;
+  steps_count_since_last_gc_++;
+
+  bool speed_up = false;
+
+  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
+    if (FLAG_trace_gc) {
+      PrintF("Speed up marking after %d steps\n",
+             static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
+    }
+    speed_up = true;
+  }
+
+  bool space_left_is_very_small =
+      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
+
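+  // Illustratively, the following condition fires with factor 3 once less
+  // than a quarter of the space available at the start of marking remains.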
+  bool only_1_nth_of_space_that_was_available_still_left =
+      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
+          old_generation_space_available_at_start_of_incremental_);
+
+  if (space_left_is_very_small ||
+      only_1_nth_of_space_that_was_available_still_left) {
+    if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
+    speed_up = true;
+  }
+
+  bool size_of_old_space_multiplied_by_n_during_marking =
+      (heap_->PromotedTotalSize() >
+       (allocation_marking_factor_ + 1) *
+           old_generation_space_used_at_start_of_incremental_);
+  if (size_of_old_space_multiplied_by_n_during_marking) {
+    speed_up = true;
+    if (FLAG_trace_gc) {
+      PrintF("Speed up marking because of heap size increase\n");
+    }
+  }
+
+  int64_t promoted_during_marking = heap_->PromotedTotalSize()
+      - old_generation_space_used_at_start_of_incremental_;
+  intptr_t delay = allocation_marking_factor_ * MB;
+  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
+
+  // We try to scan at least twice the speed that we are allocating.
+  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
+    if (FLAG_trace_gc) {
+      PrintF("Speed up marking because marker was not keeping up\n");
+    }
+    speed_up = true;
+  }
+
+  if (speed_up) {
+    if (state_ != MARKING) {
+      if (FLAG_trace_gc) {
+        PrintF("Postponing speeding up marking until marking starts\n");
+      }
+    } else {
+      allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
+      allocation_marking_factor_ = static_cast<int>(
+          Min(kMaxAllocationMarkingFactor,
+              static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
+      if (FLAG_trace_gc) {
+        PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
+      }
+    }
+  }
+
+  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+    double end = OS::TimeCurrentMillis();
+    double delta = (end - start);
+    longest_step_ = Max(longest_step_, delta);
+    steps_took_ += delta;
+    steps_took_since_last_gc_ += delta;
+  }
+}
+
+
+void IncrementalMarking::ResetStepCounters() {
+  steps_count_ = 0;
+  steps_took_ = 0;
+  longest_step_ = 0.0;
+  old_generation_space_available_at_start_of_incremental_ =
+      SpaceLeftInOldSpace();
+  old_generation_space_used_at_start_of_incremental_ =
+      heap_->PromotedTotalSize();
+  steps_count_since_last_gc_ = 0;
+  steps_took_since_last_gc_ = 0;
+  bytes_rescanned_ = 0;
+  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
+  bytes_scanned_ = 0;
+}
+
+
+int64_t IncrementalMarking::SpaceLeftInOldSpace() {
+  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
+}
+
+} }  // namespace v8::internal
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
new file mode 100644
index 0000000..4f8fa6b
--- /dev/null
+++ b/src/incremental-marking.h
@@ -0,0 +1,278 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_INCREMENTAL_MARKING_H_
+#define V8_INCREMENTAL_MARKING_H_
+
+
+#include "execution.h"
+#include "mark-compact.h"
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+class IncrementalMarking {
+ public:
+  enum State {
+    STOPPED,
+    SWEEPING,
+    MARKING,
+    COMPLETE
+  };
+
+  explicit IncrementalMarking(Heap* heap);
+
+  void TearDown();
+
+  State state() {
+    ASSERT(state_ == STOPPED || FLAG_incremental_marking);
+    return state_;
+  }
+
+  bool should_hurry() { return should_hurry_; }
+  void set_should_hurry(bool val) { should_hurry_ = val; }
+
+  inline bool IsStopped() { return state() == STOPPED; }
+
+  INLINE(bool IsMarking()) { return state() >= MARKING; }
+
+  inline bool IsMarkingIncomplete() { return state() == MARKING; }
+
+  inline bool IsComplete() { return state() == COMPLETE; }
+
+  bool WorthActivating();
+
+  void Start();
+
+  void Stop();
+
+  void PrepareForScavenge();
+
+  void UpdateMarkingDequeAfterScavenge();
+
+  void Hurry();
+
+  void Finalize();
+
+  void Abort();
+
+  void MarkingComplete();
+
+  // It's hard to know how much work the incremental marker should do to make
+  // progress in the face of the mutator creating new work for it.  We start
+  // off at a moderate rate of work and gradually increase the speed of the
+  // incremental marker until it completes.
+  // Do some marking every time this much memory has been allocated.
+  static const intptr_t kAllocatedThreshold = 65536;
+  // Start off by marking this many times more memory than has been allocated.
+  static const intptr_t kInitialAllocationMarkingFactor = 1;
+  // But if we are promoting a lot of data we need to mark faster to keep up
+  // with the data that is entering the old space through promotion.
+  static const intptr_t kFastMarking = 3;
+  // After this many steps we increase the marking/allocating factor.
+  static const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
+  // This is how much we increase the marking/allocating factor by.
+  static const intptr_t kAllocationMarkingFactorSpeedup = 2;
+  static const intptr_t kMaxAllocationMarkingFactor = 1000;
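+  // Illustrative arithmetic: with the initial factor of 1, every 64 KB of
+  // allocation buys one step that scans about 64 KB; each speed-up updates
+  // the factor as min(1000, (f + 2) * 1.3), so the per-step marking budget
+  // grows geometrically until marking completes.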
+
+  void OldSpaceStep(intptr_t allocated) {
+    Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
+  }
+
+  void Step(intptr_t allocated);
+
+  inline void RestartIfNotMarking() {
+    if (state_ == COMPLETE) {
+      state_ = MARKING;
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
+      }
+    }
+  }
+
+  static void RecordWriteFromCode(HeapObject* obj,
+                                  Object* value,
+                                  Isolate* isolate);
+
+  static void RecordWriteForEvacuationFromCode(HeapObject* obj,
+                                               Object** slot,
+                                               Isolate* isolate);
+
+  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
+  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
+  INLINE(void RecordWriteIntoCode(HeapObject* obj,
+                                  RelocInfo* rinfo,
+                                  Object* value));
+  INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
+                                     Object** slot,
+                                     Code* value));
+
+
+  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
+  void RecordWriteIntoCodeSlow(HeapObject* obj,
+                               RelocInfo* rinfo,
+                               Object* value);
+  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
+  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
+  void RecordCodeTargetPatch(Address pc, HeapObject* value);
+
+  inline void RecordWrites(HeapObject* obj);
+
+  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
+
+  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+
+  inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
+
+  // Does white->black or keeps grey or black color. Returns true if converting
+  // white to black.
+  inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
+    ASSERT(!Marking::IsImpossible(mark_bit));
+    if (mark_bit.Get()) {
+      // Grey or black: Keep the color.
+      return false;
+    }
+    mark_bit.Set();
+    ASSERT(Marking::IsBlack(mark_bit));
+    return true;
+  }
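+  // (In this mark-bit encoding white is 00, black is 10 and grey is 11, so
+  // setting just the first bit of a white object turns it black.)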
+
+  inline int steps_count() {
+    return steps_count_;
+  }
+
+  inline double steps_took() {
+    return steps_took_;
+  }
+
+  inline double longest_step() {
+    return longest_step_;
+  }
+
+  inline int steps_count_since_last_gc() {
+    return steps_count_since_last_gc_;
+  }
+
+  inline double steps_took_since_last_gc() {
+    return steps_took_since_last_gc_;
+  }
+
+  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
+    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
+  }
+
+  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
+    SetNewSpacePageFlags(chunk, IsMarking());
+  }
+
+  MarkingDeque* marking_deque() { return &marking_deque_; }
+
+  bool IsCompacting() { return IsMarking() && is_compacting_; }
+
+  void ActivateGeneratedStub(Code* stub);
+
+  void NotifyOfHighPromotionRate() {
+    if (IsMarking()) {
+      if (allocation_marking_factor_ < kFastMarking) {
+        if (FLAG_trace_gc) {
+          PrintF("Increasing marking speed to %d due to high promotion rate\n",
+                 static_cast<int>(kFastMarking));
+        }
+        allocation_marking_factor_ = kFastMarking;
+      }
+    }
+  }
+
+  void EnterNoMarkingScope() {
+    no_marking_scope_depth_++;
+  }
+
+  void LeaveNoMarkingScope() {
+    no_marking_scope_depth_--;
+  }
+
+  void UncommitMarkingDeque();
+
+ private:
+  int64_t SpaceLeftInOldSpace();
+
+  void ResetStepCounters();
+
+  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
+
+  void StartMarking(CompactionFlag flag);
+
+  void ActivateIncrementalWriteBarrier(PagedSpace* space);
+  static void ActivateIncrementalWriteBarrier(NewSpace* space);
+  void ActivateIncrementalWriteBarrier();
+
+  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
+  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
+  void DeactivateIncrementalWriteBarrier();
+
+  static void SetOldSpacePageFlags(MemoryChunk* chunk,
+                                   bool is_marking,
+                                   bool is_compacting);
+
+  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
+
+  void EnsureMarkingDequeIsCommitted();
+
+  void VisitGlobalContext(Context* ctx, ObjectVisitor* v);
+
+  Heap* heap_;
+
+  State state_;
+  bool is_compacting_;
+
+  VirtualMemory* marking_deque_memory_;
+  bool marking_deque_memory_committed_;
+  MarkingDeque marking_deque_;
+
+  int steps_count_;
+  double steps_took_;
+  double longest_step_;
+  int64_t old_generation_space_available_at_start_of_incremental_;
+  int64_t old_generation_space_used_at_start_of_incremental_;
+  int steps_count_since_last_gc_;
+  double steps_took_since_last_gc_;
+  int64_t bytes_rescanned_;
+  bool should_hurry_;
+  int allocation_marking_factor_;
+  intptr_t bytes_scanned_;
+  intptr_t allocated_;
+
+  int no_marking_scope_depth_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_INCREMENTAL_MARKING_H_
diff --git a/src/inspector.cc b/src/inspector.cc
index 8fb80f1..833d338 100644
--- a/src/inspector.cc
+++ b/src/inspector.cc
@@ -38,11 +38,11 @@
 //============================================================================
 // The Inspector.
 
-void Inspector::DumpObjectType(FILE* out, Object *obj, bool print_more) {
+void Inspector::DumpObjectType(FILE* out, Object* obj, bool print_more) {
   // Dump the object pointer.
   OS::FPrint(out, "%p:", reinterpret_cast<void*>(obj));
   if (obj->IsHeapObject()) {
-    HeapObject *hobj = HeapObject::cast(obj);
+    HeapObject* hobj = HeapObject::cast(obj);
     OS::FPrint(out, " size %d :", hobj->Size());
   }
 
diff --git a/src/inspector.h b/src/inspector.h
index e328bcd..6962e21 100644
--- a/src/inspector.h
+++ b/src/inspector.h
@@ -41,14 +41,14 @@
 
 class Inspector {
  public:
-  static void DumpObjectType(FILE* out, Object *obj, bool print_more);
-  static void DumpObjectType(FILE* out, Object *obj) {
+  static void DumpObjectType(FILE* out, Object* obj, bool print_more);
+  static void DumpObjectType(FILE* out, Object* obj) {
     DumpObjectType(out, obj, false);
   }
-  static void DumpObjectType(Object *obj, bool print_more) {
+  static void DumpObjectType(Object* obj, bool print_more) {
     DumpObjectType(stdout, obj, print_more);
   }
-  static void DumpObjectType(Object *obj) {
+  static void DumpObjectType(Object* obj) {
     DumpObjectType(stdout, obj, false);
   }
 };
diff --git a/src/interface.cc b/src/interface.cc
new file mode 100644
index 0000000..e344b86
--- /dev/null
+++ b/src/interface.cc
@@ -0,0 +1,226 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "interface.h"
+
+namespace v8 {
+namespace internal {
+
+static bool Match(void* key1, void* key2) {
+  String* name1 = *static_cast<String**>(key1);
+  String* name2 = *static_cast<String**>(key2);
+  ASSERT(name1->IsSymbol());
+  ASSERT(name2->IsSymbol());
+  return name1 == name2;
+}
+
+
+Interface* Interface::Lookup(Handle<String> name) {
+  ASSERT(IsModule());
+  ZoneHashMap* map = Chase()->exports_;
+  if (map == NULL) return NULL;
+  ZoneHashMap::Entry* p = map->Lookup(name.location(), name->Hash(), false);
+  if (p == NULL) return NULL;
+  ASSERT(*static_cast<String**>(p->key) == *name);
+  ASSERT(p->value != NULL);
+  return static_cast<Interface*>(p->value);
+}
+
+
+#ifdef DEBUG
+// Current nesting depth for debug output.
+class Nesting {
+ public:
+  Nesting()  { current_ += 2; }
+  ~Nesting() { current_ -= 2; }
+  static int current() { return current_; }
+ private:
+  static int current_;
+};
+
+int Nesting::current_ = 0;
+#endif
+
+
+void Interface::DoAdd(
+    void* name, uint32_t hash, Interface* interface, bool* ok) {
+  MakeModule(ok);
+  if (!*ok) return;
+
+#ifdef DEBUG
+  if (FLAG_print_interface_details) {
+    PrintF("%*s# Adding...\n", Nesting::current(), "");
+    PrintF("%*sthis = ", Nesting::current(), "");
+    this->Print(Nesting::current());
+    PrintF("%*s%s : ", Nesting::current(), "",
+           (*reinterpret_cast<String**>(name))->ToAsciiArray());
+    interface->Print(Nesting::current());
+  }
+#endif
+
+  ZoneHashMap** map = &Chase()->exports_;
+  if (*map == NULL) *map = new ZoneHashMap(Match, 8);
+
+  ZoneHashMap::Entry* p = (*map)->Lookup(name, hash, !IsFrozen());
+  if (p == NULL) {
+    // The name is not present, but the interface is already frozen, so it
+    // cannot be added; that's an error.
+    *ok = false;
+  } else if (p->value == NULL) {
+    p->value = interface;
+  } else {
+#ifdef DEBUG
+    Nesting nested;
+#endif
+    reinterpret_cast<Interface*>(p->value)->Unify(interface, ok);
+  }
+
+#ifdef DEBUG
+  if (FLAG_print_interface_details) {
+    PrintF("%*sthis' = ", Nesting::current(), "");
+    this->Print(Nesting::current());
+    PrintF("%*s# Added.\n", Nesting::current(), "");
+  }
+#endif
+}
+
+
+void Interface::Unify(Interface* that, bool* ok) {
+  if (this->forward_) return this->Chase()->Unify(that, ok);
+  if (that->forward_) return this->Unify(that->Chase(), ok);
+  ASSERT(this->forward_ == NULL);
+  ASSERT(that->forward_ == NULL);
+
+  *ok = true;
+  if (this == that) return;
+  if (this->IsValue()) return that->MakeValue(ok);
+  if (that->IsValue()) return this->MakeValue(ok);
+
+#ifdef DEBUG
+  if (FLAG_print_interface_details) {
+    PrintF("%*s# Unifying...\n", Nesting::current(), "");
+    PrintF("%*sthis = ", Nesting::current(), "");
+    this->Print(Nesting::current());
+    PrintF("%*sthat = ", Nesting::current(), "");
+    that->Print(Nesting::current());
+  }
+#endif
+
+  // Merge the smaller interface into the larger, for performance.
+  if (this->exports_ != NULL && (that->exports_ == NULL ||
+      this->exports_->occupancy() >= that->exports_->occupancy())) {
+    this->DoUnify(that, ok);
+  } else {
+    that->DoUnify(this, ok);
+  }
+
+#ifdef DEBUG
+  if (FLAG_print_interface_details) {
+    PrintF("%*sthis' = ", Nesting::current(), "");
+    this->Print(Nesting::current());
+    PrintF("%*sthat' = ", Nesting::current(), "");
+    that->Print(Nesting::current());
+    PrintF("%*s# Unified.\n", Nesting::current(), "");
+  }
+#endif
+}
+
+
+void Interface::DoUnify(Interface* that, bool* ok) {
+  ASSERT(this->forward_ == NULL);
+  ASSERT(that->forward_ == NULL);
+  ASSERT(!this->IsValue());
+  ASSERT(!that->IsValue());
+  ASSERT(*ok);
+
+#ifdef DEBUG
+  Nesting nested;
+#endif
+
+  // Try to merge all members from 'that' into 'this'.
+  ZoneHashMap* map = that->exports_;
+  if (map != NULL) {
+    for (ZoneHashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
+      this->DoAdd(p->key, p->hash, static_cast<Interface*>(p->value), ok);
+      if (!*ok) return;
+    }
+  }
+
+  // If the unified interface is larger than 'that' was, then there were
+  // members in 'this' which 'that' didn't have. If 'that' was frozen, that
+  // is an error.
+  int this_size = this->exports_ == NULL ? 0 : this->exports_->occupancy();
+  int that_size = map == NULL ? 0 : map->occupancy();
+  if (that->IsFrozen() && this_size > that_size) {
+    *ok = false;
+    return;
+  }
+
+  // Merge interfaces.
+  this->flags_ |= that->flags_;
+  that->forward_ = this;
+}
+
+
+#ifdef DEBUG
+void Interface::Print(int n) {
+  int n0 = n > 0 ? n : 0;
+
+  if (FLAG_print_interface_details) {
+    PrintF("%p", static_cast<void*>(this));
+    for (Interface* link = this->forward_; link != NULL; link = link->forward_)
+      PrintF("->%p", static_cast<void*>(link));
+    PrintF(" ");
+  }
+
+  if (IsUnknown()) {
+    PrintF("unknown\n");
+  } else if (IsValue()) {
+    PrintF("value\n");
+  } else if (IsModule()) {
+    PrintF("module %s{", IsFrozen() ? "" : "(unresolved) ");
+    ZoneHashMap* map = Chase()->exports_;
+    if (map == NULL || map->occupancy() == 0) {
+      PrintF("}\n");
+    } else if (n < 0 || n0 >= 2 * FLAG_print_interface_depth) {
+      // Avoid infinite recursion on cyclic types.
+      PrintF("...}\n");
+    } else {
+      PrintF("\n");
+      for (ZoneHashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
+        String* name = *static_cast<String**>(p->key);
+        Interface* interface = static_cast<Interface*>(p->value);
+        PrintF("%*s%s : ", n0 + 2, "", name->ToAsciiArray());
+        interface->Print(n0 + 2);
+      }
+      PrintF("%*s}\n", n0, "");
+    }
+  }
+}
+#endif
+
+} }  // namespace v8::internal
diff --git a/src/interface.h b/src/interface.h
new file mode 100644
index 0000000..c2991cb
--- /dev/null
+++ b/src/interface.h
@@ -0,0 +1,156 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_INTERFACE_H_
+#define V8_INTERFACE_H_
+
+#include "zone-inl.h"  // For operator new.
+
+namespace v8 {
+namespace internal {
+
+
+// This class implements the following abstract grammar of interfaces
+// (i.e. module types):
+//   interface ::= UNDETERMINED | VALUE | MODULE(exports)
+//   exports ::= {name : interface, ...}
+// A frozen module type is one that is fully determined. Unification does not
+// allow adding additional exports to frozen interfaces.
+// Otherwise, unifying modules merges their exports.
+// Undetermined types are unification variables that can be unified freely.
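+//
+// For example (illustrative): unifying MODULE({a : VALUE}) with
+// MODULE({b : VALUE}) yields MODULE({a : VALUE, b : VALUE}), while unifying
+// a frozen module with one that carries extra exports fails.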
+
+class Interface : public ZoneObject {
+ public:
+  // ---------------------------------------------------------------------------
+  // Factory methods.
+
+  static Interface* NewValue() {
+    static Interface value_interface(VALUE + FROZEN);  // Cached.
+    return &value_interface;
+  }
+
+  static Interface* NewUnknown() {
+    return new Interface(NONE);
+  }
+
+  static Interface* NewModule() {
+    return new Interface(MODULE);
+  }
+
+  // ---------------------------------------------------------------------------
+  // Mutators.
+
+  // Add a name to the list of exports. If it already exists, unify it with
+  // the given interface; otherwise insert it, unless this interface is closed.
+  void Add(Handle<String> name, Interface* interface, bool* ok) {
+    DoAdd(name.location(), name->Hash(), interface, ok);
+  }
+
+  // Unify with another interface. If successful, both interface objects will
+  // represent the same type, and changes to one are reflected in the other.
+  void Unify(Interface* that, bool* ok);
+
+  // Determine this interface to be a value interface.
+  void MakeValue(bool* ok) {
+    *ok = !IsModule();
+    if (*ok) Chase()->flags_ |= VALUE;
+  }
+
+  // Determine this interface to be a module interface.
+  void MakeModule(bool* ok) {
+    *ok = !IsValue();
+    if (*ok) Chase()->flags_ |= MODULE;
+  }
+
+  // Do not allow any further refinements, directly or through unification.
+  void Freeze(bool* ok) {
+    *ok = IsValue() || IsModule();
+    if (*ok) Chase()->flags_ |= FROZEN;
+  }
+
+  // ---------------------------------------------------------------------------
+  // Accessors.
+
+  // Look up an exported name. Returns NULL if not (yet) defined.
+  Interface* Lookup(Handle<String> name);
+
+  // Check whether this is still a fully undetermined type.
+  bool IsUnknown() { return Chase()->flags_ == NONE; }
+
+  // Check whether this is a value type.
+  bool IsValue() { return Chase()->flags_ & VALUE; }
+
+  // Check whether this is a module type.
+  bool IsModule() { return Chase()->flags_ & MODULE; }
+
+  // Check whether this is closed (i.e. fully determined).
+  bool IsFrozen() { return Chase()->flags_ & FROZEN; }
+
+  // ---------------------------------------------------------------------------
+  // Debugging.
+#ifdef DEBUG
+  void Print(int n = 0);  // n = indentation; n < 0 => don't print recursively
+#endif
+
+  // ---------------------------------------------------------------------------
+  // Implementation.
+ private:
+  enum Flags {    // All flags are monotonic
+    NONE = 0,
+    VALUE = 1,    // This type describes a value
+    MODULE = 2,   // This type describes a module
+    FROZEN = 4    // This type is fully determined
+  };
+
+  int flags_;
+  Interface* forward_;     // Unification link
+  ZoneHashMap* exports_;   // Module exports and their types (allocated lazily)
+
+  explicit Interface(int flags)
+    : flags_(flags),
+      forward_(NULL),
+      exports_(NULL) {
+#ifdef DEBUG
+    if (FLAG_print_interface_details)
+      PrintF("# Creating %p\n", static_cast<void*>(this));
+#endif
+  }
+
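+  // Interfaces form a union-find structure: Unify() links one representative
+  // to the other through forward_, and Chase() follows the chain with
+  // on-the-fly path compression.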
+  Interface* Chase() {
+    Interface* result = this;
+    while (result->forward_ != NULL) result = result->forward_;
+    if (result != this) forward_ = result;  // On-the-fly path compression.
+    return result;
+  }
+
+  void DoAdd(void* name, uint32_t hash, Interface* interface, bool* ok);
+  void DoUnify(Interface* that, bool* ok);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_INTERFACE_H_
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index 796a447..b337e88 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -33,9 +33,9 @@
 #include "utils.h"
 #include "ast.h"
 #include "bytecodes-irregexp.h"
+#include "jsregexp.h"
 #include "interpreter-irregexp.h"
 
-
 namespace v8 {
 namespace internal {
 
@@ -187,12 +187,12 @@
 
 
 template <typename Char>
-static bool RawMatch(Isolate* isolate,
-                     const byte* code_base,
-                     Vector<const Char> subject,
-                     int* registers,
-                     int current,
-                     uint32_t current_char) {
+static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
+                                           const byte* code_base,
+                                           Vector<const Char> subject,
+                                           int* registers,
+                                           int current,
+                                           uint32_t current_char) {
   const byte* pc = code_base;
   // BacktrackStack ensures that the memory allocated for the backtracking stack
   // is returned to the system or cached if there is no stack being cached at
@@ -211,24 +211,24 @@
     switch (insn & BYTECODE_MASK) {
       BYTECODE(BREAK)
         UNREACHABLE();
-        return false;
+        return RegExpImpl::RE_FAILURE;
       BYTECODE(PUSH_CP)
         if (--backtrack_stack_space < 0) {
-          return false;  // No match on backtrack stack overflow.
+          return RegExpImpl::RE_EXCEPTION;
         }
         *backtrack_sp++ = current;
         pc += BC_PUSH_CP_LENGTH;
         break;
       BYTECODE(PUSH_BT)
         if (--backtrack_stack_space < 0) {
-          return false;  // No match on backtrack stack overflow.
+          return RegExpImpl::RE_EXCEPTION;
         }
         *backtrack_sp++ = Load32Aligned(pc + 4);
         pc += BC_PUSH_BT_LENGTH;
         break;
       BYTECODE(PUSH_REGISTER)
         if (--backtrack_stack_space < 0) {
-          return false;  // No match on backtrack stack overflow.
+          return RegExpImpl::RE_EXCEPTION;
         }
         *backtrack_sp++ = registers[insn >> BYTECODE_SHIFT];
         pc += BC_PUSH_REGISTER_LENGTH;
@@ -278,9 +278,9 @@
         pc += BC_POP_REGISTER_LENGTH;
         break;
       BYTECODE(FAIL)
-        return false;
+        return RegExpImpl::RE_FAILURE;
       BYTECODE(SUCCEED)
-        return true;
+        return RegExpImpl::RE_SUCCESS;
       BYTECODE(ADVANCE_CP)
         current += insn >> BYTECODE_SHIFT;
         pc += BC_ADVANCE_CP_LENGTH;
@@ -625,11 +625,12 @@
 }
 
 
-bool IrregexpInterpreter::Match(Isolate* isolate,
-                                Handle<ByteArray> code_array,
-                                Handle<String> subject,
-                                int* registers,
-                                int start_position) {
+RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
+    Isolate* isolate,
+    Handle<ByteArray> code_array,
+    Handle<String> subject,
+    int* registers,
+    int start_position) {
   ASSERT(subject->IsFlat());
 
   AssertNoAllocation a;
diff --git a/src/interpreter-irregexp.h b/src/interpreter-irregexp.h
index 076f0c5..0f45d98 100644
--- a/src/interpreter-irregexp.h
+++ b/src/interpreter-irregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,11 +36,11 @@
 
 class IrregexpInterpreter {
  public:
-  static bool Match(Isolate* isolate,
-                    Handle<ByteArray> code,
-                    Handle<String> subject,
-                    int* captures,
-                    int start_position);
+  static RegExpImpl::IrregexpResult Match(Isolate* isolate,
+                                          Handle<ByteArray> code,
+                                          Handle<String> subject,
+                                          int* captures,
+                                          int start_position);
 };
 
 
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index aa6b537..9fb16fb 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -36,6 +36,29 @@
 namespace internal {
 
 
+SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
+  if (isolate->context() != NULL) {
+    context_ = Handle<Context>(isolate->context());
+#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
+    dummy_ = Handle<Context>(isolate->context());
+#endif
+  }
+  isolate->set_save_context(this);
+
+  c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
+}
+
+
+bool Isolate::IsDebuggerActive() {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  if (!NoBarrier_Load(&debugger_initialized_)) return false;
+  return debugger()->IsDebuggerActive();
+#else
+  return false;
+#endif
+}
+
+
 bool Isolate::DebuggerHasBreakPoints() {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   return debug()->has_break_points();
diff --git a/src/isolate.cc b/src/isolate.cc
index fd0f673..bf9b345 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,6 +41,7 @@
 #include "lithium-allocator.h"
 #include "log.h"
 #include "messages.h"
+#include "platform.h"
 #include "regexp-stack.h"
 #include "runtime-profiler.h"
 #include "scopeinfo.h"
@@ -98,6 +99,15 @@
   failed_access_check_callback_ = NULL;
   save_context_ = NULL;
   catcher_ = NULL;
+  top_lookup_result_ = NULL;
+
+  // These members are re-initialized later after deserialization
+  // is complete.
+  pending_exception_ = NULL;
+  has_pending_message_ = false;
+  pending_message_obj_ = NULL;
+  pending_message_script_ = NULL;
+  scheduled_exception_ = NULL;
 }
 
 
@@ -302,7 +312,6 @@
   storage->LinkTo(&free_list_);
 }
 
-
 Isolate* Isolate::default_isolate_ = NULL;
 Thread::LocalStorageKey Isolate::isolate_key_;
 Thread::LocalStorageKey Isolate::thread_id_key_;
@@ -311,26 +320,6 @@
 Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
 
 
-class IsolateInitializer {
- public:
-  IsolateInitializer() {
-    Isolate::EnsureDefaultIsolate();
-  }
-};
-
-static IsolateInitializer* EnsureDefaultIsolateAllocated() {
-  // TODO(isolates): Use the system threading API to do this once?
-  static IsolateInitializer static_initializer;
-  return &static_initializer;
-}
-
-// This variable only needed to trigger static intialization.
-static IsolateInitializer* static_initializer = EnsureDefaultIsolateAllocated();
-
-
-
-
-
 Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
     ThreadId thread_id) {
   ASSERT(!thread_id.Equals(ThreadId::Invalid()));
@@ -381,12 +370,17 @@
     default_isolate_ = new Isolate();
   }
   // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
-  // becase a non-null thread data may be already set.
+  // because a non-null thread data may be already set.
   if (Thread::GetThreadLocal(isolate_key_) == NULL) {
     Thread::SetThreadLocal(isolate_key_, default_isolate_);
   }
 }
 
+struct StaticInitializer {
+  StaticInitializer() {
+    Isolate::EnsureDefaultIsolate();
+  }
+} static_initializer;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 Debugger* Isolate::GetDefaultIsolateDebugger() {
@@ -472,6 +466,9 @@
   for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
     it.frame()->Iterate(v);
   }
+
+  // Iterate pointers in live lookup results.
+  thread->top_lookup_result_->Iterate(v);
 }
 
 
@@ -530,6 +527,18 @@
 }
 
 
+void Isolate::CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object) {
+  if (capture_stack_trace_for_uncaught_exceptions_) {
+    // Capture stack trace for a detailed exception message.
+    Handle<String> key = factory()->hidden_stack_trace_symbol();
+    Handle<JSArray> stack_trace = CaptureCurrentStackTrace(
+        stack_trace_for_uncaught_exceptions_frame_limit_,
+        stack_trace_for_uncaught_exceptions_options_);
+    JSObject::SetHiddenProperty(error_object, key, stack_trace);
+  }
+}
+
+
 Handle<JSArray> Isolate::CaptureCurrentStackTrace(
     int frame_limit, StackTrace::StackTraceOptions options) {
   // Ensure no negative values.
@@ -558,7 +567,7 @@
     frame->Summarize(&frames);
     for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
       // Create a JSObject to hold the information for the StackFrame.
-      Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
+      Handle<JSObject> stack_frame = factory()->NewJSObject(object_function());
 
       Handle<JSFunction> fun = frames[i].function();
       Handle<Script> script(Script::cast(fun->shared()->script()));
@@ -579,16 +588,24 @@
             // tag.
             column_offset += script->column_offset()->value();
           }
-          SetLocalPropertyNoThrow(stackFrame, column_key,
-                                  Handle<Smi>(Smi::FromInt(column_offset + 1)));
+          CHECK_NOT_EMPTY_HANDLE(
+              this,
+              JSObject::SetLocalPropertyIgnoreAttributes(
+                  stack_frame, column_key,
+                  Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE));
         }
-        SetLocalPropertyNoThrow(stackFrame, line_key,
-                                Handle<Smi>(Smi::FromInt(line_number + 1)));
+        CHECK_NOT_EMPTY_HANDLE(
+            this,
+            JSObject::SetLocalPropertyIgnoreAttributes(
+                stack_frame, line_key,
+                Handle<Smi>(Smi::FromInt(line_number + 1)), NONE));
       }
 
       if (options & StackTrace::kScriptName) {
         Handle<Object> script_name(script->name(), this);
-        SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
+        CHECK_NOT_EMPTY_HANDLE(this,
+                               JSObject::SetLocalPropertyIgnoreAttributes(
+                                   stack_frame, script_key, script_name, NONE));
       }
 
       if (options & StackTrace::kScriptNameOrSourceURL) {
@@ -604,8 +621,10 @@
         if (caught_exception) {
           result = factory()->undefined_value();
         }
-        SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
-                                result);
+        CHECK_NOT_EMPTY_HANDLE(this,
+                               JSObject::SetLocalPropertyIgnoreAttributes(
+                                   stack_frame, script_name_or_source_url_key,
+                                   result, NONE));
       }
 
       if (options & StackTrace::kFunctionName) {
@@ -613,23 +632,30 @@
         if (fun_name->ToBoolean()->IsFalse()) {
           fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
         }
-        SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
+        CHECK_NOT_EMPTY_HANDLE(this,
+                               JSObject::SetLocalPropertyIgnoreAttributes(
+                                   stack_frame, function_key, fun_name, NONE));
       }
 
       if (options & StackTrace::kIsEval) {
         int type = Smi::cast(script->compilation_type())->value();
         Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
             factory()->true_value() : factory()->false_value();
-        SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
+        CHECK_NOT_EMPTY_HANDLE(this,
+                               JSObject::SetLocalPropertyIgnoreAttributes(
+                                   stack_frame, eval_key, is_eval, NONE));
       }
 
       if (options & StackTrace::kIsConstructor) {
         Handle<Object> is_constructor = (frames[i].is_constructor()) ?
             factory()->true_value() : factory()->false_value();
-        SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
+        CHECK_NOT_EMPTY_HANDLE(this,
+                               JSObject::SetLocalPropertyIgnoreAttributes(
+                                   stack_frame, constructor_key,
+                                   is_constructor, NONE));
       }
 
-      FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
+      FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
       frames_seen++;
     }
     it.Advance();
@@ -734,10 +760,12 @@
   HandleScope scope;
   Handle<JSObject> receiver_handle(receiver);
   Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
-  thread_local_top()->failed_access_check_callback_(
-    v8::Utils::ToLocal(receiver_handle),
-    type,
-    v8::Utils::ToLocal(data));
+  { VMState state(this, EXTERNAL);
+    thread_local_top()->failed_access_check_callback_(
+      v8::Utils::ToLocal(receiver_handle),
+      type,
+      v8::Utils::ToLocal(data));
+  }
 }
 
 
@@ -982,7 +1010,7 @@
   // Find the top-most try-catch handler.
   StackHandler* handler =
       StackHandler::FromAddress(Isolate::handler(thread_local_top()));
-  while (handler != NULL && !handler->is_try_catch()) {
+  while (handler != NULL && !handler->is_catch()) {
     handler = handler->next();
   }
 
@@ -1008,22 +1036,39 @@
 }
 
 
-void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
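+// Walks the prototype chain and returns true if some prototype was created
+// by the builtin $Error constructor, i.e. the object is or inherits from an
+// Error instance.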
+bool Isolate::IsErrorObject(Handle<Object> obj) {
+  if (!obj->IsJSObject()) return false;
+
+  String* error_key = *(factory()->LookupAsciiSymbol("$Error"));
+  Object* error_constructor =
+      js_builtins_object()->GetPropertyNoExceptionThrown(error_key);
+
+  for (Object* prototype = *obj; !prototype->IsNull();
+       prototype = prototype->GetPrototype()) {
+    if (!prototype->IsJSObject()) return false;
+    if (JSObject::cast(prototype)->map()->constructor() == error_constructor) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+void Isolate::DoThrow(Object* exception, MessageLocation* location) {
   ASSERT(!has_pending_exception());
 
   HandleScope scope;
-  Object* exception_object = Smi::FromInt(0);
-  bool is_object = exception->ToObject(&exception_object);
-  Handle<Object> exception_handle(exception_object);
+  Handle<Object> exception_handle(exception);
 
   // Determine reporting and whether the exception is caught externally.
   bool catchable_by_javascript = is_catchable_by_javascript(exception);
-  // Only real objects can be caught by JS.
-  ASSERT(!catchable_by_javascript || is_object);
   bool can_be_caught_externally = false;
   bool should_report_exception =
       ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
   bool report_exception = catchable_by_javascript && should_report_exception;
+  bool try_catch_needs_message =
+      can_be_caught_externally && try_catch_handler()->capture_message_;
+  bool bootstrapping = bootstrapper()->IsActive();
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Notify debugger of exception.
@@ -1032,63 +1077,74 @@
   }
 #endif
 
-  // Generate the message.
-  Handle<Object> message_obj;
-  MessageLocation potential_computed_location;
-  bool try_catch_needs_message =
-      can_be_caught_externally &&
-      try_catch_handler()->capture_message_;
+  // Generate the message if required.
   if (report_exception || try_catch_needs_message) {
+    MessageLocation potential_computed_location;
     if (location == NULL) {
-      // If no location was specified we use a computed one instead
+      // If no location was specified we use a computed one instead.
       ComputeLocation(&potential_computed_location);
       location = &potential_computed_location;
     }
-    if (!bootstrapper()->IsActive()) {
-      // It's not safe to try to make message objects or collect stack
-      // traces while the bootstrapper is active since the infrastructure
-      // may not have been properly initialized.
+    // It's not safe to try to make message objects or collect stack traces
+    // while the bootstrapper is active since the infrastructure may not have
+    // been properly initialized.
+    if (!bootstrapping) {
       Handle<String> stack_trace;
       if (FLAG_trace_exception) stack_trace = StackTraceString();
       Handle<JSArray> stack_trace_object;
-      if (report_exception && capture_stack_trace_for_uncaught_exceptions_) {
+      if (capture_stack_trace_for_uncaught_exceptions_) {
+        if (IsErrorObject(exception_handle)) {
+          // We fetch the stack trace that corresponds to this error object.
+          String* key = heap()->hidden_stack_trace_symbol();
+          Object* stack_property =
+              JSObject::cast(*exception_handle)->GetHiddenProperty(key);
+          // Property lookup may have failed.  In this case it's probably not
+          // a valid Error object.
+          if (stack_property->IsJSArray()) {
+            stack_trace_object = Handle<JSArray>(JSArray::cast(stack_property));
+          }
+        }
+        if (stack_trace_object.is_null()) {
+          // Not an error object, we capture at throw site.
           stack_trace_object = CaptureCurrentStackTrace(
               stack_trace_for_uncaught_exceptions_frame_limit_,
               stack_trace_for_uncaught_exceptions_options_);
+        }
       }
-      ASSERT(is_object);  // Can't use the handle unless there's a real object.
-      message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
-          location, HandleVector<Object>(&exception_handle, 1), stack_trace,
+      Handle<Object> message_obj = MessageHandler::MakeMessageObject(
+          "uncaught_exception",
+          location,
+          HandleVector<Object>(&exception_handle, 1),
+          stack_trace,
           stack_trace_object);
+      thread_local_top()->pending_message_obj_ = *message_obj;
+      if (location != NULL) {
+        thread_local_top()->pending_message_script_ = *location->script();
+        thread_local_top()->pending_message_start_pos_ = location->start_pos();
+        thread_local_top()->pending_message_end_pos_ = location->end_pos();
+      }
+    } else if (location != NULL && !location->script().is_null()) {
+      // We are bootstrapping and caught an error where the location is set
+      // and we have a script for the location.
+      // In this case we could have an extension (or an internal error
+      // somewhere) and we print out the line number at which the error
+      // occurred to the console for easier debugging.
+      int line_number = GetScriptLineNumberSafe(location->script(),
+                                                location->start_pos());
+      OS::PrintError("Extension or internal compilation error at line %d.\n",
+                     line_number);
     }
   }
 
   // Save the message for reporting if the exception remains uncaught.
   thread_local_top()->has_pending_message_ = report_exception;
-  if (!message_obj.is_null()) {
-    thread_local_top()->pending_message_obj_ = *message_obj;
-    if (location != NULL) {
-      thread_local_top()->pending_message_script_ = *location->script();
-      thread_local_top()->pending_message_start_pos_ = location->start_pos();
-      thread_local_top()->pending_message_end_pos_ = location->end_pos();
-    }
-  }
 
   // Do not forget to clean catcher_ if currently thrown exception cannot
   // be caught.  If necessary, ReThrow will update the catcher.
   thread_local_top()->catcher_ = can_be_caught_externally ?
       try_catch_handler() : NULL;
 
-  // NOTE: Notifying the debugger or generating the message
-  // may have caused new exceptions. For now, we just ignore
-  // that and set the pending exception to the original one.
-  if (is_object) {
-    set_pending_exception(*exception_handle);
-  } else {
-    // Failures are not on the heap so they neither need nor work with handles.
-    ASSERT(exception_handle->IsFailure());
-    set_pending_exception(exception);
-  }
+  set_pending_exception(*exception_handle);
 }
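The new IsErrorObject helper decides whether a thrown value derives from Error by walking its prototype chain and comparing each link's constructor against the $Error builtin. A standalone sketch of that walk with simplified stand-in types (not V8's object model): 'constructor' plays the role of map()->constructor(), 'prototype' of GetPrototype(), and NULL terminates the chain in place of the null value.

    #include <cstddef>

    struct Proto {
      const void* constructor;
      const Proto* prototype;  // NULL terminates the chain
    };

    static bool DerivesFrom(const Proto* obj, const void* error_constructor) {
      for (const Proto* p = obj; p != NULL; p = p->prototype) {
        if (p->constructor == error_constructor) return true;
      }
      return false;
    }

When the walk succeeds, DoThrow prefers the stack trace already stored on the error object (under the hidden stack trace symbol) over capturing a fresh one at the throw site.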
 
 
@@ -1124,8 +1180,8 @@
   StackHandler* handler =
       StackHandler::FromAddress(Isolate::handler(thread_local_top()));
   while (handler != NULL && handler->address() < external_handler_address) {
-    ASSERT(!handler->is_try_catch());
-    if (handler->is_try_finally()) return false;
+    ASSERT(!handler->is_catch());
+    if (handler->is_finally()) return false;
 
     handler = handler->next();
   }
@@ -1180,7 +1236,7 @@
   ASSERT(has_pending_exception());
   PropagatePendingExceptionToExternalTryCatch();
 
-  // Allways reschedule out of memory exceptions.
+  // Always reschedule out of memory exceptions.
   if (!is_out_of_memory()) {
     bool is_termination_exception =
         pending_exception() == heap_.termination_exception();
@@ -1284,6 +1340,9 @@
   memcpy(to, reinterpret_cast<char*>(thread_local_top()),
          sizeof(ThreadLocalTop));
   InitializeThreadLocal();
+  clear_pending_exception();
+  clear_pending_message();
+  clear_scheduled_exception();
   return to + sizeof(ThreadLocalTop);
 }
 
@@ -1403,14 +1462,18 @@
       in_use_list_(0),
       free_list_(0),
       preallocated_storage_preallocated_(false),
-      pc_to_code_cache_(NULL),
+      inner_pointer_to_code_cache_(NULL),
       write_input_buffer_(NULL),
       global_handles_(NULL),
       context_switcher_(NULL),
       thread_manager_(NULL),
+      fp_stubs_generated_(false),
+      has_installed_extensions_(false),
       string_tracker_(NULL),
       regexp_stack_(NULL),
-      embedder_data_(NULL) {
+      date_cache_(NULL),
+      embedder_data_(NULL),
+      context_exit_happened_(false) {
   TRACE_ISOLATE(constructor);
 
   memset(isolate_addresses_, 0,
@@ -1541,6 +1604,9 @@
   delete unicode_cache_;
   unicode_cache_ = NULL;
 
+  delete date_cache_;
+  date_cache_ = NULL;
+
   delete regexp_stack_;
   regexp_stack_ = NULL;
 
@@ -1575,8 +1641,8 @@
   compilation_cache_ = NULL;
   delete bootstrapper_;
   bootstrapper_ = NULL;
-  delete pc_to_code_cache_;
-  pc_to_code_cache_ = NULL;
+  delete inner_pointer_to_code_cache_;
+  inner_pointer_to_code_cache_ = NULL;
   delete write_input_buffer_;
   write_input_buffer_ = NULL;
 
@@ -1610,9 +1676,6 @@
 void Isolate::InitializeThreadLocal() {
   thread_local_top_.isolate_ = this;
   thread_local_top_.Initialize();
-  clear_pending_exception();
-  clear_pending_message();
-  clear_scheduled_exception();
 }
 
 
@@ -1700,7 +1763,7 @@
   context_slot_cache_ = new ContextSlotCache();
   descriptor_lookup_cache_ = new DescriptorLookupCache();
   unicode_cache_ = new UnicodeCache();
-  pc_to_code_cache_ = new PcToCodeCache(this);
+  inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
   write_input_buffer_ = new StringInputBuffer();
   global_handles_ = new GlobalHandles(this);
   bootstrapper_ = new Bootstrapper();
@@ -1708,12 +1771,13 @@
   stub_cache_ = new StubCache(this);
   regexp_stack_ = new RegExpStack();
   regexp_stack_->isolate_ = this;
+  date_cache_ = new DateCache();
 
   // Enable logging before setting up the heap
-  logger_->Setup();
+  logger_->SetUp();
 
-  CpuProfiler::Setup();
-  HeapProfiler::Setup();
+  CpuProfiler::SetUp();
+  HeapProfiler::SetUp();
 
   // Initialize other runtime facilities
 #if defined(USE_SIMULATOR)
@@ -1730,10 +1794,10 @@
     stack_guard_.InitThread(lock);
   }
 
-  // Setup the object heap.
+  // Set up the object heap.
   const bool create_heap_objects = (des == NULL);
-  ASSERT(!heap_.HasBeenSetup());
-  if (!heap_.Setup(create_heap_objects)) {
+  ASSERT(!heap_.HasBeenSetUp());
+  if (!heap_.SetUp(create_heap_objects)) {
     V8::SetFatalError();
     return false;
   }
@@ -1741,7 +1805,7 @@
   InitializeThreadLocal();
 
   bootstrapper_->Initialize(create_heap_objects);
-  builtins_.Setup(create_heap_objects);
+  builtins_.SetUp(create_heap_objects);
 
   // Only preallocate on the first initialization.
   if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
@@ -1760,15 +1824,19 @@
   }
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  debug_->Setup(create_heap_objects);
+  debug_->SetUp(create_heap_objects);
 #endif
-  stub_cache_->Initialize(create_heap_objects);
 
   // If we are deserializing, read the state into the now-empty heap.
   if (des != NULL) {
     des->Deserialize();
-    stub_cache_->Clear();
   }
+  stub_cache_->Initialize();
+
+  // Finish initialization of ThreadLocal after deserialization is done.
+  clear_pending_exception();
+  clear_pending_message();
+  clear_scheduled_exception();
 
   // Deserializing may put strange things in the root array's copy of the
   // stack guard.
@@ -1776,7 +1844,7 @@
 
   deoptimizer_data_ = new DeoptimizerData;
   runtime_profiler_ = new RuntimeProfiler(this);
-  runtime_profiler_->Setup();
+  runtime_profiler_->SetUp();
 
   // If we are deserializing, log non-function code objects and compiled
   // functions found in the snapshot.
@@ -1787,6 +1855,7 @@
   }
 
   state_ = INITIALIZED;
+  time_millis_at_init_ = OS::TimeCurrentMillis();
   return true;
 }
 
diff --git a/src/isolate.h b/src/isolate.h
index 2582da6..2ff1318 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,8 +36,10 @@
 #include "contexts.h"
 #include "execution.h"
 #include "frames.h"
+#include "date.h"
 #include "global-handles.h"
 #include "handles.h"
+#include "hashmap.h"
 #include "heap.h"
 #include "regexp-stack.h"
 #include "runtime-profiler.h"
@@ -66,7 +68,7 @@
 class HeapProfiler;
 class InlineRuntimeFunctionsTable;
 class NoAllocationStringAllocator;
-class PcToCodeCache;
+class InnerPointerToCodeCache;
 class PreallocatedMemoryThread;
 class RegExpStack;
 class SaveContext;
@@ -106,15 +108,28 @@
 // of handles to the actual constants.
 typedef ZoneList<Handle<Object> > ZoneObjectList;
 
-#define RETURN_IF_SCHEDULED_EXCEPTION(isolate)    \
-  if (isolate->has_scheduled_exception())         \
-      return isolate->PromoteScheduledException()
+#define RETURN_IF_SCHEDULED_EXCEPTION(isolate)            \
+  do {                                                    \
+    Isolate* __isolate__ = (isolate);                     \
+    if (__isolate__->has_scheduled_exception()) {         \
+      return __isolate__->PromoteScheduledException();    \
+    }                                                     \
+  } while (false)
 
 #define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
-  if (call.is_null()) {                                    \
-    ASSERT(isolate->has_pending_exception());              \
-    return value;                                          \
-  }
+  do {                                                     \
+    if ((call).is_null()) {                                \
+      ASSERT((isolate)->has_pending_exception());          \
+      return (value);                                      \
+    }                                                      \
+  } while (false)
+
+#define CHECK_NOT_EMPTY_HANDLE(isolate, call)     \
+  do {                                            \
+    ASSERT(!(isolate)->has_pending_exception());  \
+    CHECK(!(call).is_null());                     \
+    CHECK(!(isolate)->has_pending_exception());   \
+  } while (false)
 
 #define RETURN_IF_EMPTY_HANDLE(isolate, call)                       \
   RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())
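Wrapping these macro bodies in do { ... } while (false) turns each one into exactly one statement, so the caller's trailing semicolon parses naturally and the macro composes safely with if/else; binding the argument to the __isolate__ local also evaluates the isolate expression only once. The hazard with the old bare-if form, in a minimal sketch with hypothetical names:

    void HandleOther();

    #define OLD_RETURN_IF(flag) \
      if (flag) return

    void Demo(bool condition, bool flag) {
      if (condition)
        OLD_RETURN_IF(flag);  // expands to a bare 'if (flag) return;' ...
      else
        HandleOther();        // ... so this 'else' binds to the macro's
    }                         // 'if', not to 'if (condition)'.

With the do/while form the expansion is a single statement and the else binds where it appears to. The added parentheses around (call) and (value) similarly guard against low-precedence argument expressions expanding badly.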
@@ -245,7 +260,7 @@
 #endif
 #endif  // USE_SIMULATOR
 
-  Address js_entry_sp_;  // the stack pointer of the bottom js entry frame
+  Address js_entry_sp_;  // the stack pointer of the bottom JS entry frame
   Address external_callback_;  // the external callback we're currently in
   StateTag current_vm_state_;
 
@@ -255,6 +270,9 @@
   // Call back function to report unsafe JS accesses.
   v8::FailedAccessCheckCallback failed_access_check_callback_;
 
+  // Head of the list of live LookupResults.
+  LookupResult* top_lookup_result_;
+
   // Whether out of memory exceptions should be ignored.
   bool ignore_out_of_memory_;
 
@@ -264,23 +282,6 @@
   Address try_catch_handler_address_;
 };
 
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
-
-#define ISOLATE_PLATFORM_INIT_LIST(V)                                          \
-  /* VirtualFrame::SpilledScope state */                                       \
-  V(bool, is_virtual_frame_in_spilled_scope, false)                            \
-  /* CodeGenerator::EmitNamedStore state */                                    \
-  V(int, inlined_write_barrier_size, -1)
-
-#if !defined(__arm__) && !defined(__mips__)
-class HashMap;
-#endif
-
-#else
-
-#define ISOLATE_PLATFORM_INIT_LIST(V)
-
-#endif
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
@@ -311,15 +312,12 @@
   V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
   V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
   V(int, suffix_table, (kBMMaxShift + 1))                                      \
-  V(uint32_t, random_seed, 2)                                                  \
   V(uint32_t, private_random_seed, 2)                                          \
   ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
 
 typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
 
 #define ISOLATE_INIT_LIST(V)                                                   \
-  /* AssertNoZoneAllocation state. */                                          \
-  V(bool, zone_allow_allocation, true)                                         \
   /* SerializerDeserializer state. */                                          \
   V(int, serialize_partial_snapshot_cache_length, 0)                           \
   /* Assembler state. */                                                       \
@@ -347,14 +345,13 @@
   /* Serializer state. */                                                      \
   V(ExternalReferenceTable*, external_reference_table, NULL)                   \
   /* AstNode state. */                                                         \
-  V(unsigned, ast_node_id, 0)                                                  \
+  V(int, ast_node_id, 0)                                                       \
   V(unsigned, ast_node_count, 0)                                               \
   /* SafeStackFrameIterator activations count. */                              \
   V(int, safe_stack_iterator_counter, 0)                                       \
   V(uint64_t, enabled_cpu_features, 0)                                         \
   V(CpuProfiler*, cpu_profiler, NULL)                                          \
   V(HeapProfiler*, heap_profiler, NULL)                                        \
-  ISOLATE_PLATFORM_INIT_LIST(V)                                                \
   ISOLATE_DEBUGGER_INIT_LIST(V)
 
 class Isolate {
@@ -470,7 +467,7 @@
   bool IsDefaultIsolate() const { return this == default_isolate_; }
 
   // Ensures that process-wide resources and the default isolate have been
-  // allocated. It is only necessary to call this method in rare casses, for
+  // allocated. It is only necessary to call this method in rare cases, for
   // example if you are using V8 from within the body of a static initializer.
   // Safe to call multiple times.
   static void EnsureDefaultIsolate();
@@ -501,6 +498,8 @@
     return thread_id_key_;
   }
 
+  static Thread::LocalStorageKey per_isolate_thread_data_key();
+
   // If a client attempts to create a Locker without specifying an isolate,
   // we assume that the client is using legacy behavior. Set up the current
   // thread to be inside the implicit isolate (or fail a check if we have
@@ -620,7 +619,7 @@
   void* formal_count_address() { return &thread_local_top_.formal_count_; }
 
   // Returns the global object of the current context. It could be
-  // a builtin object, or a js global object.
+  // a builtin object, or a JS global object.
   Handle<GlobalObject> global() {
     return Handle<GlobalObject>(context()->global());
   }
@@ -688,6 +687,8 @@
       int frame_limit,
       StackTrace::StackTraceOptions options);
 
+  void CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object);
+
   // Returns if the top context may access the given global object. If
   // the result is false, the pending exception is guaranteed to be
   // set.
@@ -714,7 +715,7 @@
 
   // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
   Failure* PromoteScheduledException();
-  void DoThrow(MaybeObject* exception, MessageLocation* location);
+  void DoThrow(Object* exception, MessageLocation* location);
   // Checks if exception should be reported and finds out if it's
   // caught externally.
   bool ShouldReportException(bool* can_be_caught_externally,
@@ -841,7 +842,9 @@
     return unicode_cache_;
   }
 
-  PcToCodeCache* pc_to_code_cache() { return pc_to_code_cache_; }
+  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
+    return inner_pointer_to_code_cache_;
+  }
 
   StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
 
@@ -879,12 +882,24 @@
 
   RuntimeState* runtime_state() { return &runtime_state_; }
 
+  void set_fp_stubs_generated(bool value) {
+    fp_stubs_generated_ = value;
+  }
+
+  bool fp_stubs_generated() { return fp_stubs_generated_; }
+
   StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
     return &compiler_safe_string_input_buffer_;
   }
 
   Builtins* builtins() { return &builtins_; }
 
+  void NotifyExtensionInstalled() {
+    has_installed_extensions_ = true;
+  }
+
+  bool has_installed_extensions() { return has_installed_extensions_; }
+
   unibrow::Mapping<unibrow::Ecma262Canonicalize>*
       regexp_macro_assembler_canonicalize() {
     return &regexp_macro_assembler_canonicalize_;
@@ -912,6 +927,7 @@
   }
 #endif
 
+  inline bool IsDebuggerActive();
   inline bool DebuggerHasBreakPoints();
 
 #ifdef DEBUG
@@ -987,9 +1003,41 @@
   void SetData(void* data) { embedder_data_ = data; }
   void* GetData() { return embedder_data_; }
 
+  LookupResult* top_lookup_result() {
+    return thread_local_top_.top_lookup_result_;
+  }
+  void SetTopLookupResult(LookupResult* top) {
+    thread_local_top_.top_lookup_result_ = top;
+  }
+
+  bool context_exit_happened() {
+    return context_exit_happened_;
+  }
+  void set_context_exit_happened(bool context_exit_happened) {
+    context_exit_happened_ = context_exit_happened;
+  }
+
+  double time_millis_since_init() {
+    return OS::TimeCurrentMillis() - time_millis_at_init_;
+  }
+
+  DateCache* date_cache() {
+    return date_cache_;
+  }
+
+  void set_date_cache(DateCache* date_cache) {
+    if (date_cache != date_cache_) {
+      delete date_cache_;
+    }
+    date_cache_ = date_cache;
+  }
+
  private:
   Isolate();
 
+  friend struct GlobalState;
+  friend struct InitializeGlobalState;
+
   // The per-process lock should be acquired before the ThreadDataTable is
   // modified.
   class ThreadDataTable {
@@ -1028,6 +1076,7 @@
     Isolate* previous_isolate;
     EntryStackItem* previous_item;
 
+   private:
     DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
   };
 
@@ -1062,7 +1111,7 @@
   // If one does not yet exist, allocate a new one.
   PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
 
-// PreInits and returns a default isolate. Needed when a new thread tries
+  // PreInits and returns a default isolate. Needed when a new thread tries
   // to create a Locker for the first time (the lock itself is in the isolate).
   static Isolate* GetDefaultIsolateForLocking();
 
@@ -1093,6 +1142,10 @@
 
   void InitializeDebugger();
 
+  // Traverse prototype chain to find out whether the object is derived from
+  // the Error object.
+  bool IsErrorObject(Handle<Object> obj);
+
   int stack_trace_nesting_level_;
   StringStream* incomplete_message_;
   // The preallocated memory thread singleton.
@@ -1130,14 +1183,16 @@
   PreallocatedStorage in_use_list_;
   PreallocatedStorage free_list_;
   bool preallocated_storage_preallocated_;
-  PcToCodeCache* pc_to_code_cache_;
+  InnerPointerToCodeCache* inner_pointer_to_code_cache_;
   StringInputBuffer* write_input_buffer_;
   GlobalHandles* global_handles_;
   ContextSwitcher* context_switcher_;
   ThreadManager* thread_manager_;
   RuntimeState runtime_state_;
+  bool fp_stubs_generated_;
   StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
   Builtins builtins_;
+  bool has_installed_extensions_;
   StringTracker* string_tracker_;
   unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
   unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
@@ -1147,9 +1202,19 @@
   unibrow::Mapping<unibrow::Ecma262Canonicalize>
       regexp_macro_assembler_canonicalize_;
   RegExpStack* regexp_stack_;
+
+  DateCache* date_cache_;
+
   unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
   void* embedder_data_;
 
+  // The garbage collector should be a little more aggressive when it knows
+  // that a context was recently exited.
+  bool context_exit_happened_;
+
+  // Time stamp at initialization.
+  double time_millis_at_init_;
+
 #if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
     defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
   bool simulator_initialized_;
@@ -1210,19 +1275,7 @@
 // versions of GCC. See V8 issue 122 for details.
 class SaveContext BASE_EMBEDDED {
  public:
-  explicit SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
-    if (isolate->context() != NULL) {
-      context_ = Handle<Context>(isolate->context());
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
-      dummy_ = Handle<Context>(isolate->context());
-#endif
-    }
-    isolate->set_save_context(this);
-
-    // If there is no JS frame under the current C frame, use the value 0.
-    JavaScriptFrameIterator it(isolate);
-    js_sp_ = it.done() ? 0 : it.frame()->sp();
-  }
+  inline explicit SaveContext(Isolate* isolate);
 
   ~SaveContext() {
     if (context_.is_null()) {
@@ -1240,8 +1293,8 @@
   SaveContext* prev() { return prev_; }
 
   // Returns true if this save context is below a given JavaScript frame.
-  bool below(JavaScriptFrame* frame) {
-    return (js_sp_ == 0) || (frame->sp() < js_sp_);
+  bool IsBelowFrame(JavaScriptFrame* frame) {
+    return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
   }
 
  private:
@@ -1250,7 +1303,7 @@
   Handle<Context> dummy_;
 #endif
   SaveContext* prev_;
-  Address js_sp_;  // The top JS frame's sp when saving context.
+  Address c_entry_fp_;
 };
 
 
diff --git a/src/json-parser.h b/src/json-parser.h
index 68eab65..d22cd0d 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -130,7 +130,7 @@
   // An object literal is a squiggly-braced and comma separated sequence
   // (possibly empty) of key/value pairs, where the key is a JSON string
   // literal, the value is a JSON value, and the two are separated by a colon.
-  // A JSON array dosn't allow numbers and identifiers as keys, like a
+  // A JSON object doesn't allow numbers and identifiers as keys, unlike a
   // JavaScript object literal.
   Handle<Object> ParseJsonObject();
 
@@ -165,7 +165,7 @@
 
 template <bool seq_ascii>
 Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
-  isolate_ = source->map()->isolate();
+  isolate_ = source->map()->GetHeap()->isolate();
   FlattenString(source);
   source_ = source;
   source_length_ = source_->length();
@@ -177,7 +177,7 @@
 
   // Set initial position right before the string.
   position_ = -1;
-  // Advance to the first character (posibly EOS)
+  // Advance to the first character (possibly EOS)
   AdvanceSkipWhitespace();
   Handle<Object> result = ParseJsonValue();
   if (result.is_null() || c0_ != kEndOfString) {
@@ -303,11 +303,12 @@
 
       uint32_t index;
       if (key->AsArrayIndex(&index)) {
-        SetOwnElement(json_object, index, value, kNonStrictMode);
+        JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
       } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
         SetPrototype(json_object, value);
       } else {
-        SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
+        JSObject::SetLocalPropertyIgnoreAttributes(
+            json_object, key, value, NONE);
       }
     } while (MatchSkipWhiteSpace(','));
     if (c0_ != '}') {
diff --git a/src/json.js b/src/json.js
index deba126..ccef445 100644
--- a/src/json.js
+++ b/src/json.js
@@ -345,4 +345,4 @@
   ));
 }
 
-SetUpJSON()
+SetUpJSON();
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 3ebfbdf..8ccbae4 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -68,9 +68,9 @@
                                                Handle<String> flags,
                                                bool* has_pending_exception) {
   // Call the construct code with 2 arguments.
-  Object** argv[2] = { Handle<Object>::cast(pattern).location(),
-                       Handle<Object>::cast(flags).location() };
-  return Execution::New(constructor, 2, argv, has_pending_exception);
+  Handle<Object> argv[] = { pattern, flags };
+  return Execution::New(constructor, ARRAY_SIZE(argv), argv,
+                        has_pending_exception);
 }
 
 
@@ -175,7 +175,8 @@
     case JSRegExp::IRREGEXP: {
       Handle<Object> result =
           IrregexpExec(regexp, subject, index, last_match_info);
-      ASSERT(!result.is_null() || Isolate::Current()->has_pending_exception());
+      ASSERT(!result.is_null() ||
+             regexp->GetIsolate()->has_pending_exception());
       return result;
     }
     default:
@@ -509,14 +510,16 @@
   }
   Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
 
-  if (IrregexpInterpreter::Match(isolate,
-                                 byte_codes,
-                                 subject,
-                                 register_vector,
-                                 index)) {
-    return RE_SUCCESS;
+  IrregexpResult result = IrregexpInterpreter::Match(isolate,
+                                                     byte_codes,
+                                                     subject,
+                                                     register_vector,
+                                                     index);
+  if (result == RE_EXCEPTION) {
+    ASSERT(!isolate->has_pending_exception());
+    isolate->StackOverflow();
   }
-  return RE_FAILURE;
+  return result;
 #endif  // V8_INTERPRETED_REGEXP
 }
 
@@ -525,6 +528,7 @@
                                         Handle<String> subject,
                                         int previous_index,
                                         Handle<JSArray> last_match_info) {
+  Isolate* isolate = jsregexp->GetIsolate();
   ASSERT_EQ(jsregexp->TypeTag(), JSRegExp::IRREGEXP);
 
   // Prepare space for the return values.
@@ -540,11 +544,11 @@
   int required_registers = RegExpImpl::IrregexpPrepare(jsregexp, subject);
   if (required_registers < 0) {
     // Compiling failed with an exception.
-    ASSERT(Isolate::Current()->has_pending_exception());
+    ASSERT(isolate->has_pending_exception());
     return Handle<Object>::null();
   }
 
-  OffsetsVector registers(required_registers);
+  OffsetsVector registers(required_registers, isolate);
 
   IrregexpResult res = RegExpImpl::IrregexpExecOnce(
       jsregexp, subject, previous_index, Vector<int>(registers.vector(),
@@ -566,11 +570,11 @@
     return last_match_info;
   }
   if (res == RE_EXCEPTION) {
-    ASSERT(Isolate::Current()->has_pending_exception());
+    ASSERT(isolate->has_pending_exception());
     return Handle<Object>::null();
   }
   ASSERT(res == RE_FAILURE);
-  return Isolate::Current()->factory()->null_value();
+  return isolate->factory()->null_value();
 }
 
 
@@ -702,7 +706,7 @@
 //   the virtualized backtrack stack and some register changes.  When a node is
 //   to be emitted it can flush the Trace or update it.  Flushing the Trace
 //   will emit code to bring the actual state into line with the virtual state.
-//   Avoiding flushing the state can postpone some work (eg updates of capture
+//   Avoiding flushing the state can postpone some work (e.g. updates of capture
 //   registers).  Postponing work can save time when executing the regular
 //   expression since it may be found that the work never has to be done as a
 //   failure to match can occur.  In addition it is much faster to jump to a
@@ -1440,7 +1444,7 @@
   if (ascii) {
     char_mask = String::kMaxAsciiCharCode;
   } else {
-    char_mask = String::kMaxUC16CharCode;
+    char_mask = String::kMaxUtf16CodeUnit;
   }
   uc16 exor = c1 ^ c2;
   // Check whether exor has only one bit set.
@@ -1542,7 +1546,7 @@
   if (ascii) {
     max_char = String::kMaxAsciiCharCode;
   } else {
-    max_char = String::kMaxUC16CharCode;
+    max_char = String::kMaxUtf16CodeUnit;
   }
 
   Label success;
@@ -1638,7 +1642,7 @@
         macro_assembler->CheckCharacterLT(from, on_failure);
       }
     }
-    if (to != String::kMaxUC16CharCode) {
+    if (to != String::kMaxUtf16CodeUnit) {
       if (cc->is_negated()) {
         macro_assembler->CheckCharacterLT(to + 1, on_failure);
       } else {
@@ -1831,7 +1835,7 @@
   if (asc) {
     char_mask = String::kMaxAsciiCharCode;
   } else {
-    char_mask = String::kMaxUC16CharCode;
+    char_mask = String::kMaxUtf16CodeUnit;
   }
   mask_ = 0;
   value_ = 0;
@@ -1883,7 +1887,7 @@
     if (compiler->ascii()) {
       char_mask = String::kMaxAsciiCharCode;
     } else {
-      char_mask = String::kMaxUC16CharCode;
+      char_mask = String::kMaxUtf16CodeUnit;
     }
     if ((mask & char_mask) == char_mask) need_mask = false;
     mask &= char_mask;
@@ -1935,7 +1939,7 @@
   if (compiler->ascii()) {
     char_mask = String::kMaxAsciiCharCode;
   } else {
-    char_mask = String::kMaxUC16CharCode;
+    char_mask = String::kMaxUtf16CodeUnit;
   }
   for (int k = 0; k < elms_->length(); k++) {
     TextElement elm = elms_->at(k);
@@ -2634,7 +2638,7 @@
     TextElement elm = elms_->at(i);
     if (elm.type == TextElement::CHAR_CLASS) {
       RegExpCharacterClass* cc = elm.data.u_char_class;
-      // None of the standard character classses is different in the case
+      // None of the standard character classes is different in the case
       // independent case and it slows us down if we don't know that.
       if (cc->is_standard()) continue;
       ZoneList<CharacterRange>* ranges = cc->ranges();
@@ -3595,22 +3599,20 @@
 // -------------------------------------------------------------------
 // Tree to graph conversion
 
-static const int kSpaceRangeCount = 20;
-static const int kSpaceRangeAsciiCount = 4;
-static const uc16 kSpaceRanges[kSpaceRangeCount] = { 0x0009, 0x000D, 0x0020,
-    0x0020, 0x00A0, 0x00A0, 0x1680, 0x1680, 0x180E, 0x180E, 0x2000, 0x200A,
-    0x2028, 0x2029, 0x202F, 0x202F, 0x205F, 0x205F, 0x3000, 0x3000 };
+static const uc16 kSpaceRanges[] = { 0x0009, 0x000D, 0x0020, 0x0020, 0x00A0,
+    0x00A0, 0x1680, 0x1680, 0x180E, 0x180E, 0x2000, 0x200A, 0x2028, 0x2029,
+    0x202F, 0x202F, 0x205F, 0x205F, 0x3000, 0x3000, 0xFEFF, 0xFEFF };
+static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
 
-static const int kWordRangeCount = 8;
-static const uc16 kWordRanges[kWordRangeCount] = { '0', '9', 'A', 'Z', '_',
-    '_', 'a', 'z' };
+static const uc16 kWordRanges[] = { '0', '9', 'A', 'Z', '_', '_', 'a', 'z' };
+static const int kWordRangeCount = ARRAY_SIZE(kWordRanges);
 
-static const int kDigitRangeCount = 2;
-static const uc16 kDigitRanges[kDigitRangeCount] = { '0', '9' };
+static const uc16 kDigitRanges[] = { '0', '9' };
+static const int kDigitRangeCount = ARRAY_SIZE(kDigitRanges);
 
-static const int kLineTerminatorRangeCount = 6;
-static const uc16 kLineTerminatorRanges[kLineTerminatorRangeCount] = { 0x000A,
-    0x000A, 0x000D, 0x000D, 0x2028, 0x2029 };
+static const uc16 kLineTerminatorRanges[] = { 0x000A, 0x000A, 0x000D, 0x000D,
+    0x2028, 0x2029 };
+static const int kLineTerminatorRangeCount = ARRAY_SIZE(kLineTerminatorRanges);
 
 RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
                                RegExpNode* on_success) {
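Deriving the range counts with ARRAY_SIZE instead of hand-maintained constants keeps the counts from drifting when entries are added, as with the new 0xFEFF (byte order mark) pair in kSpaceRanges. A sketch of the underlying idiom (V8's actual macro also adds a divisibility check to catch some misuse on pointers):

    #define ARRAY_SIZE_SKETCH(a) (static_cast<int>(sizeof(a) / sizeof((a)[0])))

    static const unsigned short kExampleRanges[] = { '0', '9', 'A', 'Z' };
    static const int kExampleRangeCount =
        ARRAY_SIZE_SKETCH(kExampleRanges);  // 4, always in sync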
@@ -4077,7 +4079,7 @@
                             int elmc,
                             ZoneList<CharacterRange>* ranges) {
   ASSERT(elmv[0] != 0x0000);
-  ASSERT(elmv[elmc-1] != String::kMaxUC16CharCode);
+  ASSERT(elmv[elmc-1] != String::kMaxUtf16CodeUnit);
   uc16 last = 0x0000;
   for (int i = 0; i < elmc; i += 2) {
     ASSERT(last <= elmv[i] - 1);
@@ -4085,7 +4087,7 @@
     ranges->Add(CharacterRange(last, elmv[i] - 1));
     last = elmv[i + 1] + 1;
   }
-  ranges->Add(CharacterRange(last, String::kMaxUC16CharCode));
+  ranges->Add(CharacterRange(last, String::kMaxUtf16CodeUnit));
 }
 
 
@@ -4631,8 +4633,8 @@
     from = range.to();
     i++;
   }
-  if (from < String::kMaxUC16CharCode) {
-    negated_ranges->Add(CharacterRange(from + 1, String::kMaxUC16CharCode));
+  if (from < String::kMaxUtf16CodeUnit) {
+    negated_ranges->Add(CharacterRange(from + 1, String::kMaxUtf16CodeUnit));
   }
 }
 
@@ -4723,7 +4725,6 @@
 
 
 const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
-const DispatchTable::Entry DispatchTable::Config::kNoValue;
 
 
 void DispatchTable::AddRange(CharacterRange full_range, int value) {
@@ -4796,7 +4797,7 @@
       entry->AddValue(value);
       // Bail out if the last interval ended at 0xFFFF since otherwise
       // adding 1 will wrap around to 0.
-      if (entry->to() == String::kMaxUC16CharCode)
+      if (entry->to() == String::kMaxUtf16CodeUnit)
         break;
       ASSERT(entry->to() + 1 > current.from());
       current.set_from(entry->to() + 1);
@@ -5116,7 +5117,7 @@
         int new_length = length + 1;
         if (length > 0) {
           if (ranges->at(0).from() == 0) new_length--;
-          if (ranges->at(length - 1).to() == String::kMaxUC16CharCode) {
+          if (ranges->at(length - 1).to() == String::kMaxUtf16CodeUnit) {
             new_length--;
           }
         }
@@ -5206,14 +5207,14 @@
     if (last < range.from())
       AddRange(CharacterRange(last, range.from() - 1));
     if (range.to() >= last) {
-      if (range.to() == String::kMaxUC16CharCode) {
+      if (range.to() == String::kMaxUtf16CodeUnit) {
         return;
       } else {
         last = range.to() + 1;
       }
     }
   }
-  AddRange(CharacterRange(last, String::kMaxUC16CharCode));
+  AddRange(CharacterRange(last, String::kMaxUtf16CodeUnit));
 }
 
 
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 54297a4..8875de9 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,14 +29,17 @@
 #define V8_JSREGEXP_H_
 
 #include "allocation.h"
+#include "assembler.h"
 #include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
 
-
+class NodeVisitor;
+class RegExpCompiler;
 class RegExpMacroAssembler;
-
+class RegExpNode;
+class RegExpTree;
 
 class RegExpImpl {
  public:
@@ -388,7 +391,7 @@
     typedef uc16 Key;
     typedef Entry Value;
     static const uc16 kNoKey;
-    static const Entry kNoValue;
+    static const Entry NoValue() { return Value(); }
     static inline int Compare(uc16 a, uc16 b) {
       if (a == b)
         return 0;
@@ -633,7 +636,7 @@
   static const int kNodeIsTooComplexForGreedyLoops = -1;
   virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
   Label* label() { return &label_; }
-  // If non-generic code is generated for a node (ie the node is not at the
+  // If non-generic code is generated for a node (i.e. the node is not at the
   // start of the trace) then it cannot be reused.  This variable sets a limit
   // on how often we allow that to happen before we insist on starting a new
   // trace and generating generic code for a node that can be reused by flushing
@@ -1463,12 +1466,12 @@
 
 class OffsetsVector {
  public:
-  explicit inline OffsetsVector(int num_registers)
+  inline OffsetsVector(int num_registers, Isolate* isolate)
       : offsets_vector_length_(num_registers) {
     if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
       vector_ = NewArray<int>(offsets_vector_length_);
     } else {
-      vector_ = Isolate::Current()->jsregexp_static_offsets_vector();
+      vector_ = isolate->jsregexp_static_offsets_vector();
     }
   }
   inline ~OffsetsVector() {
diff --git a/src/lazy-instance.h b/src/lazy-instance.h
new file mode 100644
index 0000000..d0893e5
--- /dev/null
+++ b/src/lazy-instance.h
@@ -0,0 +1,259 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The LazyInstance<Type, Traits> class manages a single instance of Type,
+// which will be lazily created on the first time it's accessed.  This class is
+// useful for places you would normally use a function-level static, but you
+// need to have guaranteed thread-safety.  The Type constructor will only ever
+// be called once, even if two threads are racing to create the object.  Get()
+// and Pointer() will always return the same, completely initialized instance.
+//
+// LazyInstance is completely thread safe, assuming that you create it safely.
+// The class was designed to be POD initialized, so it shouldn't require a
+// static constructor.  It really only makes sense to declare a LazyInstance as
+// a global variable using the LAZY_INSTANCE_INITIALIZER initializer.
+//
+// LazyInstance is similar to Singleton, except it does not have the singleton
+// property.  You can have multiple LazyInstances of the same type, and each
+// will manage a unique instance.  It also preallocates the space for Type, so
+// as to avoid allocating the Type instance on the heap.  This may help with the
+// performance of creating the instance, and reducing heap fragmentation.  This
+// requires that Type be a complete type so we can determine the size. See
+// notes for advanced users below for more explanations.
+//
+// Example usage:
+//   static LazyInstance<MyClass>::type my_instance = LAZY_INSTANCE_INITIALIZER;
+//   void SomeMethod() {
+//     my_instance.Get().SomeMethod();  // MyClass::SomeMethod()
+//
+//     MyClass* ptr = my_instance.Pointer();
+//     ptr->DoDoDo();  // MyClass::DoDoDo
+//   }
+//
+// Additionally you can override the way your instance is constructed by
+// providing your own trait:
+// Example usage:
+//   struct MyCreateTrait {
+//     static void Construct(MyClass* allocated_ptr) {
+//       new (allocated_ptr) MyClass(/* extra parameters... */);
+//     }
+//   };
+//   static LazyInstance<MyClass, MyCreateTrait>::type my_instance =
+//      LAZY_INSTANCE_INITIALIZER;
+//
+// WARNING: This implementation of LazyInstance is NOT thread-safe by default.
+// See ThreadSafeInitOnceTrait declared below for that.
+//
+// Notes for advanced users:
+// LazyInstance can actually be used in two different ways:
+//
+// - "Static mode" which is the default mode since it is the most efficient
+//   (no extra heap allocation). In this mode, the instance is statically
+//   allocated (stored in the global data section at compile time).
+//   The macro LAZY_STATIC_INSTANCE_INITIALIZER (= LAZY_INSTANCE_INITIALIZER)
+//   must be used to initialize static lazy instances.
+//
+// - "Dynamic mode". In this mode, the instance is dynamically allocated and
+//   constructed (using new) by default. This mode is useful if you have to
+//   deal with some code already allocating the instance for you (e.g.
+//   OS::Mutex() which returns a new private OS-dependent subclass of Mutex).
+//   The macro LAZY_DYNAMIC_INSTANCE_INITIALIZER must be used to initialize
+//   dynamic lazy instances.
+
+#ifndef V8_LAZY_INSTANCE_H_
+#define V8_LAZY_INSTANCE_H_
+
+#include "once.h"
+
+namespace v8 {
+namespace internal {
+
+#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, {} }
+#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
+
+// Default to static mode.
+#define LAZY_INSTANCE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+template <typename T>
+struct LeakyInstanceTrait {
+  static void Destroy(T* /* instance */) {}
+};
+
+
+// Traits that define how an instance is allocated and accessed.
+
+// TODO(kalmard): __alignof__ is only defined for GCC > 4.2. Fix alignment issue
+// on MIPS with other compilers.
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
+#define LAZY_ALIGN(x) __attribute__((aligned(__alignof__(x))))
+#else
+#define LAZY_ALIGN(x)
+#endif
+
+template <typename T>
+struct StaticallyAllocatedInstanceTrait {
+  typedef char StorageType[sizeof(T)] LAZY_ALIGN(T);
+
+  static T* MutableInstance(StorageType* storage) {
+    return reinterpret_cast<T*>(storage);
+  }
+
+  template <typename ConstructTrait>
+  static void InitStorageUsingTrait(StorageType* storage) {
+    ConstructTrait::Construct(MutableInstance(storage));
+  }
+};
+
+#undef LAZY_ALIGN
+
+
+template <typename T>
+struct DynamicallyAllocatedInstanceTrait {
+  typedef T* StorageType;
+
+  static T* MutableInstance(StorageType* storage) {
+    return *storage;
+  }
+
+  template <typename CreateTrait>
+  static void InitStorageUsingTrait(StorageType* storage) {
+    *storage = CreateTrait::Create();
+  }
+};
+
+
+template <typename T>
+struct DefaultConstructTrait {
+  // Constructs the provided object which was already allocated.
+  static void Construct(T* allocated_ptr) {
+    new(allocated_ptr) T();
+  }
+};
+
+
+template <typename T>
+struct DefaultCreateTrait {
+  static T* Create() {
+    return new T();
+  }
+};
+
+
+struct ThreadSafeInitOnceTrait {
+  template <typename Function, typename Storage>
+  static void Init(OnceType* once, Function function, Storage storage) {
+    CallOnce(once, function, storage);
+  }
+};
+
+
+// Initialization trait for users who don't care about thread-safety.
+struct SingleThreadInitOnceTrait {
+  template <typename Function, typename Storage>
+  static void Init(OnceType* once, Function function, Storage storage) {
+    if (*once == ONCE_STATE_UNINITIALIZED) {
+      function(storage);
+      *once = ONCE_STATE_DONE;
+    }
+  }
+};
+
+
+// TODO(pliard): Handle instance destruction (using global destructors).
+template <typename T, typename AllocationTrait, typename CreateTrait,
+          typename InitOnceTrait, typename DestroyTrait  /* not used yet. */>
+struct LazyInstanceImpl {
+ public:
+  typedef typename AllocationTrait::StorageType StorageType;
+
+ private:
+  static void InitInstance(StorageType* storage) {
+    AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage);
+  }
+
+  void Init() const {
+    InitOnceTrait::Init(
+        &once_,
+        // Casts to void* are needed here to avoid breaking strict aliasing
+        // rules.
+        reinterpret_cast<void(*)(void*)>(&InitInstance),  // NOLINT
+        reinterpret_cast<void*>(&storage_));
+  }
+
+ public:
+  T* Pointer() {
+    Init();
+    return AllocationTrait::MutableInstance(&storage_);
+  }
+
+  const T& Get() const {
+    Init();
+    return *AllocationTrait::MutableInstance(&storage_);
+  }
+
+  mutable OnceType once_;
+  // Note that the previous field, OnceType, is an AtomicWord which guarantees
+  // 4-byte alignment of the storage field below. If compiling with GCC (>4.2),
+  // the LAZY_ALIGN macro above will guarantee correctness for any alignment.
+  mutable StorageType storage_;
+};
+
+
+template <typename T,
+          typename CreateTrait = DefaultConstructTrait<T>,
+          typename InitOnceTrait = SingleThreadInitOnceTrait,
+          typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyStaticInstance {
+  typedef LazyInstanceImpl<T, StaticallyAllocatedInstanceTrait<T>,
+      CreateTrait, InitOnceTrait, DestroyTrait> type;
+};
+
+
+template <typename T,
+          typename CreateTrait = DefaultConstructTrait<T>,
+          typename InitOnceTrait = SingleThreadInitOnceTrait,
+          typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyInstance {
+  // A LazyInstance is a LazyStaticInstance.
+  typedef typename LazyStaticInstance<T, CreateTrait, InitOnceTrait,
+      DestroyTrait>::type type;
+};
+
+
+template <typename T,
+          typename CreateTrait = DefaultConstructTrait<T>,
+          typename InitOnceTrait = SingleThreadInitOnceTrait,
+          typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyDynamicInstance {
+  typedef LazyInstanceImpl<T, DynamicallyAllocatedInstanceTrait<T>,
+      CreateTrait, InitOnceTrait, DestroyTrait> type;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_LAZY_INSTANCE_H_
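A usage sketch for the new header, following its own comment conventions; MyCounter is hypothetical and used only for illustration. Because the instance is POD-initialized, no static constructor runs at program startup; MyCounter's constructor runs on first access.

    #include "lazy-instance.h"

    namespace v8 {
    namespace internal {

    class MyCounter {
     public:
      MyCounter() : count_(0) {}
      int Next() { return ++count_; }
     private:
      int count_;
    };

    // Thread-safe variant: CallOnce guarantees a single construction even
    // if two threads race to the first access.
    static LazyInstance<MyCounter,
                        DefaultConstructTrait<MyCounter>,
                        ThreadSafeInitOnceTrait>::type counter_instance =
        LAZY_INSTANCE_INITIALIZER;

    int NextId() { return counter_instance.Pointer()->Next(); }

    } }  // namespace v8::internal

For instances that existing factory code must allocate (the OS::Mutex() case mentioned above), LazyDynamicInstance with a Create()-style trait and LAZY_DYNAMIC_INSTANCE_INITIALIZER plays the same role.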
diff --git a/src/list-inl.h b/src/list-inl.h
index 80bccc9..7c2c83f 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -72,9 +72,9 @@
 template<typename T, class P>
 void List<T, P>::ResizeAddInternal(const T& element) {
   ASSERT(length_ >= capacity_);
-  // Grow the list capacity by 50%, but make sure to let it grow
+  // Grow the list capacity by 100%, but make sure to let it grow
   // even when the capacity is zero (possible initial case).
-  int new_capacity = 1 + capacity_ + (capacity_ >> 1);
+  int new_capacity = 1 + 2 * capacity_;
   // Since the element reference could be an element of the list, copy
   // it out of the old backing storage before resizing.
   T temp = element;
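The arithmetic of the policy change: the old 1 + capacity_ + (capacity_ >> 1) grew by roughly 50%, while 1 + 2 * capacity_ doubles, trading some memory headroom for fewer reallocations. Starting from an empty list the capacities now run 1, 3, 7, 15, 31, ...; a quick check:

    #include <cstdio>

    int main() {
      int capacity = 0;  // empty list
      for (int i = 0; i < 5; ++i) {
        capacity = 1 + 2 * capacity;    // new growth policy
        std::printf("%d ", capacity);   // prints: 1 3 7 15 31
      }
      std::printf("\n");
      return 0;
    }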
@@ -216,11 +216,11 @@
     int mid = (low + high) / 2;
     T mid_elem = list[mid];
 
-    if (mid_elem > elem) {
+    if (cmp(&mid_elem, &elem) > 0) {
       high = mid - 1;
       continue;
     }
-    if (mid_elem < elem) {
+    if (cmp(&mid_elem, &elem) < 0) {
       low = mid + 1;
       continue;
     }
@@ -236,6 +236,7 @@
   return SortedListBSearch<T>(list, elem, PointerValueCompare<T>);
 }
 
+
 } }  // namespace v8::internal
 
 #endif  // V8_LIST_INL_H_
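Switching SortedListBSearch from raw <, > to a caller-supplied comparator lets one routine serve element types without comparison operators; the convenience overload keeps the old element-operator behavior by passing PointerValueCompare. The comparator contract (strcmp-style sign), in a standalone sketch with hypothetical names:

    // cmp returns <0, 0 or >0; list must be sorted consistently with cmp.
    template <typename T>
    int BSearchSketch(const T* list, int length, const T& elem,
                      int (*cmp)(const T*, const T*)) {
      int low = 0;
      int high = length - 1;
      while (low <= high) {
        int mid = (low + high) / 2;
        int c = cmp(&list[mid], &elem);
        if (c > 0) high = mid - 1;
        else if (c < 0) low = mid + 1;
        else return mid;
      }
      return -1;  // not found
    }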
diff --git a/src/list.h b/src/list.h
index 0558709..adddea4 100644
--- a/src/list.h
+++ b/src/list.h
@@ -67,7 +67,7 @@
 
   // Returns a reference to the element at index i.  This reference is
   // not safe to use after operations that can change the list's
-  // backing store (eg, Add).
+  // backing store (e.g. Add).
   inline T& operator[](int i) const {
     ASSERT(0 <= i);
     ASSERT(i < length_);
@@ -165,8 +165,11 @@
 
 class Map;
 class Code;
+template<typename T> class Handle;
 typedef List<Map*> MapList;
 typedef List<Code*> CodeList;
+typedef List<Handle<Map> > MapHandleList;
+typedef List<Handle<Code> > CodeHandleList;
 
 // Perform binary search for an element in an already sorted
 // list. Returns the index of the element or -1 if it was not found.
@@ -176,6 +179,7 @@
 template <typename T>
 int SortedListBSearch(const List<T>& list, T elem);
 
+
 } }  // namespace v8::internal
 
 
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 4661106..4396c73 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -46,29 +46,6 @@
 namespace v8 {
 namespace internal {
 
-
-#define DEFINE_OPERAND_CACHE(name, type)                      \
-  name name::cache[name::kNumCachedOperands];                 \
-  void name::SetupCache() {                                   \
-    for (int i = 0; i < kNumCachedOperands; i++) {            \
-      cache[i].ConvertTo(type, i);                            \
-    }                                                         \
-  }                                                           \
-  static bool name##_initialize() {                           \
-    name::SetupCache();                                       \
-    return true;                                              \
-  }                                                           \
-  static bool name##_cache_initialized = name##_initialize();
-
-DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
-DEFINE_OPERAND_CACHE(LStackSlot,       STACK_SLOT)
-DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
-DEFINE_OPERAND_CACHE(LRegister,        REGISTER)
-DEFINE_OPERAND_CACHE(LDoubleRegister,  DOUBLE_REGISTER)
-
-#undef DEFINE_OPERAND_CACHE
-
-
 static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
   return a.Value() < b.Value() ? a : b;
 }
@@ -110,9 +87,9 @@
 }
 
 
-void UseInterval::SplitAt(LifetimePosition pos) {
+void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
   ASSERT(Contains(pos) && pos.Value() != start().Value());
-  UseInterval* after = new UseInterval(pos, end_);
+  UseInterval* after = new(zone) UseInterval(pos, end_);
   after->next_ = next_;
   next_ = after;
   end_ = pos;
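The new(zone) calls threaded through this file are arena (zone) allocation: a class-level placement operator new bump-allocates from the compilation zone, so these objects are never freed individually; the whole zone is discarded when compilation finishes. A simplified, self-contained sketch of the pattern (the Zone here is hypothetical and fixed-size; V8's real Zone grows in segments):

    #include <cstddef>

    class Zone {
     public:
      Zone() : top_(buffer_) {}
      void* Allocate(size_t size) {
        void* result = top_;
        top_ += (size + 7) & ~static_cast<size_t>(7);  // 8-byte aligned bump
        return result;  // no overflow check in this sketch
      }
     private:
      char buffer_[1 << 16];
      char* top_;
    };

    class ZoneObject {
     public:
      void* operator new(size_t size, Zone* zone) {
        return zone->Allocate(size);
      }
    };

    struct IntervalSketch : public ZoneObject {
      IntervalSketch(int s, int e) : start(s), end(e) {}
      int start;
      int end;
    };

    // Usage: IntervalSketch* i = new(&zone) IntervalSketch(0, 4);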
@@ -149,11 +126,11 @@
 #endif
 
 
-LiveRange::LiveRange(int id)
+LiveRange::LiveRange(int id, Zone* zone)
     : id_(id),
       spilled_(false),
+      is_double_(false),
       assigned_register_(kInvalidAssignment),
-      assigned_register_kind_(NONE),
       last_interval_(NULL),
       first_interval_(NULL),
       first_pos_(NULL),
@@ -161,37 +138,39 @@
       next_(NULL),
       current_interval_(NULL),
       last_processed_use_(NULL),
-      spill_start_index_(kMaxInt) {
-  spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
-}
+      spill_operand_(new(zone) LOperand()),
+      spill_start_index_(kMaxInt) { }
 
 
-void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) {
+void LiveRange::set_assigned_register(int reg,
+                                      RegisterKind register_kind,
+                                      Zone* zone) {
   ASSERT(!HasRegisterAssigned() && !IsSpilled());
   assigned_register_ = reg;
-  assigned_register_kind_ = register_kind;
-  ConvertOperands();
+  is_double_ = (register_kind == DOUBLE_REGISTERS);
+  ConvertOperands(zone);
 }
 
 
-void LiveRange::MakeSpilled() {
+void LiveRange::MakeSpilled(Zone* zone) {
   ASSERT(!IsSpilled());
   ASSERT(TopLevel()->HasAllocatedSpillOperand());
   spilled_ = true;
   assigned_register_ = kInvalidAssignment;
-  ConvertOperands();
+  ConvertOperands(zone);
 }
 
 
 bool LiveRange::HasAllocatedSpillOperand() const {
-  return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
+  ASSERT(spill_operand_ != NULL);
+  return !spill_operand_->IsIgnored();
 }
 
 
 void LiveRange::SetSpillOperand(LOperand* operand) {
   ASSERT(!operand->IsUnallocated());
   ASSERT(spill_operand_ != NULL);
-  ASSERT(spill_operand_->IsUnallocated());
+  ASSERT(spill_operand_->IsIgnored());
   spill_operand_->ConvertTo(operand->kind(), operand->index());
 }
 
@@ -234,7 +213,8 @@
   // at the current or the immediate next position.
   UsePosition* use_pos = NextRegisterPosition(pos);
   if (use_pos == NULL) return true;
-  return use_pos->pos().Value() > pos.NextInstruction().Value();
+  return
+      use_pos->pos().Value() > pos.NextInstruction().InstructionEnd().Value();
 }
 
 
@@ -245,7 +225,7 @@
 }
 
 
-LOperand* LiveRange::CreateAssignedOperand() {
+LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
   LOperand* op = NULL;
   if (HasRegisterAssigned()) {
     ASSERT(!IsSpilled());
@@ -259,7 +239,7 @@
     op = TopLevel()->GetSpillOperand();
     ASSERT(!op->IsUnallocated());
   } else {
-    LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE);
+    LUnallocated* unalloc = new(zone) LUnallocated(LUnallocated::NONE);
     unalloc->set_virtual_register(id_);
     op = unalloc;
   }
@@ -291,7 +271,9 @@
 }
 
 
-void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
+void LiveRange::SplitAt(LifetimePosition position,
+                        LiveRange* result,
+                        Zone* zone) {
   ASSERT(Start().Value() < position.Value());
   ASSERT(result->IsEmpty());
   // Find the last interval that ends before the position. If the
@@ -310,7 +292,7 @@
 
   while (current != NULL) {
     if (current->Contains(position)) {
-      current->SplitAt(position);
+      current->SplitAt(position, zone);
       break;
     }
     UseInterval* next = current->next();
@@ -403,7 +385,9 @@
 }
 
 
-void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end) {
+void LiveRange::EnsureInterval(LifetimePosition start,
+                               LifetimePosition end,
+                               Zone* zone) {
   LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
                          id_,
                          start.Value(),
@@ -417,7 +401,7 @@
     first_interval_ = first_interval_->next();
   }
 
-  UseInterval* new_interval = new UseInterval(start, new_end);
+  UseInterval* new_interval = new(zone) UseInterval(start, new_end);
   new_interval->next_ = first_interval_;
   first_interval_ = new_interval;
   if (new_interval->next() == NULL) {
@@ -426,20 +410,22 @@
 }
 
 
-void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end) {
+void LiveRange::AddUseInterval(LifetimePosition start,
+                               LifetimePosition end,
+                               Zone* zone) {
   LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n",
                          id_,
                          start.Value(),
                          end.Value());
   if (first_interval_ == NULL) {
-    UseInterval* interval = new UseInterval(start, end);
+    UseInterval* interval = new(zone) UseInterval(start, end);
     first_interval_ = interval;
     last_interval_ = interval;
   } else {
     if (end.Value() == first_interval_->start().Value()) {
       first_interval_->set_start(start);
     } else if (end.Value() < first_interval_->start().Value()) {
-      UseInterval* interval = new UseInterval(start, end);
+      UseInterval* interval = new(zone) UseInterval(start, end);
       interval->set_next(first_interval_);
       first_interval_ = interval;
     } else {
@@ -455,11 +441,12 @@
 
 
 UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
-                                       LOperand* operand) {
+                                       LOperand* operand,
+                                       Zone* zone) {
   LAllocator::TraceAlloc("Add to live range %d use position %d\n",
                          id_,
                          pos.Value());
-  UsePosition* use_pos = new UsePosition(pos, operand);
+  UsePosition* use_pos = new(zone) UsePosition(pos, operand);
   UsePosition* prev = NULL;
   UsePosition* current = first_pos_;
   while (current != NULL && current->pos().Value() < pos.Value()) {
@@ -479,8 +466,8 @@
 }
 
 
-void LiveRange::ConvertOperands() {
-  LOperand* op = CreateAssignedOperand();
+void LiveRange::ConvertOperands(Zone* zone) {
+  LOperand* op = CreateAssignedOperand(zone);
   UsePosition* use_pos = first_pos();
   while (use_pos != NULL) {
     ASSERT(Start().Value() <= use_pos->pos().Value() &&
@@ -544,7 +531,8 @@
 
 
 LAllocator::LAllocator(int num_values, HGraph* graph)
-    : chunk_(NULL),
+    : zone_(graph->zone()),
+      chunk_(NULL),
       live_in_sets_(graph->blocks()->length()),
       live_ranges_(num_values * 2),
       fixed_live_ranges_(NULL),
@@ -555,10 +543,11 @@
       reusable_slots_(8),
       next_virtual_register_(num_values),
       first_artificial_register_(num_values),
-      mode_(NONE),
+      mode_(GENERAL_REGISTERS),
       num_registers_(-1),
       graph_(graph),
-      has_osr_entry_(false) {}
+      has_osr_entry_(false),
+      allocation_ok_(true) { }
 
 
 void LAllocator::InitializeLivenessAnalysis() {
@@ -572,7 +561,7 @@
 BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
   // Compute live out for the given block, except not including backward
   // successor edges.
-  BitVector* live_out = new BitVector(next_virtual_register_);
+  BitVector* live_out = new(zone_) BitVector(next_virtual_register_, zone_);
 
   // Process all successor blocks.
   for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
@@ -610,7 +599,7 @@
   while (!iterator.Done()) {
     int operand_index = iterator.Current();
     LiveRange* range = LiveRangeFor(operand_index);
-    range->AddUseInterval(start, end);
+    range->AddUseInterval(start, end, zone_);
     iterator.Advance();
   }
 }
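
The loop above gives every value in the live-out set a conservative interval spanning the whole block, and ComputeLiveOut just before it builds that set as the union of the successors' live-in sets (the real code also folds in phi inputs, elided here). The dataflow step in compilable miniature, with std::set standing in for the zone-allocated BitVector:

    #include <cstddef>
    #include <set>
    #include <vector>

    typedef std::set<int> LiveSet;

    struct Block {
      std::vector<const Block*> successors;
      LiveSet live_in;
    };

    // live_out(B) = union of live_in(S) over all successors S of B.
    LiveSet ComputeLiveOut(const Block& block) {
      LiveSet live_out;
      for (std::size_t i = 0; i < block.successors.size(); ++i) {
        const LiveSet& in = block.successors[i]->live_in;
        live_out.insert(in.begin(), in.end());
      }
      return live_out;
    }

    int main() {
      Block a, b, c;
      b.live_in.insert(1);
      c.live_in.insert(2);
      a.successors.push_back(&b);
      a.successors.push_back(&c);
      return ComputeLiveOut(a).size() == 2 ? 0 : 1;  // {1, 2}
    }
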
@@ -652,9 +641,9 @@
   ASSERT(index < Register::kNumAllocatableRegisters);
   LiveRange* result = fixed_live_ranges_[index];
   if (result == NULL) {
-    result = new LiveRange(FixedLiveRangeID(index));
+    result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
     ASSERT(result->IsFixed());
-    result->set_assigned_register(index, GENERAL_REGISTERS);
+    result->set_assigned_register(index, GENERAL_REGISTERS, zone_);
     fixed_live_ranges_[index] = result;
   }
   return result;
@@ -665,9 +654,9 @@
   ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
   LiveRange* result = fixed_double_live_ranges_[index];
   if (result == NULL) {
-    result = new LiveRange(FixedDoubleLiveRangeID(index));
+    result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
     ASSERT(result->IsFixed());
-    result->set_assigned_register(index, DOUBLE_REGISTERS);
+    result->set_assigned_register(index, DOUBLE_REGISTERS, zone_);
     fixed_double_live_ranges_[index] = result;
   }
   return result;
@@ -680,7 +669,7 @@
   }
   LiveRange* result = live_ranges_[index];
   if (result == NULL) {
-    result = new LiveRange(index);
+    result = new(zone_) LiveRange(index, zone_);
     live_ranges_[index] = result;
   }
   return result;
@@ -696,7 +685,7 @@
 
 HPhi* LAllocator::LookupPhi(LOperand* operand) const {
   if (!operand->IsUnallocated()) return NULL;
-  int index = operand->VirtualRegister();
+  int index = LUnallocated::cast(operand)->virtual_register();
   HValue* instr = graph_->LookupValue(index);
   if (instr != NULL && instr->IsPhi()) {
     return HPhi::cast(instr);
@@ -726,15 +715,15 @@
 
   if (range->IsEmpty() || range->Start().Value() > position.Value()) {
     // Can happen if there is a definition without use.
-    range->AddUseInterval(position, position.NextInstruction());
-    range->AddUsePosition(position.NextInstruction(), NULL);
+    range->AddUseInterval(position, position.NextInstruction(), zone_);
+    range->AddUsePosition(position.NextInstruction(), NULL, zone_);
   } else {
     range->ShortenTo(position);
   }
 
   if (operand->IsUnallocated()) {
     LUnallocated* unalloc_operand = LUnallocated::cast(operand);
-    range->AddUsePosition(position, unalloc_operand)->set_hint(hint);
+    range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
   }
 }
 
@@ -747,9 +736,9 @@
   if (range == NULL) return;
   if (operand->IsUnallocated()) {
     LUnallocated* unalloc_operand = LUnallocated::cast(operand);
-    range->AddUsePosition(position, unalloc_operand)->set_hint(hint);
+    range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
   }
-  range->AddUseInterval(block_start, position);
+  range->AddUseInterval(block_start, position, zone_);
 }
 
 
@@ -764,7 +753,8 @@
       LMoveOperands cur = move_operands->at(i);
       LOperand* cur_to = cur.destination();
       if (cur_to->IsUnallocated()) {
-        if (cur_to->VirtualRegister() == from->VirtualRegister()) {
+        if (LUnallocated::cast(cur_to)->virtual_register() ==
+            LUnallocated::cast(from)->virtual_register()) {
           move->AddMove(cur.source(), to);
           return;
         }
@@ -785,6 +775,7 @@
       if (i < end) instr = InstructionAt(i + 1);
       if (i > start) prev_instr = InstructionAt(i - 1);
       MeetConstraintsBetween(prev_instr, instr, i);
+      if (!AllocationOk()) return;
     }
   }
 }
@@ -806,11 +797,11 @@
   // Handle fixed output operand.
   if (first != NULL && first->Output() != NULL) {
     LUnallocated* first_output = LUnallocated::cast(first->Output());
-    LiveRange* range = LiveRangeFor(first_output->VirtualRegister());
+    LiveRange* range = LiveRangeFor(first_output->virtual_register());
     bool assigned = false;
     if (first_output->HasFixedPolicy()) {
       LUnallocated* output_copy = first_output->CopyUnconstrained();
-      bool is_tagged = HasTaggedValue(first_output->VirtualRegister());
+      bool is_tagged = HasTaggedValue(first_output->virtual_register());
       AllocateFixed(first_output, gap_index, is_tagged);
 
       // This value is produced on the stack, we never need to spill it.
@@ -841,7 +832,7 @@
       LUnallocated* cur_input = LUnallocated::cast(it.Current());
       if (cur_input->HasFixedPolicy()) {
         LUnallocated* input_copy = cur_input->CopyUnconstrained();
-        bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
+        bool is_tagged = HasTaggedValue(cur_input->virtual_register());
         AllocateFixed(cur_input, gap_index + 1, is_tagged);
         AddConstraintsGapMove(gap_index, input_copy, cur_input);
       } else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
@@ -850,12 +841,14 @@
         ASSERT(!cur_input->IsUsedAtStart());
 
         LUnallocated* input_copy = cur_input->CopyUnconstrained();
-        cur_input->set_virtual_register(next_virtual_register_++);
+        cur_input->set_virtual_register(GetVirtualRegister());
+        if (!AllocationOk()) return;
 
         if (RequiredRegisterKind(input_copy->virtual_register()) ==
             DOUBLE_REGISTERS) {
           double_artificial_registers_.Add(
-              cur_input->virtual_register() - first_artificial_register_);
+              cur_input->virtual_register() - first_artificial_register_,
+              zone_);
         }
 
         AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -868,8 +861,8 @@
     LUnallocated* second_output = LUnallocated::cast(second->Output());
     if (second_output->HasSameAsInputPolicy()) {
       LUnallocated* cur_input = LUnallocated::cast(second->FirstInput());
-      int output_vreg = second_output->VirtualRegister();
-      int input_vreg = cur_input->VirtualRegister();
+      int output_vreg = second_output->virtual_register();
+      int input_vreg = cur_input->virtual_register();
 
       LUnallocated* input_copy = cur_input->CopyUnconstrained();
       cur_input->set_virtual_register(second_output->virtual_register());
@@ -924,9 +917,9 @@
           }
         } else {
           if (to->IsUnallocated()) {
-            if (live->Contains(to->VirtualRegister())) {
+            if (live->Contains(LUnallocated::cast(to)->virtual_register())) {
               Define(curr_position, to, from);
-              live->Remove(to->VirtualRegister());
+              live->Remove(LUnallocated::cast(to)->virtual_register());
             } else {
               cur->Eliminate();
               continue;
@@ -937,7 +930,7 @@
         }
         Use(block_start_position, curr_position, from, hint);
         if (from->IsUnallocated()) {
-          live->Add(from->VirtualRegister());
+          live->Add(LUnallocated::cast(from)->virtual_register());
         }
       }
     } else {
@@ -947,7 +940,9 @@
       if (instr != NULL) {
         LOperand* output = instr->Output();
         if (output != NULL) {
-          if (output->IsUnallocated()) live->Remove(output->VirtualRegister());
+          if (output->IsUnallocated()) {
+            live->Remove(LUnallocated::cast(output)->virtual_register());
+          }
           Define(curr_position, output, NULL);
         }
 
@@ -957,7 +952,8 @@
                 output->index() != i) {
               LiveRange* range = FixedLiveRangeFor(i);
               range->AddUseInterval(curr_position,
-                                    curr_position.InstructionEnd());
+                                    curr_position.InstructionEnd(),
+                                    zone_);
             }
           }
         }
@@ -968,7 +964,8 @@
                 output->index() != i) {
               LiveRange* range = FixedDoubleLiveRangeFor(i);
               range->AddUseInterval(curr_position,
-                                    curr_position.InstructionEnd());
+                                    curr_position.InstructionEnd(),
+                                    zone_);
             }
           }
         }
@@ -985,7 +982,9 @@
           }
 
           Use(block_start_position, use_pos, input, NULL);
-          if (input->IsUnallocated()) live->Add(input->VirtualRegister());
+          if (input->IsUnallocated()) {
+            live->Add(LUnallocated::cast(input)->virtual_register());
+          }
         }
 
         for (TempIterator it(instr); !it.Done(); it.Advance()) {
@@ -1014,7 +1013,7 @@
   const ZoneList<HPhi*>* phis = block->phis();
   for (int i = 0; i < phis->length(); ++i) {
     HPhi* phi = phis->at(i);
-    LUnallocated* phi_operand = new LUnallocated(LUnallocated::NONE);
+    LUnallocated* phi_operand = new(zone_) LUnallocated(LUnallocated::NONE);
     phi_operand->set_virtual_register(phi->id());
     for (int j = 0; j < phi->OperandCount(); ++j) {
       HValue* op = phi->OperandAt(j);
@@ -1024,7 +1023,7 @@
         operand = chunk_->DefineConstantOperand(constant);
       } else {
         ASSERT(!op->EmitAtUses());
-        LUnallocated* unalloc = new LUnallocated(LUnallocated::ANY);
+        LUnallocated* unalloc = new(zone_) LUnallocated(LUnallocated::ANY);
         unalloc->set_virtual_register(op->id());
         operand = unalloc;
       }
@@ -1043,11 +1042,13 @@
       // it into a location different from the operand of a live range
       // covering a branch instruction.
       // Thus we need to manually record a pointer.
-      if (phi->representation().IsTagged()) {
-        LInstruction* branch =
-            InstructionAt(cur_block->last_instruction_index());
-        if (branch->HasPointerMap()) {
+      LInstruction* branch =
+          InstructionAt(cur_block->last_instruction_index());
+      if (branch->HasPointerMap()) {
+        if (phi->representation().IsTagged()) {
           branch->pointer_map()->RecordPointer(phi_operand);
+        } else if (!phi->representation().IsDouble()) {
+          branch->pointer_map()->RecordUntagged(phi_operand);
         }
       }
     }
@@ -1061,34 +1062,39 @@
 }
 
 
-void LAllocator::Allocate(LChunk* chunk) {
+bool LAllocator::Allocate(LChunk* chunk) {
   ASSERT(chunk_ == NULL);
   chunk_ = chunk;
   MeetRegisterConstraints();
+  if (!AllocationOk()) return false;
   ResolvePhis();
   BuildLiveRanges();
   AllocateGeneralRegisters();
+  if (!AllocationOk()) return false;
   AllocateDoubleRegisters();
+  if (!AllocationOk()) return false;
   PopulatePointerMaps();
   if (has_osr_entry_) ProcessOsrEntry();
   ConnectRanges();
   ResolveControlFlow();
+  return true;
 }
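
Allocate() turning from void into bool is one of the central changes here: running out of virtual registers becomes a recoverable bailout instead of an assertion failure, and each phase that can fail is followed by an AllocationOk() check. The control-flow shape, sketched with invented phase names:

    #include <cstdio>

    // Sketch of the bail-out flow; the phases and their failure modes are
    // illustrative, not V8's.
    class Pipeline {
     public:
      Pipeline() : ok_(true) {}

      bool Run() {
        MeetConstraints();
        if (!ok_) return false;  // stop early instead of asserting later
        AllocateRegisters();
        if (!ok_) return false;  // phases below may assume success
        ConnectRanges();
        return true;
      }

     private:
      void MeetConstraints() {}
      void AllocateRegisters() { ok_ = false; }  // pretend registers ran out
      void ConnectRanges() {}
      bool ok_;
    };

    int main() {
      Pipeline p;
      if (!p.Run()) std::printf("bailed out; caller can fall back\n");
      return 0;
    }
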
 
 
 void LAllocator::MeetRegisterConstraints() {
-  HPhase phase("Register constraints", chunk_);
+  HPhase phase("L_Register constraints", chunk_);
   first_artificial_register_ = next_virtual_register_;
   const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
   for (int i = 0; i < blocks->length(); ++i) {
     HBasicBlock* block = blocks->at(i);
     MeetRegisterConstraints(block);
+    if (!AllocationOk()) return;
   }
 }
 
 
 void LAllocator::ResolvePhis() {
-  HPhase phase("Resolve phis", chunk_);
+  HPhase phase("L_Resolve phis", chunk_);
 
   // Process the blocks in reverse order.
   const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
@@ -1124,8 +1130,8 @@
   if (cur_cover->IsSpilled()) return;
   ASSERT(pred_cover != NULL && cur_cover != NULL);
   if (pred_cover != cur_cover) {
-    LOperand* pred_op = pred_cover->CreateAssignedOperand();
-    LOperand* cur_op = cur_cover->CreateAssignedOperand();
+    LOperand* pred_op = pred_cover->CreateAssignedOperand(zone_);
+    LOperand* cur_op = cur_cover->CreateAssignedOperand(zone_);
     if (!pred_op->Equals(cur_op)) {
       LGap* gap = NULL;
       if (block->predecessors()->length() == 1) {
@@ -1142,10 +1148,13 @@
         // it into a location different from the operand of a live range
         // covering a branch instruction.
         // Thus we need to manually record a pointer.
-        if (HasTaggedValue(range->id())) {
-          LInstruction* branch = InstructionAt(pred->last_instruction_index());
-          if (branch->HasPointerMap()) {
+        LInstruction* branch = InstructionAt(pred->last_instruction_index());
+        if (branch->HasPointerMap()) {
+          if (HasTaggedValue(range->id())) {
             branch->pointer_map()->RecordPointer(cur_op);
+          } else if (!cur_op->IsDoubleStackSlot() &&
+                     !cur_op->IsDoubleRegister()) {
+            branch->pointer_map()->RemovePointer(cur_op);
           }
         }
       }
@@ -1175,7 +1184,7 @@
 
 
 void LAllocator::ConnectRanges() {
-  HPhase phase("Connect ranges", this);
+  HPhase phase("L_Connect ranges", this);
   for (int i = 0; i < live_ranges()->length(); ++i) {
     LiveRange* first_range = live_ranges()->at(i);
     if (first_range == NULL || first_range->parent() != NULL) continue;
@@ -1194,8 +1203,8 @@
           }
           if (should_insert) {
             LParallelMove* move = GetConnectingParallelMove(pos);
-            LOperand* prev_operand = first_range->CreateAssignedOperand();
-            LOperand* cur_operand = second_range->CreateAssignedOperand();
+            LOperand* prev_operand = first_range->CreateAssignedOperand(zone_);
+            LOperand* cur_operand = second_range->CreateAssignedOperand(zone_);
             move->AddMove(prev_operand, cur_operand);
           }
         }
@@ -1215,7 +1224,7 @@
 
 
 void LAllocator::ResolveControlFlow() {
-  HPhase phase("Resolve control flow", this);
+  HPhase phase("L_Resolve control flow", this);
   const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
   for (int block_id = 1; block_id < blocks->length(); ++block_id) {
     HBasicBlock* block = blocks->at(block_id);
@@ -1236,7 +1245,7 @@
 
 
 void LAllocator::BuildLiveRanges() {
-  HPhase phase("Build live ranges", this);
+  HPhase phase("L_Build live ranges", this);
   InitializeLivenessAnalysis();
   // Process the blocks in reverse order.
   const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
@@ -1264,7 +1273,8 @@
       LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
       for (int j = 0; j < move->move_operands()->length(); ++j) {
         LOperand* to = move->move_operands()->at(j).destination();
-        if (to->IsUnallocated() && to->VirtualRegister() == phi->id()) {
+        if (to->IsUnallocated() &&
+            LUnallocated::cast(to)->virtual_register() == phi->id()) {
           hint = move->move_operands()->at(j).source();
           phi_operand = to;
           break;
@@ -1297,7 +1307,7 @@
       while (!iterator.Done()) {
         int operand_index = iterator.Current();
         LiveRange* range = LiveRangeFor(operand_index);
-        range->EnsureInterval(start, end);
+        range->EnsureInterval(start, end, zone_);
         iterator.Advance();
       }
 
@@ -1340,7 +1350,7 @@
 
 
 void LAllocator::PopulatePointerMaps() {
-  HPhase phase("Populate pointer maps", this);
+  HPhase phase("L_Populate pointer maps", this);
   const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
 
   ASSERT(SafePointsAreInOrder());
@@ -1418,7 +1428,7 @@
         TraceAlloc("Pointer in register for range %d (start at %d) "
                    "at safe point %d\n",
                    cur->id(), cur->Start().Value(), safe_point);
-        LOperand* operand = cur->CreateAssignedOperand();
+        LOperand* operand = cur->CreateAssignedOperand(zone_);
         ASSERT(!operand->IsStackSlot());
         map->RecordPointer(operand);
       }
@@ -1459,15 +1469,14 @@
 
 
 void LAllocator::AllocateGeneralRegisters() {
-  HPhase phase("Allocate general registers", this);
+  HPhase phase("L_Allocate general registers", this);
   num_registers_ = Register::kNumAllocatableRegisters;
-  mode_ = GENERAL_REGISTERS;
   AllocateRegisters();
 }
 
 
 void LAllocator::AllocateDoubleRegisters() {
-  HPhase phase("Allocate double registers", this);
+  HPhase phase("L_Allocate double registers", this);
   num_registers_ = DoubleRegister::kNumAllocatableRegisters;
   mode_ = DOUBLE_REGISTERS;
   AllocateRegisters();
@@ -1475,7 +1484,6 @@
 
 
 void LAllocator::AllocateRegisters() {
-  ASSERT(mode_ != NONE);
   ASSERT(unhandled_live_ranges_.is_empty());
 
   for (int i = 0; i < live_ranges_.length(); ++i) {
@@ -1534,6 +1542,7 @@
         // Do not spill live range eagerly if use position that can benefit from
         // the register is too close to the start of live range.
         SpillBetween(current, current->Start(), pos->pos());
+        if (!AllocationOk()) return;
         ASSERT(UnhandledIsSorted());
         continue;
       }
@@ -1564,9 +1573,10 @@
     ASSERT(!current->HasRegisterAssigned() && !current->IsSpilled());
 
     bool result = TryAllocateFreeReg(current);
-    if (!result) {
-      AllocateBlockedReg(current);
-    }
+    if (!AllocationOk()) return;
+
+    if (!result) AllocateBlockedReg(current);
+    if (!AllocationOk()) return;
 
     if (current->HasRegisterAssigned()) {
       AddToActive(current);
@@ -1580,7 +1590,6 @@
 
 
 const char* LAllocator::RegisterName(int allocation_index) {
-  ASSERT(mode_ != NONE);
   if (mode_ == GENERAL_REGISTERS) {
     return Register::AllocationIndexToString(allocation_index);
   } else {
@@ -1621,29 +1630,6 @@
 }
 
 
-void LAllocator::RecordDefinition(HInstruction* instr, LUnallocated* operand) {
-  operand->set_virtual_register(instr->id());
-}
-
-
-void LAllocator::RecordTemporary(LUnallocated* operand) {
-  ASSERT(next_virtual_register_ < LUnallocated::kMaxVirtualRegisters);
-  if (!operand->HasFixedPolicy()) {
-    operand->set_virtual_register(next_virtual_register_++);
-  }
-}
-
-
-void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
-  operand->set_virtual_register(value->id());
-}
-
-
-int LAllocator::max_initial_value_ids() {
-  return LUnallocated::kMaxVirtualRegisters / 32;
-}
-
-
 void LAllocator::AddToActive(LiveRange* range) {
   TraceAlloc("Add live range %d to active\n", range->id());
   active_live_ranges_.Add(range);
@@ -1814,7 +1800,7 @@
         TraceAlloc("Assigning preferred reg %s to live range %d\n",
                    RegisterName(register_index),
                    current->id());
-        current->set_assigned_register(register_index, mode_);
+        current->set_assigned_register(register_index, mode_, zone_);
         return true;
       }
     }
@@ -1838,7 +1824,8 @@
   if (pos.Value() < current->End().Value()) {
     // Register reg is available at the range start but becomes blocked before
     // the range end. Split current at position where it becomes blocked.
-    LiveRange* tail = SplitAt(current, pos);
+    LiveRange* tail = SplitRangeAt(current, pos);
+    if (!AllocationOk()) return false;
     AddToUnhandledSorted(tail);
   }
 
@@ -1849,7 +1836,7 @@
   TraceAlloc("Assigning free reg %s to live range %d\n",
              RegisterName(reg),
              current->id());
-  current->set_assigned_register(reg, mode_);
+  current->set_assigned_register(reg, mode_, zone_);
 
   return true;
 }
@@ -1939,7 +1926,7 @@
   TraceAlloc("Assigning blocked reg %s to live range %d\n",
              RegisterName(reg),
              current->id());
-  current->set_assigned_register(reg, mode_);
+  current->set_assigned_register(reg, mode_, zone_);
 
   // This register was not free. Thus we need to find and spill
   // parts of active and inactive live regions that use the same register
@@ -1993,7 +1980,7 @@
 }
 
 
-LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) {
+LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {
   ASSERT(!range->IsFixed());
   TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
 
@@ -2004,8 +1991,9 @@
   ASSERT(pos.IsInstructionStart() ||
          !chunk_->instructions()->at(pos.InstructionIndex())->IsControl());
 
-  LiveRange* result = LiveRangeFor(next_virtual_register_++);
-  range->SplitAt(pos, result);
+  LiveRange* result = LiveRangeFor(GetVirtualRegister());
+  if (!AllocationOk()) return NULL;
+  range->SplitAt(pos, result, zone_);
   return result;
 }
 
@@ -2021,7 +2009,7 @@
 
   LifetimePosition split_pos = FindOptimalSplitPos(start, end);
   ASSERT(split_pos.Value() >= start.Value());
-  return SplitAt(range, split_pos);
+  return SplitRangeAt(range, split_pos);
 }
 
 
@@ -2060,7 +2048,8 @@
 
 
 void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
-  LiveRange* second_part = SplitAt(range, pos);
+  LiveRange* second_part = SplitRangeAt(range, pos);
+  if (!AllocationOk()) return;
   Spill(second_part);
 }
 
@@ -2069,7 +2058,8 @@
                               LifetimePosition start,
                               LifetimePosition end) {
   ASSERT(start.Value() < end.Value());
-  LiveRange* second_part = SplitAt(range, start);
+  LiveRange* second_part = SplitRangeAt(range, start);
+  if (!AllocationOk()) return;
 
   if (second_part->Start().Value() < end.Value()) {
     // The split result intersects with [start, end[.
@@ -2102,7 +2092,7 @@
     if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
     first->SetSpillOperand(op);
   }
-  range->MakeSpilled();
+  range->MakeSpilled(zone_);
 }
 
 
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index e4e6497..f5ab055 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -146,7 +146,6 @@
 
 
 enum RegisterKind {
-  NONE,
   GENERAL_REGISTERS,
   DOUBLE_REGISTERS
 };
@@ -217,7 +216,7 @@
 
   // Split this interval at the given position without effecting the
   // live range that owns it. The interval must contain the position.
-  void SplitAt(LifetimePosition pos);
+  void SplitAt(LifetimePosition pos, Zone* zone);
 
   // If this interval intersects with other return smallest position
   // that belongs to both of them.
@@ -278,7 +277,7 @@
  public:
   static const int kInvalidAssignment = 0x7fffffff;
 
-  explicit LiveRange(int id);
+  LiveRange(int id, Zone* zone);
 
   UseInterval* first_interval() const { return first_interval_; }
   UsePosition* first_pos() const { return first_pos_; }
@@ -289,11 +288,13 @@
   int id() const { return id_; }
   bool IsFixed() const { return id_ < 0; }
   bool IsEmpty() const { return first_interval() == NULL; }
-  LOperand* CreateAssignedOperand();
+  LOperand* CreateAssignedOperand(Zone* zone);
   int assigned_register() const { return assigned_register_; }
   int spill_start_index() const { return spill_start_index_; }
-  void set_assigned_register(int reg, RegisterKind register_kind);
-  void MakeSpilled();
+  void set_assigned_register(int reg,
+                             RegisterKind register_kind,
+                             Zone* zone);
+  void MakeSpilled(Zone* zone);
 
   // Returns use position in this live range that follows both start
   // and last processed use position.
@@ -317,9 +318,9 @@
   // the range.
   // All uses following the given position will be moved from this
   // live range to the result live range.
-  void SplitAt(LifetimePosition position, LiveRange* result);
+  void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
 
-  bool IsDouble() const { return assigned_register_kind_ == DOUBLE_REGISTERS; }
+  bool IsDouble() const { return is_double_; }
   bool HasRegisterAssigned() const {
     return assigned_register_ != kInvalidAssignment;
   }
@@ -356,9 +357,15 @@
   LifetimePosition FirstIntersection(LiveRange* other);
 
   // Add a new interval or a new use position to this live range.
-  void EnsureInterval(LifetimePosition start, LifetimePosition end);
-  void AddUseInterval(LifetimePosition start, LifetimePosition end);
-  UsePosition* AddUsePosition(LifetimePosition pos, LOperand* operand);
+  void EnsureInterval(LifetimePosition start,
+                      LifetimePosition end,
+                      Zone* zone);
+  void AddUseInterval(LifetimePosition start,
+                      LifetimePosition end,
+                      Zone* zone);
+  UsePosition* AddUsePosition(LifetimePosition pos,
+                              LOperand* operand,
+                              Zone* zone);
 
   // Shorten the most recently added interval by setting a new start.
   void ShortenTo(LifetimePosition start);
@@ -370,15 +377,15 @@
 #endif
 
  private:
-  void ConvertOperands();
+  void ConvertOperands(Zone* zone);
   UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
   void AdvanceLastProcessedMarker(UseInterval* to_start_of,
                                   LifetimePosition but_not_past) const;
 
   int id_;
   bool spilled_;
+  bool is_double_;
   int assigned_register_;
-  RegisterKind assigned_register_kind_;
   UseInterval* last_interval_;
   UseInterval* first_interval_;
   UsePosition* first_pos_;
@@ -401,8 +408,8 @@
     return bits_->Contains(value);
   }
 
-  void Add(int value) {
-    EnsureCapacity(value);
+  void Add(int value, Zone* zone) {
+    EnsureCapacity(value, zone);
     bits_->Add(value);
   }
 
@@ -413,11 +420,11 @@
     return bits_ != NULL && bits_->length() > value;
   }
 
-  void EnsureCapacity(int value) {
+  void EnsureCapacity(int value, Zone* zone) {
     if (InBitsRange(value)) return;
     int new_length = bits_ == NULL ? kInitialLength : bits_->length();
     while (new_length <= value) new_length *= 2;
-    BitVector* new_bits = new BitVector(new_length);
+    BitVector* new_bits = new(zone) BitVector(new_length, zone);
     if (bits_ != NULL) new_bits->CopyFrom(*bits_);
     bits_ = new_bits;
   }
@@ -432,24 +439,13 @@
 
   static void TraceAlloc(const char* msg, ...);
 
-  // Lithium translation support.
-  // Record a use of an input operand in the current instruction.
-  void RecordUse(HValue* value, LUnallocated* operand);
-  // Record the definition of the output operand.
-  void RecordDefinition(HInstruction* instr, LUnallocated* operand);
-  // Record a temporary operand.
-  void RecordTemporary(LUnallocated* operand);
-
   // Checks whether the value of a given virtual register is tagged.
   bool HasTaggedValue(int virtual_register) const;
 
   // Returns the register kind required by the given virtual register.
   RegisterKind RequiredRegisterKind(int virtual_register) const;
 
-  // Control max function size.
-  static int max_initial_value_ids();
-
-  void Allocate(LChunk* chunk);
+  bool Allocate(LChunk* chunk);
 
   const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
   const Vector<LiveRange*>* fixed_live_ranges() const {
@@ -462,6 +458,15 @@
   LChunk* chunk() const { return chunk_; }
   HGraph* graph() const { return graph_; }
 
+  int GetVirtualRegister() {
+    if (next_virtual_register_ > LUnallocated::kMaxVirtualRegisters) {
+      allocation_ok_ = false;
+    }
+    return next_virtual_register_++;
+  }
+
+  bool AllocationOk() { return allocation_ok_; }
+
   void MarkAsOsrEntry() {
     // There can be only one.
     ASSERT(!has_osr_entry_);
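
GetVirtualRegister() and AllocationOk() replace the old ASSERT(next_virtual_register_ < LUnallocated::kMaxVirtualRegisters): exhausting the register space now just clears allocation_ok_, and callers are expected to test the flag before trusting the result. The same idiom standalone (this sketch tests with >=, while the patch's own boundary check differs slightly):

    #include <cassert>

    // A counter that records exhaustion instead of crashing.
    class VirtualRegisterSource {
     public:
      VirtualRegisterSource() : next_(0), ok_(true) {}

      int Next() {
        if (next_ >= kMax) ok_ = false;  // remember the failure, stay defined
        return next_++;
      }

      bool ok() const { return ok_; }

     private:
      static const int kMax = 1 << 18;  // mirrors the new kMaxVirtualRegisters
      int next_;
      bool ok_;
    };

    int main() {
      VirtualRegisterSource regs;
      int r = regs.Next();
      assert(regs.ok() && r == 0);
      return 0;
    }
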
@@ -534,7 +539,7 @@
   // Otherwise returns the live range that starts at pos and contains
   // all uses from the original range that follow pos. Uses at pos will
   // still be owned by the original range after splitting.
-  LiveRange* SplitAt(LiveRange* range, LifetimePosition pos);
+  LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
 
   // Split the given range in a position from the interval [start, end].
   LiveRange* SplitBetween(LiveRange* range,
@@ -590,6 +595,8 @@
 
   inline LGap* GapAt(int index);
 
+  Zone* zone_;
+
   LChunk* chunk_;
 
   // During liveness analysis keep a mapping from block id to live_in sets
@@ -621,6 +628,9 @@
 
   bool has_osr_entry_;
 
+  // Indicates success or failure during register allocation.
+  bool allocation_ok_;
+
   DISALLOW_COPY_AND_ASSIGN(LAllocator);
 };
 
diff --git a/src/lithium.cc b/src/lithium.cc
index 5410f6f..aefd8b6 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -36,6 +36,7 @@
   LUnallocated* unalloc = NULL;
   switch (kind()) {
     case INVALID:
+      stream->Add("(0)");
       break;
     case UNALLOCATED:
       unalloc = LUnallocated::cast(this);
@@ -70,9 +71,6 @@
         case LUnallocated::ANY:
           stream->Add("(-)");
           break;
-        case LUnallocated::IGNORE:
-          stream->Add("(0)");
-          break;
       }
       break;
     case CONSTANT_OPERAND:
@@ -96,13 +94,32 @@
   }
 }
 
+#define DEFINE_OPERAND_CACHE(name, type)                      \
+  name* name::cache = NULL;                                   \
+  void name::SetUpCache() {                                   \
+    if (cache) return;                                        \
+    cache = new name[kNumCachedOperands];                     \
+    for (int i = 0; i < kNumCachedOperands; i++) {            \
+      cache[i].ConvertTo(type, i);                            \
+    }                                                         \
+  }                                                           \
 
-int LOperand::VirtualRegister() {
-  LUnallocated* unalloc = LUnallocated::cast(this);
-  return unalloc->virtual_register();
+DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
+DEFINE_OPERAND_CACHE(LStackSlot,       STACK_SLOT)
+DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
+DEFINE_OPERAND_CACHE(LRegister,        REGISTER)
+DEFINE_OPERAND_CACHE(LDoubleRegister,  DOUBLE_REGISTER)
+
+#undef DEFINE_OPERAND_CACHE
+
+void LOperand::SetUpCaches() {
+  LConstantOperand::SetUpCache();
+  LStackSlot::SetUpCache();
+  LDoubleStackSlot::SetUpCache();
+  LRegister::SetUpCache();
+  LDoubleRegister::SetUpCache();
 }
 
-
 bool LParallelMove::IsRedundant() const {
   for (int i = 0; i < move_operands_.length(); ++i) {
     if (!move_operands_[i].IsRedundant()) return false;
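
DEFINE_OPERAND_CACHE above replaces the fixed-size static cache[] arrays with pointers that SetUpCache() fills in once, so each (kind, index) pair maps to one shared immutable operand and lookups never allocate. The interning idea reduced to a single illustrative class:

    #include <cassert>

    // Sketch of the interning scheme: one shared immutable object per index,
    // built lazily and never freed. Names are invented, not V8's.
    struct Slot {
      int index;
    };

    class SlotCache {
     public:
      static void SetUp() {
        if (cache_ != 0) return;  // idempotent, like SetUpCache()
        cache_ = new Slot[kSize];
        for (int i = 0; i < kSize; i++) cache_[i].index = i;
      }
      static Slot* Get(int index) {
        assert(cache_ != 0 && index >= 0 && index < kSize);
        return &cache_[index];    // same pointer for every lookup
      }
     private:
      static const int kSize = 128;  // like kNumCachedOperands for slots
      static Slot* cache_;
    };

    Slot* SlotCache::cache_ = 0;

    int main() {
      SlotCache::SetUp();
      assert(SlotCache::Get(5) == SlotCache::Get(5));  // identity comparison
      return 0;
    }
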
@@ -156,6 +173,27 @@
 }
 
 
+void LPointerMap::RemovePointer(LOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (pointer_operands_[i]->Equals(op)) {
+      pointer_operands_.Remove(i);
+      --i;
+    }
+  }
+}
+
+
+void LPointerMap::RecordUntagged(LOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  untagged_operands_.Add(op);
+}
+
+
 void LPointerMap::PrintTo(StringStream* stream) {
   stream->Add("{");
   for (int i = 0; i < pointer_operands_.length(); ++i) {
@@ -182,6 +220,7 @@
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
       return 3;
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
diff --git a/src/lithium.h b/src/lithium.h
index a933f72..d1e2e3c 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -59,8 +59,8 @@
   bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
   bool IsArgument() const { return kind() == ARGUMENT; }
   bool IsUnallocated() const { return kind() == UNALLOCATED; }
+  bool IsIgnored() const { return kind() == INVALID; }
   bool Equals(LOperand* other) const { return value_ == other->value_; }
-  int VirtualRegister();
 
   void PrintTo(StringStream* stream);
   void ConvertTo(Kind kind, int index) {
@@ -69,6 +69,10 @@
     ASSERT(this->index() == index);
   }
 
+  // Calls SetUpCache() for each subclass. Don't forget to update this method
+  // if you add a new LOperand subclass.
+  static void SetUpCaches();
+
  protected:
   static const int kKindFieldWidth = 3;
   class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
@@ -89,8 +93,7 @@
     FIXED_SLOT,
     MUST_HAVE_REGISTER,
     WRITABLE_REGISTER,
-    SAME_AS_FIRST_INPUT,
-    IGNORE
+    SAME_AS_FIRST_INPUT
   };
 
   // Lifetime of operand inside the instruction.
@@ -121,9 +124,9 @@
 
   // The superclass has a KindField.  Some policies have a signed fixed
   // index in the upper bits.
-  static const int kPolicyWidth = 4;
+  static const int kPolicyWidth = 3;
   static const int kLifetimeWidth = 1;
-  static const int kVirtualRegisterWidth = 17;
+  static const int kVirtualRegisterWidth = 18;
 
   static const int kPolicyShift = kKindFieldWidth;
   static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
@@ -143,12 +146,10 @@
                         kVirtualRegisterWidth> {
   };
 
-  static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1);
+  static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
   static const int kMaxFixedIndex = 63;
   static const int kMinFixedIndex = -64;
 
-  bool HasIgnorePolicy() const { return policy() == IGNORE; }
-  bool HasNoPolicy() const { return policy() == NONE; }
   bool HasAnyPolicy() const {
     return policy() == ANY;
   }
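
With IGNORE gone the remaining policies fit in three bits, so kPolicyWidth drops from 4 to 3 and kVirtualRegisterWidth grows from 17 to 18; the field can now genuinely hold kMaxVirtualRegisters = 1 << 18 distinct indices. The packing arithmetic written out, with a simplified version of the BitField template the header relies on:

    #include <cassert>
    #include <stdint.h>

    template <typename T, int shift, int width>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t packed) {
        uint32_t mask = ((1u << width) - 1) << shift;
        return static_cast<T>((packed & mask) >> shift);
      }
    };

    // New layout: 3 kind bits, 3 policy bits (was 4), 1 lifetime bit, then
    // 18 virtual-register bits (was 17).
    typedef BitField<int, 0, 3>  KindField;
    typedef BitField<int, 3, 3>  PolicyField;
    typedef BitField<int, 6, 1>  LifetimeField;
    typedef BitField<int, 7, 18> VirtualRegisterField;

    int main() {
      uint32_t packed = KindField::encode(1) | PolicyField::encode(4) |
                        VirtualRegisterField::encode(200000);
      assert(VirtualRegisterField::decode(packed) == 200000);  // 18 bits suffice
      assert(PolicyField::decode(packed) == 4);
      return 0;
    }
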
@@ -171,7 +172,7 @@
     return static_cast<int>(value_) >> kFixedIndexShift;
   }
 
-  unsigned virtual_register() const {
+  int virtual_register() const {
     return VirtualRegisterField::decode(value_);
   }
 
@@ -234,9 +235,7 @@
   }
 
   bool IsIgnored() const {
-    return destination_ != NULL &&
-        destination_->IsUnallocated() &&
-        LUnallocated::cast(destination_)->HasIgnorePolicy();
+    return destination_ != NULL && destination_->IsIgnored();
   }
 
   // We clear both operands to indicate move that's been eliminated.
@@ -265,11 +264,11 @@
     return reinterpret_cast<LConstantOperand*>(op);
   }
 
-  static void SetupCache();
+  static void SetUpCache();
 
  private:
   static const int kNumCachedOperands = 128;
-  static LConstantOperand cache[];
+  static LConstantOperand* cache;
 
   LConstantOperand() : LOperand() { }
   explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
@@ -300,11 +299,11 @@
     return reinterpret_cast<LStackSlot*>(op);
   }
 
-  static void SetupCache();
+  static void SetUpCache();
 
  private:
   static const int kNumCachedOperands = 128;
-  static LStackSlot cache[];
+  static LStackSlot* cache;
 
   LStackSlot() : LOperand() { }
   explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
@@ -324,11 +323,11 @@
     return reinterpret_cast<LDoubleStackSlot*>(op);
   }
 
-  static void SetupCache();
+  static void SetUpCache();
 
  private:
   static const int kNumCachedOperands = 128;
-  static LDoubleStackSlot cache[];
+  static LDoubleStackSlot* cache;
 
   LDoubleStackSlot() : LOperand() { }
   explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
@@ -348,11 +347,11 @@
     return reinterpret_cast<LRegister*>(op);
   }
 
-  static void SetupCache();
+  static void SetUpCache();
 
  private:
   static const int kNumCachedOperands = 16;
-  static LRegister cache[];
+  static LRegister* cache;
 
   LRegister() : LOperand() { }
   explicit LRegister(int index) : LOperand(REGISTER, index) { }
@@ -372,11 +371,11 @@
     return reinterpret_cast<LDoubleRegister*>(op);
   }
 
-  static void SetupCache();
+  static void SetUpCache();
 
  private:
   static const int kNumCachedOperands = 16;
-  static LDoubleRegister cache[];
+  static LDoubleRegister* cache;
 
   LDoubleRegister() : LOperand() { }
   explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
@@ -407,9 +406,18 @@
 class LPointerMap: public ZoneObject {
  public:
   explicit LPointerMap(int position)
-      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
+      : pointer_operands_(8),
+        untagged_operands_(0),
+        position_(position),
+        lithium_position_(-1) { }
 
-  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
+  const ZoneList<LOperand*>* GetNormalizedOperands() {
+    for (int i = 0; i < untagged_operands_.length(); ++i) {
+      RemovePointer(untagged_operands_[i]);
+    }
+    untagged_operands_.Clear();
+    return &pointer_operands_;
+  }
   int position() const { return position_; }
   int lithium_position() const { return lithium_position_; }
 
@@ -419,10 +427,13 @@
   }
 
   void RecordPointer(LOperand* op);
+  void RemovePointer(LOperand* op);
+  void RecordUntagged(LOperand* op);
   void PrintTo(StringStream* stream);
 
  private:
   ZoneList<LOperand*> pointer_operands_;
+  ZoneList<LOperand*> untagged_operands_;
   int position_;
   int lithium_position_;
 };
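
RecordUntagged() and GetNormalizedOperands() give the allocator a cheap way to retract pointers it recorded optimistically: untagged operands accumulate in a side list, and the subtraction happens only when the map is finally consumed. The lazy-removal shape in miniature, with std::vector standing in for ZoneList and ints for operands:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    class PointerMap {
     public:
      void RecordPointer(int op) { pointers_.push_back(op); }
      void RecordUntagged(int op) { untagged_.push_back(op); }  // queued only

      // Apply all queued removals, then hand out the cleaned list.
      const std::vector<int>& GetNormalizedOperands() {
        for (std::size_t i = 0; i < untagged_.size(); ++i) {
          pointers_.erase(
              std::remove(pointers_.begin(), pointers_.end(), untagged_[i]),
              pointers_.end());
        }
        untagged_.clear();
        return pointers_;
      }

     private:
      std::vector<int> pointers_;
      std::vector<int> untagged_;
    };

    int main() {
      PointerMap map;
      map.RecordPointer(1);
      map.RecordPointer(2);
      map.RecordUntagged(1);  // not applied until normalization
      return map.GetNormalizedOperands().size() == 1 ? 0 : 1;
    }
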
@@ -431,12 +442,14 @@
 class LEnvironment: public ZoneObject {
  public:
   LEnvironment(Handle<JSFunction> closure,
+               FrameType frame_type,
                int ast_id,
                int parameter_count,
                int argument_count,
                int value_count,
                LEnvironment* outer)
       : closure_(closure),
+        frame_type_(frame_type),
         arguments_stack_height_(argument_count),
         deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
         translation_index_(-1),
@@ -444,13 +457,13 @@
         parameter_count_(parameter_count),
         pc_offset_(-1),
         values_(value_count),
-        representations_(value_count),
+        is_tagged_(value_count, closure->GetHeap()->isolate()->zone()),
         spilled_registers_(NULL),
         spilled_double_registers_(NULL),
-        outer_(outer) {
-  }
+        outer_(outer) { }
 
   Handle<JSFunction> closure() const { return closure_; }
+  FrameType frame_type() const { return frame_type_; }
   int arguments_stack_height() const { return arguments_stack_height_; }
   int deoptimization_index() const { return deoptimization_index_; }
   int translation_index() const { return translation_index_; }
@@ -466,11 +479,13 @@
 
   void AddValue(LOperand* operand, Representation representation) {
     values_.Add(operand);
-    representations_.Add(representation);
+    if (representation.IsTagged()) {
+      is_tagged_.Add(values_.length() - 1);
+    }
   }
 
   bool HasTaggedValueAt(int index) const {
-    return representations_[index].IsTagged();
+    return is_tagged_.Contains(index);
   }
 
   void Register(int deoptimization_index,
@@ -495,6 +510,7 @@
 
  private:
   Handle<JSFunction> closure_;
+  FrameType frame_type_;
   int arguments_stack_height_;
   int deoptimization_index_;
   int translation_index_;
@@ -502,7 +518,7 @@
   int parameter_count_;
   int pc_offset_;
   ZoneList<LOperand*> values_;
-  ZoneList<Representation> representations_;
+  BitVector is_tagged_;
 
   // Allocation index indexed arrays of spill slot operands for registers
   // that are also in spill slots at an OSR entry.  NULL for environments
@@ -511,8 +527,6 @@
   LOperand** spilled_double_registers_;
 
   LEnvironment* outer_;
-
-  friend class LCodegen;
 };
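
The only question LEnvironment ever asked of representations_ was IsTagged(), so the ZoneList<Representation> collapses into a BitVector: one bit per value instead of a whole Representation. The trade in sketch form, with std::vector<bool> standing in for the BitVector:

    #include <cassert>
    #include <vector>

    class Environment {
     public:
      void AddValue(int operand, bool is_tagged) {
        values_.push_back(operand);
        is_tagged_.push_back(is_tagged);  // one bit, not a descriptor
      }
      bool HasTaggedValueAt(int index) const { return is_tagged_[index]; }

     private:
      std::vector<int> values_;
      std::vector<bool> is_tagged_;  // stand-in for the zone BitVector
    };

    int main() {
      Environment env;
      env.AddValue(7, true);
      env.AddValue(8, false);
      assert(env.HasTaggedValueAt(0) && !env.HasTaggedValueAt(1));
      return 0;
    }
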
 
 
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index e05c53c..abfb0f6 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -325,9 +325,10 @@
             if (old_node.children[i].live_shared_function_infos) {
               old_node.children[i].live_shared_function_infos.
                   forEach(function (old_child_info) {
-                    %LiveEditReplaceRefToNestedFunction(old_info.info,
-                                                        corresponding_child_info,
-                                                        old_child_info.info);
+                    %LiveEditReplaceRefToNestedFunction(
+                        old_info.info,
+                        corresponding_child_info,
+                        old_child_info.info);
                   });
             }
           }
@@ -381,7 +382,7 @@
           position: break_point_position,
           line: break_point.line(),
           column: break_point.column()
-      }
+      };
       break_point_old_positions.push(old_position_description);
     }
 
@@ -418,7 +419,7 @@
             position: updated_position,
             line: new_location.line,
             column: new_location.column
-        }
+        };
 
         break_point.set(original_script);
 
@@ -428,7 +429,7 @@
           new_positions: new_position_description
           } );
       }
-    }
+    };
   }
 
 
@@ -465,7 +466,7 @@
   }
   PosTranslator.prototype.GetChunks = function() {
     return this.chunks;
-  }
+  };
 
   PosTranslator.prototype.Translate = function(pos, inside_chunk_handler) {
     var array = this.chunks;
@@ -492,18 +493,18 @@
       inside_chunk_handler = PosTranslator.DefaultInsideChunkHandler;
     }
     return inside_chunk_handler(pos, chunk);
-  }
+  };
 
   PosTranslator.DefaultInsideChunkHandler = function(pos, diff_chunk) {
     Assert(false, "Cannot translate position in changed area");
-  }
+  };
 
   PosTranslator.ShiftWithTopInsideChunkHandler =
       function(pos, diff_chunk) {
     // We carelessly do not check whether we stay inside the chunk after
     // translation.
     return pos - diff_chunk.pos1 + diff_chunk.pos2;
-  }
+  };
 
   var FunctionStatus = {
       // No change to function or its inner functions; however its positions
@@ -517,7 +518,7 @@
       CHANGED: "changed",
       // Function is changed but cannot be patched.
       DAMAGED: "damaged"
-  }
+  };
 
   function CodeInfoTreeNode(code_info, children, array_index) {
     this.info = code_info;
@@ -580,19 +581,19 @@
   // children of unchanged functions are ignored.
   function MarkChangedFunctions(code_info_tree, chunks) {
 
-    // A convenient interator over diff chunks that also translates
+    // A convenient iterator over diff chunks that also translates
     // positions from old to new in a current non-changed part of script.
     var chunk_it = new function() {
       var chunk_index = 0;
       var pos_diff = 0;
-      this.current = function() { return chunks[chunk_index]; }
+      this.current = function() { return chunks[chunk_index]; };
       this.next = function() {
         var chunk = chunks[chunk_index];
         pos_diff = chunk.pos2 + chunk.len2 - (chunk.pos1 + chunk.len1);
         chunk_index++;
-      }
-      this.done = function() { return chunk_index >= chunks.length; }
-      this.TranslatePos = function(pos) { return pos + pos_diff; }
+      };
+      this.done = function() { return chunk_index >= chunks.length; };
+      this.TranslatePos = function(pos) { return pos + pos_diff; };
     };
 
     // A recursive function that processes internals of a function and all its
@@ -946,16 +947,16 @@
       BLOCKED_ON_OTHER_STACK: 3,
       BLOCKED_UNDER_NATIVE_CODE: 4,
       REPLACED_ON_ACTIVE_STACK: 5
-  }
+  };
 
   FunctionPatchabilityStatus.SymbolName = function(code) {
-    var enum = FunctionPatchabilityStatus;
-    for (name in enum) {
-      if (enum[name] == code) {
+    var enumeration = FunctionPatchabilityStatus;
+    for (name in enumeration) {
+      if (enumeration[name] == code) {
         return name;
       }
     }
-  }
+  };
 
 
   // A logical failure in liveedit process. This means that change_log
@@ -968,7 +969,7 @@
 
   Failure.prototype.toString = function() {
     return "LiveEdit Failure: " + this.message;
-  }
+  };
 
   // A testing entry.
   function GetPcFromSourcePos(func, source_pos) {
@@ -1078,5 +1079,5 @@
     PosTranslator: PosTranslator,
     CompareStrings: CompareStrings,
     ApplySingleChunkPatch: ApplySingleChunkPatch
-  }
-}
+  };
+};
diff --git a/src/liveedit.cc b/src/liveedit.cc
index d44c2fc..9c5294a 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -53,8 +53,8 @@
   // Ignore return value from SetElement. It can only be a failure if there
   // are element setters causing exceptions and the debugger context has none
   // of these.
-  Handle<Object> no_failure;
-  no_failure = SetElement(object, index, value, kNonStrictMode);
+  Handle<Object> no_failure =
+      JSObject::SetElement(object, index, value, NONE, kNonStrictMode);
   ASSERT(!no_failure.is_null());
   USE(no_failure);
 }
@@ -602,7 +602,8 @@
   // Build AST.
   CompilationInfo info(script);
   info.MarkAsGlobal();
-  if (ParserApi::Parse(&info)) {
+  // Parse and don't allow skipping lazy functions.
+  if (ParserApi::Parse(&info, kNoParsingFlags)) {
     // Compile the code.
     LiveEditFunctionTracker tracker(info.isolate(), info.function());
     if (Compiler::MakeCodeForLiveEdit(&info)) {
@@ -797,7 +798,7 @@
     HandleScope scope;
     FunctionInfoWrapper info = FunctionInfoWrapper::Create();
     info.SetInitialProperties(fun->name(), fun->start_position(),
-                              fun->end_position(), fun->num_parameters(),
+                              fun->end_position(), fun->parameter_count(),
                               current_parent_index_);
     current_parent_index_ = len_;
     SetElementNonStrict(result_, len_, info.GetJSArray());
@@ -855,38 +856,20 @@
       return HEAP->undefined_value();
     }
     do {
-      ZoneList<Variable*> list(10);
-      outer_scope->CollectUsedVariables(&list);
-      int j = 0;
-      for (int i = 0; i < list.length(); i++) {
-        Variable* var1 = list[i];
-        if (var1->IsContextSlot()) {
-          if (j != i) {
-            list[j] = var1;
-          }
-          j++;
-        }
-      }
+      ZoneList<Variable*> stack_list(outer_scope->StackLocalCount());
+      ZoneList<Variable*> context_list(outer_scope->ContextLocalCount());
+      outer_scope->CollectStackAndContextLocals(&stack_list, &context_list);
+      context_list.Sort(&Variable::CompareIndex);
 
-      // Sort it.
-      for (int k = 1; k < j; k++) {
-        int l = k;
-        for (int m = k + 1; m < j; m++) {
-          if (list[l]->index() > list[m]->index()) {
-            l = m;
-          }
-        }
-        list[k] = list[l];
-      }
-      for (int i = 0; i < j; i++) {
+      for (int i = 0; i < context_list.length(); i++) {
         SetElementNonStrict(scope_info_list,
                             scope_info_length,
-                            list[i]->name());
+                            context_list[i]->name());
         scope_info_length++;
         SetElementNonStrict(
             scope_info_list,
             scope_info_length,
-            Handle<Smi>(Smi::FromInt(list[i]->index())));
+            Handle<Smi>(Smi::FromInt(context_list[i]->index())));
         scope_info_length++;
       }
       SetElementNonStrict(scope_info_list,
@@ -1000,6 +983,7 @@
 static void ReplaceCodeObject(Code* original, Code* substitution) {
   ASSERT(!HEAP->InNewSpace(substitution));
 
+  HeapIterator iterator;
   AssertNoAllocation no_allocations_please;
 
   // A zone scope for ReferenceCollectorVisitor.
@@ -1016,7 +1000,6 @@
 
   // Now iterate over all pointers of all objects, including code_target
   // implicit pointers.
-  HeapIterator iterator;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     obj->Iterate(&visitor);
   }
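
ReplaceCodeObject now constructs the HeapIterator before entering the AssertNoAllocation scope, plausibly because setting the iterator up may itself touch the heap, which the guard forbids. The ordering constraint as a tiny RAII sketch (all types invented):

    #include <cassert>

    struct Heap {
      bool allocation_allowed;
      Heap() : allocation_allowed(true) {}
      void Allocate() { assert(allocation_allowed); }
    };

    // Forbids allocation for its lifetime (stand-in for AssertNoAllocation).
    struct NoAllocationScope {
      explicit NoAllocationScope(Heap* h) : heap(h) {
        heap->allocation_allowed = false;
      }
      ~NoAllocationScope() { heap->allocation_allowed = true; }
      Heap* heap;
    };

    // Stand-in for HeapIterator: its constructor may allocate.
    struct Iterator {
      explicit Iterator(Heap* h) { h->Allocate(); }
    };

    int main() {
      Heap heap;
      Iterator it(&heap);  // construct first, while allocation is still legal
      (void)it;
      NoAllocationScope scope(&heap);
      // ... iterate without allocating ...
      return 0;
    }
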
@@ -1101,12 +1084,14 @@
 
   Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
 
+  HEAP->EnsureHeapIsIterable();
+
   if (IsJSFunctionCode(shared_info->code())) {
     Handle<Code> code = compile_info_wrapper.GetFunctionCode();
     ReplaceCodeObject(shared_info->code(), *code);
     Handle<Object> code_scope_info =  compile_info_wrapper.GetCodeScopeInfo();
     if (code_scope_info->IsFixedArray()) {
-      shared_info->set_scope_info(SerializedScopeInfo::cast(*code_scope_info));
+      shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
     }
   }
 
@@ -1243,7 +1228,7 @@
       V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer");
     }
 
-    // Setup new buffer.
+    // Set up new buffer.
     byte* new_buffer = NewArray<byte>(new_buffer_size);
 
     // Copy the data.
@@ -1271,7 +1256,8 @@
 
 // Patch positions in code (changes relocation info section) and possibly
 // returns new instance of code.
-static Handle<Code> PatchPositionsInCode(Handle<Code> code,
+static Handle<Code> PatchPositionsInCode(
+    Handle<Code> code,
     Handle<JSArray> position_change_array) {
 
   RelocInfoBuffer buffer_writer(code->relocation_size(),
@@ -1286,7 +1272,7 @@
         int new_position = TranslatePosition(position,
                                              position_change_array);
         if (position != new_position) {
-          RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position);
+          RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position, NULL);
           buffer_writer.Write(&info_copy);
           continue;
         }
@@ -1333,6 +1319,8 @@
   info->set_end_position(new_function_end);
   info->set_function_token_position(new_function_token_pos);
 
+  HEAP->EnsureHeapIsIterable();
+
   if (IsJSFunctionCode(info->code())) {
     // Patch relocation info section of the code.
     Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
diff --git a/src/liveobjectlist-inl.h b/src/liveobjectlist-inl.h
index f742de3..2bc2296 100644
--- a/src/liveobjectlist-inl.h
+++ b/src/liveobjectlist-inl.h
@@ -59,7 +59,7 @@
 }
 
 
-void LiveObjectList::ProcessNonLive(HeapObject *obj) {
+void LiveObjectList::ProcessNonLive(HeapObject* obj) {
   // Only do work if we have at least one list to process.
   if (last()) DoProcessNonLive(obj);
 }
@@ -93,7 +93,7 @@
 template <typename T>
 inline LiveObjectList::Element*
 LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key) {
-  LiveObjectList *lol = last();
+  LiveObjectList* lol = last();
   while (lol != NULL) {
     Element* elements = lol->elements_;
     for (int i = 0; i < lol->obj_count_; i++) {
diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc
index 957c051..1aabc59 100644
--- a/src/liveobjectlist.cc
+++ b/src/liveobjectlist.cc
@@ -165,7 +165,7 @@
 }
 
 
-bool IsOfType(LiveObjectType type, HeapObject *obj) {
+bool IsOfType(LiveObjectType type, HeapObject* obj) {
   // Note: there are types that are more general (e.g. JSObject) that would
   // have passed the Is##type_() test for more specialized types (e.g.
   // JSFunction).  If we find a more specialized match but we're looking for
@@ -211,7 +211,7 @@
 }
 
 
-static bool InSpace(AllocationSpace space, HeapObject *heap_obj) {
+static bool InSpace(AllocationSpace space, HeapObject* heap_obj) {
   Heap* heap = ISOLATE->heap();
   if (space != LO_SPACE) {
     return heap->InSpace(heap_obj, space);
@@ -462,7 +462,7 @@
   char prev_ch = 0;
   while (*dst != '\0') {
     char ch = *src++;
-    // We will treat non-ascii chars as '?'.
+    // We will treat non-ASCII chars as '?'.
     if ((ch & 0x80) != 0) {
       ch = '?';
     }
@@ -498,7 +498,7 @@
                  length);
 
   } else if (obj->IsString()) {
-    String *str = String::cast(obj);
+    String* str = String::cast(obj);
     // Only grab up to 160 chars in case they are double byte.
     // We'll only dump 80 of them after we compact them.
     const int kMaxCharToDump = 80;
@@ -842,7 +842,7 @@
   bool found_root_;
   bool found_weak_root_;
 
-  LolFilter *filter_;
+  LolFilter* filter_;
 };
 
 
@@ -857,8 +857,8 @@
 // A summary writer for filling in a summary of lol lists and diffs.
 class LolSummaryWriter: public SummaryWriter {
  public:
-  LolSummaryWriter(LiveObjectList *older_lol,
-                   LiveObjectList *newer_lol)
+  LolSummaryWriter(LiveObjectList* older_lol,
+                   LiveObjectList* newer_lol)
       : older_(older_lol), newer_(newer_lol) {
   }
 
@@ -944,7 +944,7 @@
 int LiveObjectList::GetTotalObjCountAndSize(int* size_p) {
   int size = 0;
   int count = 0;
-  LiveObjectList *lol = this;
+  LiveObjectList* lol = this;
   do {
     // Only compute total size if requested i.e. when size_p is not null.
     if (size_p != NULL) {
@@ -1085,7 +1085,7 @@
 static int CountHeapObjects() {
   int count = 0;
   // Iterate over all the heap spaces and count the number of objects.
-  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+  HeapIterator iterator;
   HeapObject* heap_obj = NULL;
   while ((heap_obj = iterator.next()) != NULL) {
     count++;
@@ -1122,7 +1122,7 @@
   // allocation, and we need allocate below.
   {
     // Iterate over all the heap spaces and add the objects.
-    HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+    HeapIterator iterator;
     HeapObject* heap_obj = NULL;
     bool failed = false;
     while (!failed && (heap_obj = iterator.next()) != NULL) {
@@ -1183,7 +1183,7 @@
 // only time we'll actually delete the lol is when we Reset() or if the lol is
 // invisible, and its element count reaches 0.
 bool LiveObjectList::Delete(int id) {
-  LiveObjectList *lol = last();
+  LiveObjectList* lol = last();
   while (lol != NULL) {
     if (lol->id() == id) {
       break;
@@ -1246,8 +1246,8 @@
     newer_id = temp;
   }
 
-  LiveObjectList *newer_lol = FindLolForId(newer_id, last());
-  LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
+  LiveObjectList* newer_lol = FindLolForId(newer_id, last());
+  LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
 
   // If the id is defined, and we can't find a LOL for it, then we have an
   // invalid id.
@@ -1336,7 +1336,9 @@
   // Allocate the JSArray of the elements.
   Handle<JSObject> elements = factory->NewJSObject(isolate->array_function());
   if (elements->IsFailure()) return Object::cast(*elements);
-  Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
+
+  maybe_result = Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Set body.elements.
   Handle<String> elements_sym = factory->LookupAsciiSymbol("elements");
@@ -1363,8 +1365,8 @@
     newer_id = temp;
   }
 
-  LiveObjectList *newer_lol = FindLolForId(newer_id, last());
-  LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
+  LiveObjectList* newer_lol = FindLolForId(newer_id, last());
+  LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
 
   // If the id is defined, and we can't find a LOL for it, then we have an
   // invalid id.
@@ -1462,7 +1464,9 @@
   Handle<JSObject> summary_obj =
     factory->NewJSObject(isolate->array_function());
   if (summary_obj->IsFailure()) return Object::cast(*summary_obj);
-  Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
+
+  maybe_result = Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Create the body object.
   Handle<JSObject> body = factory->NewJSObject(isolate->object_function());
@@ -1589,7 +1593,9 @@
 
   // Return the result as a JS array.
   Handle<JSObject> lols = factory->NewJSObject(isolate->array_function());
-  Handle<JSArray>::cast(lols)->SetContent(*list);
+
+  maybe_result = Handle<JSArray>::cast(lols)->SetContent(*list);
+  if (maybe_result->IsFailure()) return maybe_result;
 
   Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
   if (result->IsFailure()) return Object::cast(*result);
@@ -1620,7 +1626,7 @@
 
 // Deletes all captured lols.
 void LiveObjectList::Reset() {
-  LiveObjectList *lol = last();
+  LiveObjectList* lol = last();
   // Just delete the last.  Each lol will delete its prev automatically.
   delete lol;
 
@@ -1709,8 +1715,8 @@
 
 inline bool AddRootRetainerIfFound(const LolVisitor& visitor,
                                    LolFilter* filter,
-                                   LiveObjectSummary *summary,
-                                   void (*SetRootFound)(LiveObjectSummary *s),
+                                   LiveObjectSummary* summary,
+                                   void (*SetRootFound)(LiveObjectSummary* s),
                                    int start,
                                    int dump_limit,
                                    int* total_count,
@@ -1756,12 +1762,12 @@
 }
 
 
-inline void SetFoundRoot(LiveObjectSummary *summary) {
+inline void SetFoundRoot(LiveObjectSummary* summary) {
   summary->set_found_root();
 }
 
 
-inline void SetFoundWeakRoot(LiveObjectSummary *summary) {
+inline void SetFoundWeakRoot(LiveObjectSummary* summary) {
   summary->set_found_weak_root();
 }
 
@@ -1773,7 +1779,7 @@
                                  int dump_limit,
                                  int* total_count,
                                  LolFilter* filter,
-                                 LiveObjectSummary *summary,
+                                 LiveObjectSummary* summary,
                                  JSFunction* arguments_function,
                                  Handle<Object> error) {
   HandleScope scope;
@@ -2261,7 +2267,7 @@
 }
 
 
-void LiveObjectList::DoProcessNonLive(HeapObject *obj) {
+void LiveObjectList::DoProcessNonLive(HeapObject* obj) {
   // We should only be called if we have at least one lol to search.
   ASSERT(last() != NULL);
   Element* element = last()->Find(obj);
@@ -2278,7 +2284,7 @@
     int count = lol->obj_count_;
     for (int i = 0; i < count; i++) {
       HeapObject** p = &elements[i].obj_;
-      v->VisitPointer(reinterpret_cast<Object **>(p));
+      v->VisitPointer(reinterpret_cast<Object**>(p));
     }
     lol = lol->prev_;
   }
@@ -2383,11 +2389,11 @@
   PurgeDuplicates();
 
   // After the GC, sweep away all free'd Elements and compact.
-  LiveObjectList *prev = NULL;
-  LiveObjectList *next = NULL;
+  LiveObjectList* prev = NULL;
+  LiveObjectList* next = NULL;
 
   // Iterating from the youngest lol to the oldest lol.
-  for (LiveObjectList *lol = last(); lol; lol = prev) {
+  for (LiveObjectList* lol = last(); lol; lol = prev) {
     Element* elements = lol->elements_;
     prev = lol->prev();  // Save the prev.
 
@@ -2440,7 +2446,7 @@
       const int kMaxUnusedSpace = 64;
       if (diff > kMaxUnusedSpace) {  // Threshold for shrinking.
         // Shrink the list.
-        Element *new_elements = NewArray<Element>(new_count);
+        Element* new_elements = NewArray<Element>(new_count);
         memcpy(new_elements, elements, new_count * sizeof(Element));
 
         DeleteArray<Element>(elements);
@@ -2507,7 +2513,7 @@
   OS::Print("  Start verify ...\n");
   OS::Print("  Verifying ...");
   Flush();
-  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+  HeapIterator iterator;
   HeapObject* heap_obj = NULL;
   while ((heap_obj = iterator.next()) != NULL) {
     number_of_heap_objects++;
@@ -2613,7 +2619,7 @@
     HeapObject* heap_obj = it.Obj();
     if (heap->InFromSpace(heap_obj)) {
       OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n",
-                i++, heap_obj, heap->new_space()->FromSpaceLow());
+                i++, heap_obj, heap->new_space()->FromSpaceStart());
     }
   }
 }
diff --git a/src/liveobjectlist.h b/src/liveobjectlist.h
index 65470d7..1aa9196 100644
--- a/src/liveobjectlist.h
+++ b/src/liveobjectlist.h
@@ -77,7 +77,7 @@
   inline static void GCEpilogue();
   inline static void GCPrologue();
   inline static void IterateElements(ObjectVisitor* v);
-  inline static void ProcessNonLive(HeapObject *obj);
+  inline static void ProcessNonLive(HeapObject* obj);
   inline static void UpdateReferencesForScavengeGC();
 
   // Note: LOLs can be listed by calling Dump(0, <lol id>), and 2 LOLs can be
@@ -125,7 +125,7 @@
   static void GCEpiloguePrivate();
   static void IterateElementsPrivate(ObjectVisitor* v);
 
-  static void DoProcessNonLive(HeapObject *obj);
+  static void DoProcessNonLive(HeapObject* obj);
 
   static int CompareElement(const Element* a, const Element* b);
 
@@ -138,7 +138,7 @@
                           int dump_limit,
                           int* total_count,
                           LolFilter* filter,
-                          LiveObjectSummary *summary,
+                          LiveObjectSummary* summary,
                           JSFunction* arguments_function,
                           Handle<Object> error);
 
@@ -151,7 +151,7 @@
                                        bool is_tracking_roots);
 
   static bool NeedLOLProcessing() { return (last() != NULL); }
-  static void NullifyNonLivePointer(HeapObject **p) {
+  static void NullifyNonLivePointer(HeapObject** p) {
     // Mask out the low bit that marks this as a heap object.  We'll use this
     // cleared bit as an indicator that this pointer needs to be collected.
     //
@@ -202,7 +202,7 @@
   int id_;
   int capacity_;
   int obj_count_;
-  Element *elements_;
+  Element* elements_;
 
   // Statics for managing all the lists.
   static uint32_t next_element_id_;
diff --git a/src/log.cc b/src/log.cc
index 3d66b5f..21d64df 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -35,6 +35,7 @@
 #include "global-handles.h"
 #include "log.h"
 #include "macro-assembler.h"
+#include "platform.h"
 #include "runtime-profiler.h"
 #include "serialize.h"
 #include "string-stream.h"
@@ -461,18 +462,20 @@
       utf8_pos_ += utf8_length;
       return;
     }
-    int uc16_length = Min(str->length(), kUc16BufferSize);
-    String::WriteToFlat(str, uc16_buffer_, 0, uc16_length);
+    int uc16_length = Min(str->length(), kUtf16BufferSize);
+    String::WriteToFlat(str, utf16_buffer, 0, uc16_length);
+    int previous = unibrow::Utf16::kNoPreviousCharacter;
     for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
-      uc16 c = uc16_buffer_[i];
+      uc16 c = utf16_buffer[i];
       if (c <= String::kMaxAsciiCharCodeU) {
         utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
       } else {
-        int char_length = unibrow::Utf8::Length(c);
+        int char_length = unibrow::Utf8::Length(c, previous);
         if (utf8_pos_ + char_length > kUtf8BufferSize) break;
-        unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c);
+        unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c, previous);
         utf8_pos_ += char_length;
       }
+      previous = c;
     }
   }
 
@@ -504,11 +507,11 @@
 
  private:
   static const int kUtf8BufferSize = 512;
-  static const int kUc16BufferSize = 128;
+  static const int kUtf16BufferSize = 128;
 
   int utf8_pos_;
   char utf8_buffer_[kUtf8BufferSize];
-  uc16 uc16_buffer_[kUc16BufferSize];
+  uc16 utf16_buffer[kUtf16BufferSize];
 };
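
The hunk above threads the previously seen UTF-16 code unit into unibrow's
Length/Encode so that a surrogate pair is emitted as one four-byte UTF-8
sequence instead of two unpaired halves. A standalone sketch of the
equivalent conversion, using lookahead rather than unibrow's
previous-character API (all names here are illustrative, not V8's):

    #include <stddef.h>
    #include <stdint.h>
    #include <string>

    // Encode UTF-16 as UTF-8; a high surrogate followed by a low surrogate
    // combines into a single code point before encoding.
    std::string Utf16ToUtf8(const uint16_t* in, size_t len) {
      std::string out;
      for (size_t i = 0; i < len; i++) {
        uint32_t cp = in[i];
        if (cp >= 0xD800 && cp <= 0xDBFF && i + 1 < len &&
            in[i + 1] >= 0xDC00 && in[i + 1] <= 0xDFFF) {
          cp = 0x10000 + ((cp - 0xD800) << 10) + (in[i + 1] - 0xDC00);
          i++;  // The low surrogate was consumed as part of this code point.
        }
        if (cp <= 0x7F) {
          out += static_cast<char>(cp);
        } else if (cp <= 0x7FF) {
          out += static_cast<char>(0xC0 | (cp >> 6));
          out += static_cast<char>(0x80 | (cp & 0x3F));
        } else if (cp <= 0xFFFF) {
          out += static_cast<char>(0xE0 | (cp >> 12));
          out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
          out += static_cast<char>(0x80 | (cp & 0x3F));
        } else {  // Only reachable via a combined surrogate pair.
          out += static_cast<char>(0xF0 | (cp >> 18));
          out += static_cast<char>(0x80 | ((cp >> 12) & 0x3F));
          out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
          out += static_cast<char>(0x80 | (cp & 0x3F));
        }
      }
      return out;
    }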
 
 
@@ -1356,12 +1359,12 @@
 
 static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
                                       Handle<Code>* code_objects) {
+  HeapIterator iterator;
   AssertNoAllocation no_alloc;
   int compiled_funcs_count = 0;
 
   // Iterate the heap to find shared function info objects and record
   // the unoptimized code for them.
-  HeapIterator iterator;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (!obj->IsSharedFunctionInfo()) continue;
     SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
@@ -1450,6 +1453,8 @@
   const char arch[] = "x64";
 #elif V8_TARGET_ARCH_ARM
   const char arch[] = "arm";
+#elif V8_TARGET_ARCH_MIPS
+  const char arch[] = "mips";
 #else
   const char arch[] = "unknown";
 #endif
@@ -1519,8 +1524,10 @@
 
 
 void Logger::LogCodeObjects() {
-  AssertNoAllocation no_alloc;
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                          "Logger::LogCodeObjects");
   HeapIterator iterator;
+  AssertNoAllocation no_alloc;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (obj->IsCode()) LogCodeObject(obj);
   }
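
The CollectAllGarbage(Heap::kMakeHeapIterableMask) calls added here and in
LogCompiledFunctions and LogAccessorCallbacks below appear to serve the same
purpose: with lazy sweeping and incremental marking in this V8 version, the
heap is not always in a state a HeapIterator can traverse, so a collection
with the make-heap-iterable flag is forced before the iterator is
constructed (and before the AssertNoAllocation guard is installed).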
@@ -1573,6 +1580,8 @@
 
 
 void Logger::LogCompiledFunctions() {
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                          "Logger::LogCompiledFunctions");
   HandleScope scope;
   const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
   ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
@@ -1591,9 +1600,10 @@
 
 
 void Logger::LogAccessorCallbacks() {
-  AssertNoAllocation no_alloc;
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                          "Logger::LogAccessorCallbacks");
   HeapIterator iterator;
-  i::Isolate* isolate = ISOLATE;
+  AssertNoAllocation no_alloc;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (!obj->IsAccessorInfo()) continue;
     AccessorInfo* ai = AccessorInfo::cast(obj);
@@ -1601,17 +1611,17 @@
     String* name = String::cast(ai->name());
     Address getter_entry = v8::ToCData<Address>(ai->getter());
     if (getter_entry != 0) {
-      PROFILE(isolate, GetterCallbackEvent(name, getter_entry));
+      PROFILE(ISOLATE, GetterCallbackEvent(name, getter_entry));
     }
     Address setter_entry = v8::ToCData<Address>(ai->setter());
     if (setter_entry != 0) {
-      PROFILE(isolate, SetterCallbackEvent(name, setter_entry));
+      PROFILE(ISOLATE, SetterCallbackEvent(name, setter_entry));
     }
   }
 }
 
 
-bool Logger::Setup() {
+bool Logger::SetUp() {
   // Tests and EnsureInitialize() can call this twice in a row. It's harmless.
   if (is_initialized_) return true;
   is_initialized_ = true;
@@ -1704,9 +1714,9 @@
 
 
 void Logger::EnableSlidingStateWindow() {
-  // If the ticker is NULL, Logger::Setup has not been called yet.  In
+  // If the ticker is NULL, Logger::SetUp has not been called yet.  In
   // that case, we set the sliding_state_window flag so that the
-  // sliding window computation will be started when Logger::Setup is
+  // sliding window computation will be started when Logger::SetUp is
   // called.
   if (ticker_ == NULL) {
     FLAG_sliding_state_window = true;
@@ -1719,13 +1729,14 @@
   }
 }
 
+// Protects the state below.
+static LazyMutex active_samplers_mutex = LAZY_MUTEX_INITIALIZER;
 
-Mutex* SamplerRegistry::mutex_ = OS::CreateMutex();
 List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
 
 
 bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
-  ScopedLock lock(mutex_);
+  ScopedLock lock(active_samplers_mutex.Pointer());
   for (int i = 0;
        ActiveSamplersExist() && i < active_samplers_->length();
        ++i) {
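
Replacing a mutex created by OS::CreateMutex() during static initialization
with LAZY_MUTEX_INITIALIZER fits the pattern of a constant-initializable
static whose real mutex is only created on the first Pointer() call, so no
constructor has to run at library load time. A standalone sketch of that
pattern, with invented names rather than V8's actual LazyMutex internals:

    #include <mutex>

    // A static of this type is constant-initialized: no code runs at load
    // time, and the mutex itself is allocated on first use.
    struct LazyMutexSketch {
      std::once_flag once;
      std::mutex* mutex = nullptr;

      std::mutex* Pointer() {
        std::call_once(once, [this] { mutex = new std::mutex; });
        return mutex;
      }
    };

    static LazyMutexSketch sketch_mutex;  // No static initializer emitted.

    void WithLock() {
      std::lock_guard<std::mutex> lock(*sketch_mutex.Pointer());
      // ... critical section ...
    }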
@@ -1752,7 +1763,7 @@
 
 void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
   ASSERT(sampler->IsActive());
-  ScopedLock lock(mutex_);
+  ScopedLock lock(active_samplers_mutex.Pointer());
   if (active_samplers_ == NULL) {
     active_samplers_ = new List<Sampler*>;
   } else {
@@ -1764,7 +1775,7 @@
 
 void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
   ASSERT(sampler->IsActive());
-  ScopedLock lock(mutex_);
+  ScopedLock lock(active_samplers_mutex.Pointer());
   ASSERT(active_samplers_ != NULL);
   bool removed = active_samplers_->RemoveElement(sampler);
   ASSERT(removed);
diff --git a/src/log.h b/src/log.h
index 50358ce..1297387 100644
--- a/src/log.h
+++ b/src/log.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 #define V8_LOG_H_
 
 #include "allocation.h"
+#include "objects.h"
 #include "platform.h"
 #include "log-utils.h"
 
@@ -70,7 +71,6 @@
 // tick profiler requires code events, so --prof implies --log-code.
 
 // Forward declarations.
-class HashMap;
 class LogMessageBuilder;
 class Profiler;
 class Semaphore;
@@ -149,14 +149,14 @@
 #undef DECLARE_ENUM
 
   // Acquires resources for logging if the right flags are set.
-  bool Setup();
+  bool SetUp();
 
   void EnsureTickerStarted();
   void EnsureTickerStopped();
 
   Sampler* sampler();
 
-  // Frees resources acquired in Setup.
+  // Frees resources acquired in SetUp.
   // When a temporary file is used for the log, returns its stream descriptor,
   // leaving the file open.
   FILE* TearDown();
@@ -410,7 +410,7 @@
   NameMap* address_to_name_map_;
 
   // Guards against multiple calls to TearDown() that can happen in some tests.
-  // 'true' between Setup() and TearDown().
+  // 'true' between SetUp() and TearDown().
   bool is_initialized_;
 
   // Support for 'incremental addresses' in compressed logs:
@@ -454,7 +454,6 @@
     return active_samplers_ != NULL && !active_samplers_->is_empty();
   }
 
-  static Mutex* mutex_;  // Protects the state below.
   static List<Sampler*>* active_samplers_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(SamplerRegistry);
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 30838bd..11e2217 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,27 +36,6 @@
 };
 
 
-enum CodeLocation {
-  IN_JAVASCRIPT,
-  IN_JS_ENTRY,
-  IN_C_ENTRY
-};
-
-
-enum HandlerType {
-  TRY_CATCH_HANDLER,
-  TRY_FINALLY_HANDLER,
-  JS_ENTRY_HANDLER
-};
-
-
-// Types of uncatchable exceptions.
-enum UncatchableExceptionType {
-  OUT_OF_MEMORY,
-  TERMINATION
-};
-
-
 // Invalid depth in prototype chain.
 const int kInvalidProtoDepth = -1;
 
@@ -93,6 +72,63 @@
 namespace v8 {
 namespace internal {
 
+class FrameScope {
+ public:
+  explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
+      : masm_(masm), type_(type), old_has_frame_(masm->has_frame()) {
+    masm->set_has_frame(true);
+    if (type != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+      masm->EnterFrame(type);
+    }
+  }
+
+  ~FrameScope() {
+    if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+      masm_->LeaveFrame(type_);
+    }
+    masm_->set_has_frame(old_has_frame_);
+  }
+
+  // Normally we generate the leave-frame code when this object goes
+  // out of scope.  Sometimes we may need to generate the code somewhere else
+  // in addition.  Calling this will achieve that, but the object stays in
+  // scope, the MacroAssembler is still marked as being in a frame scope, and
+  // the code will be generated again when it goes out of scope.
+  void GenerateLeaveFrame() {
+    masm_->LeaveFrame(type_);
+  }
+
+ private:
+  MacroAssembler* masm_;
+  StackFrame::Type type_;
+  bool old_has_frame_;
+};
+
+
+class AllowExternalCallThatCantCauseGC: public FrameScope {
+ public:
+  explicit AllowExternalCallThatCantCauseGC(MacroAssembler* masm)
+      : FrameScope(masm, StackFrame::NONE) { }
+};
+
+
+class NoCurrentFrameScope {
+ public:
+  explicit NoCurrentFrameScope(MacroAssembler* masm)
+      : masm_(masm), saved_(masm->has_frame()) {
+    masm->set_has_frame(false);
+  }
+
+  ~NoCurrentFrameScope() {
+    masm_->set_has_frame(saved_);
+  }
+
+ private:
+  MacroAssembler* masm_;
+  bool saved_;
+};
+
+
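
FrameScope is an RAII helper for code generators: constructing it marks the
MacroAssembler as having a frame and emits EnterFrame (unless the type is
MANUAL or NONE), and its destructor emits the matching LeaveFrame. A
hypothetical stub generator using it might look like this (sketch only; the
stub body is invented):

    void GenerateExampleStub(MacroAssembler* masm) {
      // Prologue is emitted here; masm->has_frame() is true inside.
      FrameScope scope(masm, StackFrame::INTERNAL);

      // ... emit code that may call into the runtime ...

      // An early exit can emit the epilogue explicitly; the scope's
      // destructor still emits it again for the fall-through path:
      //   scope.GenerateLeaveFrame();
      //   masm->Ret();
    }  // Destructor emits LeaveFrame(StackFrame::INTERNAL).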
 // Support for "structured" code comments.
 #ifdef DEBUG
 
diff --git a/src/macros.py b/src/macros.py
index 7a493ca..93287ae 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -26,7 +26,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 # Dictionary that is passed as defines for js2c.py.
-# Used for defines that must be defined for all native js files.
+# Used for defines that must be defined for all native JS files.
 
 const NONE        = 0;
 const READ_ONLY   = 1;
@@ -82,8 +82,6 @@
 const kMaxYear  = 1000000;
 const kMinMonth = -10000000;
 const kMaxMonth = 10000000;
-const kMinDate  = -100000000;
-const kMaxDate  = 100000000;
 
 # Native cache ids.
 const STRING_TO_REGEXP_CACHE_ID = 0;
@@ -103,6 +101,9 @@
 macro IS_ARRAY(arg)             = (%_IsArray(arg));
 macro IS_FUNCTION(arg)          = (%_IsFunction(arg));
 macro IS_REGEXP(arg)            = (%_IsRegExp(arg));
+macro IS_SET(arg)               = (%_ClassOf(arg) === 'Set');
+macro IS_MAP(arg)               = (%_ClassOf(arg) === 'Map');
+macro IS_WEAKMAP(arg)           = (%_ClassOf(arg) === 'WeakMap');
 macro IS_DATE(arg)              = (%_ClassOf(arg) === 'Date');
 macro IS_NUMBER_WRAPPER(arg)    = (%_ClassOf(arg) === 'Number');
 macro IS_STRING_WRAPPER(arg)    = (%_ClassOf(arg) === 'String');
@@ -128,6 +129,11 @@
 # we cannot handle those anyway.
 macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
 
+# Indices in bound function info retrieved by %BoundFunctionGetBindings(...).
+const kBoundFunctionIndex = 0;
+const kBoundThisIndex = 1;
+const kBoundArgumentsStartIndex = 2;
+
 # Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
 macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
 macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
@@ -158,16 +164,36 @@
 
 # Gets the value of a Date object. If arg is not a Date object
 # a type error is thrown.
-macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
-macro DAY(time) = ($floor(time / 86400000));
-macro NAN_OR_DATE_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : DateFromTime(time));
-macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
-macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
-macro NAN_OR_MIN_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MIN_FROM_TIME(time));
-macro SEC_FROM_TIME(time) = (Modulo($floor(time / 1000), 60));
-macro NAN_OR_SEC_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : SEC_FROM_TIME(time));
-macro MS_FROM_TIME(time) = (Modulo(time, 1000));
-macro NAN_OR_MS_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MS_FROM_TIME(time));
+macro CHECK_DATE(arg) = if (%_ClassOf(arg) !== 'Date') ThrowDateTypeError();
+macro LOCAL_DATE_VALUE(arg) = (%_DateField(arg, 0) + %_DateField(arg, 21));
+macro UTC_DATE_VALUE(arg)    = (%_DateField(arg, 0));
+
+macro LOCAL_YEAR(arg)        = (%_DateField(arg, 1));
+macro LOCAL_MONTH(arg)       = (%_DateField(arg, 2));
+macro LOCAL_DAY(arg)         = (%_DateField(arg, 3));
+macro LOCAL_WEEKDAY(arg)     = (%_DateField(arg, 4));
+macro LOCAL_HOUR(arg)        = (%_DateField(arg, 5));
+macro LOCAL_MIN(arg)         = (%_DateField(arg, 6));
+macro LOCAL_SEC(arg)         = (%_DateField(arg, 7));
+macro LOCAL_MS(arg)          = (%_DateField(arg, 8));
+macro LOCAL_DAYS(arg)        = (%_DateField(arg, 9));
+macro LOCAL_TIME_IN_DAY(arg) = (%_DateField(arg, 10));
+
+macro UTC_YEAR(arg)        = (%_DateField(arg, 11));
+macro UTC_MONTH(arg)       = (%_DateField(arg, 12));
+macro UTC_DAY(arg)         = (%_DateField(arg, 13));
+macro UTC_WEEKDAY(arg)     = (%_DateField(arg, 14));
+macro UTC_HOUR(arg)        = (%_DateField(arg, 15));
+macro UTC_MIN(arg)         = (%_DateField(arg, 16));
+macro UTC_SEC(arg)         = (%_DateField(arg, 17));
+macro UTC_MS(arg)          = (%_DateField(arg, 18));
+macro UTC_DAYS(arg)        = (%_DateField(arg, 19));
+macro UTC_TIME_IN_DAY(arg) = (%_DateField(arg, 20));
+
+macro TIMEZONE_OFFSET(arg)   = (%_DateField(arg, 21));
+
+macro SET_UTC_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 1));
+macro SET_LOCAL_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 0));
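
The removed *_FROM_TIME macros recomputed every component from the raw time
value on each access; the %_DateField macros above instead read components
the runtime has already computed and cached on the date object, with index 0
holding the UTC time value and index 21 the timezone offset. For reference,
the removed arithmetic in plain C++ (Modulo must be non-negative, as in the
JS helper the macros called):

    #include <cmath>

    // Non-negative modulo, matching the Modulo() helper used by the macros.
    static double Modulo(double a, double n) {
      double m = std::fmod(a, n);
      return m < 0 ? m + n : m;
    }

    struct TimeParts { double day, hour, min, sec, ms; };

    // Split t (milliseconds since the epoch) as the removed macros did.
    static TimeParts SplitTime(double t) {
      TimeParts p;
      p.day  = std::floor(t / 86400000);             // DAY(time)
      p.hour = Modulo(std::floor(t / 3600000), 24);  // HOUR_FROM_TIME(time)
      p.min  = Modulo(std::floor(t / 60000), 60);    // MIN_FROM_TIME(time)
      p.sec  = Modulo(std::floor(t / 1000), 60);     // SEC_FROM_TIME(time)
      p.ms   = Modulo(t, 1000);                      // MS_FROM_TIME(time)
      return p;
    }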
 
 # Last input and last subject of regexp matches.
 macro LAST_SUBJECT(array) = ((array)[1]);
diff --git a/src/mark-compact-inl.h b/src/mark-compact-inl.h
new file mode 100644
index 0000000..43f6b89
--- /dev/null
+++ b/src/mark-compact-inl.h
@@ -0,0 +1,114 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MARK_COMPACT_INL_H_
+#define V8_MARK_COMPACT_INL_H_
+
+#include "isolate.h"
+#include "memory.h"
+#include "mark-compact.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+MarkBit Marking::MarkBitFrom(Address addr) {
+  MemoryChunk* p = MemoryChunk::FromAddress(addr);
+  return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
+                                         p->ContainsOnlyData());
+}
+
+
+void MarkCompactCollector::SetFlags(int flags) {
+  sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0);
+  reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
+  abort_incremental_marking_ =
+      ((flags & Heap::kAbortIncrementalMarkingMask) != 0);
+}
+
+
+void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
+  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+  if (!mark_bit.Get()) {
+    mark_bit.Set();
+    MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+    ProcessNewlyMarkedObject(obj);
+  }
+}
+
+
+bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) {
+  MarkBit mark = Marking::MarkBitFrom(object);
+  bool old_mark = mark.Get();
+  if (!old_mark) SetMark(object, mark);
+  return old_mark;
+}
+
+
+void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) {
+  if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object);
+}
+
+
+void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
+  ASSERT(!mark_bit.Get());
+  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+  mark_bit.Set();
+  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+  if (obj->IsMap()) {
+    heap_->ClearCacheOnMap(Map::cast(obj));
+  }
+}
+
+
+bool MarkCompactCollector::IsMarked(Object* obj) {
+  ASSERT(obj->IsHeapObject());
+  HeapObject* heap_object = HeapObject::cast(obj);
+  return Marking::MarkBitFrom(heap_object).Get();
+}
+
+
+void MarkCompactCollector::RecordSlot(Object** anchor_slot,
+                                      Object** slot,
+                                      Object* object) {
+  Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
+  if (object_page->IsEvacuationCandidate() &&
+      !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            object_page->slots_buffer_address(),
+                            slot,
+                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
+      EvictEvacuationCandidate(object_page);
+    }
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MARK_COMPACT_INL_H_
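
RecordSlot above is the core of the compaction bookkeeping: when a slot
points into a page selected for evacuation, the slot's address is appended
to that page's slots buffer so only those slots need rewriting after objects
move; if the buffer cannot accept more entries (FAIL_ON_OVERFLOW), the page
is evicted from the candidate set rather than risking a missed update. A toy
model of the idea, far simpler than V8's chained SlotsBuffer:

    // Each evacuation-candidate page owns one of these; Add() returning
    // false corresponds to the FAIL_ON_OVERFLOW case above.
    struct ToySlotsBuffer {
      static const int kCapacity = 1024;
      void** slots_[kCapacity];  // Addresses of slots into this page.
      int count_ = 0;

      bool Add(void** slot) {
        if (count_ == kCapacity) return false;  // Overflow: evict the page.
        slots_[count_++] = slot;
        return true;
      }

      // After evacuation, only the recorded slots are rewritten.
      template <typename F>
      void ForEach(F update) {
        for (int i = 0; i < count_; i++) update(slots_[i]);
      }
    };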
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 9b0d5fc..dde172d 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,20 +27,31 @@
 
 #include "v8.h"
 
+#include "code-stubs.h"
 #include "compilation-cache.h"
+#include "deoptimizer.h"
 #include "execution.h"
-#include "heap-profiler.h"
 #include "gdb-jit.h"
 #include "global-handles.h"
+#include "heap-profiler.h"
 #include "ic-inl.h"
+#include "incremental-marking.h"
 #include "liveobjectlist-inl.h"
 #include "mark-compact.h"
 #include "objects-visiting.h"
+#include "objects-visiting-inl.h"
 #include "stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
+
+const char* Marking::kWhiteBitPattern = "00";
+const char* Marking::kBlackBitPattern = "10";
+const char* Marking::kGreyBitPattern = "11";
+const char* Marking::kImpossibleBitPattern = "01";
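
Read each pattern as (mark bit, next bit) for the two consecutive bitmap
bits an object owns, matching how the rest of the file manipulates them:
SetMark sets the first bit, grey objects also have Next() set, and
ClearMarkbits clears both. A sketch of the resulting tri-color decoding
(names invented here):

    enum class Color { kWhite, kGrey, kBlack, kImpossible };

    // "00" white, "10" black, "11" grey; "01" is never produced.
    Color DecodeMarkBits(bool mark_bit, bool next_bit) {
      if (!mark_bit) return next_bit ? Color::kImpossible : Color::kWhite;
      return next_bit ? Color::kGrey : Color::kBlack;
    }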
+
+
 // -------------------------------------------------------------------------
 // MarkCompactCollector
 
@@ -48,70 +59,616 @@
 #ifdef DEBUG
       state_(IDLE),
 #endif
-      force_compaction_(false),
-      compacting_collection_(false),
-      compact_on_next_gc_(false),
-      previous_marked_count_(0),
+      sweep_precisely_(false),
+      reduce_memory_footprint_(false),
+      abort_incremental_marking_(false),
+      compacting_(false),
+      was_marked_incrementally_(false),
+      collect_maps_(FLAG_collect_maps),
+      flush_monomorphic_ics_(false),
       tracer_(NULL),
-#ifdef DEBUG
-      live_young_objects_size_(0),
-      live_old_pointer_objects_size_(0),
-      live_old_data_objects_size_(0),
-      live_code_objects_size_(0),
-      live_map_objects_size_(0),
-      live_cell_objects_size_(0),
-      live_lo_objects_size_(0),
-      live_bytes_(0),
-#endif
+      migration_slots_buffer_(NULL),
       heap_(NULL),
       code_flusher_(NULL),
       encountered_weak_maps_(NULL) { }
 
 
+#ifdef DEBUG
+class VerifyMarkingVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
+      }
+    }
+  }
+};
+
+
+static void VerifyMarking(Address bottom, Address top) {
+  VerifyMarkingVisitor visitor;
+  HeapObject* object;
+  Address next_object_must_be_here_or_later = bottom;
+
+  for (Address current = bottom;
+       current < top;
+       current += kPointerSize) {
+    object = HeapObject::FromAddress(current);
+    if (MarkCompactCollector::IsMarked(object)) {
+      ASSERT(current >= next_object_must_be_here_or_later);
+      object->Iterate(&visitor);
+      next_object_must_be_here_or_later = current + object->Size();
+    }
+  }
+}
+
+
+static void VerifyMarking(NewSpace* space) {
+  Address end = space->top();
+  NewSpacePageIterator it(space->bottom(), end);
+  // The bottom position is at the start of its page, which allows us to
+  // use page->area_start() as the start of the range on all pages.
+  ASSERT_EQ(space->bottom(),
+            NewSpacePage::FromAddress(space->bottom())->area_start());
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    Address limit = it.has_next() ? page->area_end() : end;
+    ASSERT(limit == end || !page->Contains(end));
+    VerifyMarking(page->area_start(), limit);
+  }
+}
+
+
+static void VerifyMarking(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    VerifyMarking(p->area_start(), p->area_end());
+  }
+}
+
+
+static void VerifyMarking(Heap* heap) {
+  VerifyMarking(heap->old_pointer_space());
+  VerifyMarking(heap->old_data_space());
+  VerifyMarking(heap->code_space());
+  VerifyMarking(heap->cell_space());
+  VerifyMarking(heap->map_space());
+  VerifyMarking(heap->new_space());
+
+  VerifyMarkingVisitor visitor;
+
+  LargeObjectIterator it(heap->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    if (MarkCompactCollector::IsMarked(obj)) {
+      obj->Iterate(&visitor);
+    }
+  }
+
+  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+}
+
+
+class VerifyEvacuationVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
+      }
+    }
+  }
+};
+
+
+static void VerifyEvacuation(Address bottom, Address top) {
+  VerifyEvacuationVisitor visitor;
+  HeapObject* object;
+  Address next_object_must_be_here_or_later = bottom;
+
+  for (Address current = bottom;
+       current < top;
+       current += kPointerSize) {
+    object = HeapObject::FromAddress(current);
+    if (MarkCompactCollector::IsMarked(object)) {
+      ASSERT(current >= next_object_must_be_here_or_later);
+      object->Iterate(&visitor);
+      next_object_must_be_here_or_later = current + object->Size();
+    }
+  }
+}
+
+
+static void VerifyEvacuation(NewSpace* space) {
+  NewSpacePageIterator it(space->bottom(), space->top());
+  VerifyEvacuationVisitor visitor;
+
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    Address current = page->area_start();
+    Address limit = it.has_next() ? page->area_end() : space->top();
+    ASSERT(limit == space->top() || !page->Contains(space->top()));
+    while (current < limit) {
+      HeapObject* object = HeapObject::FromAddress(current);
+      object->Iterate(&visitor);
+      current += object->Size();
+    }
+  }
+}
+
+
+static void VerifyEvacuation(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    if (p->IsEvacuationCandidate()) continue;
+    VerifyEvacuation(p->area_start(), p->area_end());
+  }
+}
+
+
+static void VerifyEvacuation(Heap* heap) {
+  VerifyEvacuation(heap->old_pointer_space());
+  VerifyEvacuation(heap->old_data_space());
+  VerifyEvacuation(heap->code_space());
+  VerifyEvacuation(heap->cell_space());
+  VerifyEvacuation(heap->map_space());
+  VerifyEvacuation(heap->new_space());
+
+  VerifyEvacuationVisitor visitor;
+  heap->IterateStrongRoots(&visitor, VISIT_ALL);
+}
+#endif
+
+
+void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
+  p->MarkEvacuationCandidate();
+  evacuation_candidates_.Add(p);
+}
+
+
+static void TraceFragmentation(PagedSpace* space) {
+  int number_of_pages = space->CountTotalPages();
+  intptr_t reserved = (number_of_pages * space->AreaSize());
+  intptr_t free = reserved - space->SizeOfObjects();
+  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
+         AllocationSpaceName(space->identity()),
+         number_of_pages,
+         static_cast<int>(free),
+         static_cast<double>(free) * 100 / reserved);
+}
+
+
+bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
+  if (!compacting_) {
+    ASSERT(evacuation_candidates_.length() == 0);
+
+    CollectEvacuationCandidates(heap()->old_pointer_space());
+    CollectEvacuationCandidates(heap()->old_data_space());
+
+    if (FLAG_compact_code_space && mode == NON_INCREMENTAL_COMPACTION) {
+      CollectEvacuationCandidates(heap()->code_space());
+    } else if (FLAG_trace_fragmentation) {
+      TraceFragmentation(heap()->code_space());
+    }
+
+    if (FLAG_trace_fragmentation) {
+      TraceFragmentation(heap()->map_space());
+      TraceFragmentation(heap()->cell_space());
+    }
+
+    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
+    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
+    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
+
+    compacting_ = evacuation_candidates_.length() > 0;
+  }
+
+  return compacting_;
+}
+
+
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.
   ASSERT(state_ == PREPARE_GC);
   ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
 
-  // Prepare has selected whether to compact the old generation or not.
-  // Tell the tracer.
-  if (IsCompacting()) tracer_->set_is_compacting();
-
   MarkLiveObjects();
+  ASSERT(heap_->incremental_marking()->IsStopped());
 
-  if (FLAG_collect_maps) ClearNonLiveTransitions();
+  if (collect_maps_) ClearNonLiveTransitions();
 
   ClearWeakMaps();
 
-  SweepLargeObjectSpace();
-
-  if (IsCompacting()) {
-    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
-    EncodeForwardingAddresses();
-
-    heap()->MarkMapPointersAsEncoded(true);
-    UpdatePointers();
-    heap()->MarkMapPointersAsEncoded(false);
-    heap()->isolate()->pc_to_code_cache()->Flush();
-
-    RelocateObjects();
-  } else {
-    SweepSpaces();
-    heap()->isolate()->pc_to_code_cache()->Flush();
+#ifdef DEBUG
+  if (FLAG_verify_heap) {
+    VerifyMarking(heap_);
   }
+#endif
+
+  SweepSpaces();
+
+  if (!collect_maps_) ReattachInitialMaps();
+
+  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
 
   Finish();
 
-  // Save the count of marked objects remaining after the collection and
-  // null out the GC tracer.
-  previous_marked_count_ = tracer_->marked_count();
-  ASSERT(previous_marked_count_ == 0);
   tracer_ = NULL;
 }
 
 
+#ifdef DEBUG
+void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    CHECK(p->markbits()->IsClean());
+    CHECK_EQ(0, p->LiveBytes());
+  }
+}
+
+void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
+  NewSpacePageIterator it(space->bottom(), space->top());
+
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    CHECK(p->markbits()->IsClean());
+    CHECK_EQ(0, p->LiveBytes());
+  }
+}
+
+void MarkCompactCollector::VerifyMarkbitsAreClean() {
+  VerifyMarkbitsAreClean(heap_->old_pointer_space());
+  VerifyMarkbitsAreClean(heap_->old_data_space());
+  VerifyMarkbitsAreClean(heap_->code_space());
+  VerifyMarkbitsAreClean(heap_->cell_space());
+  VerifyMarkbitsAreClean(heap_->map_space());
+  VerifyMarkbitsAreClean(heap_->new_space());
+
+  LargeObjectIterator it(heap_->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(obj);
+    ASSERT(Marking::IsWhite(mark_bit));
+  }
+}
+#endif
+
+
+static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+static void ClearMarkbitsInNewSpace(NewSpace* space) {
+  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
+
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+void MarkCompactCollector::ClearMarkbits() {
+  ClearMarkbitsInPagedSpace(heap_->code_space());
+  ClearMarkbitsInPagedSpace(heap_->map_space());
+  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
+  ClearMarkbitsInPagedSpace(heap_->old_data_space());
+  ClearMarkbitsInPagedSpace(heap_->cell_space());
+  ClearMarkbitsInNewSpace(heap_->new_space());
+
+  LargeObjectIterator it(heap_->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(obj);
+    mark_bit.Clear();
+    mark_bit.Next().Clear();
+  }
+}
+
+
+bool Marking::TransferMark(Address old_start, Address new_start) {
+  // This is only used when resizing an object.
+  ASSERT(MemoryChunk::FromAddress(old_start) ==
+         MemoryChunk::FromAddress(new_start));
+
+  // If the mark doesn't move, we don't check the color of the object.
+  // It doesn't matter whether the object is black, since it hasn't changed
+  // size, so the adjustment to the live data count will be zero anyway.
+  if (old_start == new_start) return false;
+
+  MarkBit new_mark_bit = MarkBitFrom(new_start);
+  MarkBit old_mark_bit = MarkBitFrom(old_start);
+
+#ifdef DEBUG
+  ObjectColor old_color = Color(old_mark_bit);
+#endif
+
+  if (Marking::IsBlack(old_mark_bit)) {
+    old_mark_bit.Clear();
+    ASSERT(IsWhite(old_mark_bit));
+    Marking::MarkBlack(new_mark_bit);
+    return true;
+  } else if (Marking::IsGrey(old_mark_bit)) {
+    ASSERT(heap_->incremental_marking()->IsMarking());
+    old_mark_bit.Clear();
+    old_mark_bit.Next().Clear();
+    ASSERT(IsWhite(old_mark_bit));
+    heap_->incremental_marking()->WhiteToGreyAndPush(
+        HeapObject::FromAddress(new_start), new_mark_bit);
+    heap_->incremental_marking()->RestartIfNotMarking();
+  }
+
+#ifdef DEBUG
+  ObjectColor new_color = Color(new_mark_bit);
+  ASSERT(new_color == old_color);
+#endif
+
+  return false;
+}
+
+
+const char* AllocationSpaceName(AllocationSpace space) {
+  switch (space) {
+    case NEW_SPACE: return "NEW_SPACE";
+    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
+    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
+    case CODE_SPACE: return "CODE_SPACE";
+    case MAP_SPACE: return "MAP_SPACE";
+    case CELL_SPACE: return "CELL_SPACE";
+    case LO_SPACE: return "LO_SPACE";
+    default:
+      UNREACHABLE();
+  }
+
+  return NULL;
+}
+
+
+// Returns zero for pages that have so little fragmentation that it is not
+// worth defragmenting them.  Otherwise, returns a positive integer that
+// gives an estimate of fragmentation on an arbitrary scale.
+static int FreeListFragmentation(PagedSpace* space, Page* p) {
+  // If page was not swept then there are no free list items on it.
+  if (!p->WasSwept()) {
+    if (FLAG_trace_fragmentation) {
+      PrintF("%p [%s]: %d bytes live (unswept)\n",
+             reinterpret_cast<void*>(p),
+             AllocationSpaceName(space->identity()),
+             p->LiveBytes());
+    }
+    return 0;
+  }
+
+  FreeList::SizeStats sizes;
+  space->CountFreeListItems(p, &sizes);
+
+  intptr_t ratio;
+  intptr_t ratio_threshold;
+  intptr_t area_size = space->AreaSize();
+  if (space->identity() == CODE_SPACE) {
+    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
+        area_size;
+    ratio_threshold = 10;
+  } else {
+    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
+        area_size;
+    ratio_threshold = 15;
+  }
+
+  if (FLAG_trace_fragmentation) {
+    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
+           reinterpret_cast<void*>(p),
+           AllocationSpaceName(space->identity()),
+           static_cast<int>(sizes.small_size_),
+           static_cast<double>(sizes.small_size_ * 100) /
+           area_size,
+           static_cast<int>(sizes.medium_size_),
+           static_cast<double>(sizes.medium_size_ * 100) /
+           area_size,
+           static_cast<int>(sizes.large_size_),
+           static_cast<double>(sizes.large_size_ * 100) /
+           area_size,
+           static_cast<int>(sizes.huge_size_),
+           static_cast<double>(sizes.huge_size_ * 100) /
+           area_size,
+           (ratio > ratio_threshold) ? "[fragmented]" : "");
+  }
+
+  if (FLAG_always_compact && sizes.Total() != area_size) {
+    return 1;
+  }
+
+  if (ratio <= ratio_threshold) return 0;  // Not fragmented.
+
+  return static_cast<int>(ratio - ratio_threshold);
+}
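
To make the scale concrete: for a non-code page with an area_size of
1,000,000 bytes, 20,000 bytes in small free blocks and 30,000 bytes in
medium ones, ratio = (20000 * 5 + 30000) * 100 / 1000000 = 13, which is
below the threshold of 15, so the page scores 0. With 40,000 bytes of small
blocks instead, the ratio is 23 and the page scores 23 - 15 = 8.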
+
+
+void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
+  ASSERT(space->identity() == OLD_POINTER_SPACE ||
+         space->identity() == OLD_DATA_SPACE ||
+         space->identity() == CODE_SPACE);
+
+  int number_of_pages = space->CountTotalPages();
+
+  const int kMaxMaxEvacuationCandidates = 1000;
+  int max_evacuation_candidates = Min(
+    kMaxMaxEvacuationCandidates,
+    static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
+
+  if (FLAG_stress_compaction || FLAG_always_compact) {
+    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
+  }
+
+  class Candidate {
+   public:
+    Candidate() : fragmentation_(0), page_(NULL) { }
+    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
+
+    int fragmentation() { return fragmentation_; }
+    Page* page() { return page_; }
+
+   private:
+    int fragmentation_;
+    Page* page_;
+  };
+
+  enum CompactionMode {
+    COMPACT_FREE_LISTS,
+    REDUCE_MEMORY_FOOTPRINT
+  };
+
+  CompactionMode mode = COMPACT_FREE_LISTS;
+
+  intptr_t reserved = number_of_pages * space->AreaSize();
+  intptr_t over_reserved = reserved - space->SizeOfObjects();
+  static const intptr_t kFreenessThreshold = 50;
+
+  if (over_reserved >= 2 * space->AreaSize() &&
+      reduce_memory_footprint_) {
+    mode = REDUCE_MEMORY_FOOTPRINT;
+
+    // We expect that empty pages are easier to compact, so slightly bump the
+    // limit.
+    max_evacuation_candidates += 2;
+
+    if (FLAG_trace_fragmentation) {
+      PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
+             static_cast<double>(over_reserved) / MB,
+             static_cast<int>(kFreenessThreshold));
+    }
+  }
+
+  intptr_t estimated_release = 0;
+
+  Candidate candidates[kMaxMaxEvacuationCandidates];
+
+  int count = 0;
+  int fragmentation = 0;
+  Candidate* least = NULL;
+
+  PageIterator it(space);
+  if (it.has_next()) it.next();  // Never compact the first page.
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    p->ClearEvacuationCandidate();
+
+    if (FLAG_stress_compaction) {
+      int counter = space->heap()->ms_count();
+      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
+      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
+    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
+      // Don't try to release too many pages.
+      if (estimated_release >= ((over_reserved * 3) / 4)) {
+        continue;
+      }
+
+      intptr_t free_bytes = 0;
+
+      if (!p->WasSwept()) {
+        free_bytes = (p->area_size() - p->LiveBytes());
+      } else {
+        FreeList::SizeStats sizes;
+        space->CountFreeListItems(p, &sizes);
+        free_bytes = sizes.Total();
+      }
+
+      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
+
+      if (free_pct >= kFreenessThreshold) {
+        estimated_release += 2 * p->area_size() - free_bytes;
+        fragmentation = free_pct;
+      } else {
+        fragmentation = 0;
+      }
+
+      if (FLAG_trace_fragmentation) {
+        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
+               reinterpret_cast<void*>(p),
+               AllocationSpaceName(space->identity()),
+               static_cast<int>(free_bytes),
+               static_cast<double>(free_bytes * 100) / p->area_size(),
+               (fragmentation > 0) ? "[fragmented]" : "");
+      }
+    } else {
+      fragmentation = FreeListFragmentation(space, p);
+    }
+
+    if (fragmentation != 0) {
+      if (count < max_evacuation_candidates) {
+        candidates[count++] = Candidate(fragmentation, p);
+      } else {
+        if (least == NULL) {
+          for (int i = 0; i < max_evacuation_candidates; i++) {
+            if (least == NULL ||
+                candidates[i].fragmentation() < least->fragmentation()) {
+              least = candidates + i;
+            }
+          }
+        }
+        if (least->fragmentation() < fragmentation) {
+          *least = Candidate(fragmentation, p);
+          least = NULL;
+        }
+      }
+    }
+  }
+
+  for (int i = 0; i < count; i++) {
+    AddEvacuationCandidate(candidates[i].page());
+  }
+
+  if (count > 0 && FLAG_trace_fragmentation) {
+    PrintF("Collected %d evacuation candidates for space %s\n",
+           count,
+           AllocationSpaceName(space->identity()));
+  }
+}
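
For scale, the candidate cap computed at the top of this function is
min(1000, sqrt(n / 2) + 1) for an n-page space: 512 pages allow
sqrt(256) + 1 = 17 candidates (19 in REDUCE_MEMORY_FOOTPRINT mode, after
the +2 bump), and the hard limit of 1000 is only reached near two million
pages.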
+
+
+void MarkCompactCollector::AbortCompaction() {
+  if (compacting_) {
+    int npages = evacuation_candidates_.length();
+    for (int i = 0; i < npages; i++) {
+      Page* p = evacuation_candidates_[i];
+      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+      p->ClearEvacuationCandidate();
+      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    }
+    compacting_ = false;
+    evacuation_candidates_.Rewind(0);
+    invalidated_code_.Rewind(0);
+  }
+  ASSERT_EQ(0, evacuation_candidates_.length());
+}
+
+
 void MarkCompactCollector::Prepare(GCTracer* tracer) {
+  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
+
+  // Disable collection of maps if incremental marking is enabled.
+  // The map collection algorithm relies on a special map transition tree
+  // traversal order which is not implemented for incremental marking.
+  collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
+
+  // Monomorphic ICs are preserved when possible, but need to be flushed
+  // when they might be keeping a Context alive, or when the heap is about
+  // to be serialized.
+  flush_monomorphic_ics_ =
+      heap()->isolate()->context_exit_happened() || Serializer::enabled();
+
   // Rather than passing the tracer around we stash it in a static member
   // variable.
   tracer_ = tracer;
@@ -120,16 +677,10 @@
   ASSERT(state_ == IDLE);
   state_ = PREPARE_GC;
 #endif
-  ASSERT(!FLAG_always_compact || !FLAG_never_compact);
 
-  compacting_collection_ =
-      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
-  compact_on_next_gc_ = false;
+  ASSERT(!FLAG_never_compact || !FLAG_always_compact);
 
-  if (FLAG_never_compact) compacting_collection_ = false;
-  if (!heap()->map_space()->MapPointersEncodable())
-      compacting_collection_ = false;
-  if (FLAG_collect_maps) CreateBackPointers();
+  if (collect_maps_) CreateBackPointers();
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit) {
     // If GDBJIT interface is active disable compaction.
@@ -137,21 +688,31 @@
   }
 #endif
 
+  // Clear marking bits if incremental marking is aborted.
+  if (was_marked_incrementally_ && abort_incremental_marking_) {
+    heap()->incremental_marking()->Abort();
+    ClearMarkbits();
+    AbortCompaction();
+    was_marked_incrementally_ = false;
+  }
+
+  // Don't start compaction if we are in the middle of an incremental
+  // marking cycle: no evacuation slots were collected during it.
+  if (!FLAG_never_compact && !was_marked_incrementally_) {
+    StartCompaction(NON_INCREMENTAL_COMPACTION);
+  }
+
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next();
-       space != NULL; space = spaces.next()) {
-    space->PrepareForMarkCompact(compacting_collection_);
+       space != NULL;
+       space = spaces.next()) {
+    space->PrepareForMarkCompact();
   }
 
 #ifdef DEBUG
-  live_bytes_ = 0;
-  live_young_objects_size_ = 0;
-  live_old_pointer_objects_size_ = 0;
-  live_old_data_objects_size_ = 0;
-  live_code_objects_size_ = 0;
-  live_map_objects_size_ = 0;
-  live_cell_objects_size_ = 0;
-  live_lo_objects_size_ = 0;
+  if (!was_marked_incrementally_ && FLAG_verify_heap) {
+    VerifyMarkbitsAreClean();
+  }
 #endif
 }
 
@@ -168,31 +729,6 @@
   heap()->isolate()->stub_cache()->Clear();
 
   heap()->external_string_table_.CleanUp();
-
-  // If we've just compacted old space there's no reason to check the
-  // fragmentation limit. Just return.
-  if (HasCompacted()) return;
-
-  // We compact the old generation on the next GC if it has gotten too
-  // fragmented (ie, we could recover an expected amount of space by
-  // reclaiming the waste and free list blocks).
-  static const int kFragmentationLimit = 15;        // Percent.
-  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
-  intptr_t old_gen_recoverable = 0;
-  intptr_t old_gen_used = 0;
-
-  OldSpaces spaces;
-  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
-    old_gen_recoverable += space->Waste() + space->AvailableFree();
-    old_gen_used += space->Size();
-  }
-
-  int old_gen_fragmentation =
-      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
-  if (old_gen_fragmentation > kFragmentationLimit &&
-      old_gen_recoverable > kFragmentationAllowed) {
-    compact_on_next_gc_ = true;
-  }
 }
 
 
@@ -237,8 +773,7 @@
   }
 
   void AddCandidate(JSFunction* function) {
-    ASSERT(function->unchecked_code() ==
-           function->unchecked_shared()->unchecked_code());
+    ASSERT(function->code() == function->shared()->code());
 
     SetNextCandidate(function, jsfunction_candidates_head_);
     jsfunction_candidates_head_ = function;
@@ -258,16 +793,26 @@
     while (candidate != NULL) {
       next_candidate = GetNextCandidate(candidate);
 
-      SharedFunctionInfo* shared = candidate->unchecked_shared();
+      SharedFunctionInfo* shared = candidate->shared();
 
-      Code* code = shared->unchecked_code();
-      if (!code->IsMarked()) {
+      Code* code = shared->code();
+      MarkBit code_mark = Marking::MarkBitFrom(code);
+      if (!code_mark.Get()) {
         shared->set_code(lazy_compile);
         candidate->set_code(lazy_compile);
       } else {
-        candidate->set_code(shared->unchecked_code());
+        candidate->set_code(shared->code());
       }
 
+      // We are in the middle of a GC cycle, so the write barrier in the code
+      // setter did not record the slot update and we have to do that manually.
+      Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
+      Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
+      isolate_->heap()->mark_compact_collector()->
+          RecordCodeEntrySlot(slot, target);
+
+      RecordSharedFunctionInfoCodeSlot(shared);
+
       candidate = next_candidate;
     }
 
@@ -284,17 +829,27 @@
       next_candidate = GetNextCandidate(candidate);
       SetNextCandidate(candidate, NULL);
 
-      Code* code = candidate->unchecked_code();
-      if (!code->IsMarked()) {
+      Code* code = candidate->code();
+      MarkBit code_mark = Marking::MarkBitFrom(code);
+      if (!code_mark.Get()) {
         candidate->set_code(lazy_compile);
       }
 
+      RecordSharedFunctionInfoCodeSlot(candidate);
+
       candidate = next_candidate;
     }
 
     shared_function_info_candidates_head_ = NULL;
   }
 
+  void RecordSharedFunctionInfoCodeSlot(SharedFunctionInfo* shared) {
+    Object** slot = HeapObject::RawField(shared,
+                                         SharedFunctionInfo::kCodeOffset);
+    isolate_->heap()->mark_compact_collector()->
+        RecordSlot(slot, slot, HeapObject::cast(*slot));
+  }
+
   static JSFunction** GetNextCandidateField(JSFunction* candidate) {
     return reinterpret_cast<JSFunction**>(
         candidate->address() + JSFunction::kCodeEntryOffset);
@@ -311,18 +866,19 @@
 
   static SharedFunctionInfo** GetNextCandidateField(
       SharedFunctionInfo* candidate) {
-    Code* code = candidate->unchecked_code();
+    Code* code = candidate->code();
     return reinterpret_cast<SharedFunctionInfo**>(
-        code->address() + Code::kNextCodeFlushingCandidateOffset);
+        code->address() + Code::kGCMetadataOffset);
   }
 
   static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
-    return *GetNextCandidateField(candidate);
+    return reinterpret_cast<SharedFunctionInfo*>(
+        candidate->code()->gc_metadata());
   }
 
   static void SetNextCandidate(SharedFunctionInfo* candidate,
                                SharedFunctionInfo* next_candidate) {
-    *GetNextCandidateField(candidate) = next_candidate;
+    candidate->code()->set_gc_metadata(next_candidate);
   }
 
   Isolate* isolate_;
@@ -347,7 +903,7 @@
   // it in place to its left substring.  Return the updated value.
   //
   // Here we assume that if we change *p, we replace it with a heap object
-  // (ie, the left substring of a cons string is always a heap object).
+  // (i.e., the left substring of a cons string is always a heap object).
   //
   // The check performed is:
   //   object->IsConsString() && !object->IsSymbol() &&
@@ -355,14 +911,14 @@
   // except the maps for the object and its possible substrings might be
   // marked.
   HeapObject* object = HeapObject::cast(*p);
-  MapWord map_word = object->map_word();
-  map_word.ClearMark();
-  InstanceType type = map_word.ToMap()->instance_type();
+  if (!FLAG_clever_optimizations) return object;
+  Map* map = object->map();
+  InstanceType type = map->instance_type();
   if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
 
   Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
-  Heap* heap = map_word.ToMap()->heap();
-  if (second != heap->raw_unchecked_empty_string()) {
+  Heap* heap = map->GetHeap();
+  if (second != heap->empty_string()) {
     return object;
   }
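
The shortcut above relies on a cons string whose second part is the empty
string denoting exactly the same character sequence as its first part, so
the marker can retarget the slot at the substring and skip the wrapper
entirely. In miniature, with invented types (the real check also excludes
symbols):

    struct Str {
      bool is_cons;
      Str* first;   // Meaningful only when is_cons.
      Str* second;  // Meaningful only when is_cons.
      bool empty;
    };

    // If *p is cons(first, "") then first represents the same string:
    // update the slot in place and return the object actually to mark.
    Str* ShortCircuit(Str** p) {
      Str* s = *p;
      if (s->is_cons && s->second->empty) {
        *p = s->first;
        return s->first;
      }
      return s;
    }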
 
@@ -404,14 +960,12 @@
                                          FixedArray::BodyDescriptor,
                                          void>::Visit);
 
+    table_.Register(kVisitGlobalContext, &VisitGlobalContext);
+
     table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
 
-    table_.Register(kVisitGlobalContext,
-                    &FixedBodyVisitor<StaticMarkingVisitor,
-                                      Context::MarkCompactBodyDescriptor,
-                                      void>::Visit);
-
     table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
+    table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
 
@@ -456,7 +1010,7 @@
   }
 
   INLINE(static void VisitPointer(Heap* heap, Object** p)) {
-    MarkObjectByPointer(heap, p);
+    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
   }
 
   INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
@@ -466,29 +1020,43 @@
       if (VisitUnmarkedObjects(heap, start, end)) return;
       // We are close to a stack overflow, so just mark the objects.
     }
-    for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
-  }
-
-  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
-    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
-      IC::Clear(rinfo->pc());
-      // Please note targets for cleared inline cached do not have to be
-      // marked since they are contained in HEAP->non_monomorphic_cache().
-    } else {
-      heap->mark_compact_collector()->MarkObject(code);
+    MarkCompactCollector* collector = heap->mark_compact_collector();
+    for (Object** p = start; p < end; p++) {
+      MarkObjectByPointer(collector, start, p);
     }
   }
 
   static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
     ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
-    Object* cell = rinfo->target_cell();
-    Object* old_cell = cell;
-    VisitPointer(heap, &cell);
-    if (cell != old_cell) {
-      rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
+    JSGlobalPropertyCell* cell =
+        JSGlobalPropertyCell::cast(rinfo->target_cell());
+    MarkBit mark = Marking::MarkBitFrom(cell);
+    heap->mark_compact_collector()->MarkObject(cell, mark);
+  }
+
+  static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
+    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    // TODO(mstarzinger): We do not short-circuit cons strings here; verify
+    // that there can be no such embedded pointers and add an assertion here.
+    HeapObject* object = HeapObject::cast(rinfo->target_object());
+    heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+    MarkBit mark = Marking::MarkBitFrom(object);
+    heap->mark_compact_collector()->MarkObject(object, mark);
+  }
+
+  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
+        && (target->ic_state() == MEGAMORPHIC ||
+            heap->mark_compact_collector()->flush_monomorphic_ics_ ||
+            target->ic_age() != heap->global_ic_age())) {
+      IC::Clear(rinfo->pc());
+      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     }
+    MarkBit code_mark = Marking::MarkBitFrom(target);
+    heap->mark_compact_collector()->MarkObject(target, code_mark);
+    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
   }
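
Marking::MarkBitFrom(object), used throughout the rewritten visitors, derives
a position in a per-page mark bitmap from the object's address. A
self-contained sketch of that mapping; the constants are assumptions for
illustration, not V8's exact layout:

    #include <cstdint>

    static const uintptr_t kSketchPageSize = uintptr_t(1) << 20;  // assumed
    static const uintptr_t kSketchPageMask = kSketchPageSize - 1;
    static const int kSketchPointerSizeLog2 = 3;   // 64-bit words assumed
    static const int kSketchBitsPerCellLog2 = 5;   // 32-bit bitmap cells

    // One mark bit per pointer-sized word: index within the page's bitmap.
    static inline uint32_t MarkBitIndex(uintptr_t addr) {
      return uint32_t((addr & kSketchPageMask) >> kSketchPointerSizeLog2);
    }

    // Locate the 32-bit cell and the mask selecting the bit inside it.
    static inline uint32_t* CellFor(uint32_t* bitmap, uint32_t index) {
      return &bitmap[index >> kSketchBitsPerCellLog2];
    }
    static inline uint32_t MaskFor(uint32_t index) {
      return uint32_t(1) << (index & ((1u << kSketchBitsPerCellLog2) - 1));
    }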
 
   static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
@@ -496,17 +1064,21 @@
             rinfo->IsPatchedReturnSequence()) ||
            (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
             rinfo->IsPatchedDebugBreakSlotSequence()));
-    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
-    heap->mark_compact_collector()->MarkObject(code);
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    MarkBit code_mark = Marking::MarkBitFrom(target);
+    heap->mark_compact_collector()->MarkObject(target, code_mark);
+    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
   }
 
   // Mark object pointed to by p.
-  INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
+  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
+                                         Object** anchor_slot,
+                                         Object** p)) {
     if (!(*p)->IsHeapObject()) return;
     HeapObject* object = ShortCircuitConsString(p);
-    if (!object->IsMarked()) {
-      heap->mark_compact_collector()->MarkUnmarkedObject(object);
-    }
+    collector->RecordSlot(anchor_slot, p, object);
+    MarkBit mark = Marking::MarkBitFrom(object);
+    collector->MarkObject(object, mark);
   }
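
The new RecordSlot(anchor, slot, object) calls feed compaction: slots that
may point into evacuation candidates are buffered during marking so they can
be rewritten after their targets move. A deliberately simplified buffer; the
real SlotsBuffer is chunked and filters by page flags:

    #include <vector>

    struct SlotsBufferSketch {
      std::vector<void**> slots;

      // Remember a slot only when its target could be evacuated.
      void Record(void** slot, bool target_on_evacuation_candidate) {
        if (target_on_evacuation_candidate) slots.push_back(slot);
      }

      // After evacuation, rewrite each recorded slot via a forwarding map.
      template <typename Forward>  // Forward: void* -> void*
      void Update(Forward forward) {
        for (size_t i = 0; i < slots.size(); i++) {
          *slots[i] = forward(*slots[i]);
        }
      }
    };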
 
 
@@ -515,12 +1087,15 @@
                                          HeapObject* obj)) {
 #ifdef DEBUG
     ASSERT(Isolate::Current()->heap()->Contains(obj));
-    ASSERT(!obj->IsMarked());
+    ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
 #endif
     Map* map = obj->map();
-    collector->SetMark(obj);
+    Heap* heap = obj->GetHeap();
+    MarkBit mark = Marking::MarkBitFrom(obj);
+    heap->mark_compact_collector()->SetMark(obj, mark);
     // Mark the map pointer and the body.
-    if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    heap->mark_compact_collector()->MarkObject(map, map_mark);
     IterateBody(map, obj);
   }
 
@@ -536,15 +1111,19 @@
     MarkCompactCollector* collector = heap->mark_compact_collector();
     // Visit the unmarked objects.
     for (Object** p = start; p < end; p++) {
-      if (!(*p)->IsHeapObject()) continue;
-      HeapObject* obj = HeapObject::cast(*p);
-      if (obj->IsMarked()) continue;
+      Object* o = *p;
+      if (!o->IsHeapObject()) continue;
+      collector->RecordSlot(start, p, o);
+      HeapObject* obj = HeapObject::cast(o);
+      MarkBit mark = Marking::MarkBitFrom(obj);
+      if (mark.Get()) continue;
       VisitUnmarkedObject(collector, obj);
     }
     return true;
   }
 
   static inline void VisitExternalReference(Address* p) { }
+  static inline void VisitExternalReference(RelocInfo* rinfo) { }
   static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
 
  private:
@@ -567,7 +1146,7 @@
                               void> StructObjectVisitor;
 
   static void VisitJSWeakMap(Map* map, HeapObject* object) {
-    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
 
     // Enqueue weak map in linked list of encountered weak maps.
@@ -578,25 +1157,40 @@
     // Skip visiting the backing hash table containing the mappings.
     int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
     BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         JSWeakMap::BodyDescriptor::kStartOffset,
         JSWeakMap::kTableOffset);
     BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         JSWeakMap::kTableOffset + kPointerSize,
         object_size);
 
     // Mark the backing hash table without pushing it on the marking stack.
-    ASSERT(!weak_map->unchecked_table()->IsMarked());
-    ASSERT(weak_map->unchecked_table()->map()->IsMarked());
-    collector->SetMark(weak_map->unchecked_table());
+    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
+    ASSERT(!MarkCompactCollector::IsMarked(table));
+    collector->SetMark(table, Marking::MarkBitFrom(table));
+    collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
+    ASSERT(MarkCompactCollector::IsMarked(table->map()));
   }
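
The weak-map treatment above marks the backing table so it survives, but
deliberately does not push it for body scanning, so key/value entries are not
traced here; the weak map itself goes on an "encountered weak maps" list for
later processing. That list is an intrusive chain, sketched with stand-in
types:

    struct WeakMapSketch {
      void* backing_table;
      WeakMapSketch* next_encountered;  // intrusive list link
    };

    // Prepend onto the collector's encountered-weak-maps list.
    static void Enqueue(WeakMapSketch** head, WeakMapSketch* wm) {
      wm->next_encountered = *head;
      *head = wm;
    }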
 
   static void VisitCode(Map* map, HeapObject* object) {
-    reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
-        map->heap());
+    Heap* heap = map->GetHeap();
+    Code* code = reinterpret_cast<Code*>(object);
+    if (FLAG_cleanup_code_caches_at_gc) {
+      Object* raw_info = code->type_feedback_info();
+      if (raw_info->IsTypeFeedbackInfo()) {
+        TypeFeedbackCells* type_feedback_cells =
+            TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
+        for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
+          ASSERT(type_feedback_cells->AstId(i)->IsSmi());
+          JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
+          cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+        }
+      }
+    }
+    code->CodeIterateBody<StaticMarkingVisitor>(heap);
   }
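
The new FLAG_cleanup_code_caches_at_gc branch above resets every type
feedback cell of a code object to the uninitialized sentinel, dropping the
strong references so the maps and functions they cached can be collected. The
same operation in isolation, with stand-in types:

    #include <vector>

    struct FeedbackCellSketch { const void* value; };

    static void ClearTypeFeedback(std::vector<FeedbackCellSketch>* cells,
                                  const void* uninitialized_sentinel) {
      for (size_t i = 0; i < cells->size(); i++) {
        (*cells)[i].value = uninitialized_sentinel;  // forget cached object
      }
    }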
 
   // Code flushing support.
@@ -608,19 +1202,19 @@
   static const int kRegExpCodeThreshold = 5;
 
   inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
-    Object* undefined = heap->raw_unchecked_undefined_value();
+    Object* undefined = heap->undefined_value();
     return (info->script() != undefined) &&
         (reinterpret_cast<Script*>(info->script())->source() != undefined);
   }
 
 
   inline static bool IsCompiled(JSFunction* function) {
-    return function->unchecked_code() !=
+    return function->code() !=
         function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
   }
 
   inline static bool IsCompiled(SharedFunctionInfo* function) {
-    return function->unchecked_code() !=
+    return function->code() !=
         function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
   }
 
@@ -629,13 +1223,16 @@
 
     // Code is either on the stack, in the compilation cache, or referenced
     // by an optimized version of the function.
-    if (function->unchecked_code()->IsMarked()) {
-      shared_info->set_code_age(0);
+    MarkBit code_mark = Marking::MarkBitFrom(function->code());
+    if (code_mark.Get()) {
+      if (!Marking::MarkBitFrom(shared_info).Get()) {
+        shared_info->set_code_age(0);
+      }
       return false;
     }
 
     // We do not flush code for optimized functions.
-    if (function->code() != shared_info->unchecked_code()) {
+    if (function->code() != shared_info->code()) {
       return false;
     }
 
@@ -645,8 +1242,9 @@
   inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
     // Code is either on the stack, in the compilation cache, or referenced
     // by an optimized version of the function.
-    if (shared_info->unchecked_code()->IsMarked()) {
-      shared_info->set_code_age(0);
+    MarkBit code_mark =
+        Marking::MarkBitFrom(shared_info->code());
+    if (code_mark.Get()) {
       return false;
     }
 
@@ -658,9 +1256,7 @@
 
     // We never flush code for Api functions.
     Object* function_data = shared_info->function_data();
-    if (function_data->IsHeapObject() &&
-        (SafeMap(function_data)->instance_type() ==
-         FUNCTION_TEMPLATE_INFO_TYPE)) {
+    if (function_data->IsFunctionTemplateInfo()) {
       return false;
     }
 
@@ -701,40 +1297,9 @@
     return true;
   }
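
IsFlushable pairs these checks with a code-age clock: code whose mark bit was
set this cycle resets its age, while unused code ages each full GC and is
eventually flushed back to lazy compilation. A compressed sketch of the
policy; the threshold constant is an assumption, mirroring the
kRegExpCodeThreshold used for regexp code:

    static const int kSketchCodeAgeThreshold = 5;  // assumed

    struct FlushCandidateSketch {
      int code_age;
      bool code_marked_this_gc;  // the code object's mark bit was set
    };

    // True when the candidate's code should be dropped this cycle.
    static bool ShouldFlush(FlushCandidateSketch* f) {
      if (f->code_marked_this_gc) {
        f->code_age = 0;  // still in use: reset the clock
        return false;
      }
      return ++f->code_age > kSketchCodeAgeThreshold;
    }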
 
-
-  static inline Map* SafeMap(Object* obj) {
-    MapWord map_word = HeapObject::cast(obj)->map_word();
-    map_word.ClearMark();
-    map_word.ClearOverflow();
-    return map_word.ToMap();
-  }
-
-
-  static inline bool IsJSBuiltinsObject(Object* obj) {
-    return obj->IsHeapObject() &&
-        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
-  }
-
-
   static inline bool IsValidNotBuiltinContext(Object* ctx) {
-    if (!ctx->IsHeapObject()) return false;
-
-    Map* map = SafeMap(ctx);
-    Heap* heap = map->heap();
-    if (!(map == heap->raw_unchecked_function_context_map() ||
-          map == heap->raw_unchecked_catch_context_map() ||
-          map == heap->raw_unchecked_with_context_map() ||
-          map == heap->raw_unchecked_global_context_map())) {
-      return false;
-    }
-
-    Context* context = reinterpret_cast<Context*>(ctx);
-
-    if (IsJSBuiltinsObject(context->global())) {
-      return false;
-    }
-
-    return true;
+    return ctx->IsContext() &&
+        !Context::cast(ctx)->global()->IsJSBuiltinsObject();
   }
 
 
@@ -754,17 +1319,29 @@
                                           bool is_ascii) {
     // Make sure that the fixed array is in fact initialized on the RegExp.
     // We could potentially trigger a GC when initializing the RegExp.
-    if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return;
+    if (HeapObject::cast(re->data())->map()->instance_type() !=
+            FIXED_ARRAY_TYPE) return;
 
     // Make sure this is a RegExp that actually contains code.
     if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
 
     Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
-    if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) {
+    if (!code->IsSmi() &&
+        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
       // Save a copy that can be reinstated if we need the code again.
       re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                              code,
                              heap);
+
+      // Saving a copy might create a pointer into a compaction candidate
+      // that was not observed by the marker.  This might happen if the
+      // JSRegExp data was marked through the compilation cache before the
+      // marker reached the JSRegExp object.
+      FixedArray* data = FixedArray::cast(re->data());
+      Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
+      heap->mark_compact_collector()->
+          RecordSlot(slot, slot, code);
+
       // Set a number in the 0-255 range to guarantee no smi overflow.
       re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                              Smi::FromInt(heap->sweep_generation() & 0xff),
@@ -796,14 +1373,14 @@
   // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
   // we flush the code.
   static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     MarkCompactCollector* collector = heap->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitJSRegExpFields(map, object);
       return;
     }
     JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
-    // Flush code or set age on both ascii and two byte code.
+    // Flush code or set age on both ASCII and two-byte code.
     UpdateRegExpCodeAgeAndFlush(heap, re, true);
     UpdateRegExpCodeAgeAndFlush(heap, re, false);
     // Visit the fields of the RegExp, including the updated FixedArray.
@@ -813,7 +1390,7 @@
 
   static void VisitSharedFunctionInfoAndFlushCode(Map* map,
                                                   HeapObject* object) {
-    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitSharedFunctionInfoGeneric(map, object);
       return;
@@ -824,7 +1401,7 @@
 
   static void VisitSharedFunctionInfoAndFlushCodeGeneric(
       Map* map, HeapObject* object, bool known_flush_code_candidate) {
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
 
     if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
@@ -841,18 +1418,30 @@
 
 
   static void VisitCodeEntry(Heap* heap, Address entry_address) {
-    Object* code = Code::GetObjectFromEntryAddress(entry_address);
-    Object* old_code = code;
-    VisitPointer(heap, &code);
-    if (code != old_code) {
-      Memory::Address_at(entry_address) =
-          reinterpret_cast<Code*>(code)->entry();
+    Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+    MarkBit mark = Marking::MarkBitFrom(code);
+    heap->mark_compact_collector()->MarkObject(code, mark);
+    heap->mark_compact_collector()->
+        RecordCodeEntrySlot(entry_address, code);
+  }
+
+  static void VisitGlobalContext(Map* map, HeapObject* object) {
+    FixedBodyVisitor<StaticMarkingVisitor,
+                     Context::MarkCompactBodyDescriptor,
+                     void>::Visit(map, object);
+
+    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
+    for (int idx = Context::FIRST_WEAK_SLOT;
+         idx < Context::GLOBAL_CONTEXT_SLOTS;
+         ++idx) {
+      Object** slot =
+          HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
+      collector->RecordSlot(slot, slot, *slot);
     }
   }
 
-
   static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     MarkCompactCollector* collector = heap->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitJSFunction(map, object);
@@ -867,10 +1456,12 @@
     }
 
     if (!flush_code_candidate) {
-      collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
+      Code* code = jsfunction->shared()->code();
+      MarkBit code_mark = Marking::MarkBitFrom(code);
+      collector->MarkObject(code, code_mark);
 
-      if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
-        collector->MarkInlinedFunctionsCode(jsfunction->unchecked_code());
+      if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
+        collector->MarkInlinedFunctionsCode(jsfunction->code());
       }
     }
 
@@ -894,12 +1485,11 @@
   static inline void VisitJSFunctionFields(Map* map,
                                            JSFunction* object,
                                            bool flush_code_candidate) {
-    Heap* heap = map->heap();
-    MarkCompactCollector* collector = heap->mark_compact_collector();
+    Heap* heap = map->GetHeap();
 
     VisitPointers(heap,
-                  SLOT_ADDR(object, JSFunction::kPropertiesOffset),
-                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
+                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
+                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
 
     if (!flush_code_candidate) {
       VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
@@ -909,29 +1499,39 @@
      // Visit the shared function info to avoid double-checking its
      // flushability.
       SharedFunctionInfo* shared_info = object->unchecked_shared();
-      if (!shared_info->IsMarked()) {
+      MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
+      if (!shared_info_mark.Get()) {
         Map* shared_info_map = shared_info->map();
-        collector->SetMark(shared_info);
-        collector->MarkObject(shared_info_map);
+        MarkBit shared_info_map_mark =
+            Marking::MarkBitFrom(shared_info_map);
+        heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
+        heap->mark_compact_collector()->MarkObject(shared_info_map,
+                                                   shared_info_map_mark);
         VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
                                                    shared_info,
                                                    true);
       }
     }
 
-    VisitPointers(heap,
-                  SLOT_ADDR(object,
-                            JSFunction::kCodeEntryOffset + kPointerSize),
-                  SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
+    VisitPointers(
+        heap,
+        HeapObject::RawField(object,
+                             JSFunction::kCodeEntryOffset + kPointerSize),
+        HeapObject::RawField(object,
+                             JSFunction::kNonWeakFieldsEndOffset));
 
     // Don't visit the next function list field as it is a weak reference.
+    Object** next_function =
+        HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset);
+    heap->mark_compact_collector()->RecordSlot(
+        next_function, next_function, *next_function);
   }
 
   static inline void VisitJSRegExpFields(Map* map,
                                          HeapObject* object) {
     int last_property_offset =
         JSRegExp::kSize + kPointerSize * map->inobject_properties();
-    VisitPointers(map->heap(),
+    VisitPointers(map->GetHeap(),
                   SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
                   SLOT_ADDR(object, last_property_offset));
   }
@@ -1007,8 +1607,10 @@
     Object* obj = *slot;
     if (obj->IsSharedFunctionInfo()) {
       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
-      collector_->MarkObject(shared->unchecked_code());
-      collector_->MarkObject(shared);
+      MarkBit shared_mark = Marking::MarkBitFrom(shared);
+      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
+      collector_->MarkObject(shared->code(), code_mark);
+      collector_->MarkObject(shared, shared_mark);
     }
   }
 
@@ -1022,16 +1624,17 @@
  // of its code and the non-optimized version of all inlined functions.
   // This is required to support bailing out from inlined code.
   DeoptimizationInputData* data =
-      reinterpret_cast<DeoptimizationInputData*>(
-          code->unchecked_deoptimization_data());
+      DeoptimizationInputData::cast(code->deoptimization_data());
 
-  FixedArray* literals = data->UncheckedLiteralArray();
+  FixedArray* literals = data->LiteralArray();
 
   for (int i = 0, count = data->InlinedFunctionCount()->value();
        i < count;
        i++) {
-    JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
-    MarkObject(inlined->unchecked_shared()->unchecked_code());
+    JSFunction* inlined = JSFunction::cast(literals->get(i));
+    Code* inlined_code = inlined->shared()->code();
+    MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
+    MarkObject(inlined_code, inlined_code_mark);
   }
 }
 
@@ -1045,7 +1648,8 @@
     // actual optimized code object.
     StackFrame* frame = it.frame();
     Code* code = frame->unchecked_code();
-    MarkObject(code);
+    MarkBit code_mark = Marking::MarkBitFrom(code);
+    MarkObject(code, code_mark);
     if (frame->is_optimized()) {
       MarkInlinedFunctionsCode(frame->LookupCode());
     }
@@ -1056,7 +1660,8 @@
 void MarkCompactCollector::PrepareForCodeFlushing() {
   ASSERT(heap() == Isolate::Current()->heap());
 
-  if (!FLAG_flush_code) {
+  // TODO(1609): The incremental marker does not support code flushing yet.
+  if (!FLAG_flush_code || was_marked_incrementally_) {
     EnableCodeFlushing(false);
     return;
   }
@@ -1068,11 +1673,14 @@
     return;
   }
 #endif
+
   EnableCodeFlushing(true);
 
   // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
   // relies on it being marked before any other descriptor array.
-  MarkObject(heap()->raw_unchecked_empty_descriptor_array());
+  HeapObject* descriptor_array = heap()->empty_descriptor_array();
+  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
+  MarkObject(descriptor_array, descriptor_array_mark);
 
   // Make sure we are not referencing the code from the stack.
   ASSERT(this == heap()->mark_compact_collector());
@@ -1089,7 +1697,7 @@
   heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
   heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
 
-  ProcessMarkingStack();
+  ProcessMarkingDeque();
 }
 
 
@@ -1113,19 +1721,21 @@
 
     // Replace flat cons strings in place.
     HeapObject* object = ShortCircuitConsString(p);
-    if (object->IsMarked()) return;
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) return;
 
     Map* map = object->map();
     // Mark the object.
-    collector_->SetMark(object);
+    collector_->SetMark(object, mark_bit);
 
     // Mark the map pointer and body, and push them on the marking stack.
-    collector_->MarkObject(map);
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    collector_->MarkObject(map, map_mark);
     StaticMarkingVisitor::IterateBody(map, object);
 
     // Mark all the objects reachable from the map and body.  May leave
     // overflowed objects in the heap.
-    collector_->EmptyMarkingStack();
+    collector_->EmptyMarkingDeque();
   }
 
   MarkCompactCollector* collector_;
@@ -1141,17 +1751,19 @@
   virtual void VisitPointers(Object** start, Object** end) {
     // Visit all HeapObject pointers in [start, end).
     for (Object** p = start; p < end; p++) {
-      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
+      Object* o = *p;
+      if (o->IsHeapObject() &&
+          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
         // Check if the symbol being pruned is an external symbol. We need to
         // delete the associated external data as this symbol is going away.
 
         // Since no objects have yet been moved we can safely access the map of
         // the object.
-        if ((*p)->IsExternalString()) {
+        if (o->IsExternalString()) {
           heap_->FinalizeExternalString(String::cast(*p));
         }
-        // Set the entry to null_value (as deleted).
-        *p = heap_->raw_unchecked_null_value();
+        // Set the entry to the_hole_value (as deleted).
+        *p = heap_->the_hole_value();
         pointers_removed_++;
       }
     }
@@ -1172,8 +1784,7 @@
 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
-    MapWord first_word = HeapObject::cast(object)->map_word();
-    if (first_word.IsMarked()) {
+    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
       return object;
     } else {
       return NULL;
@@ -1182,28 +1793,24 @@
 };
 
 
-void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
-  ASSERT(!object->IsMarked());
+void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
+  ASSERT(IsMarked(object));
   ASSERT(HEAP->Contains(object));
   if (object->IsMap()) {
     Map* map = Map::cast(object);
-    if (FLAG_cleanup_code_caches_at_gc) {
-      map->ClearCodeCache(heap());
-    }
-    SetMark(map);
+    heap_->ClearCacheOnMap(map);
 
     // When map collection is enabled we have to mark through map's transitions
     // in a special way to make transition links weak.
     // Only maps for subclasses of JSReceiver can have transitions.
     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-    if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+    if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
       MarkMapContents(map);
     } else {
-      marking_stack_.Push(map);
+      marking_deque_.PushBlack(map);
     }
   } else {
-    SetMark(object);
-    marking_stack_.Push(object);
+    marking_deque_.PushBlack(object);
   }
 }
 
@@ -1212,12 +1819,17 @@
   // Mark prototype transitions array but don't push it into marking stack.
   // This will make references from it weak. We will clean dead prototype
   // transitions in ClearNonLiveTransitions.
-  FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
-  if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);
+  FixedArray* prototype_transitions = map->prototype_transitions();
+  MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
+  if (!mark.Get()) {
+    mark.Set();
+    MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
+                                          prototype_transitions->Size());
+  }
 
-  Object* raw_descriptor_array =
-      *HeapObject::RawField(map,
-                            Map::kInstanceDescriptorsOrBitField3Offset);
+  Object** raw_descriptor_array_slot =
+      HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
+  Object* raw_descriptor_array = *raw_descriptor_array_slot;
   if (!raw_descriptor_array->IsSmi()) {
     MarkDescriptorArray(
         reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
@@ -1231,24 +1843,36 @@
 
   Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
 
-  StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
+  StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
+}
+
+
+void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
+                                                int offset) {
+  Object** slot = HeapObject::RawField(accessors, offset);
+  HeapObject* accessor = HeapObject::cast(*slot);
+  if (accessor->IsMap()) return;
+  RecordSlot(slot, slot, accessor);
+  MarkObjectAndPush(accessor);
 }
 
 
 void MarkCompactCollector::MarkDescriptorArray(
     DescriptorArray* descriptors) {
-  if (descriptors->IsMarked()) return;
+  MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
+  if (descriptors_mark.Get()) return;
   // Empty descriptor array is marked as a root before any maps are marked.
-  ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
-  SetMark(descriptors);
+  ASSERT(descriptors != heap()->empty_descriptor_array());
+  SetMark(descriptors, descriptors_mark);
 
   FixedArray* contents = reinterpret_cast<FixedArray*>(
       descriptors->get(DescriptorArray::kContentArrayIndex));
   ASSERT(contents->IsHeapObject());
-  ASSERT(!contents->IsMarked());
+  ASSERT(!IsMarked(contents));
   ASSERT(contents->IsFixedArray());
   ASSERT(contents->length() >= 2);
-  SetMark(contents);
+  MarkBit contents_mark = Marking::MarkBitFrom(contents);
+  SetMark(contents, contents_mark);
   // Contents contains (value, details) pairs.  If the details say that the type
   // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
   // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
@@ -1258,27 +1882,54 @@
     // If the pair (value, details) at index i, i+1 is not
     // a transition or null descriptor, mark the value.
     PropertyDetails details(Smi::cast(contents->get(i + 1)));
-    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
-      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
-      if (object->IsHeapObject() && !object->IsMarked()) {
-        SetMark(object);
-        marking_stack_.Push(object);
-      }
+
+    Object** slot = contents->data_start() + i;
+    if (!(*slot)->IsHeapObject()) continue;
+    HeapObject* value = HeapObject::cast(*slot);
+
+    RecordSlot(slot, slot, *slot);
+
+    switch (details.type()) {
+      case NORMAL:
+      case FIELD:
+      case CONSTANT_FUNCTION:
+      case HANDLER:
+      case INTERCEPTOR:
+        MarkObjectAndPush(value);
+        break;
+      case CALLBACKS:
+        if (!value->IsAccessorPair()) {
+          MarkObjectAndPush(value);
+        } else if (!MarkObjectWithoutPush(value)) {
+          MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
+          MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
+        }
+        break;
+      case ELEMENTS_TRANSITION:
+        // For maps with multiple elements transitions, the transition maps are
+        // stored in a FixedArray. Keep the fixed array alive but not the maps
+        // that it refers to.
+        if (value->IsFixedArray()) MarkObjectWithoutPush(value);
+        break;
+      case MAP_TRANSITION:
+      case CONSTANT_TRANSITION:
+      case NULL_DESCRIPTOR:
+        break;
     }
   }
   // The DescriptorArray descriptors contains a pointer to its contents array,
   // but the contents array is already marked.
-  marking_stack_.Push(descriptors);
+  marking_deque_.PushBlack(descriptors);
 }
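
The switch above encodes a marking policy per descriptor kind: ordinary value
kinds stay strong, CALLBACKS holding an AccessorPair is marked component-wise
so that a transition map stored as a getter or setter can still die, and the
transition kinds are left weak on purpose. Condensed, with enumerators
mirroring the real names:

    enum DescriptorKindSketch {
      NORMAL, FIELD, CONSTANT_FUNCTION, HANDLER, INTERCEPTOR,  // strong
      CALLBACKS,            // strong, but accessor pairs are split up
      ELEMENTS_TRANSITION,  // holder array kept, referenced maps weak
      MAP_TRANSITION, CONSTANT_TRANSITION, NULL_DESCRIPTOR     // weak
    };

    static bool IsStrongDescriptor(DescriptorKindSketch kind) {
      return kind <= CALLBACKS;  // relies on the ordering above
    }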
 
 
 void MarkCompactCollector::CreateBackPointers() {
   HeapObjectIterator iterator(heap()->map_space());
-  for (HeapObject* next_object = iterator.next();
-       next_object != NULL; next_object = iterator.next()) {
-    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
+  for (HeapObject* next_object = iterator.Next();
+       next_object != NULL; next_object = iterator.Next()) {
+    if (next_object->IsMap()) {  // Could also be FreeSpace object on free list.
       Map* map = Map::cast(next_object);
-      STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+      STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
       if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
         map->CreateBackPointers();
       } else {
@@ -1289,54 +1940,126 @@
 }
 
 
-static int OverflowObjectSize(HeapObject* obj) {
-  // Recover the normal map pointer, it might be marked as live and
-  // overflowed.
-  MapWord map_word = obj->map_word();
-  map_word.ClearMark();
-  map_word.ClearOverflow();
-  return obj->SizeFromMap(map_word.ToMap());
+// Fill the marking stack with overflowed objects returned by the given
+// iterator.  Stop when the marking stack is filled or the end of the space
+// is reached, whichever comes first.
+template<class T>
+static void DiscoverGreyObjectsWithIterator(Heap* heap,
+                                            MarkingDeque* marking_deque,
+                                            T* it) {
+  // The caller should ensure that the marking stack is initially not full,
+  // so that we don't waste effort pointlessly scanning for objects.
+  ASSERT(!marking_deque->IsFull());
+
+  Map* filler_map = heap->one_pointer_filler_map();
+  for (HeapObject* object = it->Next();
+       object != NULL;
+       object = it->Next()) {
+    MarkBit markbit = Marking::MarkBitFrom(object);
+    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
+      Marking::GreyToBlack(markbit);
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
+      marking_deque->PushBlack(object);
+      if (marking_deque->IsFull()) return;
+    }
+  }
 }
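
Grey discovery relies on the two-bit colour encoding asserted in
DiscoverGreyObjectsOnPage below, where the pattern strings list an object's
own mark bit first. Transcribed directly into a classifier:

    enum class ColourSketch { kWhite, kBlack, kGrey, kImpossible };

    // "00" = white, "10" = black, "11" = grey, "01" = impossible.
    static ColourSketch Classify(bool first_bit, bool second_bit) {
      if (first_bit) return second_bit ? ColourSketch::kGrey
                                       : ColourSketch::kBlack;
      return second_bit ? ColourSketch::kImpossible : ColourSketch::kWhite;
    }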
 
 
-class OverflowedObjectsScanner : public AllStatic {
- public:
-  // Fill the marking stack with overflowed objects returned by the given
-  // iterator.  Stop when the marking stack is filled or the end of the space
-  // is reached, whichever comes first.
-  template<class T>
-  static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
-                                           T* it) {
-    // The caller should ensure that the marking stack is initially not full,
-    // so that we don't waste effort pointlessly scanning for objects.
-    ASSERT(!collector->marking_stack_.is_full());
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
 
-    for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
-      if (object->IsOverflowed()) {
-        object->ClearOverflow();
-        ASSERT(object->IsMarked());
-        ASSERT(HEAP->Contains(object));
-        collector->marking_stack_.Push(object);
-        if (collector->marking_stack_.is_full()) return;
-      }
+
+static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  MarkBit::CellType* cells = p->markbits()->cells();
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  Address cell_base = p->area_start();
+  int cell_index = Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(cell_base)));
+
+
+  for (;
+       cell_index < last_cell_index;
+       cell_index++, cell_base += 32 * kPointerSize) {
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(cell_base))));
+
+    const MarkBit::CellType current_cell = cells[cell_index];
+    if (current_cell == 0) continue;
+
+    const MarkBit::CellType next_cell = cells[cell_index + 1];
+    MarkBit::CellType grey_objects = current_cell &
+        ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
+
+    int offset = 0;
+    while (grey_objects != 0) {
+      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
+      grey_objects >>= trailing_zeros;
+      offset += trailing_zeros;
+      MarkBit markbit(&cells[cell_index], 1 << offset, false);
+      ASSERT(Marking::IsGrey(markbit));
+      Marking::GreyToBlack(markbit);
+      Address addr = cell_base + offset * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(addr);
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
+      marking_deque->PushBlack(object);
+      if (marking_deque->IsFull()) return;
+      offset += 2;
+      grey_objects >>= 2;
+    }
+
+    grey_objects >>= (Bitmap::kBitsPerCell - 1);
+  }
+}
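
The grey_objects expression above computes, for every bit i of the cell,
current_cell[i] & current_cell[i + 1], borrowing bit 0 of the next cell for
the pair that straddles the boundary; the result has a bit set exactly for
each "11" (grey) pair. A worked example:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Bits 0 and 1 set: a grey object at offset 0.  Bit 4 set with
      // bit 5 clear: a black object at offset 4.
      uint32_t current = 0x13;  // binary 1 0011
      uint32_t next = 0;        // no pair straddles the boundary here
      uint32_t grey = current & ((current >> 1) | (next << 31));
      assert(grey == 0x1);      // only the grey pair at bit 0 survives
      return 0;
    }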
+
+
+static void DiscoverGreyObjectsInSpace(Heap* heap,
+                                       MarkingDeque* marking_deque,
+                                       PagedSpace* space) {
+  if (!space->was_swept_conservatively()) {
+    HeapObjectIterator it(space);
+    DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
+  } else {
+    PageIterator it(space);
+    while (it.has_next()) {
+      Page* p = it.next();
+      DiscoverGreyObjectsOnPage(marking_deque, p);
+      if (marking_deque->IsFull()) return;
     }
   }
-};
+}
 
 
 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
-  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
+  Object* o = *p;
+  if (!o->IsHeapObject()) return false;
+  HeapObject* heap_object = HeapObject::cast(o);
+  MarkBit mark = Marking::MarkBitFrom(heap_object);
+  return !mark.Get();
 }
 
 
 void MarkCompactCollector::MarkSymbolTable() {
-  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
+  SymbolTable* symbol_table = heap()->symbol_table();
   // Mark the symbol table itself.
-  SetMark(symbol_table);
+  MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
+  SetMark(symbol_table, symbol_table_mark);
   // Explicitly mark the prefix.
   MarkingVisitor marker(heap());
   symbol_table->IteratePrefix(&marker);
-  ProcessMarkingStack();
+  ProcessMarkingDeque();
 }
 
 
@@ -1349,9 +2072,9 @@
   MarkSymbolTable();
 
   // There may be overflowed objects in the heap.  Visit them now.
-  while (marking_stack_.overflowed()) {
-    RefillMarkingStack();
-    EmptyMarkingStack();
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
   }
 }
 
@@ -1369,9 +2092,13 @@
     bool group_marked = false;
     for (size_t j = 0; j < entry->length_; j++) {
       Object* object = *objects[j];
-      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
-        group_marked = true;
-        break;
+      if (object->IsHeapObject()) {
+        HeapObject* heap_object = HeapObject::cast(object);
+        MarkBit mark = Marking::MarkBitFrom(heap_object);
+        if (mark.Get()) {
+          group_marked = true;
+          break;
+        }
       }
     }
 
@@ -1380,17 +2107,21 @@
       continue;
     }
 
-    // An object in the group is marked, so mark all heap objects in
-    // the group.
+    // An object in the group is marked, so mark as grey all white heap
+    // objects in the group.
     for (size_t j = 0; j < entry->length_; ++j) {
-      if ((*objects[j])->IsHeapObject()) {
-        MarkObject(HeapObject::cast(*objects[j]));
+      Object* object = *objects[j];
+      if (object->IsHeapObject()) {
+        HeapObject* heap_object = HeapObject::cast(object);
+        MarkBit mark = Marking::MarkBitFrom(heap_object);
+        MarkObject(heap_object, mark);
       }
     }
 
-    // Once the entire group has been marked, dispose it because it's
-    // not needed anymore.
+    // Once the entire group has been colored grey, set the object group
+    // to NULL so it won't be processed again.
     entry->Dispose();
+    object_groups->at(i) = NULL;
   }
   object_groups->Rewind(last);
 }
@@ -1405,7 +2136,7 @@
     ImplicitRefGroup* entry = ref_groups->at(i);
     ASSERT(entry != NULL);
 
-    if (!(*entry->parent_)->IsMarked()) {
+    if (!IsMarked(*entry->parent_)) {
       (*ref_groups)[last++] = entry;
       continue;
     }
@@ -1414,7 +2145,9 @@
     // A parent object is marked, so mark all child heap objects.
     for (size_t j = 0; j < entry->length_; ++j) {
       if ((*children[j])->IsHeapObject()) {
-        MarkObject(HeapObject::cast(*children[j]));
+        HeapObject* child = HeapObject::cast(*children[j]);
+        MarkBit mark = Marking::MarkBitFrom(child);
+        MarkObject(child, mark);
       }
     }
 
@@ -1430,21 +2163,17 @@
 // Before: the marking stack contains zero or more heap object pointers.
 // After: the marking stack is empty, and all objects reachable from the
 // marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingStack() {
-  while (!marking_stack_.is_empty()) {
-    while (!marking_stack_.is_empty()) {
-      HeapObject* object = marking_stack_.Pop();
+void MarkCompactCollector::EmptyMarkingDeque() {
+  while (!marking_deque_.IsEmpty()) {
+    while (!marking_deque_.IsEmpty()) {
+      HeapObject* object = marking_deque_.Pop();
       ASSERT(object->IsHeapObject());
       ASSERT(heap()->Contains(object));
-      ASSERT(object->IsMarked());
-      ASSERT(!object->IsOverflowed());
+      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
 
-      // Because the object is marked, we have to recover the original map
-      // pointer and use it to mark the object's body.
-      MapWord map_word = object->map_word();
-      map_word.ClearMark();
-      Map* map = map_word.ToMap();
-      MarkObject(map);
+      Map* map = object->map();
+      MarkBit map_mark = Marking::MarkBitFrom(map);
+      MarkObject(map, map_mark);
 
       StaticMarkingVisitor::IterateBody(map, object);
     }
@@ -1461,39 +2190,45 @@
 // before sweeping completes.  If sweeping completes, there are no remaining
 // overflowed objects in the heap, so the overflow flag on the marking stack
 // is cleared.
-void MarkCompactCollector::RefillMarkingStack() {
-  ASSERT(marking_stack_.overflowed());
+void MarkCompactCollector::RefillMarkingDeque() {
+  ASSERT(marking_deque_.overflowed());
 
-  SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
-  if (marking_stack_.is_full()) return;
+  SemiSpaceIterator new_it(heap()->new_space());
+  DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
-                                    &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->old_pointer_space());
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->old_data_space());
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->code_space());
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->map_space());
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->cell_space());
+  if (marking_deque_.IsFull()) return;
 
-  LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
-  if (marking_stack_.is_full()) return;
+  LargeObjectIterator lo_it(heap()->lo_space());
+  DiscoverGreyObjectsWithIterator(heap(),
+                                  &marking_deque_,
+                                  &lo_it);
+  if (marking_deque_.IsFull()) return;
 
-  marking_stack_.clear_overflowed();
+  marking_deque_.ClearOverflowed();
 }
 
 
@@ -1501,23 +2236,23 @@
 // stack.  Before: the marking stack contains zero or more heap object
 // pointers.  After: the marking stack is empty and there are no overflowed
 // objects in the heap.
-void MarkCompactCollector::ProcessMarkingStack() {
-  EmptyMarkingStack();
-  while (marking_stack_.overflowed()) {
-    RefillMarkingStack();
-    EmptyMarkingStack();
+void MarkCompactCollector::ProcessMarkingDeque() {
+  EmptyMarkingDeque();
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
   }
 }
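
The whole deque protocol in one self-contained sketch: a push that would
overflow leaves the object grey in the heap and sets a flag, and processing
alternates refills (rediscovering those grey objects) with drains until no
work remains. The grey_in_heap vector stands in for the heap scan performed
by RefillMarkingDeque; body scanning of popped objects is elided:

    #include <vector>

    struct MarkingDequeSketch {
      std::vector<int> deque;         // bounded work list
      std::vector<int> grey_in_heap;  // objects left grey after overflow
      size_t capacity;
      bool overflowed;

      MarkingDequeSketch() : capacity(4), overflowed(false) {}

      void Push(int obj) {
        if (deque.size() == capacity) {
          overflowed = true;
          grey_in_heap.push_back(obj);  // stays grey, found again later
        } else {
          deque.push_back(obj);
        }
      }
      void Empty() {
        while (!deque.empty()) deque.pop_back();  // scanning elided
      }
      void Refill() {
        while (!grey_in_heap.empty() && deque.size() < capacity) {
          deque.push_back(grey_in_heap.back());
          grey_in_heap.pop_back();
        }
        if (grey_in_heap.empty()) overflowed = false;  // ClearOverflowed
      }
      void Process() {  // mirrors ProcessMarkingDeque above
        Empty();
        while (overflowed) {
          Refill();
          Empty();
        }
      }
    };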
 
 
 void MarkCompactCollector::ProcessExternalMarking() {
   bool work_to_do = true;
-  ASSERT(marking_stack_.is_empty());
+  ASSERT(marking_deque_.IsEmpty());
   while (work_to_do) {
     MarkObjectGroups();
     MarkImplicitRefGroups();
-    work_to_do = !marking_stack_.is_empty();
-    ProcessMarkingStack();
+    work_to_do = !marking_deque_.IsEmpty();
+    ProcessMarkingDeque();
   }
 }
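
ProcessExternalMarking runs to a fixed point because marking one group's
members can cause further groups, or implicit-ref parents, to become marked.
The same control shape with the phases abstracted into callables:

    #include <functional>

    // Alternate group marking with transitive marking until a pass adds
    // nothing new to the deque.
    static void FixedPointMark(const std::function<void()>& mark_groups,
                               const std::function<bool()>& deque_is_empty,
                               const std::function<void()>& drain) {
      bool work_to_do = true;
      while (work_to_do) {
        mark_groups();
        work_to_do = !deque_is_empty();
        drain();
      }
    }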
 
@@ -1529,19 +2264,64 @@
   // with the C stack limit check.
   PostponeInterruptsScope postpone(heap()->isolate());
 
+  bool incremental_marking_overflowed = false;
+  IncrementalMarking* incremental_marking = heap_->incremental_marking();
+  if (was_marked_incrementally_) {
+    // Finalize the incremental marking and check whether we had an overflow.
+    // Both markers use the grey color to mark overflowed objects, so the
+    // non-incremental marker can deal with them as if the overflow had
+    // occurred during normal marking.  But the incremental marker uses a
+    // separate marking deque, so we have to explicitly copy its overflow
+    // state.
+    incremental_marking->Finalize();
+    incremental_marking_overflowed =
+        incremental_marking->marking_deque()->overflowed();
+    incremental_marking->marking_deque()->ClearOverflowed();
+  } else {
+    // Abort any pending incremental activities, e.g. incremental sweeping.
+    incremental_marking->Abort();
+  }
+
 #ifdef DEBUG
   ASSERT(state_ == PREPARE_GC);
   state_ = MARK_LIVE_OBJECTS;
 #endif
-  // The to space contains live objects, the from space is used as a marking
-  // stack.
-  marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
-                            heap()->new_space()->FromSpaceHigh());
+  // The to space contains live objects, a page in from space is used as a
+  // marking stack.
+  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
+  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
+  if (FLAG_force_marking_deque_overflows) {
+    marking_deque_end = marking_deque_start + 64 * kPointerSize;
+  }
+  marking_deque_.Initialize(marking_deque_start,
+                            marking_deque_end);
+  ASSERT(!marking_deque_.overflowed());
 
-  ASSERT(!marking_stack_.overflowed());
+  if (incremental_marking_overflowed) {
+    // There are overflowed objects left in the heap after incremental marking.
+    marking_deque_.SetOverflowed();
+  }
 
   PrepareForCodeFlushing();
 
+  if (was_marked_incrementally_) {
+    // There is no write barrier on cells so we have to scan them now at the end
+    // of the incremental marking.
+    {
+      HeapObjectIterator cell_iterator(heap()->cell_space());
+      HeapObject* cell;
+      while ((cell = cell_iterator.Next()) != NULL) {
+        ASSERT(cell->IsJSGlobalPropertyCell());
+        if (IsMarked(cell)) {
+          int offset = JSGlobalPropertyCell::kValueOffset;
+          StaticMarkingVisitor::VisitPointer(
+              heap(),
+              reinterpret_cast<Object**>(cell->address() + offset));
+        }
+      }
+    }
+  }
+
   RootMarkingVisitor root_visitor(heap());
   MarkRoots(&root_visitor);
 
@@ -1560,15 +2340,20 @@
       &IsUnmarkedHeapObject);
   // Then we mark the objects and process the transitive closure.
   heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
-  while (marking_stack_.overflowed()) {
-    RefillMarkingStack();
-    EmptyMarkingStack();
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
   }
 
   // Repeat host application specific marking to mark unmarked objects
   // reachable from the weak roots.
   ProcessExternalMarking();
 
+  AfterMarking();
+}
+
+
+void MarkCompactCollector::AfterMarking() {
   // Object literal map caches reference symbols (cache keys) and maps
  // (cache values). At this point, still-useful maps have already been
   // marked. Mark the keys for the alive values before we process the
@@ -1578,7 +2363,7 @@
   // Prune the symbol table removing all symbols only pointed to by the
   // symbol table.  Cannot use symbol_table() here because the symbol
   // table is marked.
-  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
+  SymbolTable* symbol_table = heap()->symbol_table();
   SymbolTableCleaner v(heap());
   symbol_table->IterateElements(&v);
   symbol_table->ElementsRemoved(v.PointersRemoved());
@@ -1598,8 +2383,10 @@
     code_flusher_->ProcessCandidates();
   }
 
-  // Clean up dead objects from the runtime profiler.
-  heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
+  if (!FLAG_watch_ic_patching) {
+    // Clean up dead objects from the runtime profiler.
+    heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
+  }
 }
 
 
@@ -1607,13 +2394,13 @@
   Object* raw_context = heap()->global_contexts_list_;
   while (raw_context != heap()->undefined_value()) {
     Context* context = reinterpret_cast<Context*>(raw_context);
-    if (context->IsMarked()) {
+    if (IsMarked(context)) {
       HeapObject* raw_map_cache =
           HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
       // A map cache may be reachable from the stack. In this case
       // it's already transitively marked and it's too late to clean
       // up its parts.
-      if (!raw_map_cache->IsMarked() &&
+      if (!IsMarked(raw_map_cache) &&
           raw_map_cache != heap()->undefined_value()) {
         MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
         int existing_elements = map_cache->NumberOfElements();
@@ -1623,17 +2410,16 @@
              i += MapCache::kEntrySize) {
           Object* raw_key = map_cache->get(i);
           if (raw_key == heap()->undefined_value() ||
-              raw_key == heap()->null_value()) continue;
+              raw_key == heap()->the_hole_value()) continue;
           STATIC_ASSERT(MapCache::kEntrySize == 2);
           Object* raw_map = map_cache->get(i + 1);
-          if (raw_map->IsHeapObject() &&
-              HeapObject::cast(raw_map)->IsMarked()) {
+          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
             ++used_elements;
           } else {
             // Delete useless entries with unmarked maps.
             ASSERT(raw_map->IsMap());
-            map_cache->set_null_unchecked(heap(), i);
-            map_cache->set_null_unchecked(heap(), i + 1);
+            map_cache->set_the_hole(i);
+            map_cache->set_the_hole(i + 1);
           }
         }
         if (used_elements == 0) {
@@ -1643,64 +2429,38 @@
           // extra complexity during GC. We rely on subsequent cache
           // usages (EnsureCapacity) to do this.
           map_cache->ElementsRemoved(existing_elements - used_elements);
-          MarkObject(map_cache);
+          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
+          MarkObject(map_cache, map_cache_markbit);
         }
       }
     }
     // Move to next element in the list.
     raw_context = context->get(Context::NEXT_CONTEXT_LINK);
   }
-  ProcessMarkingStack();
+  ProcessMarkingDeque();
 }
 
 
-#ifdef DEBUG
-void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
-  live_bytes_ += obj->Size();
-  if (heap()->new_space()->Contains(obj)) {
-    live_young_objects_size_ += obj->Size();
-  } else if (heap()->map_space()->Contains(obj)) {
-    ASSERT(obj->IsMap());
-    live_map_objects_size_ += obj->Size();
-  } else if (heap()->cell_space()->Contains(obj)) {
-    ASSERT(obj->IsJSGlobalPropertyCell());
-    live_cell_objects_size_ += obj->Size();
-  } else if (heap()->old_pointer_space()->Contains(obj)) {
-    live_old_pointer_objects_size_ += obj->Size();
-  } else if (heap()->old_data_space()->Contains(obj)) {
-    live_old_data_objects_size_ += obj->Size();
-  } else if (heap()->code_space()->Contains(obj)) {
-    live_code_objects_size_ += obj->Size();
-  } else if (heap()->lo_space()->Contains(obj)) {
-    live_lo_objects_size_ += obj->Size();
-  } else {
-    UNREACHABLE();
+void MarkCompactCollector::ReattachInitialMaps() {
+  HeapObjectIterator map_iterator(heap()->map_space());
+  for (HeapObject* obj = map_iterator.Next();
+       obj != NULL;
+       obj = map_iterator.Next()) {
+    if (obj->IsFreeSpace()) continue;
+    Map* map = Map::cast(obj);
+
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
+
+    if (map->attached_to_shared_function_info()) {
+      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
+    }
   }
 }
-#endif  // DEBUG
-
-
-void MarkCompactCollector::SweepLargeObjectSpace() {
-#ifdef DEBUG
-  ASSERT(state_ == MARK_LIVE_OBJECTS);
-  state_ =
-      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
-#endif
-  // Deallocate unmarked objects and clear marked bits for marked objects.
-  heap()->lo_space()->FreeUnmarkedObjects();
-}
-
-
-// Safe to use during marking phase only.
-bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
-  MapWord metamap = object->map_word();
-  metamap.ClearMark();
-  return metamap.ToMap()->instance_type() == MAP_TYPE;
-}
 
 
 void MarkCompactCollector::ClearNonLiveTransitions() {
-  HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
+  HeapObjectIterator map_iterator(heap()->map_space());
   // Iterate over the map space, setting map transitions that go from
   // a marked map to an unmarked map to null transitions.  At the same time,
   // set all the prototype fields of maps back to their original value,
@@ -1711,96 +2471,111 @@
   // scan the descriptor arrays of those maps, not all maps.
   // All of these actions are carried out only on maps of JSObjects
   // and related subtypes.
-  for (HeapObject* obj = map_iterator.next();
-       obj != NULL; obj = map_iterator.next()) {
+  for (HeapObject* obj = map_iterator.Next();
+       obj != NULL; obj = map_iterator.Next()) {
     Map* map = reinterpret_cast<Map*>(obj);
-    if (!map->IsMarked() && map->IsByteArray()) continue;
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    if (map->IsFreeSpace()) continue;
 
-    ASSERT(SafeIsMap(map));
+    ASSERT(map->IsMap());
     // Only JSObject and subtypes have map transitions and back pointers.
-    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
 
-    if (map->IsMarked() && map->attached_to_shared_function_info()) {
+    if (map_mark.Get() &&
+        map->attached_to_shared_function_info()) {
       // This map is used for inobject slack tracking and has been detached
       // from SharedFunctionInfo during the mark phase.
       // Since it survived the GC, reattach it now.
       map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
     }
 
-    // Clear dead prototype transitions.
-    int number_of_transitions = map->NumberOfProtoTransitions();
-    if (number_of_transitions > 0) {
-      FixedArray* prototype_transitions =
-          map->unchecked_prototype_transitions();
-      int new_number_of_transitions = 0;
-      const int header = Map::kProtoTransitionHeaderSize;
-      const int proto_offset =
-          header + Map::kProtoTransitionPrototypeOffset;
-      const int map_offset = header + Map::kProtoTransitionMapOffset;
-      const int step = Map::kProtoTransitionElementsPerEntry;
-      for (int i = 0; i < number_of_transitions; i++) {
-        Object* prototype = prototype_transitions->get(proto_offset + i * step);
-        Object* cached_map = prototype_transitions->get(map_offset + i * step);
-        if (HeapObject::cast(prototype)->IsMarked() &&
-            HeapObject::cast(cached_map)->IsMarked()) {
-          if (new_number_of_transitions != i) {
-            prototype_transitions->set_unchecked(
-                heap_,
-                proto_offset + new_number_of_transitions * step,
-                prototype,
-                UPDATE_WRITE_BARRIER);
-            prototype_transitions->set_unchecked(
-                heap_,
-                map_offset + new_number_of_transitions * step,
-                cached_map,
-                SKIP_WRITE_BARRIER);
-          }
-          new_number_of_transitions++;
-        }
-      }
+    ClearNonLivePrototypeTransitions(map);
+    ClearNonLiveMapTransitions(map, map_mark);
+  }
+}
 
-      // Fill slots that became free with undefined value.
-      Object* undefined = heap()->raw_unchecked_undefined_value();
-      for (int i = new_number_of_transitions * step;
-           i < number_of_transitions * step;
-           i++) {
-        prototype_transitions->set_unchecked(heap_,
-                                             header + i,
-                                             undefined,
-                                             SKIP_WRITE_BARRIER);
+
+void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
+  int number_of_transitions = map->NumberOfProtoTransitions();
+  FixedArray* prototype_transitions = map->prototype_transitions();
+
+  int new_number_of_transitions = 0;
+  const int header = Map::kProtoTransitionHeaderSize;
+  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
+  const int map_offset = header + Map::kProtoTransitionMapOffset;
+  const int step = Map::kProtoTransitionElementsPerEntry;
+  for (int i = 0; i < number_of_transitions; i++) {
+    Object* prototype = prototype_transitions->get(proto_offset + i * step);
+    Object* cached_map = prototype_transitions->get(map_offset + i * step);
+    if (IsMarked(prototype) && IsMarked(cached_map)) {
+      int proto_index = proto_offset + new_number_of_transitions * step;
+      int map_index = map_offset + new_number_of_transitions * step;
+      if (new_number_of_transitions != i) {
+        prototype_transitions->set_unchecked(
+            heap_,
+            proto_index,
+            prototype,
+            UPDATE_WRITE_BARRIER);
+        prototype_transitions->set_unchecked(
+            heap_,
+            map_index,
+            cached_map,
+            SKIP_WRITE_BARRIER);
       }
-      map->SetNumberOfProtoTransitions(new_number_of_transitions);
+      Object** slot =
+          HeapObject::RawField(prototype_transitions,
+                               FixedArray::OffsetOfElementAt(proto_index));
+      RecordSlot(slot, slot, prototype);
+      new_number_of_transitions++;
+    }
+  }
+
+  if (new_number_of_transitions != number_of_transitions) {
+    map->SetNumberOfProtoTransitions(new_number_of_transitions);
+  }
+
+  // Fill slots that became free with undefined value.
+  for (int i = new_number_of_transitions * step;
+       i < number_of_transitions * step;
+       i++) {
+    prototype_transitions->set_undefined(heap_, header + i);
+  }
+}
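
The new ClearNonLivePrototypeTransitions compacts the transition table in
place: surviving (prototype, cached map) pairs are slid toward the front over
dead entries, surviving slots are re-recorded, and the freed tail is filled
with undefined. A standalone C++ sketch of the same stride-2 compaction, with
plain ints standing in for heap objects (all names here are illustrative, not
V8's):

    #include <cstdio>

    const int kStep = 2;  // mirrors Map::kProtoTransitionElementsPerEntry

    // Slides live (prototype, map) pairs toward the front; zero stands in
    // both for a dead reference and for the undefined filler.  Returns the
    // new number of transitions.
    int CompactTransitions(int* entries, int count) {
      int live = 0;
      for (int i = 0; i < count; i++) {
        int proto = entries[i * kStep];
        int map = entries[i * kStep + 1];
        if (proto != 0 && map != 0) {  // both halves survived the GC
          entries[live * kStep] = proto;
          entries[live * kStep + 1] = map;
          live++;
        }
      }
      // Fill slots that became free, as the real code does with undefined.
      for (int i = live * kStep; i < count * kStep; i++) entries[i] = 0;
      return live;
    }

    int main() {
      int table[] = {1, 10, 0, 0, 2, 20, 3, 0};
      printf("%d live entries\n", CompactTransitions(table, 4));  // prints 2
    }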
+
+
+void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
+                                                      MarkBit map_mark) {
+  // Follow the chain of back pointers to find the prototype.
+  Object* real_prototype = map;
+  while (real_prototype->IsMap()) {
+    real_prototype = Map::cast(real_prototype)->prototype();
+    ASSERT(real_prototype->IsHeapObject());
+  }
+
+  // Follow back pointers, setting them to prototype, clearing map transitions
+  // when necessary.
+  Map* current = map;
+  bool current_is_alive = map_mark.Get();
+  bool on_dead_path = !current_is_alive;
+  while (current->IsMap()) {
+    Object* next = current->prototype();
+    // There should never be a dead map above a live map.
+    ASSERT(on_dead_path || current_is_alive);
+
+    // A live map above a dead map indicates a dead transition. This test will
+    // always be false on the first iteration.
+    if (on_dead_path && current_is_alive) {
+      on_dead_path = false;
+      current->ClearNonLiveTransitions(heap(), real_prototype);
     }
 
-    // Follow the chain of back pointers to find the prototype.
-    Map* current = map;
-    while (SafeIsMap(current)) {
-      current = reinterpret_cast<Map*>(current->prototype());
-      ASSERT(current->IsHeapObject());
-    }
-    Object* real_prototype = current;
+    Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
+    *slot = real_prototype;
+    if (current_is_alive) RecordSlot(slot, slot, real_prototype);
 
-    // Follow back pointers, setting them to prototype,
-    // clearing map transitions when necessary.
-    current = map;
-    bool on_dead_path = !current->IsMarked();
-    Object* next;
-    while (SafeIsMap(current)) {
-      next = current->prototype();
-      // There should never be a dead map above a live map.
-      ASSERT(on_dead_path || current->IsMarked());
-
-      // A live map above a dead map indicates a dead transition.
-      // This test will always be false on the first iteration.
-      if (on_dead_path && current->IsMarked()) {
-        on_dead_path = false;
-        current->ClearNonLiveTransitions(heap(), real_prototype);
-      }
-      *HeapObject::RawField(current, Map::kPrototypeOffset) =
-          real_prototype;
-      current = reinterpret_cast<Map*>(next);
-    }
+    current = reinterpret_cast<Map*>(next);
+    current_is_alive = Marking::MarkBitFrom(current).Get();
   }
 }
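
The loop above threads through map back pointers, which this version of V8
stores in the prototype slot: it first walks prototype() until it reaches a
non-map (the real prototype), then redirects every map on the chain to that
object, clearing transitions at the first dead-to-live boundary. A minimal
sketch of just the pointer collapse, with a two-field struct standing in for
Map (illustrative only):

    #include <cassert>

    struct Obj { bool is_map; Obj* prototype; };

    // Walks back pointers until a non-map is found, then redirects each
    // map's prototype slot to the real prototype.
    Obj* CollapseBackPointers(Obj* map) {
      Obj* real_prototype = map;
      while (real_prototype->is_map) {
        real_prototype = real_prototype->prototype;
      }
      for (Obj* current = map; current->is_map; ) {
        Obj* next = current->prototype;  // save before overwriting
        current->prototype = real_prototype;
        current = next;
      }
      return real_prototype;
    }

    int main() {
      Obj proto = {false, nullptr};
      Obj parent = {true, &proto};
      Obj child = {true, &parent};
      assert(CollapseBackPointers(&child) == &proto);
      assert(child.prototype == &proto && parent.prototype == &proto);
    }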
 
@@ -1808,13 +2583,13 @@
 void MarkCompactCollector::ProcessWeakMaps() {
   Object* weak_map_obj = encountered_weak_maps();
   while (weak_map_obj != Smi::FromInt(0)) {
-    ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
+    ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
-    ObjectHashTable* table = weak_map->unchecked_table();
+    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
     for (int i = 0; i < table->Capacity(); i++) {
-      if (HeapObject::cast(table->KeyAt(i))->IsMarked()) {
+      if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
         Object* value = table->get(table->EntryToValueIndex(i));
-        StaticMarkingVisitor::MarkObjectByPointer(heap(), &value);
+        StaticMarkingVisitor::VisitPointer(heap(), &value);
         table->set_unchecked(heap(),
                              table->EntryToValueIndex(i),
                              value,
@@ -1829,12 +2604,12 @@
 void MarkCompactCollector::ClearWeakMaps() {
   Object* weak_map_obj = encountered_weak_maps();
   while (weak_map_obj != Smi::FromInt(0)) {
-    ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
+    ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
-    ObjectHashTable* table = weak_map->unchecked_table();
+    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
     for (int i = 0; i < table->Capacity(); i++) {
-      if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) {
-        table->RemoveEntry(i, heap());
+      if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
+        table->RemoveEntry(i);
       }
     }
     weak_map_obj = weak_map->next();
@@ -1843,316 +2618,97 @@
   set_encountered_weak_maps(Smi::FromInt(0));
 }
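
ProcessWeakMaps and ClearWeakMaps together implement the weak-map semantics:
a value is marked only while its key is marked, and entries with dead keys
are removed. A one-round standalone sketch of both passes over a simple
key/value table, with a std::set modelling the mark state (the real collector
re-runs the value-marking step as marking progresses; nothing here is V8 API):

    #include <cassert>
    #include <cstddef>
    #include <set>
    #include <utility>
    #include <vector>

    typedef std::pair<int, int> Entry;  // (key, value)

    // Pass 1 (ProcessWeakMaps): mark values whose keys are marked.
    // Pass 2 (ClearWeakMaps): drop entries whose key is dead.
    void ProcessAndClear(std::vector<Entry>* table, std::set<int>* marked) {
      for (std::size_t i = 0; i < table->size(); i++) {
        if (marked->count((*table)[i].first) != 0) {
          marked->insert((*table)[i].second);
        }
      }
      std::vector<Entry> live;
      for (std::size_t i = 0; i < table->size(); i++) {
        if (marked->count((*table)[i].first) != 0) live.push_back((*table)[i]);
      }
      table->swap(live);
    }

    int main() {
      std::vector<Entry> table;
      table.push_back(Entry(1, 10));  // key 1 is marked below
      table.push_back(Entry(2, 20));  // key 2 is dead
      std::set<int> marked;
      marked.insert(1);
      ProcessAndClear(&table, &marked);
      assert(table.size() == 1 && marked.count(10) == 1);
    }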
 
-// -------------------------------------------------------------------------
-// Phase 2: Encode forwarding addresses.
-// When compacting, forwarding addresses for objects in old space and map
-// space are encoded in their map pointer word (along with an encoding of
-// their map pointers).
-//
-// The excact encoding is described in the comments for class MapWord in
-// objects.h.
-//
-// An address range [start, end) can have both live and non-live objects.
-// Maximal non-live regions are marked so they can be skipped on subsequent
-// sweeps of the heap.  A distinguished map-pointer encoding is used to mark
-// free regions of one-word size (in which case the next word is the start
-// of a live object).  A second distinguished map-pointer encoding is used
-// to mark free regions larger than one word, and the size of the free
-// region (including the first word) is written to the second word of the
-// region.
-//
-// Any valid map page offset must lie in the object area of the page, so map
-// page offsets less than Page::kObjectStartOffset are invalid.  We use a
-// pair of distinguished invalid map encodings (for single word and multiple
-// words) to indicate free regions in the page found during computation of
-// forwarding addresses and skipped over in subsequent sweeps.
-
-
-// Encode a free region, defined by the given start address and size, in the
-// first word or two of the region.
-void EncodeFreeRegion(Address free_start, int free_size) {
-  ASSERT(free_size >= kIntSize);
-  if (free_size == kIntSize) {
-    Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
-  } else {
-    ASSERT(free_size >= 2 * kIntSize);
-    Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
-    Memory::int_at(free_start + kIntSize) = free_size;
-  }
-
-#ifdef DEBUG
-  // Zap the body of the free region.
-  if (FLAG_enable_slow_asserts) {
-    for (int offset = 2 * kIntSize;
-         offset < free_size;
-         offset += kPointerSize) {
-      Memory::Address_at(free_start + offset) = kZapValue;
-    }
-  }
-#endif
-}
-
-
-// Try to promote all objects in new space.  Heap numbers and sequential
-// strings are promoted to the code space, large objects to large object space,
-// and all others to the old space.
-inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
-                                           HeapObject* object,
-                                           int object_size) {
-  MaybeObject* forwarded;
-  if (object_size > heap->MaxObjectSizeInPagedSpace()) {
-    forwarded = Failure::Exception();
-  } else {
-    OldSpace* target_space = heap->TargetSpace(object);
-    ASSERT(target_space == heap->old_pointer_space() ||
-           target_space == heap->old_data_space());
-    forwarded = target_space->MCAllocateRaw(object_size);
-  }
-  Object* result;
-  if (!forwarded->ToObject(&result)) {
-    result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
-  }
-  return result;
-}
-
-
-// Allocation functions for the paged spaces call the space's MCAllocateRaw.
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
-    Heap *heap,
-    HeapObject* ignore,
-    int object_size) {
-  return heap->old_pointer_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
-    Heap* heap,
-    HeapObject* ignore,
-    int object_size) {
-  return heap->old_data_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
-    Heap* heap,
-    HeapObject* ignore,
-    int object_size) {
-  return heap->code_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
-    Heap* heap,
-    HeapObject* ignore,
-    int object_size) {
-  return heap->map_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
-    Heap* heap, HeapObject* ignore, int object_size) {
-  return heap->cell_space()->MCAllocateRaw(object_size);
-}
-
-
-// The forwarding address is encoded at the same offset as the current
-// to-space object, but in from space.
-inline void EncodeForwardingAddressInNewSpace(Heap* heap,
-                                              HeapObject* old_object,
-                                              int object_size,
-                                              Object* new_object,
-                                              int* ignored) {
-  int offset =
-      heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
-  Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
-      HeapObject::cast(new_object)->address();
-}
-
-
-// The forwarding address is encoded in the map pointer of the object as an
-// offset (in terms of live bytes) from the address of the first live object
-// in the page.
-inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
-                                                HeapObject* old_object,
-                                                int object_size,
-                                                Object* new_object,
-                                                int* offset) {
-  // Record the forwarding address of the first live object if necessary.
-  if (*offset == 0) {
-    Page::FromAddress(old_object->address())->mc_first_forwarded =
-        HeapObject::cast(new_object)->address();
-  }
-
-  MapWord encoding =
-      MapWord::EncodeAddress(old_object->map()->address(), *offset);
-  old_object->set_map_word(encoding);
-  *offset += object_size;
-  ASSERT(*offset <= Page::kObjectAreaSize);
-}
-
-
-// Most non-live objects are ignored.
-inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
-
-
-// Function template that, given a range of addresses (eg, a semispace or a
-// paged space page), iterates through the objects in the range to clear
-// mark bits and compute and encode forwarding addresses.  As a side effect,
-// maximal free chunks are marked so that they can be skipped on subsequent
-// sweeps.
-//
-// The template parameters are an allocation function, a forwarding address
-// encoding function, and a function to process non-live objects.
-template<MarkCompactCollector::AllocationFunction Alloc,
-         MarkCompactCollector::EncodingFunction Encode,
-         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
-                                             Address start,
-                                             Address end,
-                                             int* offset) {
-  // The start address of the current free region while sweeping the space.
-  // This address is set when a transition from live to non-live objects is
-  // encountered.  A value (an encoding of the 'next free region' pointer)
-  // is written to memory at this address when a transition from non-live to
-  // live objects is encountered.
-  Address free_start = NULL;
-
-  // A flag giving the state of the previously swept object.  Initially true
-  // to ensure that free_start is initialized to a proper address before
-  // trying to write to it.
-  bool is_prev_alive = true;
-
-  int object_size;  // Will be set on each iteration of the loop.
-  for (Address current = start; current < end; current += object_size) {
-    HeapObject* object = HeapObject::FromAddress(current);
-    if (object->IsMarked()) {
-      object->ClearMark();
-      collector->tracer()->decrement_marked_count();
-      object_size = object->Size();
-
-      Object* forwarded =
-          Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
-      Encode(collector->heap(), object, object_size, forwarded, offset);
-
-#ifdef DEBUG
-      if (FLAG_gc_verbose) {
-        PrintF("forward %p -> %p.\n", object->address(),
-               HeapObject::cast(forwarded)->address());
-      }
-#endif
-      if (!is_prev_alive) {  // Transition from non-live to live.
-        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
-        is_prev_alive = true;
-      }
-    } else {  // Non-live object.
-      object_size = object->Size();
-      ProcessNonLive(object, collector->heap()->isolate());
-      if (is_prev_alive) {  // Transition from live to non-live.
-        free_start = current;
-        is_prev_alive = false;
-      }
-      LiveObjectList::ProcessNonLive(object);
-    }
-  }
-
-  // If we ended on a free region, mark it.
-  if (!is_prev_alive) {
-    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
-  }
-}
-
-
-// Functions to encode the forwarding pointers in each compactable space.
-void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
-  int ignored;
-  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
-                                   EncodeForwardingAddressInNewSpace,
-                                   IgnoreNonLiveObject>(
-      this,
-      heap()->new_space()->bottom(),
-      heap()->new_space()->top(),
-      &ignored);
-}
-
-
-template<MarkCompactCollector::AllocationFunction Alloc,
-         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
-    PagedSpace* space) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-  while (it.has_next()) {
-    Page* p = it.next();
-
-    // The offset of each live object in the page from the first live object
-    // in the page.
-    int offset = 0;
-    EncodeForwardingAddressesInRange<Alloc,
-                                     EncodeForwardingAddressInPagedSpace,
-                                     ProcessNonLive>(
-        this,
-        p->ObjectAreaStart(),
-        p->AllocationTop(),
-        &offset);
-  }
-}
-
 
 // We scavenge new space simultaneously with sweeping. This is done in two
 // passes.
+//
 // The first pass migrates all alive objects from one semispace to another or
-// promotes them to old space. Forwading address is written directly into
-// first word of object without any encoding. If object is dead we are writing
+// promotes them to old space.  The forwarding address is written directly
+// into the first word of the object without any encoding.  If the object is
+// dead we write NULL as the forwarding address.
-// The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead objects during traversal of dirty regions we
-// should clear them to avoid encountering them during next dirty regions
-// iteration.
-static void MigrateObject(Heap* heap,
-                          Address dst,
-                          Address src,
-                          int size,
-                          bool to_old_space) {
-  if (to_old_space) {
-    heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
-  } else {
-    heap->CopyBlock(dst, src, size);
-  }
+//
+// The second pass updates pointers to new space in all spaces.  It is possible
+// to encounter pointers to dead new space objects during traversal of pointers
+// to new space.  We should clear them to avoid encountering them during the
+// next pointer iteration.  This is an issue if the store buffer overflows
+// and we have to scan the entire old space, including dead objects, looking
+// for pointers to new space.
+void MarkCompactCollector::MigrateObject(Address dst,
+                                         Address src,
+                                         int size,
+                                         AllocationSpace dest) {
+  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
+  if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
+    Address src_slot = src;
+    Address dst_slot = dst;
+    ASSERT(IsAligned(size, kPointerSize));
 
+    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
+      Object* value = Memory::Object_at(src_slot);
+
+      Memory::Object_at(dst_slot) = value;
+
+      if (heap_->InNewSpace(value)) {
+        heap_->store_buffer()->Mark(dst_slot);
+      } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
+        SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                           &migration_slots_buffer_,
+                           reinterpret_cast<Object**>(dst_slot),
+                           SlotsBuffer::IGNORE_OVERFLOW);
+      }
+
+      src_slot += kPointerSize;
+      dst_slot += kPointerSize;
+    }
+
+    if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
+      Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
+      Address code_entry = Memory::Address_at(code_entry_slot);
+
+      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+        SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                           &migration_slots_buffer_,
+                           SlotsBuffer::CODE_ENTRY_SLOT,
+                           code_entry_slot,
+                           SlotsBuffer::IGNORE_OVERFLOW);
+      }
+    }
+  } else if (dest == CODE_SPACE) {
+    PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
+    heap()->MoveBlock(dst, src, size);
+    SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                       &migration_slots_buffer_,
+                       SlotsBuffer::RELOCATED_CODE_OBJECT,
+                       dst,
+                       SlotsBuffer::IGNORE_OVERFLOW);
+    Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
+  } else {
+    ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
+    heap()->MoveBlock(dst, src, size);
+  }
   Memory::Address_at(src) = dst;
 }
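
For pointer-carrying destinations, MigrateObject copies word by word so that
each copied slot can be classified on the fly: slots now holding new-space
pointers are marked in the store buffer, and slots pointing into evacuation
candidates are recorded in the migration slots buffer. A standalone sketch of
that classify-while-copying loop; the predicates and vectors are simplified
stand-ins for heap_->InNewSpace, IsOnEvacuationCandidate and SlotsBuffer:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    typedef std::uintptr_t Word;

    // Copies size_in_words words from src to dst, recording interesting
    // destination slots as it goes.  The two predicates are placeholders
    // for the heap queries used by the real code.
    void MigrateWords(Word* dst, const Word* src, int size_in_words,
                      bool (*in_new_space)(Word),
                      bool (*on_candidate)(Word),
                      std::vector<Word*>* store_buffer,
                      std::vector<Word*>* slots_buffer) {
      for (int i = 0; i < size_in_words; i++) {
        dst[i] = src[i];
        if (in_new_space(dst[i])) {
          store_buffer->push_back(&dst[i]);  // old-to-new pointer
        } else if (on_candidate(dst[i])) {
          slots_buffer->push_back(&dst[i]);  // updated after evacuation
        }
      }
    }

    static bool IsOdd(Word w) { return (w & 1) != 0; }
    static bool IsBig(Word w) { return w >= 100; }

    int main() {
      Word src[3] = {1, 2, 100};
      Word dst[3];
      std::vector<Word*> store, slots;
      MigrateWords(dst, src, 3, IsOdd, IsBig, &store, &slots);
      assert(store.size() == 1 && slots.size() == 1);
    }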
 
 
-class StaticPointersToNewGenUpdatingVisitor : public
-  StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
- public:
-  static inline void VisitPointer(Heap* heap, Object** p) {
-    if (!(*p)->IsHeapObject()) return;
-
-    HeapObject* obj = HeapObject::cast(*p);
-    Address old_addr = obj->address();
-
-    if (heap->new_space()->Contains(obj)) {
-      ASSERT(heap->InFromSpace(*p));
-      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
-    }
-  }
-};
-
-
 // Visitor for updating pointers from live objects in old spaces to new space.
 // It does not expect to encounter pointers to dead objects.
-class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
+class PointersUpdatingVisitor: public ObjectVisitor {
  public:
-  explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
+  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
 
   void VisitPointer(Object** p) {
-    StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
+    UpdatePointer(p);
   }
 
   void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) {
-      StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
-    }
+    for (Object** p = start; p < end; p++) UpdatePointer(p);
+  }
+
+  void VisitEmbeddedPointer(RelocInfo* rinfo) {
+    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    Object* target = rinfo->target_object();
+    VisitPointer(&target);
+    rinfo->set_target_object(target);
   }
 
   void VisitCodeTarget(RelocInfo* rinfo) {
@@ -2172,68 +2728,96 @@
     rinfo->set_call_address(Code::cast(target)->instruction_start());
   }
 
+  static inline void UpdateSlot(Heap* heap, Object** slot) {
+    Object* obj = *slot;
+
+    if (!obj->IsHeapObject()) return;
+
+    HeapObject* heap_obj = HeapObject::cast(obj);
+
+    MapWord map_word = heap_obj->map_word();
+    if (map_word.IsForwardingAddress()) {
+      ASSERT(heap->InFromSpace(heap_obj) ||
+             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
+      HeapObject* target = map_word.ToForwardingAddress();
+      *slot = target;
+      ASSERT(!heap->InFromSpace(target) &&
+             !MarkCompactCollector::IsOnEvacuationCandidate(target));
+    }
+  }
+
  private:
+  inline void UpdatePointer(Object** p) {
+    UpdateSlot(heap_, p);
+  }
+
   Heap* heap_;
 };
 
 
-// Visitor for updating pointers from live objects in old spaces to new space.
-// It can encounter pointers to dead objects in new space when traversing map
-// space (see comment for MigrateObject).
-static void UpdatePointerToNewGen(HeapObject** p) {
-  if (!(*p)->IsHeapObject()) return;
+static void UpdatePointer(HeapObject** p, HeapObject* object) {
+  ASSERT(*p == object);
 
-  Address old_addr = (*p)->address();
-  ASSERT(HEAP->InFromSpace(*p));
+  Address old_addr = object->address();
 
   Address new_addr = Memory::Address_at(old_addr);
 
-  if (new_addr == NULL) {
-    // We encountered pointer to a dead object. Clear it so we will
-    // not visit it again during next iteration of dirty regions.
-    *p = NULL;
-  } else {
+  // The new space sweep will overwrite the map word of dead objects
+  // with NULL. In this case we do not need to transfer this entry to
+  // the store buffer which we are rebuilding.
+  if (new_addr != NULL) {
     *p = HeapObject::FromAddress(new_addr);
+  } else {
+    // We have to zap this pointer, because the store buffer may overflow later,
+    // and then we have to scan the entire heap and we don't want to find
+    // spurious new space pointers in the old space.
+    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
   }
 }
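
UpdatePointer relies on the convention set up by MigrateObject and the
new-space sweep: the first word of a migrated object's old copy holds the
forwarding address, and a NULL word marks a dead object, whose slot is zapped
with the zero smi so an overflowed store buffer scan cannot later mistake it
for a new-space pointer. A minimal sketch of that read-and-zap step over raw
words (nothing V8-specific):

    #include <cassert>
    #include <cstdint>

    // *slot is the address of an object's old copy; the old copy's first
    // word is either the forwarding address or 0 for a dead object.
    void UpdateOrZap(std::uintptr_t* slot) {
      std::uintptr_t first_word = *reinterpret_cast<std::uintptr_t*>(*slot);
      if (first_word != 0) {
        *slot = first_word;  // follow the forwarding address
      } else {
        *slot = 0;           // zap; Smi::FromInt(0) in the real code
      }
    }

    int main() {
      std::uintptr_t new_copy[1] = {0};
      std::uintptr_t old_copy[1] = {
          reinterpret_cast<std::uintptr_t>(new_copy)};
      std::uintptr_t slot = reinterpret_cast<std::uintptr_t>(old_copy);
      UpdateOrZap(&slot);
      assert(slot == reinterpret_cast<std::uintptr_t>(new_copy));
    }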
 
 
-static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
-                                                                 Object** p) {
-  Address old_addr = HeapObject::cast(*p)->address();
-  Address new_addr = Memory::Address_at(old_addr);
-  return String::cast(HeapObject::FromAddress(new_addr));
+static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
+                                                         Object** p) {
+  MapWord map_word = HeapObject::cast(*p)->map_word();
+
+  if (map_word.IsForwardingAddress()) {
+    return String::cast(map_word.ToForwardingAddress());
+  }
+
+  return String::cast(*p);
 }
 
 
-static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
+bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
+                                            int object_size) {
   Object* result;
 
-  if (object_size > heap->MaxObjectSizeInPagedSpace()) {
+  if (object_size > Page::kMaxNonCodeHeapObjectSize) {
     MaybeObject* maybe_result =
-        heap->lo_space()->AllocateRawFixedArray(object_size);
+        heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
     if (maybe_result->ToObject(&result)) {
       HeapObject* target = HeapObject::cast(result);
-      MigrateObject(heap, target->address(), object->address(), object_size,
-                    true);
-      heap->mark_compact_collector()->tracer()->
+      MigrateObject(target->address(),
+                    object->address(),
+                    object_size,
+                    LO_SPACE);
+      heap()->mark_compact_collector()->tracer()->
           increment_promoted_objects_size(object_size);
       return true;
     }
   } else {
-    OldSpace* target_space = heap->TargetSpace(object);
+    OldSpace* target_space = heap()->TargetSpace(object);
 
-    ASSERT(target_space == heap->old_pointer_space() ||
-           target_space == heap->old_data_space());
+    ASSERT(target_space == heap()->old_pointer_space() ||
+           target_space == heap()->old_data_space());
     MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
     if (maybe_result->ToObject(&result)) {
       HeapObject* target = HeapObject::cast(result);
-      MigrateObject(heap,
-                    target->address(),
+      MigrateObject(target->address(),
                     object->address(),
                     object_size,
-                    target_space == heap->old_pointer_space());
-      heap->mark_compact_collector()->tracer()->
+                    target_space->identity());
+      heap()->mark_compact_collector()->tracer()->
           increment_promoted_objects_size(object_size);
       return true;
     }
@@ -2243,84 +2827,562 @@
 }
 
 
-static void SweepNewSpace(Heap* heap, NewSpace* space) {
-  heap->CheckNewSpaceExpansionCriteria();
+void MarkCompactCollector::EvacuateNewSpace() {
+  // There are soft limits in the allocation code, designed to trigger a mark
+  // sweep collection by failing allocations.  But since we are already in
+  // a mark-sweep allocation, there is no sense in trying to trigger one.
+  AlwaysAllocateScope scope;
+  heap()->CheckNewSpaceExpansionCriteria();
 
-  Address from_bottom = space->bottom();
-  Address from_top = space->top();
+  NewSpace* new_space = heap()->new_space();
+
+  // Store allocation range before flipping semispaces.
+  Address from_bottom = new_space->bottom();
+  Address from_top = new_space->top();
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
-  space->Flip();
-  space->ResetAllocationInfo();
+  new_space->Flip();
+  new_space->ResetAllocationInfo();
 
-  int size = 0;
   int survivors_size = 0;
 
   // First pass: traverse all objects in inactive semispace, remove marks,
-  // migrate live objects and write forwarding addresses.
-  for (Address current = from_bottom; current < from_top; current += size) {
-    HeapObject* object = HeapObject::FromAddress(current);
-
-    if (object->IsMarked()) {
-      object->ClearMark();
-      heap->mark_compact_collector()->tracer()->decrement_marked_count();
-
-      size = object->Size();
+  // migrate live objects and write forwarding addresses.  This stage puts
+  // new entries in the store buffer and may cause some pages to be marked
+  // scan-on-scavenge.
+  SemiSpaceIterator from_it(from_bottom, from_top);
+  for (HeapObject* object = from_it.Next();
+       object != NULL;
+       object = from_it.Next()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) {
+      mark_bit.Clear();
+      // Don't bother decrementing the live bytes count. We'll discard the
+      // entire page at the end.
+      int size = object->Size();
       survivors_size += size;
 
       // Aggressively promote young survivors to the old space.
-      if (TryPromoteObject(heap, object, size)) {
+      if (TryPromoteObject(object, size)) {
         continue;
       }
 
       // Promotion failed. Just migrate object to another semispace.
-      // Allocation cannot fail at this point: semispaces are of equal size.
-      Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
+      MaybeObject* allocation = new_space->AllocateRaw(size);
+      if (allocation->IsFailure()) {
+        if (!new_space->AddFreshPage()) {
+          // Shouldn't happen. We are sweeping linearly, and to-space
+          // has the same number of pages as from-space, so there is
+          // always room.
+          UNREACHABLE();
+        }
+        allocation = new_space->AllocateRaw(size);
+        ASSERT(!allocation->IsFailure());
+      }
+      Object* target = allocation->ToObjectUnchecked();
 
-      MigrateObject(heap,
-                    HeapObject::cast(target)->address(),
-                    current,
+      MigrateObject(HeapObject::cast(target)->address(),
+                    object->address(),
                     size,
-                    false);
+                    NEW_SPACE);
     } else {
       // Process the dead object before we write a NULL into its header.
       LiveObjectList::ProcessNonLive(object);
 
-      size = object->Size();
-      Memory::Address_at(current) = NULL;
+      // Mark dead objects in the new space with null in their map field.
+      Memory::Address_at(object->address()) = NULL;
     }
   }
 
-  // Second pass: find pointers to new space and update them.
-  PointersToNewGenUpdatingVisitor updating_visitor(heap);
+  heap_->IncrementYoungSurvivorsCounter(survivors_size);
+  new_space->set_age_mark(new_space->top());
+}
 
-  // Update pointers in to space.
-  Address current = space->bottom();
-  while (current < space->top()) {
-    HeapObject* object = HeapObject::FromAddress(current);
-    current +=
-        StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
-                                                           object);
+
+void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
+  AlwaysAllocateScope always_allocate;
+  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
+  MarkBit::CellType* cells = p->markbits()->cells();
+  p->MarkSweptPrecisely();
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  Address cell_base = p->area_start();
+  int cell_index = Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(cell_base)));
+
+  int offsets[16];
+
+  for (;
+       cell_index < last_cell_index;
+       cell_index++, cell_base += 32 * kPointerSize) {
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(cell_base))));
+    if (cells[cell_index] == 0) continue;
+
+    int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
+    for (int i = 0; i < live_objects; i++) {
+      Address object_addr = cell_base + offsets[i] * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(object_addr);
+      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+      int size = object->Size();
+
+      MaybeObject* target = space->AllocateRaw(size);
+      if (target->IsFailure()) {
+        // OS refused to give us memory.
+        V8::FatalProcessOutOfMemory("Evacuation");
+        return;
+      }
+
+      Object* target_object = target->ToObjectUnchecked();
+
+      MigrateObject(HeapObject::cast(target_object)->address(),
+                    object_addr,
+                    size,
+                    space->identity());
+      ASSERT(object->map_word().IsForwardingAddress());
+    }
+
+    // Clear marking bits for current cell.
+    cells[cell_index] = 0;
+  }
+  p->ResetLiveBytes();
+}
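
The loops above advance cell_base by 32 * kPointerSize per bitmap cell
because each 32-bit cell carries one mark bit per word of the page. A small
sketch of the index arithmetic that turns a (cell, bit) pair back into an
object address; kPointerSize is assumed to be 8 here (a 64-bit target):

    #include <cassert>
    #include <cstdint>

    const int kPointerSize = 8;   // assumption: a 64-bit target
    const int kBitsPerCell = 32;  // one mark bit per word

    // Bit N of cell C marks the word C * 32 + N past the area start.
    std::uintptr_t ObjectAddress(std::uintptr_t area_start,
                                 int cell_index,
                                 int bit) {
      return area_start +
             (static_cast<std::uintptr_t>(cell_index) * kBitsPerCell + bit) *
                 kPointerSize;
    }

    int main() {
      // Bit 5 of cell 2 marks the 69th word of the area.
      assert(ObjectAddress(0, 2, 5) == 69 * kPointerSize);
    }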
+
+
+void MarkCompactCollector::EvacuatePages() {
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    ASSERT(p->IsEvacuationCandidate() ||
+           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+    if (p->IsEvacuationCandidate()) {
+      // During compaction we might have to request a new page.
+      // Check that space still have room for that.
+      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
+        EvacuateLiveObjectsFromPage(p);
+      } else {
+        // Without room for expansion, evacuation is not guaranteed to succeed.
+        // Pessimistically abandon unevacuated pages.
+        for (int j = i; j < npages; j++) {
+          Page* page = evacuation_candidates_[j];
+          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
+          page->ClearEvacuationCandidate();
+          page->SetFlag(Page::RESCAN_ON_EVACUATION);
+        }
+        return;
+      }
+    }
+  }
+}
+
+
+class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  virtual Object* RetainAs(Object* object) {
+    if (object->IsHeapObject()) {
+      HeapObject* heap_object = HeapObject::cast(object);
+      MapWord map_word = heap_object->map_word();
+      if (map_word.IsForwardingAddress()) {
+        return map_word.ToForwardingAddress();
+      }
+    }
+    return object;
+  }
+};
+
+
+static inline void UpdateSlot(ObjectVisitor* v,
+                              SlotsBuffer::SlotType slot_type,
+                              Address addr) {
+  switch (slot_type) {
+    case SlotsBuffer::CODE_TARGET_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
+      rinfo.Visit(v);
+      break;
+    }
+    case SlotsBuffer::CODE_ENTRY_SLOT: {
+      v->VisitCodeEntry(addr);
+      break;
+    }
+    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
+      HeapObject* obj = HeapObject::FromAddress(addr);
+      Code::cast(obj)->CodeIterateBody(v);
+      break;
+    }
+    case SlotsBuffer::DEBUG_TARGET_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
+      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
+      break;
+    }
+    case SlotsBuffer::JS_RETURN_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
+      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
+      break;
+    }
+    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+      rinfo.Visit(v);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+enum SweepingMode {
+  SWEEP_ONLY,
+  SWEEP_AND_VISIT_LIVE_OBJECTS
+};
+
+
+enum SkipListRebuildingMode {
+  REBUILD_SKIP_LIST,
+  IGNORE_SKIP_LIST
+};
+
+
+// Sweep a space precisely.  After this has been done the space can
+// be iterated precisely, hitting only the live objects.  Code space
+// is always swept precisely because we want to be able to iterate
+// over it.  Map space is swept precisely, because it is not compacted.
+// Slots in live objects pointing into evacuation candidates are updated
+// if requested.
+template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
+static void SweepPrecisely(PagedSpace* space,
+                           Page* p,
+                           ObjectVisitor* v) {
+  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
+  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
+            space->identity() == CODE_SPACE);
+  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
+
+  MarkBit::CellType* cells = p->markbits()->cells();
+  p->MarkSweptPrecisely();
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  Address free_start = p->area_start();
+  int cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(free_start)));
+
+  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+  Address object_address = free_start;
+  int offsets[16];
+
+  SkipList* skip_list = p->skip_list();
+  int curr_region = -1;
+  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
+    skip_list->Clear();
   }
 
-  // Update roots.
-  heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
-  LiveObjectList::IterateElements(&updating_visitor);
+  for (;
+       cell_index < last_cell_index;
+       cell_index++, object_address += 32 * kPointerSize) {
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(object_address))));
+    int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
+    int live_index = 0;
+    for ( ; live_objects != 0; live_objects--) {
+      Address free_end = object_address + offsets[live_index++] * kPointerSize;
+      if (free_end != free_start) {
+        space->Free(free_start, static_cast<int>(free_end - free_start));
+      }
+      HeapObject* live_object = HeapObject::FromAddress(free_end);
+      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
+      Map* map = live_object->map();
+      int size = live_object->SizeFromMap(map);
+      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
+        live_object->IterateBody(map->instance_type(), size, v);
+      }
+      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
+        int new_region_start =
+            SkipList::RegionNumber(free_end);
+        int new_region_end =
+            SkipList::RegionNumber(free_end + size - kPointerSize);
+        if (new_region_start != curr_region ||
+            new_region_end != curr_region) {
+          skip_list->AddObject(free_end, size);
+          curr_region = new_region_end;
+        }
+      }
+      free_start = free_end + size;
+    }
+    // Clear marking bits for current cell.
+    cells[cell_index] = 0;
+  }
+  if (free_start != p->area_end()) {
+    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
+  }
+  p->ResetLiveBytes();
+}
 
-  // Update pointers in old spaces.
-  heap->IterateDirtyRegions(heap->old_pointer_space(),
-                            &Heap::IteratePointersInDirtyRegion,
-                            &UpdatePointerToNewGen,
-                            heap->WATERMARK_SHOULD_BE_VALID);
 
-  heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
+static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
+  Page* p = Page::FromAddress(code->address());
+
+  if (p->IsEvacuationCandidate() ||
+      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+    return false;
+  }
+
+  Address code_start = code->address();
+  Address code_end = code_start + code->Size();
+
+  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
+  uint32_t end_index =
+      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
+
+  Bitmap* b = p->markbits();
+
+  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
+  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
+
+  MarkBit::CellType* start_cell = start_mark_bit.cell();
+  MarkBit::CellType* end_cell = end_mark_bit.cell();
+
+  if (value) {
+    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
+    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
+
+    if (start_cell == end_cell) {
+      *start_cell |= start_mask & end_mask;
+    } else {
+      *start_cell |= start_mask;
+      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
+        *cell = ~0;
+      }
+      *end_cell |= end_mask;
+    }
+  } else {
+    for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
+      *cell = 0;
+    }
+  }
+
+  return true;
+}
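
The mask arithmetic in SetMarkBitsUnderInvalidatedCode fills every bit from
the start bit through the end bit: ~(start_bit - 1) keeps the start bit and
everything above it, (end_bit << 1) - 1 keeps the end bit and everything
below it, and their intersection is the inclusive range. A standalone check
of that identity within a single 32-bit cell:

    #include <cassert>
    #include <cstdint>

    // All bits in [start, end], inclusive, within one 32-bit cell.
    std::uint32_t RangeMask(int start, int end) {
      std::uint32_t start_bit = static_cast<std::uint32_t>(1) << start;
      std::uint32_t end_bit = static_cast<std::uint32_t>(1) << end;
      std::uint32_t start_mask = ~(start_bit - 1);  // bits [start, 31]
      std::uint32_t end_mask = (end_bit << 1) - 1;  // bits [0, end]
      return start_mask & end_mask;
    }

    int main() {
      assert(RangeMask(4, 7) == 0x000000F0u);
      // For end == 31 the shift wraps to zero and the -1 gives all ones.
      assert(RangeMask(0, 31) == 0xFFFFFFFFu);
    }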
+
+
+static bool IsOnInvalidatedCodeObject(Address addr) {
+  // We did not record any slots in large objects, thus
+  // we can safely go to the page from the slot address.
+  Page* p = Page::FromAddress(addr);
+
+  // First check owner's identity because old pointer and old data spaces
+  // are swept lazily and might still have non-zero mark-bits on some
+  // pages.
+  if (p->owner()->identity() != CODE_SPACE) return false;
+
+  // In code space, the only non-zero mark-bits are on evacuation candidates
+  // (where we do not record any slots) and under invalidated code objects.
+  MarkBit mark_bit =
+      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
+
+  return mark_bit.Get();
+}
+
+
+void MarkCompactCollector::InvalidateCode(Code* code) {
+  if (heap_->incremental_marking()->IsCompacting() &&
+      !ShouldSkipEvacuationSlotRecording(code)) {
+    ASSERT(compacting_);
+
+    // If the object is white, then no slots were recorded on it yet.
+    MarkBit mark_bit = Marking::MarkBitFrom(code);
+    if (Marking::IsWhite(mark_bit)) return;
+
+    invalidated_code_.Add(code);
+  }
+}
+
+
+bool MarkCompactCollector::MarkInvalidatedCode() {
+  bool code_marked = false;
+
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+
+    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
+      code_marked = true;
+    }
+  }
+
+  return code_marked;
+}
+
+
+void MarkCompactCollector::RemoveDeadInvalidatedCode() {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
+  }
+}
+
+
+void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+    if (code != NULL) {
+      code->Iterate(visitor);
+      SetMarkBitsUnderInvalidatedCode(code, false);
+    }
+  }
+  invalidated_code_.Rewind(0);
+}
+
+
+void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+  bool code_slots_filtering_required;
+  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
+    code_slots_filtering_required = MarkInvalidatedCode();
+
+    EvacuateNewSpace();
+  }
+
+  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
+    EvacuatePages();
+  }
+
+  // Second pass: find pointers to new space and update them.
+  PointersUpdatingVisitor updating_visitor(heap());
+
+  { GCTracer::Scope gc_scope(tracer_,
+                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
+    // Update pointers in to space.
+    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
+                            heap()->new_space()->top());
+    for (HeapObject* object = to_it.Next();
+         object != NULL;
+         object = to_it.Next()) {
+      Map* map = object->map();
+      object->IterateBody(map->instance_type(),
+                          object->SizeFromMap(map),
+                          &updating_visitor);
+    }
+  }
+
+  { GCTracer::Scope gc_scope(tracer_,
+                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
+    // Update roots.
+    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+    LiveObjectList::IterateElements(&updating_visitor);
+  }
+
+  { GCTracer::Scope gc_scope(tracer_,
+                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
+    StoreBufferRebuildScope scope(heap_,
+                                  heap_->store_buffer(),
+                                  &Heap::ScavengeStoreBufferCallback);
+    heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
+  }
+
+  { GCTracer::Scope gc_scope(tracer_,
+                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
+    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+                                       migration_slots_buffer_,
+                                       code_slots_filtering_required);
+    if (FLAG_trace_fragmentation) {
+      PrintF("  migration slots buffer: %d\n",
+             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
+    }
+
+    if (compacting_ && was_marked_incrementally_) {
+      // It's difficult to filter out slots recorded for large objects.
+      LargeObjectIterator it(heap_->lo_space());
+      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+        // LargeObjectSpace is not swept yet, thus we have to skip
+        // dead objects explicitly.
+        if (!IsMarked(obj)) continue;
+
+        Page* p = Page::FromAddress(obj->address());
+        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+          obj->Iterate(&updating_visitor);
+          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+        }
+      }
+    }
+  }
+
+  int npages = evacuation_candidates_.length();
+  { GCTracer::Scope gc_scope(
+      tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
+    for (int i = 0; i < npages; i++) {
+      Page* p = evacuation_candidates_[i];
+      ASSERT(p->IsEvacuationCandidate() ||
+             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+
+      if (p->IsEvacuationCandidate()) {
+        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+                                           p->slots_buffer(),
+                                           code_slots_filtering_required);
+        if (FLAG_trace_fragmentation) {
+          PrintF("  page %p slots buffer: %d\n",
+                 reinterpret_cast<void*>(p),
+                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
+        }
+
+        // Important: skip list should be cleared only after roots were updated
+        // because root iteration traverses the stack and might have to find
+        // code objects from a non-updated pc pointing into an evacuation
+        // candidate.
+        SkipList* list = p->skip_list();
+        if (list != NULL) list->Clear();
+      } else {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+
+        switch (space->identity()) {
+          case OLD_DATA_SPACE:
+            SweepConservatively(space, p);
+            break;
+          case OLD_POINTER_SPACE:
+            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
+                space, p, &updating_visitor);
+            break;
+          case CODE_SPACE:
+            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
+                space, p, &updating_visitor);
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+      }
+    }
+  }
+
+  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
 
   // Update pointers from cells.
-  HeapObjectIterator cell_iterator(heap->cell_space());
-  for (HeapObject* cell = cell_iterator.next();
+  HeapObjectIterator cell_iterator(heap_->cell_space());
+  for (HeapObject* cell = cell_iterator.Next();
        cell != NULL;
-       cell = cell_iterator.next()) {
+       cell = cell_iterator.Next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
           reinterpret_cast<Address>(cell) +
@@ -2330,1020 +3392,575 @@
   }
 
   // Update pointer from the global contexts list.
-  updating_visitor.VisitPointer(heap->global_contexts_list_address());
+  updating_visitor.VisitPointer(heap_->global_contexts_list_address());
+
+  heap_->symbol_table()->Iterate(&updating_visitor);
 
   // Update pointers from external string table.
-  heap->UpdateNewSpaceReferencesInExternalStringTable(
-      &UpdateNewSpaceReferenceInExternalStringTableEntry);
+  heap_->UpdateReferencesInExternalStringTable(
+      &UpdateReferenceInExternalStringTableEntry);
 
-  // All pointers were updated. Update auxiliary allocation info.
-  heap->IncrementYoungSurvivorsCounter(survivors_size);
-  space->set_age_mark(space->top());
+  if (!FLAG_watch_ic_patching) {
+    // Update JSFunction pointers from the runtime profiler.
+    heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
+        &updating_visitor);
+  }
 
-  // Update JSFunction pointers from the runtime profiler.
-  heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+  EvacuationWeakObjectRetainer evacuation_object_retainer;
+  heap()->ProcessWeakReferences(&evacuation_object_retainer);
+
+  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
+  // under it.
+  ProcessInvalidatedCode(&updating_visitor);
+
+#ifdef DEBUG
+  if (FLAG_verify_heap) {
+    VerifyEvacuation(heap_);
+  }
+#endif
+
+  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
+  ASSERT(migration_slots_buffer_ == NULL);
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    if (!p->IsEvacuationCandidate()) continue;
+    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+    space->Free(p->area_start(), p->area_size());
+    p->set_scan_on_scavenge(false);
+    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+    p->ResetLiveBytes();
+    space->ReleasePage(p);
+  }
+  evacuation_candidates_.Rewind(0);
+  compacting_ = false;
 }
 
 
-static void SweepSpace(Heap* heap, PagedSpace* space) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
+static const int kStartTableEntriesPerLine = 5;
+static const int kStartTableLines = 171;
+static const int kStartTableInvalidLine = 127;
+static const int kStartTableUnusedEntry = 126;
 
-  // During sweeping of paged space we are trying to find longest sequences
-  // of pages without live objects and free them (instead of putting them on
-  // the free list).
+#define _ kStartTableUnusedEntry
+#define X kStartTableInvalidLine
+// Mark-bit to object start offset table.
+//
+// The line is indexed by the mark bits in a byte.  The first number on
+// the line describes the number of live object starts for the line and the
+// other numbers on the line describe the offsets (in words) of the object
+// starts.
+//
+// Since objects are at least 2 words large we don't have entries for two
+// consecutive 1 bits.  All entries after 170 have at least 2 consecutive bits.
+char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
+  0, _, _, _, _,  // 0
+  1, 0, _, _, _,  // 1
+  1, 1, _, _, _,  // 2
+  X, _, _, _, _,  // 3
+  1, 2, _, _, _,  // 4
+  2, 0, 2, _, _,  // 5
+  X, _, _, _, _,  // 6
+  X, _, _, _, _,  // 7
+  1, 3, _, _, _,  // 8
+  2, 0, 3, _, _,  // 9
+  2, 1, 3, _, _,  // 10
+  X, _, _, _, _,  // 11
+  X, _, _, _, _,  // 12
+  X, _, _, _, _,  // 13
+  X, _, _, _, _,  // 14
+  X, _, _, _, _,  // 15
+  1, 4, _, _, _,  // 16
+  2, 0, 4, _, _,  // 17
+  2, 1, 4, _, _,  // 18
+  X, _, _, _, _,  // 19
+  2, 2, 4, _, _,  // 20
+  3, 0, 2, 4, _,  // 21
+  X, _, _, _, _,  // 22
+  X, _, _, _, _,  // 23
+  X, _, _, _, _,  // 24
+  X, _, _, _, _,  // 25
+  X, _, _, _, _,  // 26
+  X, _, _, _, _,  // 27
+  X, _, _, _, _,  // 28
+  X, _, _, _, _,  // 29
+  X, _, _, _, _,  // 30
+  X, _, _, _, _,  // 31
+  1, 5, _, _, _,  // 32
+  2, 0, 5, _, _,  // 33
+  2, 1, 5, _, _,  // 34
+  X, _, _, _, _,  // 35
+  2, 2, 5, _, _,  // 36
+  3, 0, 2, 5, _,  // 37
+  X, _, _, _, _,  // 38
+  X, _, _, _, _,  // 39
+  2, 3, 5, _, _,  // 40
+  3, 0, 3, 5, _,  // 41
+  3, 1, 3, 5, _,  // 42
+  X, _, _, _, _,  // 43
+  X, _, _, _, _,  // 44
+  X, _, _, _, _,  // 45
+  X, _, _, _, _,  // 46
+  X, _, _, _, _,  // 47
+  X, _, _, _, _,  // 48
+  X, _, _, _, _,  // 49
+  X, _, _, _, _,  // 50
+  X, _, _, _, _,  // 51
+  X, _, _, _, _,  // 52
+  X, _, _, _, _,  // 53
+  X, _, _, _, _,  // 54
+  X, _, _, _, _,  // 55
+  X, _, _, _, _,  // 56
+  X, _, _, _, _,  // 57
+  X, _, _, _, _,  // 58
+  X, _, _, _, _,  // 59
+  X, _, _, _, _,  // 60
+  X, _, _, _, _,  // 61
+  X, _, _, _, _,  // 62
+  X, _, _, _, _,  // 63
+  1, 6, _, _, _,  // 64
+  2, 0, 6, _, _,  // 65
+  2, 1, 6, _, _,  // 66
+  X, _, _, _, _,  // 67
+  2, 2, 6, _, _,  // 68
+  3, 0, 2, 6, _,  // 69
+  X, _, _, _, _,  // 70
+  X, _, _, _, _,  // 71
+  2, 3, 6, _, _,  // 72
+  3, 0, 3, 6, _,  // 73
+  3, 1, 3, 6, _,  // 74
+  X, _, _, _, _,  // 75
+  X, _, _, _, _,  // 76
+  X, _, _, _, _,  // 77
+  X, _, _, _, _,  // 78
+  X, _, _, _, _,  // 79
+  2, 4, 6, _, _,  // 80
+  3, 0, 4, 6, _,  // 81
+  3, 1, 4, 6, _,  // 82
+  X, _, _, _, _,  // 83
+  3, 2, 4, 6, _,  // 84
+  4, 0, 2, 4, 6,  // 85
+  X, _, _, _, _,  // 86
+  X, _, _, _, _,  // 87
+  X, _, _, _, _,  // 88
+  X, _, _, _, _,  // 89
+  X, _, _, _, _,  // 90
+  X, _, _, _, _,  // 91
+  X, _, _, _, _,  // 92
+  X, _, _, _, _,  // 93
+  X, _, _, _, _,  // 94
+  X, _, _, _, _,  // 95
+  X, _, _, _, _,  // 96
+  X, _, _, _, _,  // 97
+  X, _, _, _, _,  // 98
+  X, _, _, _, _,  // 99
+  X, _, _, _, _,  // 100
+  X, _, _, _, _,  // 101
+  X, _, _, _, _,  // 102
+  X, _, _, _, _,  // 103
+  X, _, _, _, _,  // 104
+  X, _, _, _, _,  // 105
+  X, _, _, _, _,  // 106
+  X, _, _, _, _,  // 107
+  X, _, _, _, _,  // 108
+  X, _, _, _, _,  // 109
+  X, _, _, _, _,  // 110
+  X, _, _, _, _,  // 111
+  X, _, _, _, _,  // 112
+  X, _, _, _, _,  // 113
+  X, _, _, _, _,  // 114
+  X, _, _, _, _,  // 115
+  X, _, _, _, _,  // 116
+  X, _, _, _, _,  // 117
+  X, _, _, _, _,  // 118
+  X, _, _, _, _,  // 119
+  X, _, _, _, _,  // 120
+  X, _, _, _, _,  // 121
+  X, _, _, _, _,  // 122
+  X, _, _, _, _,  // 123
+  X, _, _, _, _,  // 124
+  X, _, _, _, _,  // 125
+  X, _, _, _, _,  // 126
+  X, _, _, _, _,  // 127
+  1, 7, _, _, _,  // 128
+  2, 0, 7, _, _,  // 129
+  2, 1, 7, _, _,  // 130
+  X, _, _, _, _,  // 131
+  2, 2, 7, _, _,  // 132
+  3, 0, 2, 7, _,  // 133
+  X, _, _, _, _,  // 134
+  X, _, _, _, _,  // 135
+  2, 3, 7, _, _,  // 136
+  3, 0, 3, 7, _,  // 137
+  3, 1, 3, 7, _,  // 138
+  X, _, _, _, _,  // 139
+  X, _, _, _, _,  // 140
+  X, _, _, _, _,  // 141
+  X, _, _, _, _,  // 142
+  X, _, _, _, _,  // 143
+  2, 4, 7, _, _,  // 144
+  3, 0, 4, 7, _,  // 145
+  3, 1, 4, 7, _,  // 146
+  X, _, _, _, _,  // 147
+  3, 2, 4, 7, _,  // 148
+  4, 0, 2, 4, 7,  // 149
+  X, _, _, _, _,  // 150
+  X, _, _, _, _,  // 151
+  X, _, _, _, _,  // 152
+  X, _, _, _, _,  // 153
+  X, _, _, _, _,  // 154
+  X, _, _, _, _,  // 155
+  X, _, _, _, _,  // 156
+  X, _, _, _, _,  // 157
+  X, _, _, _, _,  // 158
+  X, _, _, _, _,  // 159
+  2, 5, 7, _, _,  // 160
+  3, 0, 5, 7, _,  // 161
+  3, 1, 5, 7, _,  // 162
+  X, _, _, _, _,  // 163
+  3, 2, 5, 7, _,  // 164
+  4, 0, 2, 5, 7,  // 165
+  X, _, _, _, _,  // 166
+  X, _, _, _, _,  // 167
+  3, 3, 5, 7, _,  // 168
+  4, 0, 3, 5, 7,  // 169
+  4, 1, 3, 5, 7   // 170
+};
+#undef _
+#undef X
 
-  // Page preceding current.
-  Page* prev = Page::FromAddress(NULL);
 
-  // First empty page in a sequence.
-  Page* first_empty_page = Page::FromAddress(NULL);
+// Takes a word of mark bits.  Returns the number of objects that start in the
+// range.  Puts the offsets (in words) of the object starts in the supplied
+// array.
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
+  int objects = 0;
+  int offset = 0;
 
-  // Page preceding first empty page.
-  Page* prec_first_empty_page = Page::FromAddress(NULL);
+  // No consecutive 1 bits.
+  ASSERT((mark_bits & 0x180) != 0x180);
+  ASSERT((mark_bits & 0x18000) != 0x18000);
+  ASSERT((mark_bits & 0x1800000) != 0x1800000);
 
-  // If last used page of space ends with a sequence of dead objects
-  // we can adjust allocation top instead of puting this free area into
-  // the free list. Thus during sweeping we keep track of such areas
-  // and defer their deallocation until the sweeping of the next page
-  // is done: if one of the next pages contains live objects we have
-  // to put such area into the free list.
-  Address last_free_start = NULL;
-  int last_free_size = 0;
+  while (mark_bits != 0) {
+    int byte = (mark_bits & 0xff);
+    mark_bits >>= 8;
+    if (byte != 0) {
+      ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
+      char* table = kStartTable + byte * kStartTableEntriesPerLine;
+      int objects_in_these_8_words = table[0];
+      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
+      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
+      for (int i = 0; i < objects_in_these_8_words; i++) {
+        starts[objects++] = offset + table[1 + i];
+      }
+    }
+    offset += 8;
+  }
+  return objects;
+}
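
Each kStartTable line is simply the precomputed list of set-bit positions for
that byte value: line 21 (0b00010101) reads "3, 0, 2, 4", i.e. three object
starts at word offsets 0, 2 and 4. A brute-force decoder that reproduces a
line of the table:

    #include <cstdio>

    int main() {
      unsigned byte = 21;  // 0b00010101; kStartTable line 21 is "3, 0, 2, 4"
      for (int bit = 0; bit < 8; bit++) {
        if ((byte & (1u << bit)) != 0) printf("%d ", bit);  // prints 0 2 4
      }
      printf("\n");
    }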
+
+
+static inline Address DigestFreeStart(Address approximate_free_start,
+                                      uint32_t free_start_cell) {
+  ASSERT(free_start_cell != 0);
+
+  // No consecutive 1 bits.
+  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
+
+  int offsets[16];
+  uint32_t cell = free_start_cell;
+  int offset_of_last_live;
+  if ((cell & 0x80000000u) != 0) {
+    // This case would overflow below.
+    offset_of_last_live = 31;
+  } else {
+    // Remove all but one bit, the most significant.  This is an optimization
+    // that may or may not be worthwhile.
+    cell |= cell >> 16;
+    cell |= cell >> 8;
+    cell |= cell >> 4;
+    cell |= cell >> 2;
+    cell |= cell >> 1;
+    cell = (cell + 1) >> 1;
+    int live_objects = MarkWordToObjectStarts(cell, offsets);
+    ASSERT(live_objects == 1);
+    offset_of_last_live = offsets[live_objects - 1];
+  }
+  Address last_live_start =
+      approximate_free_start + offset_of_last_live * kPointerSize;
+  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
+  Address free_start = last_live_start + last_live->Size();
+  return free_start;
+}
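
The shift-or cascade above smears the highest 1 bit into every lower position, so that (cell + 1) >> 1 leaves exactly the most significant set bit. A minimal standalone check (the function name is the sketch's own; 0x80000000 is excluded because DigestFreeStart handles it separately to avoid the overflow in cell + 1):

    #include <cassert>
    #include <cstdint>

    static uint32_t IsolateMostSignificantBit(uint32_t cell) {
      assert(cell != 0 && (cell & 0x80000000u) == 0);
      cell |= cell >> 16;  // After the cascade, every bit at or below
      cell |= cell >> 8;   // the original MSB is set ...
      cell |= cell >> 4;
      cell |= cell >> 2;
      cell |= cell >> 1;
      return (cell + 1) >> 1;  // ... so +1 carries up to 2*MSB; halve it.
    }

    int main() {
      assert(IsolateMostSignificantBit(0x00012040u) == 0x00010000u);
      assert(IsolateMostSignificantBit(1u) == 1u);
      return 0;
    }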
+
+
+static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
+  ASSERT(cell != 0);
+
+  // No consecutive 1 bits.
+  ASSERT((cell & (cell << 1)) == 0);
+
+  int offsets[16];
+  if (cell == 0x80000000u) {  // Avoid overflow below.
+    return block_address + 31 * kPointerSize;
+  }
+  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
+  ASSERT((first_set_bit & cell) == first_set_bit);
+  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
+  ASSERT(live_objects == 1);
+  USE(live_objects);
+  return block_address + offsets[0] * kPointerSize;
+}
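
Similarly, ((cell ^ (cell - 1)) + 1) >> 1 isolates the lowest set bit: cell ^ (cell - 1) yields every bit up to and including that bit (i.e. 2*lsb - 1), so adding one and halving leaves the bit alone. The 0x80000000 special case exists because the + 1 would wrap. A quick standalone equivalence check against the classic cell & ~(cell - 1) form:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t cell = 1; cell < 0x40000000u; cell = cell * 3 + 1) {
        uint32_t via_xor = ((cell ^ (cell - 1)) + 1) >> 1;
        uint32_t via_and = cell & ~(cell - 1);
        assert(via_xor == via_and);  // both extract the lowest set bit
      }
      return 0;
    }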
+
+
+// Sweeps a space conservatively.  After this has been done, the larger free
+// spaces have been put on the free list and the smaller ones have been
+// ignored and left untouched.  A free space is always either ignored or put
+// on the free list, never split up into two parts.  This is important
+// because it means that any FreeSpace maps left actually describe a region of
+// memory that can be ignored when scanning.  Dead objects other than free
+// spaces will not contain the free space map.
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
+  MarkBit::CellType* cells = p->markbits()->cells();
+  p->MarkSweptConservatively();
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  int cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_start())));
+
+  intptr_t freed_bytes = 0;
+
+  // This is the start of the 32-word block that we are currently looking at.
+  Address block_address = p->area_start();
+
+  // Skip over all the dead objects at the start of the page and mark them free.
+  for (;
+       cell_index < last_cell_index;
+       cell_index++, block_address += 32 * kPointerSize) {
+    if (cells[cell_index] != 0) break;
+  }
+  size_t size = block_address - p->area_start();
+  if (cell_index == last_cell_index) {
+    freed_bytes += space->Free(p->area_start(), static_cast<int>(size));
+    ASSERT_EQ(0, p->LiveBytes());
+    return freed_bytes;
+  }
+  // Grow the size of the start-of-page free space a little to get up to the
+  // first live object.
+  Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
+  // Free the first free space.
+  size = free_end - p->area_start();
+  freed_bytes += space->Free(p->area_start(),
+                             static_cast<int>(size));
+  // The start of the current free area is represented in undigested form by
+  // the address of the last 32-word section that contained a live object and
+  // the marking bitmap for that cell, which describes where the live object
+  // started.  Unless we find a large free space in the bitmap, we will not
+  // digest this pair into a real address.  We start the iteration here at the
+  // first word in the marking bitmap that indicates a live object.
+  Address free_start = block_address;
+  uint32_t free_start_cell = cells[cell_index];
+
+  for ( ;
+       cell_index < last_cell_index;
+       cell_index++, block_address += 32 * kPointerSize) {
+    ASSERT(static_cast<unsigned>(cell_index) ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(block_address))));
+    uint32_t cell = cells[cell_index];
+    if (cell != 0) {
+      // We have a live object.  Check approximately whether it starts more
+      // than 32 words after the last live object.
+      if (block_address - free_start > 32 * kPointerSize) {
+        free_start = DigestFreeStart(free_start, free_start_cell);
+        if (block_address - free_start > 32 * kPointerSize) {
+          // Now that we know the exact start of the free space it still looks
+          // like we have a large enough free space to be worth bothering with,
+          // so now we need to find the start of the first live object at the
+          // end of the free space.
+          free_end = StartOfLiveObject(block_address, cell);
+          freed_bytes += space->Free(free_start,
+                                     static_cast<int>(free_end - free_start));
+        }
+      }
+      // Update our undigested record of where the current free area started.
+      free_start = block_address;
+      free_start_cell = cell;
+      // Clear marking bits for current cell.
+      cells[cell_index] = 0;
+    }
+  }
+
+  // Handle the free space at the end of the page.
+  if (block_address - free_start > 32 * kPointerSize) {
+    free_start = DigestFreeStart(free_start, free_start_cell);
+    freed_bytes += space->Free(free_start,
+                               static_cast<int>(block_address - free_start));
+  }
+
+  p->ResetLiveBytes();
+  return freed_bytes;
+}
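
A consequence of the cell-granular scan above, shown as a simplified sketch (the exact cutoff depends on where a run falls relative to 32-word cell boundaries; the byte figures assume a 4-byte pointer): only free runs spanning more than one 32-word cell are digested to an exact start and handed to the free list, while shorter gaps are ignored outright.

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;                // assumption for this sketch
      const int kCellBytes = 32 * kPointerSize;  // one mark-bitmap cell
      const int free_runs[] = {64, 128, 160, 1024};  // hypothetical gaps
      for (int i = 0; i < 4; i++) {
        printf("%4d-byte gap: %s\n", free_runs[i],
               free_runs[i] > kCellBytes ? "digested, put on free list"
                                         : "ignored, left untouched");
      }
      return 0;
    }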
+
+
+void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
+  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
+                                      sweeper == LAZY_CONSERVATIVE);
+
+  space->ClearStats();
+
+  PageIterator it(space);
+
+  intptr_t freed_bytes = 0;
+  int pages_swept = 0;
+  intptr_t newspace_size = space->heap()->new_space()->Size();
+  bool lazy_sweeping_active = false;
+  bool unused_page_present = false;
+
+  intptr_t old_space_size = heap()->PromotedSpaceSize();
+  intptr_t space_left =
+      Min(heap()->OldGenPromotionLimit(old_space_size),
+          heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
 
   while (it.has_next()) {
     Page* p = it.next();
 
-    bool is_previous_alive = true;
-    Address free_start = NULL;
-    HeapObject* object;
+    // Clear the sweeping flags to indicate that the marking bits are still
+    // intact.
+    p->ClearSweptPrecisely();
+    p->ClearSweptConservatively();
 
-    for (Address current = p->ObjectAreaStart();
-         current < p->AllocationTop();
-         current += object->Size()) {
-      object = HeapObject::FromAddress(current);
-      if (object->IsMarked()) {
-        object->ClearMark();
-        heap->mark_compact_collector()->tracer()->decrement_marked_count();
+    if (p->IsEvacuationCandidate()) {
+      ASSERT(evacuation_candidates_.length() > 0);
+      continue;
+    }
 
-        if (!is_previous_alive) {  // Transition from free to live.
-          space->DeallocateBlock(free_start,
-                                 static_cast<int>(current - free_start),
-                                 true);
-          is_previous_alive = true;
+    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+      // Will be processed in EvacuateNewSpaceAndCandidates.
+      continue;
+    }
+
+    // One unused page is kept; all further unused pages are released before
+    // they are swept.
+    if (p->LiveBytes() == 0) {
+      if (unused_page_present) {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
+                 reinterpret_cast<intptr_t>(p));
         }
-      } else {
-        heap->mark_compact_collector()->ReportDeleteIfNeeded(
-            object, heap->isolate());
-        if (is_previous_alive) {  // Transition from live to free.
-          free_start = current;
-          is_previous_alive = false;
-        }
-        LiveObjectList::ProcessNonLive(object);
+        // Adjust the unswept free bytes counter, because releasing a page
+        // expects that counter to be accurate for unswept pages.
+        space->IncreaseUnsweptFreeBytes(p);
+        space->ReleasePage(p);
+        continue;
       }
-      // The object is now unmarked for the call to Size() at the top of the
-      // loop.
+      unused_page_present = true;
     }
 
-    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
-        || (!is_previous_alive && free_start == p->ObjectAreaStart());
-
-    if (page_is_empty) {
-      // This page is empty. Check whether we are in the middle of
-      // sequence of empty pages and start one if not.
-      if (!first_empty_page->is_valid()) {
-        first_empty_page = p;
-        prec_first_empty_page = prev;
-      }
-
-      if (!is_previous_alive) {
-        // There are dead objects on this page. Update space accounting stats
-        // without putting anything into free list.
-        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
-        if (size_in_bytes > 0) {
-          space->DeallocateBlock(free_start, size_in_bytes, false);
-        }
-      }
-    } else {
-      // This page is not empty. Sequence of empty pages ended on the previous
-      // one.
-      if (first_empty_page->is_valid()) {
-        space->FreePages(prec_first_empty_page, prev);
-        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
-      }
-
-      // If there is a free ending area on one of the previous pages we have
-      // deallocate that area and put it on the free list.
-      if (last_free_size > 0) {
-        Page::FromAddress(last_free_start)->
-            SetAllocationWatermark(last_free_start);
-        space->DeallocateBlock(last_free_start, last_free_size, true);
-        last_free_start = NULL;
-        last_free_size  = 0;
-      }
-
-      // If the last region of this page was not live we remember it.
-      if (!is_previous_alive) {
-        ASSERT(last_free_size == 0);
-        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
-        last_free_start = free_start;
-      }
-    }
-
-    prev = p;
-  }
-
-  // We reached end of space. See if we need to adjust allocation top.
-  Address new_allocation_top = NULL;
-
-  if (first_empty_page->is_valid()) {
-    // Last used pages in space are empty. We can move allocation top backwards
-    // to the beginning of first empty page.
-    ASSERT(prev == space->AllocationTopPage());
-
-    new_allocation_top = first_empty_page->ObjectAreaStart();
-  }
-
-  if (last_free_size > 0) {
-    // There was a free ending area on the previous page.
-    // Deallocate it without putting it into freelist and move allocation
-    // top to the beginning of this free area.
-    space->DeallocateBlock(last_free_start, last_free_size, false);
-    new_allocation_top = last_free_start;
-  }
-
-  if (new_allocation_top != NULL) {
-#ifdef DEBUG
-    Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
-    if (!first_empty_page->is_valid()) {
-      ASSERT(new_allocation_top_page == space->AllocationTopPage());
-    } else if (last_free_size > 0) {
-      ASSERT(new_allocation_top_page == prec_first_empty_page);
-    } else {
-      ASSERT(new_allocation_top_page == first_empty_page);
-    }
-#endif
-
-    space->SetTop(new_allocation_top);
-  }
-}
-
-
-void MarkCompactCollector::EncodeForwardingAddresses() {
-  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
-  // Objects in the active semispace of the young generation may be
-  // relocated to the inactive semispace (if not promoted).  Set the
-  // relocation info to the beginning of the inactive semispace.
-  heap()->new_space()->MCResetRelocationInfo();
-
-  // Compute the forwarding pointers in each space.
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
-                                        ReportDeleteIfNeeded>(
-      heap()->old_pointer_space());
-
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
-                                        IgnoreNonLiveObject>(
-      heap()->old_data_space());
-
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
-                                        ReportDeleteIfNeeded>(
-      heap()->code_space());
-
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
-                                        IgnoreNonLiveObject>(
-      heap()->cell_space());
-
-
-  // Compute new space next to last after the old and code spaces have been
-  // compacted.  Objects in new space can be promoted to old or code space.
-  EncodeForwardingAddressesInNewSpace();
-
-  // Compute map space last because computing forwarding addresses
-  // overwrites non-live objects.  Objects in the other spaces rely on
-  // non-live map pointers to get the sizes of non-live objects.
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
-                                        IgnoreNonLiveObject>(
-      heap()->map_space());
-
-  // Write relocation info to the top page, so we can use it later.  This is
-  // done after promoting objects from the new space so we get the correct
-  // allocation top.
-  heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
-  heap()->old_data_space()->MCWriteRelocationInfoToPage();
-  heap()->code_space()->MCWriteRelocationInfoToPage();
-  heap()->map_space()->MCWriteRelocationInfoToPage();
-  heap()->cell_space()->MCWriteRelocationInfoToPage();
-}
-
-
-class MapIterator : public HeapObjectIterator {
- public:
-  explicit MapIterator(Heap* heap)
-    : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
-
-  MapIterator(Heap* heap, Address start)
-      : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
-
- private:
-  static int SizeCallback(HeapObject* unused) {
-    USE(unused);
-    return Map::kSize;
-  }
-};
-
-
-class MapCompact {
- public:
-  explicit MapCompact(Heap* heap, int live_maps)
-    : heap_(heap),
-      live_maps_(live_maps),
-      to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
-      vacant_map_it_(heap),
-      map_to_evacuate_it_(heap, to_evacuate_start_),
-      first_map_to_evacuate_(
-          reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
-  }
-
-  void CompactMaps() {
-    // As we know the number of maps to evacuate beforehand,
-    // we stop then there is no more vacant maps.
-    for (Map* next_vacant_map = NextVacantMap();
-         next_vacant_map;
-         next_vacant_map = NextVacantMap()) {
-      EvacuateMap(next_vacant_map, NextMapToEvacuate());
-    }
-
-#ifdef DEBUG
-    CheckNoMapsToEvacuate();
-#endif
-  }
-
-  void UpdateMapPointersInRoots() {
-    MapUpdatingVisitor map_updating_visitor;
-    heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
-    heap()->isolate()->global_handles()->IterateWeakRoots(
-        &map_updating_visitor);
-    LiveObjectList::IterateElements(&map_updating_visitor);
-  }
-
-  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
-    ASSERT(space != heap()->map_space());
-
-    PageIterator it(space, PageIterator::PAGES_IN_USE);
-    while (it.has_next()) {
-      Page* p = it.next();
-      UpdateMapPointersInRange(heap(),
-                               p->ObjectAreaStart(),
-                               p->AllocationTop());
-    }
-  }
-
-  void UpdateMapPointersInNewSpace() {
-    NewSpace* space = heap()->new_space();
-    UpdateMapPointersInRange(heap(), space->bottom(), space->top());
-  }
-
-  void UpdateMapPointersInLargeObjectSpace() {
-    LargeObjectIterator it(heap()->lo_space());
-    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
-      UpdateMapPointersInObject(heap(), obj);
-  }
-
-  void Finish() {
-    heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
-  }
-
-  inline Heap* heap() const { return heap_; }
-
- private:
-  Heap* heap_;
-  int live_maps_;
-  Address to_evacuate_start_;
-  MapIterator vacant_map_it_;
-  MapIterator map_to_evacuate_it_;
-  Map* first_map_to_evacuate_;
-
-  // Helper class for updating map pointers in HeapObjects.
-  class MapUpdatingVisitor: public ObjectVisitor {
-  public:
-    MapUpdatingVisitor() {}
-
-    void VisitPointer(Object** p) {
-      UpdateMapPointer(p);
-    }
-
-    void VisitPointers(Object** start, Object** end) {
-      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
-    }
-
-  private:
-    void UpdateMapPointer(Object** p) {
-      if (!(*p)->IsHeapObject()) return;
-      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
-
-      // Moved maps are tagged with overflowed map word.  They are the only
-      // objects those map word is overflowed as marking is already complete.
-      MapWord map_word = old_map->map_word();
-      if (!map_word.IsOverflowed()) return;
-
-      *p = GetForwardedMap(map_word);
-    }
-  };
-
-  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
-    while (true) {
-      HeapObject* next = it->next();
-      ASSERT(next != NULL);
-      if (next == last)
-        return NULL;
-      ASSERT(!next->IsOverflowed());
-      ASSERT(!next->IsMarked());
-      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
-      if (next->IsMap() == live)
-        return reinterpret_cast<Map*>(next);
-    }
-  }
-
-  Map* NextVacantMap() {
-    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
-    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
-    return map;
-  }
-
-  Map* NextMapToEvacuate() {
-    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
-    ASSERT(map != NULL);
-    ASSERT(map->IsMap());
-    return map;
-  }
-
-  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
-    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
-    ASSERT(map_to_evacuate->IsMap());
-
-    ASSERT(Map::kSize % 4 == 0);
-
-    map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
-        vacant_map->address(), map_to_evacuate->address(), Map::kSize);
-
-    ASSERT(vacant_map->IsMap());  // Due to memcpy above.
-
-    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
-    forwarding_map_word.SetOverflow();
-    map_to_evacuate->set_map_word(forwarding_map_word);
-
-    ASSERT(map_to_evacuate->map_word().IsOverflowed());
-    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
-  }
-
-  static Map* GetForwardedMap(MapWord map_word) {
-    ASSERT(map_word.IsOverflowed());
-    map_word.ClearOverflow();
-    Map* new_map = map_word.ToMap();
-    ASSERT_MAP_ALIGNED(new_map->address());
-    return new_map;
-  }
-
-  static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
-    ASSERT(!obj->IsMarked());
-    Map* map = obj->map();
-    ASSERT(heap->map_space()->Contains(map));
-    MapWord map_word = map->map_word();
-    ASSERT(!map_word.IsMarked());
-    if (map_word.IsOverflowed()) {
-      Map* new_map = GetForwardedMap(map_word);
-      ASSERT(heap->map_space()->Contains(new_map));
-      obj->set_map(new_map);
-
-#ifdef DEBUG
+    if (lazy_sweeping_active) {
       if (FLAG_gc_verbose) {
-        PrintF("update %p : %p -> %p\n",
-               obj->address(),
-               reinterpret_cast<void*>(map),
-               reinterpret_cast<void*>(new_map));
+        PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
+               reinterpret_cast<intptr_t>(p));
       }
-#endif
+      space->IncreaseUnsweptFreeBytes(p);
+      continue;
     }
 
-    int size = obj->SizeFromMap(map);
-    MapUpdatingVisitor map_updating_visitor;
-    obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
-    return size;
-  }
-
-  static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
-    HeapObject* object;
-    int size;
-    for (Address current = start; current < end; current += size) {
-      object = HeapObject::FromAddress(current);
-      size = UpdateMapPointersInObject(heap, object);
-      ASSERT(size > 0);
+    switch (sweeper) {
+      case CONSERVATIVE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        SweepConservatively(space, p);
+        pages_swept++;
+        break;
+      }
+      case LAZY_CONSERVATIVE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        freed_bytes += SweepConservatively(space, p);
+        pages_swept++;
+        if (space_left + freed_bytes > newspace_size) {
+          space->SetPagesToSweep(p->next_page());
+          lazy_sweeping_active = true;
+        } else {
+          if (FLAG_gc_verbose) {
+            PrintF("Only %" V8PRIdPTR " bytes freed.  Still sweeping.\n",
+                   freed_bytes);
+          }
+        }
+        break;
+      }
+      case PRECISE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        if (space->identity() == CODE_SPACE) {
+          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
+        } else {
+          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
+        }
+        pages_swept++;
+        break;
+      }
+      default: {
+        UNREACHABLE();
+      }
     }
   }
 
-#ifdef DEBUG
-  void CheckNoMapsToEvacuate() {
-    if (!FLAG_enable_slow_asserts)
-      return;
-
-    for (HeapObject* obj = map_to_evacuate_it_.next();
-         obj != NULL; obj = map_to_evacuate_it_.next())
-      ASSERT(FreeListNode::IsFreeListNode(obj));
+  if (FLAG_gc_verbose) {
+    PrintF("SweepSpace: %s (%d pages swept)\n",
+           AllocationSpaceName(space->identity()),
+           pages_swept);
   }
-#endif
-};
+
+  // Give pages that are queued to be freed back to the OS.
+  heap()->FreeQueuedChunks();
+}
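
The LAZY_CONSERVATIVE branch stops eager sweeping once space_left + freed_bytes > newspace_size, i.e. once old-generation headroom plus reclaimed bytes can absorb a full new space worth of promotions; the remaining pages are queued for lazy sweeping. A toy run with made-up numbers:

    #include <cstdio>

    int main() {
      const long newspace_size = 8L << 20;  // worst-case promotion volume
      const long space_left = 3L << 20;     // headroom below the GC limits
      const long page_yields[] = {2L << 20, 1L << 20, 4L << 20, 1L << 20};
      long freed_bytes = 0;
      for (int i = 0; i < 4; i++) {
        freed_bytes += page_yields[i];
        if (space_left + freed_bytes > newspace_size) {
          printf("swept %d pages eagerly, %ld bytes freed; rest is lazy\n",
                 i + 1, freed_bytes);
          break;
        }
      }
      return 0;
    }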
 
 
 void MarkCompactCollector::SweepSpaces() {
   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
-
-  ASSERT(state_ == SWEEP_SPACES);
-  ASSERT(!IsCompacting());
+#ifdef DEBUG
+  state_ = SWEEP_SPACES;
+#endif
+  SweeperType how_to_sweep =
+      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+  if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
+  if (sweep_precisely_) how_to_sweep = PRECISE;
   // Noncompacting collections simply sweep the spaces to clear the mark
   // bits and free the nonlive blocks (for old and map spaces).  We sweep
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
-  SweepSpace(heap(), heap()->old_pointer_space());
-  SweepSpace(heap(), heap()->old_data_space());
-  SweepSpace(heap(), heap()->code_space());
-  SweepSpace(heap(), heap()->cell_space());
-  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
-    SweepNewSpace(heap(), heap()->new_space());
-  }
-  SweepSpace(heap(), heap()->map_space());
+  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
+  SweepSpace(heap()->old_data_space(), how_to_sweep);
 
-  heap()->IterateDirtyRegions(heap()->map_space(),
-                             &heap()->IteratePointersInDirtyMapsRegion,
-                             &UpdatePointerToNewGen,
-                             heap()->WATERMARK_SHOULD_BE_VALID);
+  RemoveDeadInvalidatedCode();
+  SweepSpace(heap()->code_space(), PRECISE);
 
-  intptr_t live_maps_size = heap()->map_space()->Size();
-  int live_maps = static_cast<int>(live_maps_size / Map::kSize);
-  ASSERT(live_map_objects_size_ == live_maps_size);
+  SweepSpace(heap()->cell_space(), PRECISE);
 
-  if (heap()->map_space()->NeedsCompaction(live_maps)) {
-    MapCompact map_compact(heap(), live_maps);
+  EvacuateNewSpaceAndCandidates();
 
-    map_compact.CompactMaps();
-    map_compact.UpdateMapPointersInRoots();
+  // ClearNonLiveTransitions depends on precise sweeping of map space to
+  // detect whether an unmarked map became dead in this collection or in
+  // one of the previous ones.
+  SweepSpace(heap()->map_space(), PRECISE);
 
-    PagedSpaces spaces;
-    for (PagedSpace* space = spaces.next();
-         space != NULL; space = spaces.next()) {
-      if (space == heap()->map_space()) continue;
-      map_compact.UpdateMapPointersInPagedSpace(space);
-    }
-    map_compact.UpdateMapPointersInNewSpace();
-    map_compact.UpdateMapPointersInLargeObjectSpace();
-
-    map_compact.Finish();
-  }
-}
-
-
-// Iterate the live objects in a range of addresses (eg, a page or a
-// semispace).  The live regions of the range have been linked into a list.
-// The first live region is [first_live_start, first_live_end), and the last
-// address in the range is top.  The callback function is used to get the
-// size of each live object.
-int MarkCompactCollector::IterateLiveObjectsInRange(
-    Address start,
-    Address end,
-    LiveObjectCallback size_func) {
-  int live_objects_size = 0;
-  Address current = start;
-  while (current < end) {
-    uint32_t encoded_map = Memory::uint32_at(current);
-    if (encoded_map == kSingleFreeEncoding) {
-      current += kPointerSize;
-    } else if (encoded_map == kMultiFreeEncoding) {
-      current += Memory::int_at(current + kIntSize);
-    } else {
-      int size = (this->*size_func)(HeapObject::FromAddress(current));
-      current += size;
-      live_objects_size += size;
-    }
-  }
-  return live_objects_size;
-}
-
-
-int MarkCompactCollector::IterateLiveObjects(
-    NewSpace* space, LiveObjectCallback size_f) {
-  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
-  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
-}
-
-
-int MarkCompactCollector::IterateLiveObjects(
-    PagedSpace* space, LiveObjectCallback size_f) {
-  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
-  int total = 0;
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-  while (it.has_next()) {
-    Page* p = it.next();
-    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
-                                       p->AllocationTop(),
-                                       size_f);
-  }
-  return total;
-}
-
-
-// -------------------------------------------------------------------------
-// Phase 3: Update pointers
-
-// Helper class for updating pointers in HeapObjects.
-class UpdatingVisitor: public ObjectVisitor {
- public:
-  explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
-
-  void VisitPointer(Object** p) {
-    UpdatePointer(p);
-  }
-
-  void VisitPointers(Object** start, Object** end) {
-    // Mark all HeapObject pointers in [start, end)
-    for (Object** p = start; p < end; p++) UpdatePointer(p);
-  }
-
-  void VisitCodeTarget(RelocInfo* rinfo) {
-    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    VisitPointer(&target);
-    rinfo->set_target_address(
-        reinterpret_cast<Code*>(target)->instruction_start());
-  }
-
-  void VisitDebugTarget(RelocInfo* rinfo) {
-    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
-            rinfo->IsPatchedReturnSequence()) ||
-           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
-            rinfo->IsPatchedDebugBreakSlotSequence()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
-    VisitPointer(&target);
-    rinfo->set_call_address(
-        reinterpret_cast<Code*>(target)->instruction_start());
-  }
-
-  inline Heap* heap() const { return heap_; }
-
- private:
-  void UpdatePointer(Object** p) {
-    if (!(*p)->IsHeapObject()) return;
-
-    HeapObject* obj = HeapObject::cast(*p);
-    Address old_addr = obj->address();
-    Address new_addr;
-    ASSERT(!heap()->InFromSpace(obj));
-
-    if (heap()->new_space()->Contains(obj)) {
-      Address forwarding_pointer_addr =
-          heap()->new_space()->FromSpaceLow() +
-          heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
-      new_addr = Memory::Address_at(forwarding_pointer_addr);
-
-#ifdef DEBUG
-      ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
-             heap()->old_data_space()->Contains(new_addr) ||
-             heap()->new_space()->FromSpaceContains(new_addr) ||
-             heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
-
-      if (heap()->new_space()->FromSpaceContains(new_addr)) {
-        ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
-               heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
-      }
-#endif
-
-    } else if (heap()->lo_space()->Contains(obj)) {
-      // Don't move objects in the large object space.
-      return;
-
-    } else {
-#ifdef DEBUG
-      PagedSpaces spaces;
-      PagedSpace* original_space = spaces.next();
-      while (original_space != NULL) {
-        if (original_space->Contains(obj)) break;
-        original_space = spaces.next();
-      }
-      ASSERT(original_space != NULL);
-#endif
-      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
-      ASSERT(original_space->Contains(new_addr));
-      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
-             original_space->MCSpaceOffsetForAddress(old_addr));
-    }
-
-    *p = HeapObject::FromAddress(new_addr);
-
-#ifdef DEBUG
-    if (FLAG_gc_verbose) {
-      PrintF("update %p : %p -> %p\n",
-             reinterpret_cast<Address>(p), old_addr, new_addr);
-    }
-#endif
-  }
-
-  Heap* heap_;
-};
-
-
-void MarkCompactCollector::UpdatePointers() {
-#ifdef DEBUG
-  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
-  state_ = UPDATE_POINTERS;
-#endif
-  UpdatingVisitor updating_visitor(heap());
-  heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
-      &updating_visitor);
-  heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
-  heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
-
-  // Update the pointer to the head of the weak list of global contexts.
-  updating_visitor.VisitPointer(&heap()->global_contexts_list_);
-
-  LiveObjectList::IterateElements(&updating_visitor);
-
-  int live_maps_size = IterateLiveObjects(
-      heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_pointer_olds_size = IterateLiveObjects(
-      heap()->old_pointer_space(),
-      &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_data_olds_size = IterateLiveObjects(
-      heap()->old_data_space(),
-      &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_codes_size = IterateLiveObjects(
-      heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_cells_size = IterateLiveObjects(
-      heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_news_size = IterateLiveObjects(
-      heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
-
-  // Large objects do not move, the map word can be updated directly.
-  LargeObjectIterator it(heap()->lo_space());
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
-    UpdatePointersInNewObject(obj);
-  }
-
-  USE(live_maps_size);
-  USE(live_pointer_olds_size);
-  USE(live_data_olds_size);
-  USE(live_codes_size);
-  USE(live_cells_size);
-  USE(live_news_size);
-  ASSERT(live_maps_size == live_map_objects_size_);
-  ASSERT(live_data_olds_size == live_old_data_objects_size_);
-  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
-  ASSERT(live_codes_size == live_code_objects_size_);
-  ASSERT(live_cells_size == live_cell_objects_size_);
-  ASSERT(live_news_size == live_young_objects_size_);
-}
-
-
-int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
-  // Keep old map pointers
-  Map* old_map = obj->map();
-  ASSERT(old_map->IsHeapObject());
-
-  Address forwarded = GetForwardingAddressInOldSpace(old_map);
-
-  ASSERT(heap()->map_space()->Contains(old_map));
-  ASSERT(heap()->map_space()->Contains(forwarded));
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
-           forwarded);
-  }
-#endif
-  // Update the map pointer.
-  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
-
-  // We have to compute the object size relying on the old map because
-  // map objects are not relocated yet.
-  int obj_size = obj->SizeFromMap(old_map);
-
-  // Update pointers in the object body.
-  UpdatingVisitor updating_visitor(heap());
-  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
-  return obj_size;
-}
-
-
-int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
-  // Decode the map pointer.
-  MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
-  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
-  // At this point, the first word of map_addr is also encoded, cannot
-  // cast it to Map* using Map::cast.
-  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
-  int obj_size = obj->SizeFromMap(map);
-  InstanceType type = map->instance_type();
-
-  // Update map pointer.
-  Address new_map_addr = GetForwardingAddressInOldSpace(map);
-  int offset = encoding.DecodeOffset();
-  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
-
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("update %p : %p -> %p\n", obj->address(),
-           map_addr, new_map_addr);
-  }
-#endif
-
-  // Update pointers in the object body.
-  UpdatingVisitor updating_visitor(heap());
-  obj->IterateBody(type, obj_size, &updating_visitor);
-  return obj_size;
-}
-
-
-Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
-  // Object should either in old or map space.
-  MapWord encoding = obj->map_word();
-
-  // Offset to the first live object's forwarding address.
-  int offset = encoding.DecodeOffset();
-  Address obj_addr = obj->address();
-
-  // Find the first live object's forwarding address.
-  Page* p = Page::FromAddress(obj_addr);
-  Address first_forwarded = p->mc_first_forwarded;
-
-  // Page start address of forwarded address.
-  Page* forwarded_page = Page::FromAddress(first_forwarded);
-  int forwarded_offset = forwarded_page->Offset(first_forwarded);
-
-  // Find end of allocation in the page of first_forwarded.
-  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
-
-  // Check if current object's forward pointer is in the same page
-  // as the first live object's forwarding pointer
-  if (forwarded_offset + offset < mc_top_offset) {
-    // In the same page.
-    return first_forwarded + offset;
-  }
-
-  // Must be in the next page, NOTE: this may cross chunks.
-  Page* next_page = forwarded_page->next_page();
-  ASSERT(next_page->is_valid());
-
-  offset -= (mc_top_offset - forwarded_offset);
-  offset += Page::kObjectStartOffset;
-
-  ASSERT_PAGE_OFFSET(offset);
-  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
-
-  return next_page->OffsetToAddress(offset);
-}
-
-
-// -------------------------------------------------------------------------
-// Phase 4: Relocate objects
-
-void MarkCompactCollector::RelocateObjects() {
-#ifdef DEBUG
-  ASSERT(state_ == UPDATE_POINTERS);
-  state_ = RELOCATE_OBJECTS;
-#endif
-  // Relocates objects, always relocate map objects first. Relocating
-  // objects in other space relies on map objects to get object size.
-  int live_maps_size = IterateLiveObjects(
-      heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
-  int live_pointer_olds_size = IterateLiveObjects(
-      heap()->old_pointer_space(),
-      &MarkCompactCollector::RelocateOldPointerObject);
-  int live_data_olds_size = IterateLiveObjects(
-      heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
-  int live_codes_size = IterateLiveObjects(
-      heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
-  int live_cells_size = IterateLiveObjects(
-      heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
-  int live_news_size = IterateLiveObjects(
-      heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
-
-  USE(live_maps_size);
-  USE(live_pointer_olds_size);
-  USE(live_data_olds_size);
-  USE(live_codes_size);
-  USE(live_cells_size);
-  USE(live_news_size);
-  ASSERT(live_maps_size == live_map_objects_size_);
-  ASSERT(live_data_olds_size == live_old_data_objects_size_);
-  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
-  ASSERT(live_codes_size == live_code_objects_size_);
-  ASSERT(live_cells_size == live_cell_objects_size_);
-  ASSERT(live_news_size == live_young_objects_size_);
-
-  // Flip from and to spaces
-  heap()->new_space()->Flip();
-
-  heap()->new_space()->MCCommitRelocationInfo();
-
-  // Set age_mark to bottom in to space
-  Address mark = heap()->new_space()->bottom();
-  heap()->new_space()->set_age_mark(mark);
-
-  PagedSpaces spaces;
-  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
-    space->MCCommitRelocationInfo();
-
-  heap()->CheckNewSpaceExpansionCriteria();
-  heap()->IncrementYoungSurvivorsCounter(live_news_size);
-}
-
-
-int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
-  // Recover map pointer.
-  MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
-  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
-  // Get forwarding address before resetting map pointer
-  Address new_addr = GetForwardingAddressInOldSpace(obj);
-
-  // Reset map pointer.  The meta map object may not be copied yet so
-  // Map::cast does not yet work.
-  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
-
-  Address old_addr = obj->address();
-
-  if (new_addr != old_addr) {
-    // Move contents.
-    heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
-                                                   old_addr,
-                                                   Map::kSize);
-  }
-
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("relocate %p -> %p\n", old_addr, new_addr);
-  }
-#endif
-
-  return Map::kSize;
-}
-
-
-static inline int RestoreMap(HeapObject* obj,
-                             PagedSpace* space,
-                             Address new_addr,
-                             Address map_addr) {
-  // This must be a non-map object, and the function relies on the
-  // assumption that the Map space is compacted before the other paged
-  // spaces (see RelocateObjects).
-
-  // Reset map pointer.
-  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
-
-  int obj_size = obj->Size();
-  ASSERT_OBJECT_SIZE(obj_size);
-
-  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
-         space->MCSpaceOffsetForAddress(obj->address()));
-
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
-  }
-#endif
-
-  return obj_size;
-}
-
-
-int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
-                                                   PagedSpace* space) {
-  // Recover map pointer.
-  MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
-  ASSERT(heap()->map_space()->Contains(map_addr));
-
-  // Get forwarding address before resetting map pointer.
-  Address new_addr = GetForwardingAddressInOldSpace(obj);
-
-  // Reset the map pointer.
-  int obj_size = RestoreMap(obj, space, new_addr, map_addr);
-
-  Address old_addr = obj->address();
-
-  if (new_addr != old_addr) {
-    // Move contents.
-    if (space == heap()->old_data_space()) {
-      heap()->MoveBlock(new_addr, old_addr, obj_size);
-    } else {
-      heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
-                                                     old_addr,
-                                                     obj_size);
-    }
-  }
-
-  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
-
-  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
-  if (copied_to->IsSharedFunctionInfo()) {
-    PROFILE(heap()->isolate(),
-            SharedFunctionInfoMoveEvent(old_addr, new_addr));
-  }
-  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
-  return obj_size;
-}
-
-
-int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
-}
-
-
-int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap()->old_data_space());
-}
-
-
-int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap()->cell_space());
-}
-
-
-int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
-  // Recover map pointer.
-  MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
-  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
-  // Get forwarding address before resetting map pointer
-  Address new_addr = GetForwardingAddressInOldSpace(obj);
-
-  // Reset the map pointer.
-  int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
-
-  Address old_addr = obj->address();
-
-  if (new_addr != old_addr) {
-    // Move contents.
-    heap()->MoveBlock(new_addr, old_addr, obj_size);
-  }
-
-  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
-  if (copied_to->IsCode()) {
-    // May also update inline cache target.
-    Code::cast(copied_to)->Relocate(new_addr - old_addr);
-    // Notify the logger that compiled code has moved.
-    PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
-  }
-  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
-  return obj_size;
-}
-
-
-int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
-  int obj_size = obj->Size();
-
-  // Get forwarding address
-  Address old_addr = obj->address();
-  int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
-
-  Address new_addr =
-    Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
-
-#ifdef DEBUG
-  if (heap()->new_space()->FromSpaceContains(new_addr)) {
-    ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
-           heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
-  } else {
-    ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
-           heap()->TargetSpace(obj) == heap()->old_data_space());
-  }
-#endif
-
-  // New and old addresses cannot overlap.
-  if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
-    heap()->CopyBlock(new_addr, old_addr, obj_size);
-  } else {
-    heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
-                                                   old_addr,
-                                                   obj_size);
-  }
-
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("relocate %p -> %p\n", old_addr, new_addr);
-  }
-#endif
-
-  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
-  if (copied_to->IsSharedFunctionInfo()) {
-    PROFILE(heap()->isolate(),
-            SharedFunctionInfoMoveEvent(old_addr, new_addr));
-  }
-  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
-  return obj_size;
+  // Deallocate unmarked objects and clear marked bits for marked objects.
+  heap_->lo_space()->FreeUnmarkedObjects();
 }
 
 
@@ -3359,6 +3976,9 @@
 }
 
 
+// TODO(1466): ReportDeleteIfNeeded is currently not called.
+// Our profiling tools do not expect intersections between
+// code objects. We should either re-enable it or change our tools.
 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                 Isolate* isolate) {
 #ifdef ENABLE_GDB_JIT_INTERFACE
@@ -3372,16 +3992,148 @@
 }
 
 
-int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
-  MapWord map_word = obj->map_word();
-  map_word.ClearMark();
-  return obj->SizeFromMap(map_word.ToMap());
+void MarkCompactCollector::Initialize() {
+  StaticMarkingVisitor::Initialize();
 }
 
 
-void MarkCompactCollector::Initialize() {
-  StaticPointersToNewGenUpdatingVisitor::Initialize();
-  StaticMarkingVisitor::Initialize();
+bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
+  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
+}
+
+
+bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
+                        SlotsBuffer** buffer_address,
+                        SlotType type,
+                        Address addr,
+                        AdditionMode mode) {
+  SlotsBuffer* buffer = *buffer_address;
+  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
+    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+      allocator->DeallocateChain(buffer_address);
+      return false;
+    }
+    buffer = allocator->AllocateBuffer(buffer);
+    *buffer_address = buffer;
+  }
+  ASSERT(buffer->HasSpaceForTypedSlot());
+  buffer->Add(reinterpret_cast<ObjectSlot>(type));
+  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
+  return true;
+}
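
A typed slot is thus recorded as two consecutive entries: the SlotType tag stored as a fake pointer, followed by the real address. Because the zero page is never mapped, any entry below NUMBER_OF_SLOT_TYPES must be a tag. A standalone sketch of the encoding and the matching iteration (the addresses are made up):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum SlotType { EMBEDDED_OBJECT_SLOT, CODE_TARGET_SLOT,
                    NUMBER_OF_SLOT_TYPES };

    int main() {
      std::vector<uintptr_t> slots;
      const uintptr_t plain_slot = 0x10a0b0c0u;  // hypothetical slot address
      const uintptr_t reloc_pc = 0x20d0e0f0u;    // hypothetical reloc pc
      slots.push_back(plain_slot);               // untyped: one entry
      slots.push_back(CODE_TARGET_SLOT);         // typed: the tag entry ...
      slots.push_back(reloc_pc);                 // ... then the address
      for (size_t i = 0; i < slots.size(); i++) {
        if (slots[i] < NUMBER_OF_SLOT_TYPES) {   // the IsTypedSlot test
          printf("typed slot: type %d, addr 0x%lx\n",
                 static_cast<int>(slots[i]),
                 static_cast<unsigned long>(slots[i + 1]));
          i++;  // consume the paired address entry
        } else {
          printf("untyped slot at 0x%lx\n",
                 static_cast<unsigned long>(slots[i]));
        }
      }
      return 0;
    }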
+
+
+static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
+  if (RelocInfo::IsCodeTarget(rmode)) {
+    return SlotsBuffer::CODE_TARGET_SLOT;
+  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
+    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
+  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
+    return SlotsBuffer::DEBUG_TARGET_SLOT;
+  } else if (RelocInfo::IsJSReturn(rmode)) {
+    return SlotsBuffer::JS_RETURN_SLOT;
+  }
+  UNREACHABLE();
+  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
+}
+
+
+void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
+  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  if (target_page->IsEvacuationCandidate() &&
+      (rinfo->host() == NULL ||
+       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            target_page->slots_buffer_address(),
+                            SlotTypeForRMode(rinfo->rmode()),
+                            rinfo->pc(),
+                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
+      EvictEvacuationCandidate(target_page);
+    }
+  }
+}
+
+
+void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
+  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  if (target_page->IsEvacuationCandidate() &&
+      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            target_page->slots_buffer_address(),
+                            SlotsBuffer::CODE_ENTRY_SLOT,
+                            slot,
+                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
+      EvictEvacuationCandidate(target_page);
+    }
+  }
+}
+
+
+static inline SlotsBuffer::SlotType DecodeSlotType(
+    SlotsBuffer::ObjectSlot slot) {
+  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
+}
+
+
+void SlotsBuffer::UpdateSlots(Heap* heap) {
+  PointersUpdatingVisitor v(heap);
+
+  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+    ObjectSlot slot = slots_[slot_idx];
+    if (!IsTypedSlot(slot)) {
+      PointersUpdatingVisitor::UpdateSlot(heap, slot);
+    } else {
+      ++slot_idx;
+      ASSERT(slot_idx < idx_);
+      UpdateSlot(&v,
+                 DecodeSlotType(slot),
+                 reinterpret_cast<Address>(slots_[slot_idx]));
+    }
+  }
+}
+
+
+void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
+  PointersUpdatingVisitor v(heap);
+
+  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+    ObjectSlot slot = slots_[slot_idx];
+    if (!IsTypedSlot(slot)) {
+      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
+        PointersUpdatingVisitor::UpdateSlot(heap, slot);
+      }
+    } else {
+      ++slot_idx;
+      ASSERT(slot_idx < idx_);
+      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
+      if (!IsOnInvalidatedCodeObject(pc)) {
+        UpdateSlot(&v,
+                   DecodeSlotType(slot),
+                   reinterpret_cast<Address>(slots_[slot_idx]));
+      }
+    }
+  }
+}
+
+
+SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
+  return new SlotsBuffer(next_buffer);
+}
+
+
+void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
+  delete buffer;
+}
+
+
+void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
+  SlotsBuffer* buffer = *buffer_address;
+  while (buffer != NULL) {
+    SlotsBuffer* next_buffer = buffer->next();
+    DeallocateBuffer(buffer);
+    buffer = next_buffer;
+  }
+  *buffer_address = NULL;
 }
 
 
diff --git a/src/mark-compact.h b/src/mark-compact.h
index f72c813..66ffd19 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_MARK_COMPACT_H_
 #define V8_MARK_COMPACT_H_
 
+#include "compiler-intrinsics.h"
 #include "spaces.h"
 
 namespace v8 {
@@ -45,54 +46,340 @@
 class RootMarkingVisitor;
 
 
-// ----------------------------------------------------------------------------
-// Marking stack for tracing live objects.
-
-class MarkingStack {
+class Marking {
  public:
-  MarkingStack() : low_(NULL), top_(NULL), high_(NULL), overflowed_(false) { }
+  explicit Marking(Heap* heap)
+      : heap_(heap) {
+  }
+
+  static inline MarkBit MarkBitFrom(Address addr);
+
+  static inline MarkBit MarkBitFrom(HeapObject* obj) {
+    return MarkBitFrom(reinterpret_cast<Address>(obj));
+  }
+
+  // Impossible markbits: 01
+  static const char* kImpossibleBitPattern;
+  static inline bool IsImpossible(MarkBit mark_bit) {
+    return !mark_bit.Get() && mark_bit.Next().Get();
+  }
+
+  // Black markbits: 10 - this is required by the sweeper.
+  static const char* kBlackBitPattern;
+  static inline bool IsBlack(MarkBit mark_bit) {
+    return mark_bit.Get() && !mark_bit.Next().Get();
+  }
+
+  // White markbits: 00 - this is required by the mark bit clearer.
+  static const char* kWhiteBitPattern;
+  static inline bool IsWhite(MarkBit mark_bit) {
+    return !mark_bit.Get();
+  }
+
+  // Grey markbits: 11
+  static const char* kGreyBitPattern;
+  static inline bool IsGrey(MarkBit mark_bit) {
+    return mark_bit.Get() && mark_bit.Next().Get();
+  }
+
+  static inline void MarkBlack(MarkBit mark_bit) {
+    mark_bit.Set();
+    mark_bit.Next().Clear();
+  }
+
+  static inline void BlackToGrey(MarkBit markbit) {
+    markbit.Next().Set();
+  }
+
+  static inline void WhiteToGrey(MarkBit markbit) {
+    markbit.Set();
+    markbit.Next().Set();
+  }
+
+  static inline void GreyToBlack(MarkBit markbit) {
+    markbit.Next().Clear();
+  }
+
+  static inline void BlackToGrey(HeapObject* obj) {
+    BlackToGrey(MarkBitFrom(obj));
+  }
+
+  static inline void AnyToGrey(MarkBit markbit) {
+    markbit.Set();
+    markbit.Next().Set();
+  }
+
+  // Returns true if the object whose mark is transferred is marked black.
+  bool TransferMark(Address old_start, Address new_start);
+
+#ifdef DEBUG
+  enum ObjectColor {
+    BLACK_OBJECT,
+    WHITE_OBJECT,
+    GREY_OBJECT,
+    IMPOSSIBLE_COLOR
+  };
+
+  static const char* ColorName(ObjectColor color) {
+    switch (color) {
+      case BLACK_OBJECT: return "black";
+      case WHITE_OBJECT: return "white";
+      case GREY_OBJECT: return "grey";
+      case IMPOSSIBLE_COLOR: return "impossible";
+    }
+    return "error";
+  }
+
+  static ObjectColor Color(HeapObject* obj) {
+    return Color(Marking::MarkBitFrom(obj));
+  }
+
+  static ObjectColor Color(MarkBit mark_bit) {
+    if (IsBlack(mark_bit)) return BLACK_OBJECT;
+    if (IsWhite(mark_bit)) return WHITE_OBJECT;
+    if (IsGrey(mark_bit)) return GREY_OBJECT;
+    UNREACHABLE();
+    return IMPOSSIBLE_COLOR;
+  }
+#endif
+
+  // Returns true if the transferred color is black.
+  INLINE(static bool TransferColor(HeapObject* from,
+                                   HeapObject* to)) {
+    MarkBit from_mark_bit = MarkBitFrom(from);
+    MarkBit to_mark_bit = MarkBitFrom(to);
+    bool is_black = false;
+    if (from_mark_bit.Get()) {
+      to_mark_bit.Set();
+      is_black = true;  // Looks black so far.
+    }
+    if (from_mark_bit.Next().Get()) {
+      to_mark_bit.Next().Set();
+      is_black = false;  // Was actually grey.
+    }
+    return is_black;
+  }
+
+ private:
+  Heap* heap_;
+};
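
The two mark bits per object encode four colors: 00 white (unvisited), 10 black (scanned), 11 grey (queued), 01 impossible. A standalone sketch of the lifecycle; the TwoBits struct stands in for a MarkBit and its Next() bit:

    #include <cassert>
    #include <cstring>

    struct TwoBits { bool bit; bool next; };  // MarkBit and MarkBit::Next()

    static const char* Color(TwoBits b) {
      if (b.bit) return b.next ? "grey" : "black";
      return b.next ? "impossible" : "white";
    }

    int main() {
      TwoBits mark = {false, false};     // objects start out white
      mark.bit = mark.next = true;       // WhiteToGrey: discovered, queued
      assert(strcmp(Color(mark), "grey") == 0);
      mark.next = false;                 // GreyToBlack: body scanned
      assert(strcmp(Color(mark), "black") == 0);
      return 0;
    }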
+
+// ----------------------------------------------------------------------------
+// Marking deque for tracing live objects.
+
+class MarkingDeque {
+ public:
+  MarkingDeque()
+      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
 
   void Initialize(Address low, Address high) {
-    top_ = low_ = reinterpret_cast<HeapObject**>(low);
-    high_ = reinterpret_cast<HeapObject**>(high);
+    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
+    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
+    array_ = obj_low;
+    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
+    top_ = bottom_ = 0;
     overflowed_ = false;
   }
 
-  bool is_full() const { return top_ >= high_; }
+  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
 
-  bool is_empty() const { return top_ <= low_; }
+  inline bool IsEmpty() { return top_ == bottom_; }
 
   bool overflowed() const { return overflowed_; }
 
-  void clear_overflowed() { overflowed_ = false; }
+  void ClearOverflowed() { overflowed_ = false; }
+
+  void SetOverflowed() { overflowed_ = true; }
 
   // Push the (marked) object on the marking stack if there is room,
   // otherwise mark the object as overflowed and wait for a rescan of the
   // heap.
-  void Push(HeapObject* object) {
-    CHECK(object->IsHeapObject());
-    if (is_full()) {
-      object->SetOverflow();
-      overflowed_ = true;
+  inline void PushBlack(HeapObject* object) {
+    ASSERT(object->IsHeapObject());
+    if (IsFull()) {
+      Marking::BlackToGrey(object);
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
+      SetOverflowed();
     } else {
-      *(top_++) = object;
+      array_[top_] = object;
+      top_ = ((top_ + 1) & mask_);
     }
   }
 
-  HeapObject* Pop() {
-    ASSERT(!is_empty());
-    HeapObject* object = *(--top_);
-    CHECK(object->IsHeapObject());
+  inline void PushGrey(HeapObject* object) {
+    ASSERT(object->IsHeapObject());
+    if (IsFull()) {
+      SetOverflowed();
+    } else {
+      array_[top_] = object;
+      top_ = ((top_ + 1) & mask_);
+    }
+  }
+
+  inline HeapObject* Pop() {
+    ASSERT(!IsEmpty());
+    top_ = ((top_ - 1) & mask_);
+    HeapObject* object = array_[top_];
+    ASSERT(object->IsHeapObject());
     return object;
   }
 
+  inline void UnshiftGrey(HeapObject* object) {
+    ASSERT(object->IsHeapObject());
+    if (IsFull()) {
+      SetOverflowed();
+    } else {
+      bottom_ = ((bottom_ - 1) & mask_);
+      array_[bottom_] = object;
+    }
+  }
+
+  HeapObject** array() { return array_; }
+  int bottom() { return bottom_; }
+  int top() { return top_; }
+  int mask() { return mask_; }
+  void set_top(int top) { top_ = top; }
+
  private:
-  HeapObject** low_;
-  HeapObject** top_;
-  HeapObject** high_;
+  HeapObject** array_;
+  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque is
+  // empty when top_ == bottom_.  It is full when (top_ + 1) == bottom_
+  // (mod mask_ + 1).
+  int top_;
+  int bottom_;
+  int mask_;
   bool overflowed_;
 
-  DISALLOW_COPY_AND_ASSIGN(MarkingStack);
+  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
+};
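
Because the capacity is rounded down to a power of two, wrap-around is a single mask instead of a modulo. Black objects are pushed and popped at top_ (LIFO), while UnshiftGrey grows the bottom_ end. A minimal standalone model of the index arithmetic:

    #include <cassert>

    int main() {
      const int kCapacity = 8;         // must be a power of two
      const int mask = kCapacity - 1;  // mirrors mask_ above
      int array[kCapacity];
      int top = 0, bottom = 0;

      // PushBlack/PushGrey: write at top, then advance it modulo kCapacity.
      for (int v = 0; v < 5; v++) { array[top] = v; top = (top + 1) & mask; }
      // Pop: retreat top, then read -- the last pushed object comes out first.
      top = (top - 1) & mask;
      assert(array[top] == 4);
      // UnshiftGrey: retreat bottom and write at the other end.
      bottom = (bottom - 1) & mask;
      array[bottom] = 99;
      // IsFull would be ((top + 1) & mask) == bottom; not the case here.
      assert(((top + 1) & mask) != bottom);
      return 0;
    }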
+
+
+class SlotsBufferAllocator {
+ public:
+  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
+  void DeallocateBuffer(SlotsBuffer* buffer);
+
+  void DeallocateChain(SlotsBuffer** buffer_address);
+};
+
+
+// SlotsBuffer records a sequence of slots that has to be updated
+// after live objects have been relocated from evacuation candidates.
+// All slots are either untyped or typed:
+//    - Untyped slots are expected to contain a tagged object pointer.
+//      They are recorded as an address.
+//    - Typed slots are expected to contain an encoded pointer to a heap
+//      object, where the encoding depends on the type of the slot.
+//      They are recorded as a pair (SlotType, slot address).
+// We assume that the zero page is never mapped; this allows us to distinguish
+// untyped slots from typed slots during iteration by a simple comparison:
+// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
+// it is the first element of a typed slot's pair.
+class SlotsBuffer {
+ public:
+  typedef Object** ObjectSlot;
+
+  explicit SlotsBuffer(SlotsBuffer* next_buffer)
+      : idx_(0), chain_length_(1), next_(next_buffer) {
+    if (next_ != NULL) {
+      chain_length_ = next_->chain_length_ + 1;
+    }
+  }
+
+  ~SlotsBuffer() {
+  }
+
+  void Add(ObjectSlot slot) {
+    ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
+    slots_[idx_++] = slot;
+  }
+
+  enum SlotType {
+    EMBEDDED_OBJECT_SLOT,
+    RELOCATED_CODE_OBJECT,
+    CODE_TARGET_SLOT,
+    CODE_ENTRY_SLOT,
+    DEBUG_TARGET_SLOT,
+    JS_RETURN_SLOT,
+    NUMBER_OF_SLOT_TYPES
+  };
+
+  void UpdateSlots(Heap* heap);
+
+  void UpdateSlotsWithFilter(Heap* heap);
+
+  SlotsBuffer* next() { return next_; }
+
+  static int SizeOfChain(SlotsBuffer* buffer) {
+    if (buffer == NULL) return 0;
+    return static_cast<int>(buffer->idx_ +
+                            (buffer->chain_length_ - 1) * kNumberOfElements);
+  }
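+
+  // E.g. (illustrative): a chain of three buffers whose head holds 5 slots
+  // yields SizeOfChain == 5 + 2 * 1021 == 2047, since every buffer behind
+  // the head is full.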
+
+  inline bool IsFull() {
+    return idx_ == kNumberOfElements;
+  }
+
+  inline bool HasSpaceForTypedSlot() {
+    return idx_ < kNumberOfElements - 1;
+  }
+
+  static void UpdateSlotsRecordedIn(Heap* heap,
+                                    SlotsBuffer* buffer,
+                                    bool code_slots_filtering_required) {
+    while (buffer != NULL) {
+      if (code_slots_filtering_required) {
+        buffer->UpdateSlotsWithFilter(heap);
+      } else {
+        buffer->UpdateSlots(heap);
+      }
+      buffer = buffer->next();
+    }
+  }
+
+  enum AdditionMode {
+    FAIL_ON_OVERFLOW,
+    IGNORE_OVERFLOW
+  };
+
+  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
+    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
+  }
+
+  static bool AddTo(SlotsBufferAllocator* allocator,
+                    SlotsBuffer** buffer_address,
+                    ObjectSlot slot,
+                    AdditionMode mode) {
+    SlotsBuffer* buffer = *buffer_address;
+    if (buffer == NULL || buffer->IsFull()) {
+      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+        allocator->DeallocateChain(buffer_address);
+        return false;
+      }
+      buffer = allocator->AllocateBuffer(buffer);
+      *buffer_address = buffer;
+    }
+    buffer->Add(slot);
+    return true;
+  }
+
+  static bool IsTypedSlot(ObjectSlot slot);
+
+  static bool AddTo(SlotsBufferAllocator* allocator,
+                    SlotsBuffer** buffer_address,
+                    SlotType type,
+                    Address addr,
+                    AdditionMode mode);
+
+  static const int kNumberOfElements = 1021;
+
+ private:
+  static const int kChainLengthThreshold = 15;
+
+  intptr_t idx_;
+  intptr_t chain_length_;
+  SlotsBuffer* next_;
+  ObjectSlot slots_[kNumberOfElements];
 };
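+
+// A recording sketch (illustrative, not part of this change): callers append
+// through AddTo so that a fresh buffer is chained on demand:
+//
+//   SlotsBufferAllocator allocator;
+//   SlotsBuffer* chain = NULL;
+//   if (!SlotsBuffer::AddTo(&allocator, &chain, slot,
+//                           SlotsBuffer::FAIL_ON_OVERFLOW)) {
+//     // The chain exceeded kChainLengthThreshold and was deallocated;
+//     // the collector falls back to rescanning the page (see
+//     // EvictEvacuationCandidate below).
+//   }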
 
 
@@ -102,9 +389,6 @@
 
 // -------------------------------------------------------------------------
 // Mark-Compact collector
-
-class OverflowedObjectsScanner;
-
 class MarkCompactCollector {
  public:
   // Type of functions to compute forwarding addresses of objects in
@@ -123,7 +407,7 @@
   // object from the forwarding address of the previous live object in the
   // page as input, and is updated to contain the offset to be used for the
   // next live object in the same page.  For spaces using a different
-  // encoding (ie, contiguous spaces), the offset parameter is ignored.
+  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
   typedef void (*EncodingFunction)(Heap* heap,
                                    HeapObject* old_object,
                                    int object_size,
@@ -136,15 +420,15 @@
   // Pointer to member function, used in IterateLiveObjects.
   typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
 
-  // Set the global force_compaction flag, it must be called before Prepare
-  // to take effect.
-  void SetForceCompaction(bool value) {
-    force_compaction_ = value;
-  }
-
+  // Set the global flags; this must be called before Prepare to take effect.
+  inline void SetFlags(int flags);
 
   static void Initialize();
 
+  void CollectEvacuationCandidates(PagedSpace* space);
+
+  void AddEvacuationCandidate(Page* p);
+
   // Prepares for GC by resetting relocation info in old and map spaces and
   // choosing spaces to compact.
   void Prepare(GCTracer* tracer);
@@ -152,23 +436,14 @@
   // Performs a global garbage collection.
   void CollectGarbage();
 
-  // True if the last full GC performed heap compaction.
-  bool HasCompacted() { return compacting_collection_; }
+  enum CompactionMode {
+    INCREMENTAL_COMPACTION,
+    NON_INCREMENTAL_COMPACTION
+  };
 
-  // True after the Prepare phase if the compaction is taking place.
-  bool IsCompacting() {
-#ifdef DEBUG
-    // For the purposes of asserts we don't want this to keep returning true
-    // after the collection is completed.
-    return state_ != IDLE && compacting_collection_;
-#else
-    return compacting_collection_;
-#endif
-  }
+  bool StartCompaction(CompactionMode mode);
 
-  // The count of the number of objects left marked at the end of the last
-  // completed full GC (expected to be zero).
-  int previous_marked_count() { return previous_marked_count_; }
+  void AbortCompaction();
 
   // During a full GC, there is a stack-allocated GCTracer that is used for
   // bookkeeping information.  Return a pointer to that tracer.
@@ -183,29 +458,101 @@
   // Determine type of object and emit deletion log event.
   static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
 
-  // Returns size of a possibly marked object.
-  static int SizeOfMarkedObject(HeapObject* obj);
-
   // Distinguishable invalid map encodings (for single word and multiple words)
   // that indicate free regions.
   static const uint32_t kSingleFreeEncoding = 0;
   static const uint32_t kMultiFreeEncoding = 1;
 
+  static inline bool IsMarked(Object* obj);
+
   inline Heap* heap() const { return heap_; }
 
   CodeFlusher* code_flusher() { return code_flusher_; }
   inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
   void EnableCodeFlushing(bool enable);
 
+  enum SweeperType {
+    CONSERVATIVE,
+    LAZY_CONSERVATIVE,
+    PRECISE
+  };
+
+#ifdef DEBUG
+  void VerifyMarkbitsAreClean();
+  static void VerifyMarkbitsAreClean(PagedSpace* space);
+  static void VerifyMarkbitsAreClean(NewSpace* space);
+#endif
+
+  // Sweep a single page from the given space conservatively.
+  // Returns the number of reclaimed bytes.
+  static intptr_t SweepConservatively(PagedSpace* space, Page* p);
+
+  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
+    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
+        ShouldSkipEvacuationSlotRecording();
+  }
+
+  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
+    return Page::FromAddress(reinterpret_cast<Address>(host))->
+        ShouldSkipEvacuationSlotRecording();
+  }
+
+  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
+    return Page::FromAddress(reinterpret_cast<Address>(obj))->
+        IsEvacuationCandidate();
+  }
+
+  void EvictEvacuationCandidate(Page* page) {
+    if (FLAG_trace_fragmentation) {
+      PrintF("Page %p is too popular. Disabling evacuation.\n",
+             reinterpret_cast<void*>(page));
+    }
+
+    // TODO(gc) If all evacuation candidates are too popular we
+    // should stop slots recording entirely.
+    page->ClearEvacuationCandidate();
+
+    // We were not collecting slots on this page that point
+    // to other evacuation candidates, so we have to
+    // rescan the page after evacuation to discover and update all
+    // pointers to evacuated objects.
+    if (page->owner()->identity() == OLD_DATA_SPACE) {
+      evacuation_candidates_.RemoveElement(page);
+    } else {
+      page->SetFlag(Page::RESCAN_ON_EVACUATION);
+    }
+  }
+
+  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
+  void RecordCodeEntrySlot(Address slot, Code* target);
+
+  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
+
+  void MigrateObject(Address dst,
+                     Address src,
+                     int size,
+                     AllocationSpace to_old_space);
+
+  bool TryPromoteObject(HeapObject* object, int object_size);
+
   inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
   inline void set_encountered_weak_maps(Object* weak_map) {
     encountered_weak_maps_ = weak_map;
   }
 
+  void InvalidateCode(Code* code);
+
+  void ClearMarkbits();
+
  private:
   MarkCompactCollector();
   ~MarkCompactCollector();
 
+  bool MarkInvalidatedCode();
+  void RemoveDeadInvalidatedCode();
+  void ProcessInvalidatedCode(ObjectVisitor* visitor);
+
 #ifdef DEBUG
   enum CollectorState {
     IDLE,
@@ -221,23 +568,32 @@
   CollectorState state_;
 #endif
 
-  // Global flag that forces a compaction.
-  bool force_compaction_;
+  // Global flag that forces sweeping to be precise, so we can traverse the
+  // heap.
+  bool sweep_precisely_;
 
-  // Global flag indicating whether spaces were compacted on the last GC.
-  bool compacting_collection_;
+  bool reduce_memory_footprint_;
 
-  // Global flag indicating whether spaces will be compacted on the next GC.
-  bool compact_on_next_gc_;
+  bool abort_incremental_marking_;
 
-  // The number of objects left marked at the end of the last completed full
-  // GC (expected to be zero).
-  int previous_marked_count_;
+  // True if we are collecting slots to perform evacuation from evacuation
+  // candidates.
+  bool compacting_;
+
+  bool was_marked_incrementally_;
+
+  bool collect_maps_;
+
+  bool flush_monomorphic_ics_;
 
   // A pointer to the current stack-allocated GC tracer object during a full
   // collection (NULL before and after).
   GCTracer* tracer_;
 
+  SlotsBufferAllocator slots_buffer_allocator_;
+
+  SlotsBuffer* migration_slots_buffer_;
+
   // Finishes GC, performs heap verification if enabled.
   void Finish();
 
@@ -270,13 +626,19 @@
   // Marking operations for objects reachable from roots.
   void MarkLiveObjects();
 
-  void MarkUnmarkedObject(HeapObject* obj);
+  void AfterMarking();
 
-  inline void MarkObject(HeapObject* obj) {
-    if (!obj->IsMarked()) MarkUnmarkedObject(obj);
-  }
+  // Marks the object black and pushes it on the marking stack.
+  // This is for non-incremental marking.
+  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
 
-  inline void SetMark(HeapObject* obj);
+  INLINE(bool MarkObjectWithoutPush(HeapObject* object));
+  INLINE(void MarkObjectAndPush(HeapObject* value));
+
+  // Marks the object black.  This is for non-incremental marking.
+  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
+
+  void ProcessNewlyMarkedObject(HeapObject* obj);
 
   // Creates back pointers for all map transitions, stores them in
   // the prototype field.  The original prototype pointers are restored
@@ -287,6 +649,7 @@
 
   // Mark a Map and its DescriptorArray together, skipping transitions.
   void MarkMapContents(Map* map);
+  void MarkAccessorPairSlot(HeapObject* accessors, int offset);
   void MarkDescriptorArray(DescriptorArray* descriptors);
 
   // Mark the heap roots and all objects reachable from them.
@@ -310,18 +673,18 @@
 
   // Mark objects reachable (transitively) from objects in the marking stack
   // or overflowed in the heap.
-  void ProcessMarkingStack();
+  void ProcessMarkingDeque();
 
   // Mark objects reachable (transitively) from objects in the marking
   // stack.  This function empties the marking stack, but may leave
   // overflowed objects in the heap, in which case the marking stack's
   // overflow flag will be set.
-  void EmptyMarkingStack();
+  void EmptyMarkingDeque();
 
   // Refill the marking stack with overflowed objects from the heap.  This
   // function either leaves the marking stack full or clears the overflow
   // flag on the marking stack.
-  void RefillMarkingStack();
+  void RefillMarkingDeque();
 
   // After reachable maps have been marked process per context object
   // literal map caches removing unmarked entries.
@@ -331,20 +694,17 @@
   // heap object.
   static bool IsUnmarkedHeapObject(Object** p);
 
-#ifdef DEBUG
-  void UpdateLiveObjectCount(HeapObject* obj);
-#endif
-
-  // We sweep the large object space in the same way whether we are
-  // compacting or not, because the large object space is never compacted.
-  void SweepLargeObjectSpace();
-
-  // Test whether a (possibly marked) object is a Map.
-  static inline bool SafeIsMap(HeapObject* object);
-
   // Map transitions from a live map to a dead map must be killed.
   // We replace them with a null descriptor, with the same key.
   void ClearNonLiveTransitions();
+  void ClearNonLivePrototypeTransitions(Map* map);
+  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
+
+  // Marking detaches initial maps from SharedFunctionInfo objects
+  // to make this reference weak. We need to reattach initial maps
+  // after collection. This is done either during the
+  // ClearNonLiveTransitions pass or by calling this function.
+  void ReattachInitialMaps();
 
   // Mark all values associated with reachable keys in weak maps encountered
   // so far.  This might push new object or even new weak maps onto the
@@ -358,164 +718,31 @@
 
   // -----------------------------------------------------------------------
   // Phase 2: Sweeping to clear mark bits and free non-live objects for
-  // a non-compacting collection, or else computing and encoding
-  // forwarding addresses for a compacting collection.
+  // a non-compacting collection.
   //
   //  Before: Live objects are marked and non-live objects are unmarked.
   //
-  //   After: (Non-compacting collection.)  Live objects are unmarked,
-  //          non-live regions have been added to their space's free
-  //          list.
+  //   After: Live objects are unmarked, non-live regions have been added to
+  //          their space's free list. Active eden semispace is compacted by
+  //          evacuation.
   //
-  //   After: (Compacting collection.)  The forwarding address of live
-  //          objects in the paged spaces is encoded in their map word
-  //          along with their (non-forwarded) map pointer.
-  //
-  //          The forwarding address of live objects in the new space is
-  //          written to their map word's offset in the inactive
-  //          semispace.
-  //
-  //          Bookkeeping data is written to the page header of
-  //          eached paged-space page that contains live objects after
-  //          compaction:
-  //
-  //          The allocation watermark field is used to track the
-  //          relocation top address, the address of the first word
-  //          after the end of the last live object in the page after
-  //          compaction.
-  //
-  //          The Page::mc_page_index field contains the zero-based index of the
-  //          page in its space.  This word is only used for map space pages, in
-  //          order to encode the map addresses in 21 bits to free 11
-  //          bits per map word for the forwarding address.
-  //
-  //          The Page::mc_first_forwarded field contains the (nonencoded)
-  //          forwarding address of the first live object in the page.
-  //
-  //          In both the new space and the paged spaces, a linked list
-  //          of live regions is constructed (linked through
-  //          pointers in the non-live region immediately following each
-  //          live region) to speed further passes of the collector.
-
-  // Encodes forwarding addresses of objects in compactable parts of the
-  // heap.
-  void EncodeForwardingAddresses();
-
-  // Encodes the forwarding addresses of objects in new space.
-  void EncodeForwardingAddressesInNewSpace();
-
-  // Function template to encode the forwarding addresses of objects in
-  // paged spaces, parameterized by allocation and non-live processing
-  // functions.
-  template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
-  void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
-
-  // Iterates live objects in a space, passes live objects
-  // to a callback function which returns the heap size of the object.
-  // Returns the number of live objects iterated.
-  int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f);
-  int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f);
-
-  // Iterates the live objects between a range of addresses, returning the
-  // number of live objects.
-  int IterateLiveObjectsInRange(Address start, Address end,
-                                LiveObjectCallback size_func);
 
   // If we are not compacting the heap, we simply sweep the spaces except
   // for the large object space, clearing mark bits and adding unmarked
   // regions to each space's free list.
   void SweepSpaces();
 
-  // -----------------------------------------------------------------------
-  // Phase 3: Updating pointers in live objects.
-  //
-  //  Before: Same as after phase 2 (compacting collection).
-  //
-  //   After: All pointers in live objects, including encoded map
-  //          pointers, are updated to point to their target's new
-  //          location.
+  void EvacuateNewSpace();
 
-  friend class UpdatingVisitor;  // helper for updating visited objects
+  void EvacuateLiveObjectsFromPage(Page* p);
 
-  // Updates pointers in all spaces.
-  void UpdatePointers();
+  void EvacuatePages();
 
-  // Updates pointers in an object in new space.
-  // Returns the heap size of the object.
-  int UpdatePointersInNewObject(HeapObject* obj);
+  void EvacuateNewSpaceAndCandidates();
 
-  // Updates pointers in an object in old spaces.
-  // Returns the heap size of the object.
-  int UpdatePointersInOldObject(HeapObject* obj);
-
-  // Calculates the forwarding address of an object in an old space.
-  static Address GetForwardingAddressInOldSpace(HeapObject* obj);
-
-  // -----------------------------------------------------------------------
-  // Phase 4: Relocating objects.
-  //
-  //  Before: Pointers to live objects are updated to point to their
-  //          target's new location.
-  //
-  //   After: Objects have been moved to their new addresses.
-
-  // Relocates objects in all spaces.
-  void RelocateObjects();
-
-  // Converts a code object's inline target to addresses, convention from
-  // address to target happens in the marking phase.
-  int ConvertCodeICTargetToAddress(HeapObject* obj);
-
-  // Relocate a map object.
-  int RelocateMapObject(HeapObject* obj);
-
-  // Relocates an old object.
-  int RelocateOldPointerObject(HeapObject* obj);
-  int RelocateOldDataObject(HeapObject* obj);
-
-  // Relocate a property cell object.
-  int RelocateCellObject(HeapObject* obj);
-
-  // Helper function.
-  inline int RelocateOldNonCodeObject(HeapObject* obj,
-                                      PagedSpace* space);
-
-  // Relocates an object in the code space.
-  int RelocateCodeObject(HeapObject* obj);
-
-  // Copy a new object.
-  int RelocateNewObject(HeapObject* obj);
+  void SweepSpace(PagedSpace* space, SweeperType sweeper);
 
 #ifdef DEBUG
-  // -----------------------------------------------------------------------
-  // Debugging variables, functions and classes
-  // Counters used for debugging the marking phase of mark-compact or
-  // mark-sweep collection.
-
-  // Size of live objects in Heap::to_space_.
-  int live_young_objects_size_;
-
-  // Size of live objects in Heap::old_pointer_space_.
-  int live_old_pointer_objects_size_;
-
-  // Size of live objects in Heap::old_data_space_.
-  int live_old_data_objects_size_;
-
-  // Size of live objects in Heap::code_space_.
-  int live_code_objects_size_;
-
-  // Size of live objects in Heap::map_space_.
-  int live_map_objects_size_;
-
-  // Size of live objects in Heap::cell_space_.
-  int live_cell_objects_size_;
-
-  // Size of live objects in Heap::lo_space_.
-  int live_lo_objects_size_;
-
-  // Number of live bytes in this collection.
-  int live_bytes_;
-
   friend class MarkObjectVisitor;
   static void VisitObject(HeapObject* obj);
 
@@ -524,15 +751,19 @@
 #endif
 
   Heap* heap_;
-  MarkingStack marking_stack_;
+  MarkingDeque marking_deque_;
   CodeFlusher* code_flusher_;
   Object* encountered_weak_maps_;
 
+  List<Page*> evacuation_candidates_;
+  List<Code*> invalidated_code_;
+
   friend class Heap;
-  friend class OverflowedObjectsScanner;
 };
 
 
+const char* AllocationSpaceName(AllocationSpace space);
+
 } }  // namespace v8::internal
 
 #endif  // V8_MARK_COMPACT_H_
diff --git a/src/math.js b/src/math.js
index b5a6d18..8e735c4 100644
--- a/src/math.js
+++ b/src/math.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,15 +29,15 @@
 // Keep reference to original values of some global properties.  This
 // has the added benefit that the code in this file is isolated from
 // changes to these properties.
-const $floor = MathFloor;
-const $random = MathRandom;
-const $abs = MathAbs;
+var $floor = MathFloor;
+var $random = MathRandom;
+var $abs = MathAbs;
 
 // Instance class name can only be set on functions. That is the only
 // purpose for MathConstructor.
 function MathConstructor() {}
 %FunctionSetInstanceClassName(MathConstructor, 'Math');
-const $Math = new MathConstructor();
+var $Math = new MathConstructor();
 $Math.__proto__ = $Object.prototype;
 %SetProperty(global, "Math", $Math, DONT_ENUM);
 
@@ -119,6 +119,19 @@
 // ECMA 262 - 15.8.2.11
 function MathMax(arg1, arg2) {  // length == 2
   var length = %_ArgumentsLength();
+  if (length == 2) {
+    if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
+    if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
+    if (arg2 > arg1) return arg2;
+    if (arg1 > arg2) return arg1;
+    if (arg1 == arg2) {
+      // Make sure -0 is considered less than +0.  -0 is never a Smi, +0 can be
+      // a Smi or a heap number.
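+      // Illustrative: 1 / -0 evaluates to -Infinity, so for Math.max(-0, +0)
+      // the test below spots a heap-number -0 in arg1 and returns arg2 (+0).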
+      return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg2 : arg1;
+    }
+    // All comparisons failed, one of the arguments must be NaN.
+    return 0/0;  // Compiler constant-folds this to NaN.
+  }
   if (length == 0) {
     return -1/0;  // Compiler constant-folds this to -Infinity.
   }
@@ -131,7 +144,7 @@
     if (NUMBER_IS_NAN(n)) return n;
     // Make sure +0 is considered greater than -0.  -0 is never a Smi, +0 can be
     // a Smi or heap number.
-    if (n > r || (r === 0 && n === 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
+    if (n > r || (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
   }
   return r;
 }
@@ -139,6 +152,19 @@
 // ECMA 262 - 15.8.2.12
 function MathMin(arg1, arg2) {  // length == 2
   var length = %_ArgumentsLength();
+  if (length == 2) {
+    if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
+    if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
+    if (arg2 > arg1) return arg1;
+    if (arg1 > arg2) return arg2;
+    if (arg1 == arg2) {
+      // Make sure -0 is considered less than +0.  -0 is never a Smi, +0 can be
+      // a Smi or a heap number.
+      return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg1 : arg2;
+    }
+    // All comparisons failed, one of the arguments must be NaN.
+    return 0/0;  // Compiler constant-folds this to NaN.
+  }
   if (length == 0) {
     return 1/0;  // Compiler constant-folds this to Infinity.
   }
@@ -149,9 +175,9 @@
     var n = %_Arguments(i);
     if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
     if (NUMBER_IS_NAN(n)) return n;
-    // Make sure -0 is considered less than +0.  -0 is never a Smi, +0 can b a
+    // Make sure -0 is considered less than +0.  -0 is never a Smi, +0 can be a
     // Smi or a heap number.
-    if (n < r || (r === 0 && n === 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
+    if (n < r || (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
   }
   return r;
 }
@@ -189,7 +215,7 @@
 // ECMA 262 - 15.8.2.18
 function MathTan(x) {
   if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
-  return %Math_tan(x);
+  return %_MathTan(x);
 }
 
 
@@ -239,7 +265,7 @@
 
   // Set up non-enumerable functions of the Math object and
   // set their names.
-  InstallFunctionsOnHiddenPrototype($Math, DONT_ENUM, $Array(
+  InstallFunctions($Math, DONT_ENUM, $Array(
     "random", MathRandom,
     "abs", MathAbs,
     "acos", MathAcos,
diff --git a/src/messages.cc b/src/messages.cc
index b6ad5ac..a0793c2 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -1,5 +1,4 @@
-
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -81,11 +80,11 @@
   }
 
   Handle<Object> stack_trace_handle = stack_trace.is_null()
-      ? FACTORY->undefined_value()
+      ? Handle<Object>::cast(FACTORY->undefined_value())
       : Handle<Object>::cast(stack_trace);
 
   Handle<Object> stack_frames_handle = stack_frames.is_null()
-      ? FACTORY->undefined_value()
+      ? Handle<Object>::cast(FACTORY->undefined_value())
       : Handle<Object>::cast(stack_frames);
 
   Handle<JSMessageObject> message =
@@ -127,7 +126,7 @@
       v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
       Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
       v8::MessageCallback callback =
-          FUNCTION_CAST<v8::MessageCallback>(callback_obj->address());
+          FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
       Handle<Object> callback_data(listener.get(1));
       {
         // Do not allow exceptions to propagate.
@@ -149,12 +148,15 @@
           JSFunction::cast(
               Isolate::Current()->js_builtins_object()->
               GetPropertyNoExceptionThrown(*fmt_str)));
-  Object** argv[1] = { data.location() };
+  Handle<Object> argv[] = { data };
 
   bool caught_exception;
   Handle<Object> result =
       Execution::TryCall(fun,
-          Isolate::Current()->js_builtins_object(), 1, argv, &caught_exception);
+                         Isolate::Current()->js_builtins_object(),
+                         ARRAY_SIZE(argv),
+                         argv,
+                         &caught_exception);
 
   if (caught_exception || !result->IsString()) {
     return FACTORY->LookupAsciiSymbol("<error>");
diff --git a/src/messages.js b/src/messages.js
index a9993af..a3adcf8 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,17 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
 // -------------------------------------------------------------------
 //
 // If this object gets passed to an error constructor the error will
 // get an accessor for .message that constructs a descriptive error
 // message on access.
-const kAddMessageAccessorsMarker = { };
+var kAddMessageAccessorsMarker = { };
 
 // This will be lazily initialized when first needed (and forcibly
 // overwritten even though it's const).
-const kMessages = 0;
+var kMessages = 0;
 
 function FormatString(format, message) {
   var args = %MessageGetArguments(message);
@@ -83,7 +82,7 @@
 // objects between script tags in a browser setting.
 function ToStringCheckErrorObject(obj) {
   if (IsNativeErrorObject(obj)) {
-    return %_CallFunction(obj, errorToString);
+    return %_CallFunction(obj, ErrorToString);
   } else {
     return ToString(obj);
   }
@@ -185,18 +184,20 @@
       "define_disallowed",            ["Cannot define property:", "%0", ", object is not extensible."],
       "non_extensible_proto",         ["%0", " is not extensible"],
       "handler_non_object",           ["Proxy.", "%0", " called with non-object as handler"],
-      "trap_function_expected",       ["Proxy.", "%0", " called with non-function for ", "%1", " trap"],
+      "proto_non_object",             ["Proxy.", "%0", " called with non-object as prototype"],
+      "trap_function_expected",       ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"],
       "handler_trap_missing",         ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
       "handler_trap_must_be_callable", ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
-      "handler_returned_false",       ["Proxy handler ", "%0", " returned false for '", "%1", "' trap"],
-      "handler_returned_undefined",   ["Proxy handler ", "%0", " returned undefined for '", "%1", "' trap"],
-      "proxy_prop_not_configurable",  ["Trap ", "%1", " of proxy handler ", "%0", " returned non-configurable descriptor for property ", "%2"],
-      "proxy_non_object_prop_names",  ["Trap ", "%1", " returned non-object ", "%0"],
-      "proxy_repeated_prop_name",     ["Trap ", "%1", " returned repeated property name ", "%2"],
+      "handler_returned_false",       ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"],
+      "handler_returned_undefined",   ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"],
+      "proxy_prop_not_configurable",  ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"],
+      "proxy_non_object_prop_names",  ["Trap '", "%1", "' returned non-object ", "%0"],
+      "proxy_repeated_prop_name",     ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
       "invalid_weakmap_key",          ["Invalid value used as weak map key"],
       // RangeError
       "invalid_array_length",         ["Invalid array length"],
       "stack_overflow",               ["Maximum call stack size exceeded"],
+      "invalid_time_value",           ["Invalid time value"],
       // SyntaxError
       "unable_to_parse",              ["Parse error"],
       "invalid_regexp_flags",         ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
@@ -204,11 +205,12 @@
       "illegal_break",                ["Illegal break statement"],
       "illegal_continue",             ["Illegal continue statement"],
       "illegal_return",               ["Illegal return statement"],
+      "illegal_let",                  ["Illegal let declaration outside extended mode"],
       "error_loading_debugger",       ["Error loading debugger"],
       "no_input_to_regexp",           ["No input to ", "%0"],
       "invalid_json",                 ["String '", "%0", "' is not valid JSON"],
       "circular_structure",           ["Converting circular structure to JSON"],
-      "obj_ctor_property_non_object", ["Object.", "%0", " called on non-object"],
+      "called_on_non_object",         ["%0", " called on non-object"],
       "called_on_null_or_undefined",  ["%0", " called on null or undefined"],
       "array_indexof_not_defined",    ["Array.getIndexOf: Argument undefined"],
       "object_not_extensible",        ["Can't add property ", "%0", ", object is not extensible"],
@@ -240,20 +242,29 @@
       "strict_poison_pill",           ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
       "strict_caller",                ["Illegal access to a strict mode caller function."],
       "unprotected_let",              ["Illegal let declaration in unprotected statement context."],
+      "unprotected_const",            ["Illegal const declaration in unprotected statement context."],
       "cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
       "redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
+      "harmony_const_assign",         ["Assignment to constant variable."],
+      "invalid_module_path",          ["Module does not export '", "%0", "', or export is not itself a module"],
+      "module_type_error",            ["Module '", "%0", "' used improperly"],
     ];
     var messages = { __proto__ : null };
-    var desc = new PropertyDescriptor();
-    desc.setConfigurable(false);
-    desc.setEnumerable(false);
-    desc.setWritable(false);
     for (var i = 0; i < messagesDictionary.length; i += 2) {
       var key = messagesDictionary[i];
       var format = messagesDictionary[i + 1];
-      ObjectFreeze(format);
-      desc.setValue(format);
-      DefineOwnProperty(messages, key, desc);
+
+      for (var j = 0; j < format.length; j++) {
+        %IgnoreAttributesAndSetProperty(format, %_NumberToString(j), format[j],
+                                        DONT_DELETE | READ_ONLY | DONT_ENUM);
+      }
+      %IgnoreAttributesAndSetProperty(format, 'length', format.length,
+                                      DONT_DELETE | READ_ONLY | DONT_ENUM);
+      %PreventExtensions(format);
+      %IgnoreAttributesAndSetProperty(messages,
+                                      key,
+                                      format,
+                                      DONT_DELETE | DONT_ENUM | READ_ONLY);
     }
     %PreventExtensions(messages);
     %IgnoreAttributesAndSetProperty(builtins, "kMessages",
@@ -386,7 +397,7 @@
   }
 
   return new SourceLocation(this, position, line, column, start, end);
-};
+}
 
 
 /**
@@ -416,7 +427,7 @@
   // resource.
   var column = opt_column || 0;
   if (line == 0) {
-    column -= this.column_offset
+    column -= this.column_offset;
   }
 
   var offset_position = opt_offset_position || 0;
@@ -431,7 +442,8 @@
       return null;
     }
 
-    return this.locationFromPosition(this.line_ends[offset_line + line - 1] + 1 + column);  // line > 0 here.
+    return this.locationFromPosition(
+        this.line_ends[offset_line + line - 1] + 1 + column);  // line > 0 here.
   }
 }
 
@@ -447,8 +459,10 @@
  *     invalid
  */
 function ScriptSourceSlice(opt_from_line, opt_to_line) {
-  var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset : opt_from_line;
-  var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount() : opt_to_line
+  var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset
+                                              : opt_from_line;
+  var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount()
+                                          : opt_to_line;
 
   // Adjust according to the offset within the resource.
   from_line -= this.line_offset;
@@ -468,8 +482,10 @@
   var to_position = to_line == 0 ? 0 : line_ends[to_line - 1] + 1;
 
   // Return a source slice with line numbers re-adjusted to the resource.
-  return new SourceSlice(this, from_line + this.line_offset, to_line + this.line_offset,
-                         from_position, to_position);
+  return new SourceSlice(this,
+                         from_line + this.line_offset,
+                         to_line + this.line_offset,
+                         from_position, to_position);
 }
 
 
@@ -502,7 +518,7 @@
 function ScriptLineCount() {
   // Return number of source lines.
   return this.line_ends.length;
-};
+}
 
 
 /**
@@ -519,6 +535,13 @@
   if (this.name) {
     return this.name;
   }
+
+  // The result is cached as on long scripts it takes noticeable time to
+  // search for the sourceURL.
+  if (this.hasCachedNameOrSourceURL) {
+    return this.cachedNameOrSourceURL;
+  }
+  this.hasCachedNameOrSourceURL = true;
+
   // TODO(608): the spaces in a regexp below had to be escaped as \040
   // because this file is being processed by js2c whose handling of spaces
   // in regexps is broken. Also, ['"] are excluded from allowed URLs to
@@ -527,6 +550,7 @@
   // the scanner/parser.
   var source = ToString(this.source);
   var sourceUrlPos = %StringIndexOf(source, "sourceURL=", 0);
+  this.cachedNameOrSourceURL = this.name;
   if (sourceUrlPos > 4) {
     var sourceUrlPattern =
         /\/\/@[\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm;
@@ -537,15 +561,17 @@
     var match =
         %_RegExpExec(sourceUrlPattern, source, sourceUrlPos - 4, matchInfo);
     if (match) {
-      return SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
+      this.cachedNameOrSourceURL =
+          SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
     }
   }
-  return this.name;
+  return this.cachedNameOrSourceURL;
 }
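+
+// Illustrative (a sketch, not part of this change): for a script whose
+// source ends in "//@ sourceURL=foo.js", nameOrSourceURL() returns "foo.js"
+// on the first call and serves the cached value on every call after that.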
 
 
 SetUpLockedPrototype(Script,
-  $Array("source", "name", "line_ends", "line_offset", "column_offset"),
+  $Array("source", "name", "line_ends", "line_offset", "column_offset",
+         "cachedNameOrSourceURL", "hasCachedNameOrSourceURL" ),
   $Array(
     "lineFromPosition", ScriptLineFromPosition,
     "locationFromPosition", ScriptLocationFromPosition,
@@ -567,10 +593,10 @@
  *   position : position within the source
  *   start    : position of start of source context (inclusive)
  *   end      : position of end of source context (not inclusive)
- * Source text for the source context is the character interval [start, end[. In
- * most cases end will point to a newline character. It might point just past
- * the final position of the source if the last source line does not end with a
- * newline character.
+ * Source text for the source context is the character interval
+ * [start, end[. In most cases end will point to a newline character.
+ * It might point just past the final position of the source if the last
+ * source line does not end with a newline character.
  * @param {Script} script The Script object for which this is a location
  * @param {number} position Source position for the location
  * @param {number} line The line number for the location
@@ -588,7 +614,7 @@
   this.end = end;
 }
 
-const kLineLengthLimit = 78;
+var kLineLengthLimit = 78;
 
 /**
  * Restrict source location start and end positions to make the source slice
@@ -637,7 +663,7 @@
       this.end = this.start + limit;
     }
   }
-};
+}
 
 
 /**
@@ -646,8 +672,11 @@
  *     Source text for this location.
  */
 function SourceLocationSourceText() {
-  return %_CallFunction(this.script.source, this.start, this.end, StringSubstring);
-};
+  return %_CallFunction(this.script.source,
+                        this.start,
+                        this.end,
+                        StringSubstring);
+}
 
 
 SetUpLockedPrototype(SourceLocation,
@@ -655,7 +684,7 @@
   $Array(
     "restrict", SourceLocationRestrict,
     "sourceText", SourceLocationSourceText
-  )
+ )
 );
 
 
@@ -695,7 +724,7 @@
                         this.from_position,
                         this.to_position,
                         StringSubstring);
-};
+}
 
 SetUpLockedPrototype(SourceSlice,
   $Array("script", "from_line", "to_line", "from_position", "to_position"),
@@ -730,24 +759,19 @@
   // can't rely on 'this' being the same as 'obj'.
   var hasBeenSet = false;
   var value;
-  function getter() {
+  var getter = function() {
     if (hasBeenSet) {
       return value;
     }
     hasBeenSet = true;
     value = fun(obj);
     return value;
-  }
-  function setter(v) {
+  };
+  var setter = function(v) {
     hasBeenSet = true;
     value = v;
-  }
-  var desc = { get: getter,
-               set: setter,
-               enumerable: false,
-               configurable: true };
-  desc = ToPropertyDescriptor(desc);
-  DefineOwnProperty(obj, name, desc, true);
+  };
+  %DefineOrRedefineAccessorProperty(obj, name, getter, setter, DONT_ENUM);
 }
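+
+// Usage sketch (illustrative): DefineOneShotAccessor(err, 'stack', fun)
+// installs a non-enumerable accessor whose getter computes fun(err) once and
+// caches the result; a later assignment to err.stack overwrites the cache.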
 
 function CallSite(receiver, fun, pos) {
@@ -758,7 +782,7 @@
 
 function CallSiteGetThis() {
   return this.receiver;
-};
+}
 
 function CallSiteGetTypeName() {
   var constructor = this.receiver.constructor;
@@ -770,33 +794,33 @@
     return %_CallFunction(this.receiver, ObjectToString);
   }
   return constructorName;
-};
+}
 
 function CallSiteIsToplevel() {
   if (this.receiver == null) {
     return true;
   }
   return IS_GLOBAL(this.receiver);
-};
+}
 
 function CallSiteIsEval() {
   var script = %FunctionGetScript(this.fun);
   return script && script.compilation_type == COMPILATION_TYPE_EVAL;
-};
+}
 
 function CallSiteGetEvalOrigin() {
   var script = %FunctionGetScript(this.fun);
   return FormatEvalOrigin(script);
-};
+}
 
 function CallSiteGetScriptNameOrSourceURL() {
   var script = %FunctionGetScript(this.fun);
   return script ? script.nameOrSourceURL() : null;
-};
+}
 
 function CallSiteGetFunction() {
   return this.fun;
-};
+}
 
 function CallSiteGetFunctionName() {
   // See if the function knows its own name
@@ -812,15 +836,19 @@
     return "eval";
   }
   return null;
-};
+}
 
 function CallSiteGetMethodName() {
   // See if we can find a unique property on the receiver that holds
   // this function.
   var ownName = this.fun.name;
   if (ownName && this.receiver &&
-      (%_CallFunction(this.receiver, ownName, ObjectLookupGetter) === this.fun ||
-       %_CallFunction(this.receiver, ownName, ObjectLookupSetter) === this.fun ||
+      (%_CallFunction(this.receiver,
+                      ownName,
+                      ObjectLookupGetter) === this.fun ||
+       %_CallFunction(this.receiver,
+                      ownName,
+                      ObjectLookupSetter) === this.fun ||
        this.receiver[ownName] === this.fun)) {
     // To handle DontEnum properties we guess that the method has
     // the same name as the function.
@@ -830,7 +858,8 @@
   for (var prop in this.receiver) {
     if (this.receiver.__lookupGetter__(prop) === this.fun ||
         this.receiver.__lookupSetter__(prop) === this.fun ||
-        (!this.receiver.__lookupGetter__(prop) && this.receiver[prop] === this.fun)) {
+        (!this.receiver.__lookupGetter__(prop) &&
+         this.receiver[prop] === this.fun)) {
       // If we find more than one match bail out to avoid confusion.
       if (name) {
         return null;
@@ -842,12 +871,12 @@
     return name;
   }
   return null;
-};
+}
 
 function CallSiteGetFileName() {
   var script = %FunctionGetScript(this.fun);
   return script ? script.name : null;
-};
+}
 
 function CallSiteGetLineNumber() {
   if (this.pos == -1) {
@@ -859,7 +888,7 @@
     location = script.locationFromPosition(this.pos, true);
   }
   return location ? location.line + 1 : null;
-};
+}
 
 function CallSiteGetColumnNumber() {
   if (this.pos == -1) {
@@ -871,16 +900,16 @@
     location = script.locationFromPosition(this.pos, true);
   }
   return location ? location.column + 1: null;
-};
+}
 
 function CallSiteIsNative() {
   var script = %FunctionGetScript(this.fun);
   return script ? (script.type == TYPE_NATIVE) : false;
-};
+}
 
 function CallSiteGetPosition() {
   return this.pos;
-};
+}
 
 function CallSiteIsConstructor() {
   var constructor = this.receiver ? this.receiver.constructor : null;
@@ -888,7 +917,7 @@
     return false;
   }
   return this.fun === constructor;
-};
+}
 
 SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
   "getThis", CallSiteGetThis,
@@ -931,12 +960,13 @@
       // eval script originated from "real" source.
       if (eval_from_script.name) {
         eval_origin += " (" + eval_from_script.name;
-        var location = eval_from_script.locationFromPosition(script.eval_from_script_position, true);
+        var location = eval_from_script.locationFromPosition(
+            script.eval_from_script_position, true);
         if (location) {
           eval_origin += ":" + (location.line + 1);
           eval_origin += ":" + (location.column + 1);
         }
-        eval_origin += ")"
+        eval_origin += ")";
       } else {
         eval_origin += " (unknown source)";
       }
@@ -944,7 +974,7 @@
   }
 
   return eval_origin;
-};
+}
 
 function FormatSourcePosition(frame) {
   var fileName;
@@ -953,8 +983,9 @@
     fileLocation = "native";
   } else if (frame.isEval()) {
     fileName = frame.getScriptNameOrSourceURL();
-    if (!fileName)
+    if (!fileName) {
       fileLocation = frame.getEvalOrigin();
+    }
   } else {
     fileName = frame.getFileName();
   }
@@ -1057,19 +1088,19 @@
   if (stackTraceLimit < 0 || stackTraceLimit > 10000) {
     stackTraceLimit = 10000;
   }
-  var raw_stack = %CollectStackTrace(cons_opt
-                                     ? cons_opt
-                                     : captureStackTrace, stackTraceLimit);
+  var raw_stack = %CollectStackTrace(obj,
+                                     cons_opt ? cons_opt : captureStackTrace,
+                                     stackTraceLimit);
   DefineOneShotAccessor(obj, 'stack', function (obj) {
     return FormatRawStackTrace(obj, raw_stack);
   });
-};
+}
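+
+// Illustrative summary (a sketch): captureStackTrace clamps the frame count
+// to at most 10000, collects the raw frames eagerly via %CollectStackTrace,
+// and defers formatting to the lazy 'stack' accessor installed above.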
 
 
 function SetUpError() {
   // Define special error type constructors.
 
-  function DefineError(f) {
+  var DefineError = function(f) {
     // Store the error function in both the global object
     // and the runtime object. The function is fetched
     // from the runtime object when throwing errors from
@@ -1085,7 +1116,7 @@
       // However, it can't be an instance of the Error object because
       // it hasn't been properly configured yet.  Instead we create a
       // special not-a-true-error-but-close-enough object.
-      function ErrorPrototype() {}
+      var ErrorPrototype = function() {};
       %FunctionSetPrototype(ErrorPrototype, $Object.prototype);
       %FunctionSetInstanceClassName(ErrorPrototype, 'Error');
       %FunctionSetPrototype(f, new ErrorPrototype());
@@ -1126,7 +1157,8 @@
         return new f(m);
       }
     });
-  }
+    %SetNativeFlag(f);
+  };
 
   DefineError(function Error() { });
   DefineError(function TypeError() { });
@@ -1143,42 +1175,42 @@
 
 %SetProperty($Error.prototype, 'message', '', DONT_ENUM);
 
-// Global list of error objects visited during errorToString. This is
+// Global list of error objects visited during ErrorToString. This is
 // used to detect cycles in error toString formatting.
-const visited_errors = new InternalArray();
-const cyclic_error_marker = new $Object();
+var visited_errors = new InternalArray();
+var cyclic_error_marker = new $Object();
 
-function errorToStringDetectCycle(error) {
+function ErrorToStringDetectCycle(error) {
   if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
   try {
     var type = error.type;
+    var name = error.name;
+    name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
+    var message = error.message;
     var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
     if (type && !hasMessage) {
-      var formatted = FormatMessage(%NewMessageObject(type, error.arguments));
-      return error.name + ": " + formatted;
+      message = FormatMessage(%NewMessageObject(type, error.arguments));
     }
-    var message = hasMessage ? (": " + error.message) : "";
-    return error.name + message;
+    message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
+    if (name === "") return message;
+    if (message === "") return name;
+    return name + ": " + message;
   } finally {
     visited_errors.length = visited_errors.length - 1;
   }
 }
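+
+// Illustrative behavior (a sketch): an undefined name falls back to "Error";
+// an empty name yields just the message, an empty message just the name, and
+// otherwise the result is "name: message".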
 
-function errorToString() {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Error.prototype.toString"]);
+function ErrorToString() {
+  if (!IS_SPEC_OBJECT(this)) {
+    throw MakeTypeError("called_on_non_object", ["Error.prototype.toString"]);
   }
-  // This helper function is needed because access to properties on
-  // the builtins object do not work inside of a catch clause.
-  function isCyclicErrorMarker(o) { return o === cyclic_error_marker; }
 
   try {
-    return errorToStringDetectCycle(this);
+    return ErrorToStringDetectCycle(this);
   } catch(e) {
     // If this error message was encountered already return the empty
     // string for it instead of recursively formatting it.
-    if (isCyclicErrorMarker(e)) {
+    if (e === cyclic_error_marker) {
       return '';
     }
     throw e;
@@ -1186,8 +1218,8 @@
 }
 
 
-InstallFunctions($Error.prototype, DONT_ENUM, ['toString', errorToString]);
+InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]);
 
 // Boilerplate for exceptions for stack overflows. Used from
 // Isolate::StackOverflow().
-const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
+var kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index c4c4fd2..2ff4710 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -30,13 +30,14 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 
 #ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
 #define V8_MIPS_ASSEMBLER_MIPS_INL_H_
 
 #include "mips/assembler-mips.h"
+
 #include "cpu.h"
 #include "debug.h"
 
@@ -78,6 +79,15 @@
 }
 
 
+int FPURegister::ToAllocationIndex(FPURegister reg) {
+  ASSERT(reg.code() % 2 == 0);
+  ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
+  ASSERT(reg.is_valid());
+  ASSERT(!reg.is(kDoubleRegZero));
+  ASSERT(!reg.is(kLithiumScratchDouble));
+  return (reg.code() / 2);
+}
+
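+// Illustrative mapping (not part of this change): f0 -> 0, f2 -> 1,
+// f4 -> 2, and so on.  Odd-numbered FPU registers hold the high halves of
+// doubles and are never handed out by the allocator, hence code() / 2.
+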
 
 // -----------------------------------------------------------------------------
 // RelocInfo.
@@ -107,19 +117,42 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  return reinterpret_cast<Address>(pc_);
+  ASSERT(IsCodeTarget(rmode_) ||
+         rmode_ == RUNTIME_ENTRY ||
+         rmode_ == EMBEDDED_OBJECT ||
+         rmode_ == EXTERNAL_REFERENCE);
+  // Read the address of the word containing the target_address in an
+  // instruction stream.
+  // The only architecture-independent user of this function is the serializer.
+  // The serializer uses it to find out how many raw bytes of instruction to
+  // output before the next target.
+  // For an instruction like LUI/ORI where the target bits are mixed into the
+  // instruction bits, the size of the target will be zero, indicating that the
+  // serializer should not step forward in memory after a target is resolved
+  // and written. In this case the target_address_address function should
+  // return the end of the instructions to be patched, allowing the
+  // deserializer to deserialize the instructions as raw bytes and put them in
+  // place, ready to be patched with the target. After jump optimization,
+  // that is the address of the instruction that follows the J/JAL/JR/JALR
+  // instruction.
+  return reinterpret_cast<Address>(
+    pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
 }
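+
+// A worked example (illustrative; assuming kInstructionsFor32BitConstant == 2
+// and kInstrSize == 4, i.e. a LUI/ORI pair): the address returned above is
+// pc_ + 8, the first instruction past the two instructions that encode the
+// 32-bit target.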
 
 
 int RelocInfo::target_address_size() {
-  return Assembler::kExternalTargetSize;
+  return Assembler::kSpecialTargetSize;
 }
 
 
-void RelocInfo::set_target_address(Address target) {
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
+  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -129,7 +162,7 @@
 }
 
 
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Handle<Object>(reinterpret_cast<Object**>(
       Assembler::target_address_at(pc_)));
@@ -146,9 +179,15 @@
 }
 
 
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+  if (mode == UPDATE_WRITE_BARRIER &&
+      host() != NULL &&
+      target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
 }
 
 
@@ -176,10 +215,17 @@
 }
 
 
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+                                WriteBarrierMode mode) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
+  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+    // TODO(1550) We are passing NULL as a slot because a cell can never be
+    // on an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), NULL, cell);
+  }
 }
 
 
@@ -200,6 +246,11 @@
   // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
   // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
   Assembler::set_target_address_at(pc_, target);
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -242,24 +293,19 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    Object** p = target_object_address();
-    Object* orig = *p;
-    visitor->VisitPointer(p);
-    if (*p != orig) {
-      set_target_object(*p);
-    }
+    visitor->VisitEmbeddedPointer(this);
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    visitor->VisitExternalReference(target_reference_address());
+    visitor->VisitExternalReference(this);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
   } else if (((RelocInfo::IsJSReturn(mode) &&
-               IsPatchedReturnSequence()) ||
-              (RelocInfo::IsDebugBreakSlot(mode) &&
-               IsPatchedDebugBreakSlotSequence())) &&
+              IsPatchedReturnSequence()) ||
+             (RelocInfo::IsDebugBreakSlot(mode) &&
+             IsPatchedDebugBreakSlotSequence())) &&
              Isolate::Current()->debug()->has_break_points()) {
     visitor->VisitDebugTarget(this);
 #endif
@@ -273,13 +319,13 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitEmbeddedPointer(heap, this);
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    StaticVisitor::VisitExternalReference(target_reference_address());
+    StaticVisitor::VisitExternalReference(this);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
              ((RelocInfo::IsJSReturn(mode) &&
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index e01a0ca..0d7f921 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 
 #include "v8.h"
@@ -74,7 +74,9 @@
 
 
 void CpuFeatures::Probe() {
-  ASSERT(!initialized_);
+  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
+                                CpuFeaturesImpliedByCompiler());
+  ASSERT(supported_ == 0 || supported_ == standard_features);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -82,8 +84,7 @@
   // Get the features implied by the OS and the compiler settings. This is the
   // minimal set of features which is also allowed for generated code in the
   // snapshot.
-  supported_ |= OS::CpuFeaturesImpliedByPlatform();
-  supported_ |= CpuFeaturesImpliedByCompiler();
+  supported_ |= standard_features;
 
   if (Serializer::enabled()) {
     // No probing for features if we might serialize (generate snapshot).
@@ -300,7 +301,7 @@
     own_buffer_ = false;
   }
 
-  // Setup buffer pointers.
+  // Set up buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -336,7 +337,7 @@
 
 void Assembler::GetCode(CodeDesc* desc) {
   ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
-  // Setup code descriptor.
+  // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -849,7 +850,6 @@
   return rmode != RelocInfo::NONE;
 }
 
-
 void Assembler::GenInstrRegister(Opcode opcode,
                                  Register rs,
                                  Register rt,
@@ -1244,6 +1244,7 @@
 
 
 void Assembler::andi(Register rt, Register rs, int32_t j) {
+  ASSERT(is_uint16(j));
   GenInstrImmediate(ANDI, rs, rt, j);
 }
 
@@ -1254,6 +1255,7 @@
 
 
 void Assembler::ori(Register rt, Register rs, int32_t j) {
+  ASSERT(is_uint16(j));
   GenInstrImmediate(ORI, rs, rt, j);
 }
 
@@ -1264,6 +1266,7 @@
 
 
 void Assembler::xori(Register rt, Register rs, int32_t j) {
+  ASSERT(is_uint16(j));
   GenInstrImmediate(XORI, rs, rt, j);
 }
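
The new ASSERT(is_uint16(j)) lines guard the 16-bit immediate field of MIPS I-type instructions; a wider constant must instead be materialized as a lui/ori pair. A standalone model of both the check and the split (pure arithmetic, not real instruction encodings):

#include <cassert>
#include <stdint.h>
#include <stdio.h>

// I-type immediates occupy 16 bits; anything wider must be split.
static bool is_uint16(int64_t x) { return x >= 0 && x <= 0xFFFF; }

// li-style expansion: lui loads the upper half, ori fills the lower.
static uint32_t lui(uint16_t hi)               { return (uint32_t)hi << 16; }
static uint32_t ori(uint32_t reg, uint16_t lo) { return reg | lo; }

int main() {
  uint32_t k = 0xDEADBEEFu;
  assert(!is_uint16(k));  // this is what the new ASSERTs would catch
  uint32_t reg = ori(lui((uint16_t)(k >> 16)), (uint16_t)(k & 0xFFFF));
  printf("%08x\n", reg);  // prints deadbeef
  assert(reg == k);
  return 0;
}
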
 
@@ -1315,7 +1318,7 @@
 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
   // Should be called via MacroAssembler::Ror.
   ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
       | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
   emit(instr);
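
Replacing the loose mips32r2 boolean with a kArchVariant comparison keeps all ISA-revision guards on a single enum, so adding further revisions is a one-enum change. A minimal sketch of the pattern, assuming a hypothetical configure-time macro:

enum ArchVariant { kMips32r1, kMips32r2 };

#ifdef _MIPS_ARCH_MIPS32R2               // illustrative macro name
static const ArchVariant kArchVariant = kMips32r2;
#else
static const ArchVariant kArchVariant = kMips32r1;
#endif

// An r2-only emitter guards on the variant before emitting.
bool CanEmitRotr() { return kArchVariant == kMips32r2; }

int main() {
  if (!CanEmitRotr()) {
    // An r1 build would synthesize the rotate from srl/sll/or instead.
  }
  return 0;
}
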
@@ -1325,7 +1328,7 @@
 void Assembler::rotrv(Register rd, Register rt, Register rs) {
   // Should be called via MacroAssembler::Ror.
   ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
   emit(instr);
@@ -1444,6 +1447,7 @@
 
 
 void Assembler::lui(Register rd, int32_t j) {
+  ASSERT(is_uint16(j));
   GenInstrImmediate(LUI, zero_reg, rd, j);
 }
 
@@ -1599,7 +1603,7 @@
 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
   // Should be called via MacroAssembler::Ins.
   // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
 }
 
@@ -1607,7 +1611,7 @@
 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
   // Should be called via MacroAssembler::Ext.
   // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
 }
 
@@ -1767,25 +1771,25 @@
 
 
 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
 }
 
 
 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
 }
 
 
 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
 }
 
 
 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
 }
 
@@ -1826,7 +1830,7 @@
 
 
 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
 }
 
@@ -1842,7 +1846,7 @@
 
 
 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
 }
 
@@ -1969,7 +1973,7 @@
   }
   CHECK_GT(desc.buffer_size, 0);  // No overflow.
 
-  // Setup new buffer.
+  // Set up new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
 
   desc.instr_size = pc_offset();
@@ -2018,7 +2022,8 @@
 
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  RelocInfo rinfo(pc_, rmode, data);  // We do not try to reuse pool constants.
+  // We do not try to reuse pool constants.
+  RelocInfo rinfo(pc_, rmode, data, NULL);
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
     // Adjust code for new modes.
     ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2041,7 +2046,7 @@
     }
     ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
     if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
       ClearRecordedAstId();
       reloc_info_writer.Write(&reloc_info_with_ast_id);
     } else {
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 38e9537..8b877f6 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 
 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_
@@ -182,12 +182,7 @@
       kNumReservedRegisters;
 
 
-  static int ToAllocationIndex(FPURegister reg) {
-    ASSERT(reg.code() % 2 == 0);
-    ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
-    ASSERT(reg.is_valid());
-    return (reg.code() / 2);
-  }
+  inline static int ToAllocationIndex(FPURegister reg);
 
   static FPURegister FromAllocationIndex(int index) {
     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
@@ -302,7 +297,15 @@
 const FPURegister f30 = { 30 };
 const FPURegister f31 = { 31 };
 
-const FPURegister kDoubleRegZero = f28;
+// Register aliases.
+// cp is assumed to be a callee saved register.
+static const Register& kLithiumScratchReg = s3;  // Scratch register.
+static const Register& kLithiumScratchReg2 = s4;  // Scratch register.
+static const Register& kRootRegister = s6;  // Roots array pointer.
+static const Register& cp = s7;     // JavaScript context pointer.
+static const Register& fp = s8_fp;  // Alias for fp.
+static const DoubleRegister& kLithiumScratchDouble = f30;
+static const FPURegister& kDoubleRegZero = f28;
 
 // FPU (coprocessor 1) control registers.
 // Currently only FCSR (#31) is implemented.
@@ -550,10 +553,13 @@
   static void JumpLabelToJumpRegister(Address pc);
 
   // This sets the branch destination (which gets loaded at the call address).
-  // This is for calls and branches within generated code.
-  inline static void set_target_at(Address instruction_payload,
-                                   Address target) {
-    set_target_address_at(instruction_payload, target);
+  // This is for calls and branches within generated code.  The serializer
+  // has already deserialized the lui/ori instructions etc.
+  inline static void deserialization_set_special_target_at(
+      Address instruction_payload, Address target) {
+    set_target_address_at(
+        instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
+        target);
   }
 
   // This sets the branch destination.
@@ -575,8 +581,7 @@
   // are split across two consecutive instructions and don't exist separately
   // in the code, so the serializer should not step forwards in memory after
   // a target is resolved and written.
-  static const int kCallTargetSize = 0 * kInstrSize;
-  static const int kExternalTargetSize = 0 * kInstrSize;
+  static const int kSpecialTargetSize = 0;
 
   // Number of consecutive instructions used to store 32bit constant.
   // Before jump-optimizations, this constant was used in
@@ -667,7 +672,7 @@
   // Never use the int16_t b(l)cond version with a branch offset
   // instead of using the Label* version.
 
-  // Jump targets must be in the current 256 MB-aligned region. ie 28 bits.
+  // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits.
   void j(int32_t target);
   void jal(int32_t target);
   void jalr(Register rs, Register rd = ra);
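
Because a MIPS call target lives inline across the lui/ori pair, there is no separate constant slot for the serializer to skip (hence kSpecialTargetSize == 0), and the deserializer must step back kInstructionsFor32BitConstant instructions from the payload address before patching. A runnable model with simplified "instructions" whose low 16 bits hold the immediates:

#include <assert.h>
#include <stdint.h>
#include <string.h>

// Each 4-byte pseudo-instruction keeps its immediate in the low 16
// bits; opcode and register fields are omitted from this model.
enum { kInstrSize = 4, kInstructionsFor32BitConstant = 2 };

static void set_target_at(uint8_t* pc, uint32_t target) {
  uint32_t instr[2] = { target >> 16, target & 0xFFFF };  // lui, ori
  memcpy(pc, instr, sizeof(instr));
}

static uint32_t target_at(const uint8_t* pc) {
  uint32_t instr[2];
  memcpy(instr, pc, sizeof(instr));
  return (instr[0] << 16) | (instr[1] & 0xFFFF);
}

int main(void) {
  uint8_t code[8] = {0};
  // The deserializer is handed the address just past the constant and
  // must step back before patching.
  uint8_t* payload = code + kInstructionsFor32BitConstant * kInstrSize;
  set_target_at(payload - kInstructionsFor32BitConstant * kInstrSize,
                0x0BADC0DEu);
  assert(target_at(code) == 0x0BADC0DEu);
  return 0;
}
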
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index d772304..eeb84c3 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -67,33 +67,45 @@
     ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
-  // JumpToExternalReference expects a0 to contain the number of arguments
+  // JumpToExternalReference expects s0 to contain the number of arguments
   // including the receiver and the extra arguments.
-  __ Addu(a0, a0, Operand(num_extra_args + 1));
+  __ Addu(s0, a0, num_extra_args + 1);
+  __ sll(s1, s0, kPointerSizeLog2);
+  __ Subu(s1, s1, kPointerSize);
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
 
 
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+                                              Register result) {
+  // Load the global context.
+
+  __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ lw(result,
+        FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+  // Load the InternalArray function from the global context.
+  __ lw(result,
+         MemOperand(result,
+                    Context::SlotOffset(
+                        Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+}
+
+
 // Load the built-in Array function from the current context.
 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
   // Load the global context.
 
   __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ lw(result,
-         FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+        FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
   // Load the Array function from the global context.
   __ lw(result,
-         MemOperand(result,
-                    Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+        MemOperand(result,
+                   Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
 }
 
 
-// This constant has the same value as JSArray::kPreallocatedArrayElements and
-// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
-// below should be reconsidered.
-static const int kLoopUnfoldLimit = 4;
-
-
 // Allocate an empty JSArray. The allocated array is put into the result
 // register. An elements backing store is allocated with size initial_capacity
 // and filled with the hole values.
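
GenerateLoadInternalArrayFunction above performs a fixed two-hop lookup: current context to global object, global object to global context, then the builtin's function slot. The same chain expressed as plain data structures, all types being illustrative stand-ins:

struct JSFunction {};
struct GlobalContext { JSFunction* internal_array_function; };
struct GlobalObject { GlobalContext* global_context; };
struct Context { GlobalObject* global; };

JSFunction* LoadInternalArrayFunction(Context* cp) {
  GlobalObject* global = cp->global;            // Context::GLOBAL_INDEX
  GlobalContext* gc = global->global_context;   // kGlobalContextOffset
  return gc->internal_array_function;  // INTERNAL_ARRAY_FUNCTION_INDEX
}

int main() {
  JSFunction fn;
  GlobalContext gc = { &fn };
  GlobalObject  go = { &gc };
  Context       cx = { &go };
  return LoadInternalArrayFunction(&cx) == &fn ? 0 : 1;
}
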
@@ -103,16 +115,17 @@
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
-                                 int initial_capacity,
                                  Label* gc_required) {
-  ASSERT(initial_capacity > 0);
-  // Load the initial map from the array function.
-  __ lw(scratch1, FieldMemOperand(array_function,
-                                  JSFunction::kPrototypeOrInitialMapOffset));
+  const int initial_capacity = JSArray::kPreallocatedArrayElements;
+  STATIC_ASSERT(initial_capacity >= 0);
+  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
-  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+  int size = JSArray::kSize;
+  if (initial_capacity > 0) {
+    size += FixedArray::SizeFor(initial_capacity);
+  }
   __ AllocateInNewSpace(size,
                         result,
                         scratch2,
@@ -131,6 +144,11 @@
   __ mov(scratch3,  zero_reg);
   __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
 
+  if (initial_capacity == 0) {
+    __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+    return;
+  }
+
   // Calculate the location of the elements array and set elements array member
   // of the JSArray.
   // result: JSObject
@@ -147,21 +165,31 @@
   // scratch1: elements array (untagged)
   // scratch2: start of next object
   __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
-  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
   __ sw(scratch3, MemOperand(scratch1));
   __ Addu(scratch1, scratch1, kPointerSize);
   __ li(scratch3,  Operand(Smi::FromInt(initial_capacity)));
-  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
   __ sw(scratch3, MemOperand(scratch1));
   __ Addu(scratch1, scratch1, kPointerSize);
 
-  // Fill the FixedArray with the hole value.
-  ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-  ASSERT(initial_capacity <= kLoopUnfoldLimit);
+  // Fill the FixedArray with the hole value. Inline the code if short.
+  STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
   __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
-  for (int i = 0; i < initial_capacity; i++) {
+  static const int kLoopUnfoldLimit = 4;
+  if (initial_capacity <= kLoopUnfoldLimit) {
+    for (int i = 0; i < initial_capacity; i++) {
+      __ sw(scratch3, MemOperand(scratch1, i * kPointerSize));
+    }
+  } else {
+    Label loop, entry;
+    __ Addu(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
+    __ Branch(&entry);
+    __ bind(&loop);
     __ sw(scratch3, MemOperand(scratch1));
     __ Addu(scratch1, scratch1, kPointerSize);
+    __ bind(&entry);
+    __ Branch(&loop, lt, scratch1, Operand(scratch2));
   }
 }
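
The fill of the new backing store is unrolled only when the capacity is at most kLoopUnfoldLimit stores and otherwise emitted as a branch loop, since the capacity is no longer guaranteed to be a tiny compile-time constant. A host-side sketch of the same decision:

#include <assert.h>

static const int kLoopUnfoldLimit = 4;

// Short fills become straight-line stores (the first for-loop stands in
// for code the assembler unrolls at compile time); longer fills use a
// branch-per-iteration loop like the one emitted above.
static void FillWithHole(int* slots, int n, int hole) {
  if (n <= kLoopUnfoldLimit) {
    for (int i = 0; i < n; i++) slots[i] = hole;   // "unrolled" stores
  } else {
    int* p = slots;
    int* end = slots + n;
    while (p < end) *p++ = hole;                   // runtime loop
  }
}

int main() {
  int backing[8];
  FillWithHole(backing, 8, -1);
  for (int i = 0; i < 8; i++) assert(backing[i] == -1);
  return 0;
}
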
 
@@ -177,7 +205,7 @@
 // register elements_array_storage is scratched.
 static void AllocateJSArray(MacroAssembler* masm,
                             Register array_function,  // Array function.
-                            Register array_size,  // As a smi.
+                            Register array_size,  // As a smi, cannot be 0.
                             Register result,
                             Register elements_array_storage,
                             Register elements_array_end,
@@ -185,31 +213,16 @@
                             Register scratch2,
                             bool fill_with_hole,
                             Label* gc_required) {
-  Label not_empty, allocated;
-
   // Load the initial map from the array function.
-  __ lw(elements_array_storage,
-         FieldMemOperand(array_function,
-                         JSFunction::kPrototypeOrInitialMapOffset));
+  __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
 
-  // Check whether an empty sized array is requested.
-  __ Branch(&not_empty, ne, array_size, Operand(zero_reg));
-
-  // If an empty array is requested allocate a small elements array anyway. This
-  // keeps the code below free of special casing for the empty array.
-  int size = JSArray::kSize +
-             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
-  __ AllocateInNewSpace(size,
-                        result,
-                        elements_array_end,
-                        scratch1,
-                        gc_required,
-                        TAG_OBJECT);
-  __ Branch(&allocated);
+  if (FLAG_debug_code) {  // Assert that array size is not zero.
+    __ Assert(
+        ne, "array size is unexpectedly 0", array_size, Operand(zero_reg));
+  }
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested number of elements.
-  __ bind(&not_empty);
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ li(elements_array_end,
         (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
@@ -228,7 +241,6 @@
   // result: JSObject
   // elements_array_storage: initial map
   // array_size: size of array (smi)
-  __ bind(&allocated);
   __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
   __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
   __ sw(elements_array_storage,
@@ -262,8 +274,6 @@
   // the actual JSArray has length 0 and the size of the JSArray for non-empty
   // JSArrays. The length of a FixedArray is stored as a smi.
   STATIC_ASSERT(kSmiTag == 0);
-  __ li(at, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-  __ movz(array_size, at, array_size);
 
   ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
   __ sw(array_size, MemOperand(elements_array_storage));
@@ -312,21 +322,22 @@
 static void ArrayNativeCode(MacroAssembler* masm,
                             Label* call_generic_code) {
   Counters* counters = masm->isolate()->counters();
-  Label argc_one_or_more, argc_two_or_more;
+  Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
+      has_non_smi_element, finish, cant_transition_map, not_double;
 
   // Check for array construction with zero arguments or one.
   __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
   // Handle construction of an empty array.
+  __ bind(&empty_array);
   AllocateEmptyJSArray(masm,
                        a1,
                        a2,
                        a3,
                        t0,
                        t1,
-                       JSArray::kPreallocatedArrayElements,
                        call_generic_code);
   __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
-  // Setup return value, remove receiver from stack and return.
+  // Set up return value, remove receiver from stack and return.
   __ mov(v0, a2);
   __ Addu(sp, sp, Operand(kPointerSize));
   __ Ret();
@@ -338,6 +349,12 @@
 
   STATIC_ASSERT(kSmiTag == 0);
   __ lw(a2, MemOperand(sp));  // Get the argument from the stack.
+  __ Branch(&not_empty_array, ne, a2, Operand(zero_reg));
+  __ Drop(1);  // Adjust stack.
+  __ mov(a0, zero_reg);  // Treat this as a call with argc of zero.
+  __ Branch(&empty_array);
+
+  __ bind(&not_empty_array);
   __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
   __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
 
@@ -363,7 +380,7 @@
                   call_generic_code);
   __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
 
-  // Setup return value, remove receiver and argument from stack and return.
+  // Set up return value, remove receiver and argument from stack and return.
   __ mov(v0, a3);
   __ Addu(sp, sp, Operand(2 * kPointerSize));
   __ Ret();
@@ -398,14 +415,22 @@
   // sp[0]: last argument
 
   Label loop, entry;
-  __ Branch(&entry);
+  __ Branch(USE_DELAY_SLOT, &entry);
+  __ mov(t3, sp);
   __ bind(&loop);
-  __ pop(a2);
+  __ lw(a2, MemOperand(t3));
+  if (FLAG_smi_only_arrays) {
+    __ JumpIfNotSmi(a2, &has_non_smi_element);
+  }
+  __ Addu(t3, t3, kPointerSize);
   __ Addu(t1, t1, -kPointerSize);
   __ sw(a2, MemOperand(t1));
   __ bind(&entry);
   __ Branch(&loop, lt, t0, Operand(t1));
 
+  __ bind(&finish);
+  __ mov(sp, t3);
+
   // Remove caller arguments and receiver from the stack, set up return value and
   // return.
   // a0: argc
@@ -414,6 +439,77 @@
   __ Addu(sp, sp, Operand(kPointerSize));
   __ mov(v0, a3);
   __ Ret();
+
+  __ bind(&has_non_smi_element);
+  // Double values are handled by the runtime.
+  __ CheckMap(
+      a2, t5, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
+  __ bind(&cant_transition_map);
+  __ UndoAllocationInNewSpace(a3, t0);
+  __ Branch(call_generic_code);
+
+  __ bind(&not_double);
+  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+  // a3: JSArray
+  __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         a2,
+                                         t5,
+                                         &cant_transition_map);
+  __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ RecordWriteField(a3,
+                      HeapObject::kMapOffset,
+                      a2,
+                      t5,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  Label loop2;
+  __ bind(&loop2);
+  __ lw(a2, MemOperand(t3));
+  __ Addu(t3, t3, kPointerSize);
+  __ Subu(t1, t1, kPointerSize);
+  __ sw(a2, MemOperand(t1));
+  __ Branch(&loop2, lt, t0, Operand(t1));
+  __ Branch(&finish);
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the InternalArray function.
+  GenerateLoadInternalArrayFunction(masm, a1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ And(t0, a2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for InternalArray function",
+              t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, t0);
+    __ Assert(eq, "Unexpected initial map for InternalArray function",
+              t0, Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  ArrayNativeCode(masm, &generic_array_code);
+
+  // Jump to the generic array code if the specialized code cannot handle the
+  // construction.
+  __ bind(&generic_array_code);
+
+  Handle<Code> array_code =
+      masm->isolate()->builtins()->InternalArrayCodeGeneric();
+  __ Jump(array_code, RelocInfo::CODE_TARGET);
 }
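
The has_non_smi_element path above implements the FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS transition: storing the first non-smi, non-double value forces a map change before the copy restarts, while doubles bail out to the generic builtin. That state machine in miniature, with stand-in enum values:

#include <assert.h>

// Stand-ins for v8's elements kinds; the real enum lives elsewhere.
enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

struct ArrayModel { ElementsKind kind; };

// A non-smi value stored into a smi-only array forces a map transition
// before the store; the double/runtime bailout is not modeled here.
static void StoreElement(ArrayModel* a, bool value_is_smi) {
  if (!value_is_smi && a->kind == FAST_SMI_ONLY_ELEMENTS) {
    a->kind = FAST_ELEMENTS;   // transition, then store as usual
  }
}

int main() {
  ArrayModel a = { FAST_SMI_ONLY_ELEMENTS };
  StoreElement(&a, true);                  // smi: no transition
  assert(a.kind == FAST_SMI_ONLY_ELEMENTS);
  StoreElement(&a, false);                 // first object element
  assert(a.kind == FAST_ELEMENTS);
  return 0;
}
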
 
 
@@ -587,10 +683,11 @@
   __ bind(&convert_argument);
   __ push(function);  // Preserve the function.
   __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
-  __ EnterInternalFrame();
-  __ push(v0);
-  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(v0);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  }
   __ pop(function);
   __ mov(argument, v0);
   __ Branch(&argument_is_string);
@@ -606,15 +703,18 @@
   // create a string wrapper.
   __ bind(&gc_required);
   __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
-  __ EnterInternalFrame();
-  __ push(argument);
-  __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(argument);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
   __ Ret();
 }
 
 
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool count_constructions) {
   // ----------- S t a t e -------------
   //  -- a0     : number of arguments
   //  -- a1     : constructor function
@@ -622,38 +722,6 @@
   //  -- sp[...]: constructor arguments
   // -----------------------------------
 
-  Label non_function_call;
-  // Check that the function is not a smi.
-  __ And(t0, a1, Operand(kSmiTagMask));
-  __ Branch(&non_function_call, eq, t0, Operand(zero_reg));
-  // Check that the function is a JSFunction.
-  __ GetObjectType(a1, a2, a2);
-  __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
-
-  // Jump to the function-specific construct stub.
-  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
-  __ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t9);
-
-  // a0: number of arguments
-  // a1: called object
-  __ bind(&non_function_call);
-  // CALL_NON_FUNCTION expects the non-function constructor as receiver
-  // (instead of the original receiver from the call site). The receiver is
-  // stack element argc.
-  // Set expected number of arguments to zero (not changing a0).
-  __ mov(a2, zero_reg);
-  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-  __ SetCallKind(t1, CALL_AS_METHOD);
-  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool count_constructions) {
   // Should never count constructions for api objects.
   ASSERT(!is_api_function || !count_constructions);
 
@@ -667,331 +735,323 @@
   // -----------------------------------
 
   // Enter a construct frame.
-  __ EnterConstructFrame();
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-  // Preserve the two incoming parameters on the stack.
-  __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
-  __ MultiPushReversed(a0.bit() | a1.bit());
+    // Preserve the two incoming parameters on the stack.
+    __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
+    __ MultiPushReversed(a0.bit() | a1.bit());
 
-  // Use t7 to hold undefined, which is used in several places below.
-  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+    // Use t7 to hold undefined, which is used in several places below.
+    __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
 
-  Label rt_call, allocated;
-  // Try to allocate the object without transitioning into C code. If any of the
-  // preconditions is not met, the code bails out to the runtime call.
-  if (FLAG_inline_new) {
-    Label undo_allocation;
+    Label rt_call, allocated;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    if (FLAG_inline_new) {
+      Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(isolate);
-    __ li(a2, Operand(debug_step_in_fp));
-    __ lw(a2, MemOperand(a2));
-    __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(isolate);
+      __ li(a2, Operand(debug_step_in_fp));
+      __ lw(a2, MemOperand(a2));
+      __ Branch(&rt_call, ne, a2, Operand(zero_reg));
 #endif
 
-    // Load the initial map and verify that it is in fact a map.
-    // a1: constructor function
-    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ And(t0, a2, Operand(kSmiTagMask));
-    __ Branch(&rt_call, eq, t0, Operand(zero_reg));
-    __ GetObjectType(a2, a3, t4);
-    __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+      // Load the initial map and verify that it is in fact a map.
+      // a1: constructor function
+      __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+      __ JumpIfSmi(a2, &rt_call);
+      __ GetObjectType(a2, a3, t4);
+      __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
 
-    // Check that the constructor is not constructing a JSFunction (see comments
-    // in Runtime_NewObject in runtime.cc). In which case the initial map's
-    // instance type would be JS_FUNCTION_TYPE.
-    // a1: constructor function
-    // a2: initial map
-    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
-    __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc). In which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // a1: constructor function
+      // a2: initial map
+      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+      __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
 
-    if (count_constructions) {
-      Label allocate;
-      // Decrease generous allocation count.
-      __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-      MemOperand constructor_count =
-         FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
-      __ lbu(t0, constructor_count);
-      __ Subu(t0, t0, Operand(1));
-      __ sb(t0, constructor_count);
-      __ Branch(&allocate, ne, t0, Operand(zero_reg));
-
-      __ Push(a1, a2);
-
-      __ push(a1);  // Constructor.
-      // The call will replace the stub, so the countdown is only done once.
-      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-      __ pop(a2);
-      __ pop(a1);
-
-      __ bind(&allocate);
-    }
-
-    // Now allocate the JSObject on the heap.
-    // a1: constructor function
-    // a2: initial map
-    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-    __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
-
-    // Allocated the JSObject, now initialize the fields. Map is set to initial
-    // map and properties and elements are set to empty fixed array.
-    // a1: constructor function
-    // a2: initial map
-    // a3: object size
-    // t4: JSObject (not tagged)
-    __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
-    __ mov(t5, t4);
-    __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
-    __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
-    __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
-    __ Addu(t5, t5, Operand(3*kPointerSize));
-    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-    ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
-    ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
-    // Fill all the in-object properties with appropriate filler.
-    // a1: constructor function
-    // a2: initial map
-    // a3: object size (in words)
-    // t4: JSObject (not tagged)
-    // t5: First in-object property of JSObject (not tagged)
-    __ sll(t0, a3, kPointerSizeLog2);
-    __ addu(t6, t4, t0);   // End of object.
-    ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-    { Label loop, entry;
       if (count_constructions) {
+        Label allocate;
+        // Decrease generous allocation count.
+        __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+        MemOperand constructor_count =
+           FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+        __ lbu(t0, constructor_count);
+        __ Subu(t0, t0, Operand(1));
+        __ sb(t0, constructor_count);
+        __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+        __ Push(a1, a2);
+
+        __ push(a1);  // Constructor.
+        // The call will replace the stub, so the countdown is only done once.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ pop(a2);
+        __ pop(a1);
+
+        __ bind(&allocate);
+      }
+
+      // Now allocate the JSObject on the heap.
+      // a1: constructor function
+      // a2: initial map
+      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+      __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+
+      // Allocated the JSObject, now initialize the fields. Map is set to
+      // initial map and properties and elements are set to empty fixed array.
+      // a1: constructor function
+      // a2: initial map
+      // a3: object size
+      // t4: JSObject (not tagged)
+      __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+      __ mov(t5, t4);
+      __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+      __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+      __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+      __ Addu(t5, t5, Operand(3*kPointerSize));
+      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+      ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+      // Fill all the in-object properties with appropriate filler.
+      // a1: constructor function
+      // a2: initial map
+      // a3: object size (in words)
+      // t4: JSObject (not tagged)
+      // t5: First in-object property of JSObject (not tagged)
+      __ sll(t0, a3, kPointerSizeLog2);
+      __ addu(t6, t4, t0);   // End of object.
+      ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+      __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+      if (count_constructions) {
+        __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+        __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+                kBitsPerByte);
+        __ sll(t0, a0, kPointerSizeLog2);
+        __ addu(a0, t5, t0);
+        // a0: offset of first field after pre-allocated fields
+        if (FLAG_debug_code) {
+          __ Assert(le, "Unexpected number of pre-allocated property fields.",
+              a0, Operand(t6));
+        }
+        __ InitializeFieldsWithFiller(t5, a0, t7);
         // To allow for truncation.
         __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
-      } else {
-        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
       }
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ sw(t7, MemOperand(t5, 0));
-      __ addiu(t5, t5, kPointerSize);
-      __ bind(&entry);
-      __ Branch(&loop, Uless, t5, Operand(t6));
+      __ InitializeFieldsWithFiller(t5, t6, t7);
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed. Continue with
+      // allocated object if not; fall through to runtime call if it is.
+      // a1: constructor function
+      // t4: JSObject
+      // t5: start of next object (not tagged)
+      __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+      // The field instance sizes contains both pre-allocated property fields
+      // and in-object properties.
+      __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+      __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+             kBitsPerByte);
+      __ Addu(a3, a3, Operand(t6));
+      __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
+              kBitsPerByte);
+      __ subu(a3, a3, t6);
+
+      // Done if no extra properties are to be allocated.
+      __ Branch(&allocated, eq, a3, Operand(zero_reg));
+      __ Assert(greater_equal, "Property allocation count failed.",
+          a3, Operand(zero_reg));
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // a1: constructor
+      // a3: number of elements in properties array
+      // t4: JSObject
+      // t5: start of next object
+      __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+      __ AllocateInNewSpace(
+          a0,
+          t5,
+          t6,
+          a2,
+          &undo_allocation,
+          static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+      // Initialize the FixedArray.
+      // a1: constructor
+      // a3: number of elements in properties array (untagged)
+      // t4: JSObject
+      // t5: start of next object
+      __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+      __ mov(a2, t5);
+      __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+      __ sll(a0, a3, kSmiTagSize);
+      __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+      __ Addu(a2, a2, Operand(2 * kPointerSize));
+
+      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+      // Initialize the fields to undefined.
+      // a1: constructor
+      // a2: First element of FixedArray (not tagged)
+      // a3: number of elements in properties array
+      // t4: JSObject
+      // t5: FixedArray (not tagged)
+      __ sll(t3, a3, kPointerSizeLog2);
+      __ addu(t6, a2, t3);  // End of object.
+      ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+      { Label loop, entry;
+        if (count_constructions) {
+          __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+        } else if (FLAG_debug_code) {
+          __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+          __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+        }
+        __ jmp(&entry);
+        __ bind(&loop);
+        __ sw(t7, MemOperand(a2));
+        __ addiu(a2, a2, kPointerSize);
+        __ bind(&entry);
+        __ Branch(&loop, less, a2, Operand(t6));
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject.
+      // a1: constructor function
+      // t4: JSObject
+      // t5: FixedArray (not tagged)
+      __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
+      __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+
+      // Continue with JSObject being successfully allocated.
+      // a1: constructor function
+      // t4: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated objects unused properties.
+      // t4: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(t4, t5);
     }
 
-    // Add the object tag to make the JSObject real, so that we can continue and
-    // jump into the continuation code at any time from now on. Any failures
-    // need to undo the allocation, so that the heap is in a consistent state
-    // and verifiable.
-    __ Addu(t4, t4, Operand(kHeapObjectTag));
-
-    // Check if a non-empty properties array is needed. Continue with allocated
-    // object if not fall through to runtime call if it is.
+    __ bind(&rt_call);
+    // Allocate the new receiver object using the runtime call.
     // a1: constructor function
+    __ push(a1);  // Argument for Runtime_NewObject.
+    __ CallRuntime(Runtime::kNewObject, 1);
+    __ mov(t4, v0);
+
+    // Receiver for constructor call allocated.
     // t4: JSObject
-    // t5: start of next object (not tagged)
-    __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
-    // The field instance sizes contains both pre-allocated property fields and
-    // in-object properties.
-    __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
-    __ And(t6,
-           a0,
-           Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
-    __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
-    __ Addu(a3, a3, Operand(t0));
-    __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
-    __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
-    __ subu(a3, a3, t0);
+    __ bind(&allocated);
+    __ push(t4);
+    __ push(t4);
 
-    // Done if no extra properties are to be allocated.
-    __ Branch(&allocated, eq, a3, Operand(zero_reg));
-    __ Assert(greater_equal, "Property allocation count failed.",
-        a3, Operand(zero_reg));
+    // Reload the number of arguments from the stack.
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: constructor function
+    // sp[3]: number of arguments (smi-tagged)
+    __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+    __ lw(a3, MemOperand(sp, 3 * kPointerSize));
 
-    // Scale the number of elements by pointer size and add the header for
-    // FixedArrays to the start of the next object calculation from above.
-    // a1: constructor
-    // a3: number of elements in properties array
-    // t4: JSObject
-    // t5: start of next object
-    __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
-    __ AllocateInNewSpace(
-        a0,
-        t5,
-        t6,
-        a2,
-        &undo_allocation,
-        static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+    // Set up pointer to last argument.
+    __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
 
-    // Initialize the FixedArray.
-    // a1: constructor
-    // a3: number of elements in properties array (un-tagged)
-    // t4: JSObject
-    // t5: start of next object
-    __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
-    __ mov(a2, t5);
-    __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
-    __ sll(a0, a3, kSmiTagSize);
-    __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
-    __ Addu(a2, a2, Operand(2 * kPointerSize));
+    // Set up number of arguments for function call below.
+    __ srl(a0, a3, kSmiTagSize);
 
-    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-    ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+    // Copy arguments and receiver to the expression stack.
+    // a0: number of arguments
+    // a1: constructor function
+    // a2: address of last argument (caller sp)
+    // a3: number of arguments (smi-tagged)
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: constructor function
+    // sp[3]: number of arguments (smi-tagged)
+    Label loop, entry;
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(t0, a2, Operand(t0));
+    __ lw(t1, MemOperand(t0));
+    __ push(t1);
+    __ bind(&entry);
+    __ Addu(a3, a3, Operand(-2));
+    __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
 
-    // Initialize the fields to undefined.
-    // a1: constructor
-    // a2: First element of FixedArray (not tagged)
-    // a3: number of elements in properties array
-    // t4: JSObject
-    // t5: FixedArray (not tagged)
-    __ sll(t3, a3, kPointerSizeLog2);
-    __ addu(t6, a2, t3);  // End of object.
-    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-    { Label loop, entry;
-      if (count_constructions) {
-        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-      } else if (FLAG_debug_code) {
-        __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
-        __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
-      }
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ sw(t7, MemOperand(a2));
-      __ addiu(a2, a2, kPointerSize);
-      __ bind(&entry);
-      __ Branch(&loop, less, a2, Operand(t6));
+    // Call the function.
+    // a0: number of arguments
+    // a1: constructor function
+    if (is_api_function) {
+      __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      ParameterCount expected(0);
+      __ InvokeCode(code, expected, expected,
+                    RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+    } else {
+      ParameterCount actual(a0);
+      __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
     }
 
-    // Store the initialized FixedArray into the properties field of
-    // the JSObject.
-    // a1: constructor function
-    // t4: JSObject
-    // t5: FixedArray (not tagged)
-    __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
-    __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+    // Store offset of return address for deoptimizer.
+    if (!is_api_function && !count_constructions) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
 
-    // Continue with JSObject being successfully allocated.
-    // a1: constructor function
-    // a4: JSObject
-    __ jmp(&allocated);
+    // Restore context from the frame.
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 
-    // Undo the setting of the new top so that the heap is verifiable. For
-    // example, the map's unused properties potentially do not match the
-    // allocated objects unused properties.
-    // t4: JSObject (previous new top)
-    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(t4, t5);
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    // v0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ JumpIfSmi(v0, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    __ GetObjectType(v0, a3, a3);
+    __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ lw(v0, MemOperand(sp));
+
+    // Remove receiver from the stack, remove caller arguments, and
+    // return.
+    __ bind(&exit);
+    // v0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+
+    // Leave construct frame.
   }
 
-  __ bind(&rt_call);
-  // Allocate the new receiver object using the runtime call.
-  // a1: constructor function
-  __ push(a1);  // Argument for Runtime_NewObject.
-  __ CallRuntime(Runtime::kNewObject, 1);
-  __ mov(t4, v0);
-
-  // Receiver for constructor call allocated.
-  // t4: JSObject
-  __ bind(&allocated);
-  __ push(t4);
-
-  // Push the function and the allocated receiver from the stack.
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ lw(a1, MemOperand(sp, kPointerSize));
-  __ MultiPushReversed(a1.bit() | t4.bit());
-
-  // Reload the number of arguments from the stack.
-  // a1: constructor function
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  __ lw(a3, MemOperand(sp, 4 * kPointerSize));
-
-  // Setup pointer to last argument.
-  __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // Setup number of arguments for function call below.
-  __ srl(a0, a3, kSmiTagSize);
-
-  // Copy arguments and receiver to the expression stack.
-  // a0: number of arguments
-  // a1: constructor function
-  // a2: address of last argument (caller sp)
-  // a3: number of arguments (smi-tagged)
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  Label loop, entry;
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, a2, Operand(t0));
-  __ lw(t1, MemOperand(t0));
-  __ push(t1);
-  __ bind(&entry);
-  __ Addu(a3, a3, Operand(-2));
-  __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
-
-  // Call the function.
-  // a0: number of arguments
-  // a1: constructor function
-  if (is_api_function) {
-    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-    Handle<Code> code =
-        masm->isolate()->builtins()->HandleApiCallConstruct();
-    ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected,
-                  RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
-  } else {
-    ParameterCount actual(a0);
-    __ InvokeFunction(a1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Pop the function from the stack.
-  // v0: result
-  // sp[0]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  __ Pop();
-
-  // Restore context from the frame.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  // If the result is an object (in the ECMA sense), we should get rid
-  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-  // on page 74.
-  Label use_receiver, exit;
-
-  // If the result is a smi, it is *not* an object in the ECMA sense.
-  // v0: result
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ And(t0, v0, Operand(kSmiTagMask));
-  __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
-
-  // If the type of the result (stored in its map) is less than
-  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ GetObjectType(v0, a3, a3);
-  __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
-  // Throw away the result of the constructor invocation and use the
-  // on-stack receiver as the result.
-  __ bind(&use_receiver);
-  __ lw(v0, MemOperand(sp));
-
-  // Remove receiver from the stack, remove caller arguments, and
-  // return.
-  __ bind(&exit);
-  // v0: result
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ lw(a1, MemOperand(sp, 2 * kPointerSize));
-  __ LeaveConstructFrame();
   __ sll(t0, a1, kPointerSizeLog2 - 1);
   __ Addu(sp, sp, t0);
   __ Addu(sp, sp, kPointerSize);
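
Throughout these builtins, explicit EnterConstructFrame/LeaveConstructFrame pairs (and the internal-frame variants) become a block-scoped FrameScope, so frame teardown is tied to scope exit and cannot be skipped or duplicated on an early path. A generic RAII sketch of the idea, with assumed names:

#include <stdio.h>

// Minimal stand-in for the MacroAssembler's frame bookkeeping.
struct Masm {
  void EnterFrame(const char* kind) { printf("enter %s\n", kind); }
  void LeaveFrame(const char* kind) { printf("leave %s\n", kind); }
};

// RAII frame: the constructor enters and the destructor leaves.
class FrameScope {
 public:
  FrameScope(Masm* masm, const char* kind) : masm_(masm), kind_(kind) {
    masm_->EnterFrame(kind_);
  }
  ~FrameScope() { masm_->LeaveFrame(kind_); }
 private:
  Masm* masm_;
  const char* kind_;
};

int main() {
  Masm masm;
  {
    FrameScope scope(&masm, "CONSTRUCT");
    // ... body of the construct stub would be emitted here ...
  }  // frame left exactly once, at scope exit
  return 0;
}
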
@@ -1022,7 +1082,7 @@
   // ----------- S t a t e -------------
   //  -- a0: code entry
   //  -- a1: function
-  //  -- a2: reveiver_pointer
+  //  -- a2: receiver_pointer
   //  -- a3: argc
   //  -- s0: argv
   // -----------------------------------
@@ -1031,59 +1091,57 @@
   __ mov(cp, zero_reg);
 
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Set up the context from the function argument.
-  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    // Set up the context from the function argument.
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
-  // Set up the roots register.
-  ExternalReference roots_address =
-      ExternalReference::roots_address(masm->isolate());
-  __ li(s6, Operand(roots_address));
+    // Push the function and the receiver onto the stack.
+    __ Push(a1, a2);
 
-  // Push the function and the receiver onto the stack.
-  __ Push(a1, a2);
+    // Copy arguments to the stack in a loop.
+    // a3: argc
+    // s0: argv, i.e. points to first arg
+    Label loop, entry;
+    __ sll(t0, a3, kPointerSizeLog2);
+    __ addu(t2, s0, t0);
+    __ b(&entry);
+    __ nop();   // Branch delay slot nop.
+    // t2 points past last arg.
+    __ bind(&loop);
+    __ lw(t0, MemOperand(s0));  // Read next parameter.
+    __ addiu(s0, s0, kPointerSize);
+    __ lw(t0, MemOperand(t0));  // Dereference handle.
+    __ push(t0);  // Push parameter.
+    __ bind(&entry);
+    __ Branch(&loop, ne, s0, Operand(t2));
 
-  // Copy arguments to the stack in a loop.
-  // a3: argc
-  // s0: argv, ie points to first arg
-  Label loop, entry;
-  __ sll(t0, a3, kPointerSizeLog2);
-  __ addu(t2, s0, t0);
-  __ b(&entry);
-  __ nop();   // Branch delay slot nop.
-  // t2 points past last arg.
-  __ bind(&loop);
-  __ lw(t0, MemOperand(s0));  // Read next parameter.
-  __ addiu(s0, s0, kPointerSize);
-  __ lw(t0, MemOperand(t0));  // Dereference handle.
-  __ push(t0);  // Push parameter.
-  __ bind(&entry);
-  __ Branch(&loop, ne, s0, Operand(t2));
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+    __ mov(s1, t0);
+    __ mov(s2, t0);
+    __ mov(s3, t0);
+    __ mov(s4, t0);
+    __ mov(s5, t0);
+    // s6 holds the root address. Do not clobber.
+    // s7 is cp. Do not init.
 
-  // Initialize all JavaScript callee-saved registers, since they will be seen
-  // by the garbage collector as part of handlers.
-  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
-  __ mov(s1, t0);
-  __ mov(s2, t0);
-  __ mov(s3, t0);
-  __ mov(s4, t0);
-  __ mov(s5, t0);
-  // s6 holds the root address. Do not clobber.
-  // s7 is cp. Do not init.
+    // Invoke the code and pass argc as a0.
+    __ mov(a0, a3);
+    if (is_construct) {
+      CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+      __ CallStub(&stub);
+    } else {
+      ParameterCount actual(a0);
+      __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
 
-  // Invoke the code and pass argc as a0.
-  __ mov(a0, a3);
-  if (is_construct) {
-    __ Call(masm->isolate()->builtins()->JSConstructCall());
-  } else {
-    ParameterCount actual(a0);
-    __ InvokeFunction(a1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+    // Leave internal frame.
   }
 
-  __ LeaveInternalFrame();
-
   __ Jump(ra);
 }
 
@@ -1100,27 +1158,28 @@
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Preserve the function.
-  __ push(a1);
-  // Push call kind information.
-  __ push(t1);
+    // Preserve the function.
+    __ push(a1);
+    // Push call kind information.
+    __ push(t1);
 
-  // Push the function on the stack as the argument to the runtime function.
-  __ push(a1);
-  // Call the runtime function.
-  __ CallRuntime(Runtime::kLazyCompile, 1);
-  // Calculate the entry point.
-  __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+    // Push the function on the stack as the argument to the runtime function.
+    __ push(a1);
+    // Call the runtime function.
+    __ CallRuntime(Runtime::kLazyCompile, 1);
+    // Calculate the entry point.
+    __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
 
-  // Restore call kind information.
-  __ pop(t1);
-  // Restore saved function.
-  __ pop(a1);
+    // Restore call kind information.
+    __ pop(t1);
+    // Restore saved function.
+    __ pop(a1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down temporary frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ Jump(t9);
@@ -1129,50 +1188,120 @@
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Preserve the function.
-  __ push(a1);
-  // Push call kind information.
-  __ push(t1);
+    // Preserve the function.
+    __ push(a1);
+    // Push call kind information.
+    __ push(t1);
 
-  // Push the function on the stack as the argument to the runtime function.
-  __ push(a1);
-  __ CallRuntime(Runtime::kLazyRecompile, 1);
-  // Calculate the entry point.
-  __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Push the function on the stack as the argument to the runtime function.
+    __ push(a1);
+    __ CallRuntime(Runtime::kLazyRecompile, 1);
+    // Calculate the entry point.
+    __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-  // Restore call kind information.
-  __ pop(t1);
-  // Restore saved function.
-  __ pop(a1);
+    // Restore call kind information.
+    __ pop(t1);
+    // Restore saved function.
+    __ pop(a1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down temporary frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ Jump(t9);
 }
 
 
-// These functions are called from C++ but cannot be used in live code.
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(a0);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  }
+
+  // Get the full codegen state from the stack and untag it -> t2.
+  __ lw(t2, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(t2);
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
+  __ Branch(&with_tos_register,
+            ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&with_tos_register);
+  __ lw(v0, MemOperand(sp, 1 * kPointerSize));
+  __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+
+  __ Addu(sp, sp, Operand(2 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
+
+
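
Generate_NotifyDeoptimizedHelper pops the full-codegen state word that the deoptimizer left on the stack and, for TOS_REG, also reloads the saved top-of-stack value into v0. A plain-C++ model of that epilogue, assuming the two state values shown are the only legal ones (anything else hits the stop()):

    #include <cstdint>
    #include <cstdlib>

    enum State : uint32_t { NO_REGISTERS = 0, TOS_REG = 1 };  // Assumed values.

    // Returns the adjusted stack pointer, mirroring the two Addu(sp, ...) paths.
    uint32_t* NotifyDeoptimizedEpilogue(uint32_t* sp, uint32_t* v0) {
      switch (sp[0]) {     // State word, already untagged (SmiUntag above).
        case NO_REGISTERS:
          return sp + 1;   // Remove state.
        case TOS_REG:
          *v0 = sp[1];     // Reload the saved top-of-stack value.
          return sp + 2;   // Remove state and value.
        default:
          abort();         // "no cases left"
      }
    }
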
 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
-  __ Abort("Call to unimplemented function in builtins-mips.cc");
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
 }
 
 
 void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
-  __ Abort("Call to unimplemented function in builtins-mips.cc");
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
 }
 
 
 void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
-  __ Abort("Call to unimplemented function in builtins-mips.cc");
+  // For now, we are relying on the fact that Runtime::NotifyOSR
+  // doesn't do any garbage collection, which allows us to save/restore
+  // the registers without worrying about which of them contain
+  // pointers. This seems a bit fragile.
+  RegList saved_regs =
+      (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
+  __ MultiPush(saved_regs);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyOSR, 0);
+  }
+  __ MultiPop(saved_regs);
+  __ Ret();
 }
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  __ Abort("Call to unimplemented function in builtins-mips.cc");
+  CpuFeatures::TryForceFeatureScope scope(FPU);
+  if (!CpuFeatures::IsSupported(FPU)) {
+    __ Abort("Unreachable code: Cannot optimize without FPU support.");
+    return;
+  }
+
+  // Lookup the function in the JavaScript frame and push it as an
+  // argument to the on-stack replacement function.
+  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(a0);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
+
+  // If the result was -1 it means that we couldn't optimize the
+  // function. Just return and continue in the unoptimized version.
+  __ Ret(eq, v0, Operand(Smi::FromInt(-1)));
+
+  // Untag the AST id and push it on the stack.
+  __ SmiUntag(v0);
+  __ push(v0);
+
+  // Generate the code for doing the frame-to-frame translation using
+  // the deoptimizer infrastructure.
+  Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+  generator.Generate();
 }
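
The sequence relies on this contract: Runtime::kCompileForOnStackReplacement returns Smi(-1) when the function could not be optimized, and otherwise a Smi-tagged AST id that is untagged and handed to the deoptimizer's OSR entry generator. A hedged C++ model of the decision:

    #include <cstdint>

    // 32-bit smi tagging: payload in the upper 31 bits, tag bit zero.
    constexpr int32_t SmiTag(int32_t value) { return value * 2; }
    constexpr int32_t SmiUntag(int32_t value) { return value >> 1; }

    bool TryEnterOptimizedCode(int32_t runtime_result, int32_t* ast_id) {
      if (runtime_result == SmiTag(-1)) return false;  // Resume unoptimized code.
      *ast_id = SmiUntag(runtime_result);  // Fed to Deoptimizer::EntryGenerator.
      return true;
    }
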
 
 
@@ -1190,19 +1319,19 @@
   // 2. Get the function to call (passed as receiver) from the stack, check
   //    if it is a function.
   // a0: actual number of arguments
-  Label non_function;
+  Label slow, non_function;
   __ sll(at, a0, kPointerSizeLog2);
   __ addu(at, sp, at);
   __ lw(a1, MemOperand(at));
-  __ And(at, a1, Operand(kSmiTagMask));
-  __ Branch(&non_function, eq, at, Operand(zero_reg));
+  __ JumpIfSmi(a1, &non_function);
   __ GetObjectType(a1, a2, a2);
-  __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
+  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
 
   // 3a. Patch the first argument if necessary when calling a function.
   // a0: actual number of arguments
   // a1: function
   Label shift_arguments;
+  __ li(t0, Operand(0, RelocInfo::NONE));  // Indicate regular JS_FUNCTION.
   { Label convert_to_object, use_global_receiver, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -1210,13 +1339,13 @@
     // Do not transform the receiver for strict mode functions.
     __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
     __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
-    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+    __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
                                  kSmiTagSize)));
-    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
 
     // Do not transform the receiver for native (Compilerhints already in a3).
-    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+    __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
 
     // Compute the receiver in non-strict mode.
     // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
@@ -1238,21 +1367,25 @@
     __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
 
     __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
-    __ push(a0);
+    // Enter an internal frame in order to preserve argument count.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
+      __ push(a0);
 
-    __ push(a2);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(a2, v0);
+      __ push(a2);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ mov(a2, v0);
 
-    __ pop(a0);
-    __ sra(a0, a0, kSmiTagSize);  // Un-tag.
-    __ LeaveInternalFrame();
-    // Restore the function to a1.
+      __ pop(a0);
+      __ sra(a0, a0, kSmiTagSize);  // Un-tag.
+      // Leave internal frame.
+    }
+    // Restore the function to a1, and the flag to t0.
     __ sll(at, a0, kPointerSizeLog2);
     __ addu(at, sp, at);
     __ lw(a1, MemOperand(at));
+    __ li(t0, Operand(0, RelocInfo::NONE));
     __ Branch(&patch_receiver);
 
     // Use the global receiver object from the called function as the
@@ -1273,25 +1406,31 @@
     __ Branch(&shift_arguments);
   }
 
-  // 3b. Patch the first argument when calling a non-function.  The
+  // 3b. Check for function proxy.
+  __ bind(&slow);
+  __ li(t0, Operand(1, RelocInfo::NONE));  // Indicate function proxy.
+  __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+
+  __ bind(&non_function);
+  __ li(t0, Operand(2, RelocInfo::NONE));  // Indicate non-function.
+
+  // 3c. Patch the first argument when calling a non-function.  The
   //     CALL_NON_FUNCTION builtin expects the non-function callee as
   //     receiver, so overwrite the first argument which will ultimately
   //     become the receiver.
   // a0: actual number of arguments
   // a1: function
-  __ bind(&non_function);
-  // Restore the function in case it has been modified.
+  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
   __ sll(at, a0, kPointerSizeLog2);
   __ addu(a2, sp, at);
   __ sw(a1, MemOperand(a2, -kPointerSize));
-  // Clear a1 to indicate a non-function being called.
-  __ mov(a1, zero_reg);
 
   // 4. Shift arguments and return address one slot down on the stack
   //    (overwriting the original receiver).  Adjust argument count to make
   //    the original first argument the new receiver.
   // a0: actual number of arguments
   // a1: function
+  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
   __ bind(&shift_arguments);
   { Label loop;
     // Calculate the copy start address (destination). Copy end address is sp.
@@ -1309,14 +1448,26 @@
     __ Pop();
   }
 
-  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+  //     or a function proxy via CALL_FUNCTION_PROXY.
   // a0: actual number of arguments
   // a1: function
-  { Label function;
-    __ Branch(&function, ne, a1, Operand(zero_reg));
-    __ mov(a2, zero_reg);  // expected arguments is 0 for CALL_NON_FUNCTION
-    __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
+  { Label function, non_proxy;
+    __ Branch(&function, eq, t0, Operand(zero_reg));
+    // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+    __ mov(a2, zero_reg);
     __ SetCallKind(t1, CALL_AS_METHOD);
+    __ Branch(&non_proxy, ne, t0, Operand(1));
+
+    __ push(a1);  // Re-add proxy object as additional argument.
+    __ Addu(a0, a0, Operand(1));
+    __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+
+    __ bind(&non_proxy);
+    __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
     __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
             RelocInfo::CODE_TARGET);
     __ bind(&function);
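
Where the old code signalled "non-function" by clearing a1, the rewritten path threads an explicit call type through t0 and keeps a1 intact so a proxy can be re-pushed as an argument. The three t0 values, mirrored as an enum for reference:

    // Values taken from the comments above; the register protocol itself is
    // the stub's own convention.
    enum CallType {
      kJSFunction    = 0,  // Invoke directly.
      kFunctionProxy = 1,  // Re-push the proxy, tail-call CALL_FUNCTION_PROXY.
      kNonFunction   = 2   // Patch the receiver, tail-call CALL_NON_FUNCTION.
    };
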
@@ -1350,134 +1501,158 @@
   const int kRecvOffset     =  3 * kPointerSize;
   const int kFunctionOffset =  4 * kPointerSize;
 
-  __ EnterInternalFrame();
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
+    __ push(a0);
+    __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
+    __ push(a0);
+    // Returns (in v0) number of arguments to copy to stack as Smi.
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
-  __ push(a0);
-  __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
-  __ push(a0);
-  // Returns (in v0) number of arguments to copy to stack as Smi.
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+    // Make a2 the space we have left. The stack might already be overflowed
+    // here which will cause a2 to become negative.
+    __ subu(a2, sp, a2);
+    // Check if the arguments will overflow the stack.
+    __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize);
+    __ Branch(&okay, gt, a2, Operand(t3));  // Signed comparison.
 
-  // Check the stack for overflow. We are not trying need to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
-  // Make a2 the space we have left. The stack might already be overflowed
-  // here which will cause a2 to become negative.
-  __ subu(a2, sp, a2);
-  // Check if the arguments will overflow the stack.
-  __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
-  __ Branch(&okay, gt, a2, Operand(t0));  // Signed comparison.
+    // Out of stack space.
+    __ lw(a1, MemOperand(fp, kFunctionOffset));
+    __ push(a1);
+    __ push(v0);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    // End of stack check.
 
-  // Out of stack space.
-  __ lw(a1, MemOperand(fp, kFunctionOffset));
-  __ push(a1);
-  __ push(v0);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  // End of stack check.
+    // Push current limit and index.
+    __ bind(&okay);
+    __ push(v0);  // Limit.
+    __ mov(a1, zero_reg);  // Initial index.
+    __ push(a1);
 
-  // Push current limit and index.
-  __ bind(&okay);
-  __ push(v0);  // Limit.
-  __ mov(a1, zero_reg);  // Initial index.
-  __ push(a1);
+    // Get the receiver.
+    __ lw(a0, MemOperand(fp, kRecvOffset));
 
-  // Change context eagerly to get the right global object if necessary.
-  __ lw(a0, MemOperand(fp, kFunctionOffset));
-  __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
-  // Load the shared function info while the function is still in a0.
-  __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ lw(a1, MemOperand(fp, kFunctionOffset));
+    __ GetObjectType(a1, a2, a2);
+    __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
 
-  // Compute the receiver.
-  Label call_to_object, use_global_receiver, push_receiver;
-  __ lw(a0, MemOperand(fp, kRecvOffset));
+    // Change context eagerly to get the right global object if necessary.
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    // Load the shared function info while the function is still in a1.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
 
-  // Do not transform the receiver for strict mode functions.
-  __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
-  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                               kSmiTagSize)));
-  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+    // Compute the receiver.
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_receiver;
+    __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+    __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                                 kSmiTagSize)));
+    __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
 
-  // Do not transform the receiver for native (Compilerhints already in a2).
-  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+    // Do not transform the receiver for native (Compilerhints already in a2).
+    __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
 
-  // Compute the receiver in non-strict mode.
-  __ And(t0, a0, Operand(kSmiTagMask));
-  __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
-  __ LoadRoot(a1, Heap::kNullValueRootIndex);
-  __ Branch(&use_global_receiver, eq, a0, Operand(a1));
-  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-  __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+    // Compute the receiver in non-strict mode.
+    __ JumpIfSmi(a0, &call_to_object);
+    __ LoadRoot(a1, Heap::kNullValueRootIndex);
+    __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ Branch(&use_global_receiver, eq, a0, Operand(a2));
 
-  // Check if the receiver is already a JavaScript object.
-  // a0: receiver
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ GetObjectType(a0, a1, a1);
-  __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+    // Check if the receiver is already a JavaScript object.
+    // a0: receiver
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ GetObjectType(a0, a1, a1);
+    __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-  // Convert the receiver to a regular object.
-  // a0: receiver
-  __ bind(&call_to_object);
-  __ push(a0);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
-  __ Branch(&push_receiver);
+    // Convert the receiver to a regular object.
+    // a0: receiver
+    __ bind(&call_to_object);
+    __ push(a0);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
+    __ Branch(&push_receiver);
 
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
-  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
-  __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
-  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+    // Use the current global receiver object as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalOffset =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+    __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
 
-  // Push the receiver.
-  // a0: receiver
-  __ bind(&push_receiver);
-  __ push(a0);
+    // Push the receiver.
+    // a0: receiver
+    __ bind(&push_receiver);
+    __ push(a0);
 
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ lw(a0, MemOperand(fp, kIndexOffset));
-  __ Branch(&entry);
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ lw(a0, MemOperand(fp, kIndexOffset));
+    __ Branch(&entry);
 
-  // Load the current argument from the arguments array and push it to the
-  // stack.
-  // a0: current argument index
-  __ bind(&loop);
-  __ lw(a1, MemOperand(fp, kArgsOffset));
-  __ push(a1);
-  __ push(a0);
+    // Load the current argument from the arguments array and push it to the
+    // stack.
+    // a0: current argument index
+    __ bind(&loop);
+    __ lw(a1, MemOperand(fp, kArgsOffset));
+    __ push(a1);
+    __ push(a0);
 
-  // Call the runtime to access the property in the arguments array.
-  __ CallRuntime(Runtime::kGetProperty, 2);
-  __ push(v0);
+    // Call the runtime to access the property in the arguments array.
+    __ CallRuntime(Runtime::kGetProperty, 2);
+    __ push(v0);
 
-  // Use inline caching to access the arguments.
-  __ lw(a0, MemOperand(fp, kIndexOffset));
-  __ Addu(a0, a0, Operand(1 << kSmiTagSize));
-  __ sw(a0, MemOperand(fp, kIndexOffset));
+    // Use inline caching to access the arguments.
+    __ lw(a0, MemOperand(fp, kIndexOffset));
+    __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+    __ sw(a0, MemOperand(fp, kIndexOffset));
 
-  // Test if the copy loop has finished copying all the elements from the
-  // arguments object.
-  __ bind(&entry);
-  __ lw(a1, MemOperand(fp, kLimitOffset));
-  __ Branch(&loop, ne, a0, Operand(a1));
-  // Invoke the function.
-  ParameterCount actual(a0);
-  __ sra(a0, a0, kSmiTagSize);
-  __ lw(a1, MemOperand(fp, kFunctionOffset));
-  __ InvokeFunction(a1, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+    // Test if the copy loop has finished copying all the elements from the
+    // arguments object.
+    __ bind(&entry);
+    __ lw(a1, MemOperand(fp, kLimitOffset));
+    __ Branch(&loop, ne, a0, Operand(a1));
 
-  // Tear down the internal frame and remove function, receiver and args.
-  __ LeaveInternalFrame();
-  __ Addu(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
+    // Invoke the function.
+    Label call_proxy;
+    ParameterCount actual(a0);
+    __ sra(a0, a0, kSmiTagSize);
+    __ lw(a1, MemOperand(fp, kFunctionOffset));
+    __ GetObjectType(a1, a2, a2);
+    __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+    __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
+
+    frame_scope.GenerateLeaveFrame();
+    __ Ret(USE_DELAY_SLOT);
+    __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
+
+    // Invoke the function proxy.
+    __ bind(&call_proxy);
+    __ push(a1);  // Add function proxy as last argument.
+    __ Addu(a0, a0, Operand(1));
+    __ li(a2, Operand(0, RelocInfo::NONE));
+    __ SetCallKind(t1, CALL_AS_METHOD);
+    __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+    // Tear down the internal frame and remove function, receiver and args.
+  }
+
+  __ Ret(USE_DELAY_SLOT);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
 }
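
The overflow check at the top of the frame scope compares the remaining stack (sp minus the real limit) against the byte size of the arguments using a signed comparison, so an already-overflowed (negative) remainder fails for any positive argument count. The same arithmetic in standalone form, assuming MIPS32 constants (kPointerSize = 4, kSmiTagSize = 1, which makes the sll equivalent to argc_smi * 2):

    #include <cstdint>

    bool ArgumentsFitOnStack(intptr_t sp, intptr_t real_limit, int32_t argc_smi) {
      intptr_t space = sp - real_limit;                     // Negative if overflowed.
      intptr_t need = static_cast<intptr_t>(argc_smi) * 2;  // argc * kPointerSize.
      return space > need;                                  // Signed, as in the Branch.
    }
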
 
 
@@ -1560,8 +1735,6 @@
     __ bind(&too_few);
     EnterArgumentsAdaptorFrame(masm);
 
-    // TODO(MIPS): Optimize these loops.
-
     // Calculate copy start address into a0 and copy end address is fp.
     // a0: actual number of arguments as a smi
     // a1: function
@@ -1583,9 +1756,10 @@
     Label copy;
     __ bind(&copy);
     __ lw(t0, MemOperand(a0));  // Adjusted above for return addr and receiver.
-    __ push(t0);
+    __ Subu(sp, sp, kPointerSize);
     __ Subu(a0, a0, kPointerSize);
-    __ Branch(&copy, ne, a0, Operand(t3));
+    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t3));
+    __ sw(t0, MemOperand(sp));  // In the delay slot.
 
     // Fill the remaining expected arguments with undefined.
     // a1: function
@@ -1598,8 +1772,9 @@
 
     Label fill;
     __ bind(&fill);
-    __ push(t0);
-    __ Branch(&fill, ne, sp, Operand(a2));
+    __ Subu(sp, sp, kPointerSize);
+    __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2));
+    __ sw(t0, MemOperand(sp));  // In the delay slot.
   }
 
   // Call the entry point.
@@ -1607,6 +1782,9 @@
 
   __ Call(a3);
 
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
   // Exit frame and return.
   LeaveArgumentsAdaptorFrame(masm);
   __ Ret();
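
The adaptor's copy and fill loops are reworked to keep the branch delay slot busy: instead of push() (an sp adjustment plus a store), sp is decremented up front and the store is issued in the delay slot of the loop branch, where it executes on both the taken and fall-through iterations. A C-style model of the rewritten copy loop:

    #include <cstdint>

    // Register names in the comments refer to the listing above.
    void CopyLoop(uint32_t*& sp, uint32_t* a0, uint32_t* t3) {
      do {
        uint32_t t0 = *a0;   // __ lw(t0, MemOperand(a0));
        sp -= 1;             // __ Subu(sp, sp, kPointerSize);
        a0 -= 1;             // __ Subu(a0, a0, kPointerSize);
        *sp = t0;            // __ sw(t0, MemOperand(sp));  // Delay slot.
      } while (a0 != t3);    // __ Branch(USE_DELAY_SLOT, &copy, ne, a0, t3);
    }
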
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index c3c3874..1b3242c 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -70,13 +70,13 @@
   // The ToNumber stub takes one argument in a0.
   Label check_heap_number, call_builtin;
   __ JumpIfNotSmi(a0, &check_heap_number);
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
-  __ Ret();
 
   __ bind(&check_heap_number);
   EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
-  __ Ret();
 
   __ bind(&call_builtin);
   __ push(a0);
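
These two rewrites introduce the Ret(USE_DELAY_SLOT) idiom used throughout the rest of the file: the return is emitted first and the following instruction fills its delay slot, which is safe whenever that instruction is a simple move with no control-flow or ordering dependency. A toy model of why the source order looks inverted:

    #include <cstdint>

    struct Cpu { uint32_t v0, a0, ra, pc; };

    // Model of
    //   __ Ret(USE_DELAY_SLOT);
    //   __ mov(v0, a0);
    // The slot instruction still executes before control transfers, so the
    // stub returns with v0 already holding a0.
    void RetWithDelaySlot(Cpu& cpu) {
      uint32_t target = cpu.ra;  // jr ra issues...
      cpu.v0 = cpu.a0;           // ...the delay-slot move runs...
      cpu.pc = target;           // ...then the jump completes.
    }
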
@@ -100,9 +100,9 @@
                         &gc,
                         TAG_OBJECT);
 
-  int map_index = strict_mode_ == kStrictMode
-      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
-      : Context::FUNCTION_MAP_INDEX;
+  int map_index = (language_mode_ == CLASSIC_MODE)
+      ? Context::FUNCTION_MAP_INDEX
+      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
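
The map selection now keys off a three-valued language mode rather than the old boolean strict_mode_, with both stricter modes sharing the strict-mode function map. A sketch of the assumed mapping:

    // Assumed mode enum; only the CLASSIC_MODE test matters to this stub.
    enum LanguageMode { CLASSIC_MODE, STRICT_MODE, EXTENDED_MODE };

    int FunctionMapIndex(LanguageMode mode, int classic_index, int strict_index) {
      return (mode == CLASSIC_MODE) ? classic_index : strict_index;
    }
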
@@ -128,9 +128,9 @@
   // found in the shared function info object.
   __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
   __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
 
   // Return result. The argument function info has been popped already.
+  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
   __ Ret();
 
   // Create a new closure through the slower runtime call.
@@ -157,21 +157,19 @@
   // Load the function from the stack.
   __ lw(a3, MemOperand(sp, 0));
 
-  // Setup the object header.
-  __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
-  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  // Set up the object header.
+  __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
   __ li(a2, Operand(Smi::FromInt(length)));
   __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+  __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
 
-  // Setup the fixed slots.
+  // Set up the fixed slots, copy the global object from the previous context.
+  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ li(a1, Operand(Smi::FromInt(0)));
   __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
   __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-
-  // Copy the global object from the previous context.
-  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
 
   // Initialize the rest of the slots to undefined.
   __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
@@ -181,8 +179,7 @@
 
   // Remove the on-stack argument and return.
   __ mov(cp, v0);
-  __ Pop();
-  __ Ret();
+  __ DropAndRet(1);
 
   // Need to collect. Call into runtime system.
   __ bind(&gc);
@@ -190,16 +187,123 @@
 }
 
 
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [sp]: function.
+  // [sp + kPointerSize]: serialized scope info
+
+  // Try to allocate the context in new space.
+  Label gc;
+  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+  __ AllocateInNewSpace(FixedArray::SizeFor(length),
+                        v0, a1, a2, &gc, TAG_OBJECT);
+
+  // Load the function from the stack.
+  __ lw(a3, MemOperand(sp, 0));
+
+  // Load the serialized scope info from the stack.
+  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+  // Set up the object header.
+  __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
+  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ li(a2, Operand(Smi::FromInt(length)));
+  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+
+  // If this block context is nested in the global context we get a smi
+  // sentinel instead of a function. The block context should get the
+  // canonical empty function of the global context as its closure which
+  // we still have to look up.
+  Label after_sentinel;
+  __ JumpIfNotSmi(a3, &after_sentinel);
+  if (FLAG_debug_code) {
+    const char* message = "Expected 0 as a Smi sentinel";
+    __ Assert(eq, message, a3, Operand(zero_reg));
+  }
+  __ lw(a3, GlobalObjectOperand());
+  __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+  __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
+  __ bind(&after_sentinel);
+
+  // Set up the fixed slots, copy the global object from the previous context.
+  __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
+  __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
+  __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
+  __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));
+
+  // Initialize the rest of the slots to the hole value.
+  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
+  for (int i = 0; i < slots_; i++) {
+    __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
+  }
+
+  // Remove the on-stack argument and return.
+  __ mov(cp, v0);
+  __ DropAndRet(2);
+
+  // Need to collect. Call into runtime system.
+  __ bind(&gc);
+  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
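
For reference, the layout the stub writes, reading off the stores above (slot names are Context indices; the MIN_CONTEXT_SLOTS fixed slots precede the user slots):

    // map             = block context map
    // length          = slots_ + MIN_CONTEXT_SLOTS
    // CLOSURE_INDEX   = function, or the global context's canonical empty
    //                   function when the Smi 0 sentinel was pushed
    // PREVIOUS_INDEX  = cp (the context being extended)
    // EXTENSION_INDEX = serialized scope info
    // GLOBAL_INDEX    = copied from the previous context
    // user slots      = the-hole, slots_ of them
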
+
+
+static void GenerateFastCloneShallowArrayCommon(
+    MacroAssembler* masm,
+    int length,
+    FastCloneShallowArrayStub::Mode mode,
+    Label* fail) {
+  // Registers on entry:
+  // a3: boilerplate literal array.
+  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = 0;
+  if (length > 0) {
+    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        ? FixedDoubleArray::SizeFor(length)
+        : FixedArray::SizeFor(length);
+  }
+  int size = JSArray::kSize + elements_size;
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size,
+                        v0,
+                        a1,
+                        a2,
+                        fail,
+                        TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
+      __ lw(a1, FieldMemOperand(a3, i));
+      __ sw(a1, FieldMemOperand(v0, i));
+    }
+  }
+
+  if (length > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ Addu(a2, v0, Operand(JSArray::kSize));
+    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
+
+    // Copy the elements array.
+    ASSERT((elements_size % kPointerSize) == 0);
+    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+  }
+}
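
The helper does one AllocateInNewSpace call for the header plus the elements, so a single new-space limit check suffices, and the elements array then sits at a fixed offset (JSArray::kSize) inside the same allocation. The size computation as a standalone sketch, with illustrative header sizes:

    constexpr int kPointerSize = 4;                          // MIPS32.
    constexpr int kJSArraySize = 4 * kPointerSize;           // Illustrative.
    constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;  // Illustrative.

    constexpr int CloneAllocationSize(int length, bool double_elements) {
      return kJSArraySize +
             (length > 0 ? kFixedArrayHeaderSize +
                               length * (double_elements ? 8 : kPointerSize)
                         : 0);
    }
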
+
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   // Stack layout on entry:
+  //
   // [sp]: constant elements.
   // [sp + kPointerSize]: literal index.
   // [sp + (2 * kPointerSize)]: literals array.
 
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
-  int size = JSArray::kSize + elements_size;
-
   // Load boilerplate object into a3 and check if we need to create a
   // boilerplate.
   Label slow_case;
@@ -212,14 +316,42 @@
   __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
   __ Branch(&slow_case, eq, a3, Operand(t1));
 
+  FastCloneShallowArrayStub::Mode mode = mode_;
+  if (mode == CLONE_ANY_ELEMENTS) {
+    Label double_elements, check_fast_elements;
+    __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
+    __ Branch(&check_fast_elements, ne, v0, Operand(t1));
+    GenerateFastCloneShallowArrayCommon(masm, 0,
+                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+    // Return and remove the on-stack parameters.
+    __ DropAndRet(3);
+
+    __ bind(&check_fast_elements);
+    __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+    __ Branch(&double_elements, ne, v0, Operand(t1));
+    GenerateFastCloneShallowArrayCommon(masm, length_,
+                                        CLONE_ELEMENTS, &slow_case);
+    // Return and remove the on-stack parameters.
+    __ DropAndRet(3);
+
+    __ bind(&double_elements);
+    mode = CLONE_DOUBLE_ELEMENTS;
+    // Fall through to generate the code to handle double elements.
+  }
+
   if (FLAG_debug_code) {
     const char* message;
     Heap::RootListIndex expected_map_index;
-    if (mode_ == CLONE_ELEMENTS) {
+    if (mode == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map_index = Heap::kFixedArrayMapRootIndex;
+    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+      message = "Expected (writable) fixed double array";
+      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
     } else {
-      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
     }
@@ -231,44 +363,59 @@
     __ pop(a3);
   }
 
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  // Return new object in v0.
-  __ AllocateInNewSpace(size,
-                        v0,
-                        a1,
-                        a2,
-                        &slow_case,
-                        TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
-      __ lw(a1, FieldMemOperand(a3, i));
-      __ sw(a1, FieldMemOperand(v0, i));
-    }
-  }
-
-  if (length_ > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
-    __ Addu(a2, v0, Operand(JSArray::kSize));
-    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
-
-    // Copy the elements array.
-    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
-  }
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
 
   // Return and remove the on-stack parameters.
-  __ Addu(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(3);
 
   __ bind(&slow_case);
   __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
 }
 
 
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [sp]: object literal flags.
+  // [sp + kPointerSize]: constant properties.
+  // [sp + (2 * kPointerSize)]: literal index.
+  // [sp + (3 * kPointerSize)]: literals array.
+
+  // Load boilerplate object into a3 and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ lw(a3, MemOperand(sp, 3 * kPointerSize));
+  __ lw(a0, MemOperand(sp, 2 * kPointerSize));
+  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a3, t0, a3);
+  __ lw(a3, MemOperand(a3));
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&slow_case, eq, a3, Operand(t0));
+
+  // Check that the boilerplate contains only fast properties and we can
+  // statically determine the instance size.
+  int size = JSObject::kHeaderSize + length_ * kPointerSize;
+  __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
+  __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
+
+  // Allocate the JS object and copy header together with all in-object
+  // properties from the boilerplate.
+  __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
+  for (int i = 0; i < size; i += kPointerSize) {
+    __ lw(a1, FieldMemOperand(a3, i));
+    __ sw(a1, FieldMemOperand(v0, i));
+  }
+
+  // Return and remove the on-stack parameters.
+  __ DropAndRet(4);
+
+  __ bind(&slow_case);
+  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+}
+
+
 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
 // registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
 // 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
@@ -326,7 +473,7 @@
   __ And(exponent, source_, Operand(HeapNumber::kSignMask));
   // Subtract from 0 if source was negative.
   __ subu(at, zero_reg, source_);
-  __ movn(source_, at, exponent);
+  __ Movn(source_, at, exponent);
 
   // We have -1, 0 or 1, which we treat specially. Register source_ contains
   // absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -338,15 +485,15 @@
       HeapNumber::kExponentBias << HeapNumber::kExponentShift;
   // Safe to use 'at' as dest reg here.
   __ Or(at, exponent, Operand(exponent_word_for_1));
-  __ movn(exponent, at, source_);  // Write exp when source not 0.
+  __ Movn(exponent, at, source_);  // Write exp when source not 0.
   // 1, 0 and -1 all have 0 for the second word.
+  __ Ret(USE_DELAY_SLOT);
   __ mov(mantissa, zero_reg);
-  __ Ret();
 
   __ bind(&not_special);
   // Count leading zeros.
   // Gets the wrong answer for 0, but we already checked for that case above.
-  __ clz(zeros_, source_);
+  __ Clz(zeros_, source_);
   // Compute exponent and or it into the exponent register.
   // We use mantissa as a scratch register here.
   __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
@@ -362,9 +509,9 @@
   __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
   // And the top (top 20 bits).
   __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
-  __ or_(exponent, exponent, source_);
 
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ or_(exponent, exponent, source_);
 }
 
 
@@ -436,7 +583,9 @@
 
   Label is_smi, done;
 
-  __ JumpIfSmi(object, &is_smi);
+  // Smi-check
+  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
+  // Heap number check
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
 
   // Handle loading a double from a heap number.
@@ -463,7 +612,6 @@
   if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
     // Convert smi to double using FPU instructions.
-    __ SmiUntag(scratch1, object);
     __ mtc1(scratch1, dst);
     __ cvt_d_w(dst, dst);
     if (destination == kCoreRegisters) {
@@ -498,11 +646,10 @@
                            Heap::kHeapNumberMapRootIndex,
                            "HeapNumberMap register clobbered.");
   }
-  Label is_smi;
   Label done;
   Label not_in_int32_range;
 
-  __ JumpIfSmi(object, &is_smi);
+  __ UntagAndJumpIfSmi(dst, object, &done);
   __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
   __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
   __ ConvertToInt32(object,
@@ -522,10 +669,6 @@
                                  scratch2,
                                  scratch3);
 
-  __ jmp(&done);
-
-  __ bind(&is_smi);
-  __ SmiUntag(dst, object);
   __ bind(&done);
 }
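
UntagAndJumpIfSmi folds the removed SmiUntag/JumpIfSmi pair into one macro, with the untagged value landing in the destination register on the smi path (presumably scheduled into the branch delay slot, making the untag effectively free). A C model of the contract:

    #include <cstdint>

    // Returns true when the caller should take the smi label.
    bool UntagAndJumpIfSmi(int32_t src, int32_t* dst) {
      *dst = src >> 1;        // SmiUntag; harmless garbage for non-smis.
      return (src & 1) == 0;  // kSmiTag == 0.
    }
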
 
@@ -570,10 +713,10 @@
     __ Subu(int_scratch, zero_reg, int_scratch);
     __ bind(&skip_sub);
 
-    // Get mantisssa[51:20].
+    // Get mantissa[51:20].
 
     // Get the position of the first set bit.
-    __ clz(dst1, int_scratch);
+    __ Clz(dst1, int_scratch);
     __ li(scratch2, 31);
     __ Subu(dst1, scratch2, dst1);
 
@@ -615,7 +758,7 @@
 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                   Register object,
                                                   Destination destination,
-                                                  FPURegister double_dst,
+                                                  DoubleRegister double_dst,
                                                   Register dst1,
                                                   Register dst2,
                                                   Register heap_number_map,
@@ -651,25 +794,16 @@
     // Load the double value.
     __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-    // On MIPS a lot of things cannot be implemented the same way so right
-    // now it makes a lot more sense to just do things manually.
-
-    // Save FCSR.
-    __ cfc1(scratch1, FCSR);
-    // Disable FPU exceptions.
-    __ ctc1(zero_reg, FCSR);
-    __ trunc_w_d(single_scratch, double_dst);
-    // Retrieve FCSR.
-    __ cfc1(scratch2, FCSR);
-    // Restore FCSR.
-    __ ctc1(scratch1, FCSR);
-
-    // Check for inexact conversion or exception.
-    __ And(scratch2, scratch2, kFCSRFlagMask);
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_dst,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
 
     // Jump to not_int32 if the operation did not succeed.
-    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
 
     if (destination == kCoreRegisters) {
       __ Move(dst1, dst2, double_dst);
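
EmitFPUTruncate centralizes the FCSR save/clear/restore dance that the removed code open-coded: truncate toward zero, then surface any inexact or invalid-operation cause bits through except_flag so the caller can branch to the slow path. A hedged model of the contract as used at this call site (not the real macro-assembler semantics beyond what the call shows):

    #include <cmath>
    #include <cstdint>

    // Truncates toward zero; *except_flag is nonzero unless the result is
    // exact and representable as an int32 (kCheckForInexactConversion).
    int32_t FpuTruncateModel(double input, int32_t* except_flag) {
      double t = std::trunc(input);  // kRoundToZero.
      bool exact = std::isfinite(t) && t >= -2147483648.0 &&
                   t <= 2147483647.0 && t == input;
      *except_flag = exact ? 0 : 1;
      return exact ? static_cast<int32_t>(t) : 0;
    }
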
@@ -706,7 +840,7 @@
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
-                                            FPURegister double_scratch,
+                                            DoubleRegister double_scratch,
                                             Label* not_int32) {
   ASSERT(!dst.is(object));
   ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -716,10 +850,7 @@
 
   Label done;
 
-  // Untag the object into the destination register.
-  __ SmiUntag(dst, object);
-  // Just return if the object is a smi.
-  __ JumpIfSmi(object, &done);
+  __ UntagAndJumpIfSmi(dst, object, &done);
 
   if (FLAG_debug_code) {
     __ AbortIfNotRootValue(heap_number_map,
@@ -735,27 +866,19 @@
     // Load the double value.
     __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-    // On MIPS a lot of things cannot be implemented the same way so right
-    // now it makes a lot more sense to just do things manually.
-
-    // Save FCSR.
-    __ cfc1(scratch1, FCSR);
-    // Disable FPU exceptions.
-    __ ctc1(zero_reg, FCSR);
-    __ trunc_w_d(double_scratch, double_scratch);
-    // Retrieve FCSR.
-    __ cfc1(scratch2, FCSR);
-    // Restore FCSR.
-    __ ctc1(scratch1, FCSR);
-
-    // Check for inexact conversion or exception.
-    __ And(scratch2, scratch2, kFCSRFlagMask);
+    FPURegister single_scratch = double_scratch.low();
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_scratch,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
 
     // Jump to not_int32 if the operation did not succeed.
-    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
     // Get the result in the destination register.
-    __ mfc1(dst, double_scratch);
+    __ mfc1(dst, single_scratch);
 
   } else {
     // Load the double value in the destination registers.
@@ -832,7 +955,7 @@
   // non zero bits left. So we need the (30 - exponent) last bits of the
   // 31 higher bits of the mantissa to be null.
   // Because bits [21:0] are null, we can check instead that the
-  // (32 - exponent) last bits of the 32 higher bits of the mantisssa are null.
+  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
 
   // Get the 32 higher bits of the mantissa in dst.
   __ Ext(dst,
@@ -881,9 +1004,11 @@
     __ Move(f12, a0, a1);
     __ Move(f14, a2, a3);
   }
-  // Call C routine that may not cause GC or other trouble.
-  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
-                   4);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm);
+    __ CallCFunction(
+        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+  }
   // Store answer in the overwritable heap number.
   if (!IsMipsSoftFloatABI) {
     CpuFeatures::Scope scope(FPU);
@@ -895,9 +1020,38 @@
     __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
   }
   // Place heap_number_result in v0 and return to the pushed return address.
-  __ mov(v0, heap_number_result);
   __ pop(ra);
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, heap_number_result);
+}
+
+
+bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+  // These variants are compiled ahead of time.  See next method.
+  if (the_int_.is(a1) &&
+      the_heap_number_.is(v0) &&
+      scratch_.is(a2) &&
+      sign_.is(a3)) {
+    return true;
+  }
+  if (the_int_.is(a2) &&
+      the_heap_number_.is(v0) &&
+      scratch_.is(a3) &&
+      sign_.is(a0)) {
+    return true;
+  }
+  // Other register combinations are generated as and when they are needed,
+  // so it is unsafe to call them from stubs (we can't generate a stub while
+  // we are generating a stub).
+  return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
+  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
+  stub1.GetCode()->set_is_pregenerated(true);
+  stub2.GetCode()->set_is_pregenerated(true);
 }
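
These two methods implement the pregeneration rule spelled out in the comment: a stub may be called from another stub only if its exact register assignment was compiled ahead of time, because stub generation cannot recurse. A mirror of the whitelist (MIPS register codes: v0 = 2, a0..a3 = 4..7):

    struct StubRegs { int the_int, heap_number, scratch, sign; };

    inline bool SameRegs(const StubRegs& x, const StubRegs& y) {
      return x.the_int == y.the_int && x.heap_number == y.heap_number &&
             x.scratch == y.scratch && x.sign == y.sign;
    }

    inline bool IsPregeneratedCombo(const StubRegs& r) {
      const StubRegs stub1 = {5, 2, 6, 7};  // a1, v0, a2, a3
      const StubRegs stub2 = {6, 2, 7, 4};  // a2, v0, a3, a0
      return SameRegs(r, stub1) || SameRegs(r, stub2);
    }
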
 
 
@@ -920,7 +1074,7 @@
   __ or_(scratch_, scratch_, sign_);
   // Subtract from 0 if the value was negative.
   __ subu(at, zero_reg, the_int_);
-  __ movn(the_int_, at, sign_);
+  __ Movn(the_int_, at, sign_);
   // We should be masking the implicit first digit of the mantissa away here,
   // but it just ends up combining harmlessly with the last digit of the
   // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
@@ -1004,6 +1158,7 @@
   }
 
   __ bind(&return_equal);
+
   if (cc == less) {
     __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
   } else if (cc == greater) {
@@ -1068,16 +1223,15 @@
          (lhs.is(a1) && rhs.is(a0)));
 
   Label lhs_is_smi;
-  __ And(t0, lhs, Operand(kSmiTagMask));
-  __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
+  __ JumpIfSmi(lhs, &lhs_is_smi);
   // Rhs is a Smi.
   // Check whether the non-smi is a heap number.
   __ GetObjectType(lhs, t4, t4);
   if (strict) {
     // If lhs was not a number and rhs was a Smi then strict equality cannot
     // succeed. Return non-equal (lhs is already not zero).
+    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
     __ mov(v0, lhs);
-    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
   } else {
     // Smi compared non-strictly with a non-Smi non-heap-number. Call
     // the runtime.
@@ -1115,8 +1269,8 @@
   if (strict) {
     // If lhs was not a number and rhs was a Smi then strict equality cannot
     // succeed. Return non-equal.
+    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
     __ li(v0, Operand(1));
-    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
   } else {
     // Smi compared non-strictly with a non-Smi non-heap-number. Call
     // the runtime.
@@ -1196,12 +1350,13 @@
   __ bind(&one_is_nan);
   // NaN comparisons always fail.
   // Load whatever we need in v0 to make the comparison fail.
+
   if (cc == lt || cc == le) {
     __ li(v0, Operand(GREATER));
   } else {
     __ li(v0, Operand(LESS));
   }
-  __ Ret();  // Return.
+  __ Ret();
 
   __ bind(&neither_is_nan);
 }
@@ -1250,6 +1405,7 @@
     __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
 
     __ bind(&return_result_equal);
+
     __ li(v0, Operand(EQUAL));
     __ Ret();
   }
@@ -1258,7 +1414,7 @@
 
   if (!CpuFeatures::IsSupported(FPU)) {
     __ push(ra);
-    __ PrepareCallCFunction(4, t4);  // Two doubles count as 4 arguments.
+    __ PrepareCallCFunction(0, 2, t4);
     if (!IsMipsSoftFloatABI) {
       // We are not using MIPS FPU instructions, and parameters for the runtime
       // function call are prepared in a0-a3 registers, but the function we are
@@ -1268,21 +1424,20 @@
       __ Move(f12, a0, a1);
       __ Move(f14, a2, a3);
     }
-    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+
+    AllowExternalCallThatCantCauseGC scope(masm);
+    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
+       0, 2);
     __ pop(ra);  // Because this function returns int, result is in v0.
     __ Ret();
   } else {
     CpuFeatures::Scope scope(FPU);
     Label equal, less_than;
-    __ c(EQ, D, f12, f14);
-    __ bc1t(&equal);
-    __ nop();
-
-    __ c(OLT, D, f12, f14);
-    __ bc1t(&less_than);
-    __ nop();
+    __ BranchF(&equal, NULL, eq, f12, f14);
+    __ BranchF(&less_than, NULL, lt, f12, f14);
 
     // Not equal, not less, not NaN, must be greater.
+
     __ li(v0, Operand(GREATER));
     __ Ret();
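
PrepareCallCFunction and CallCFunction now take separate integer and double argument counts, replacing the old convention in which the caller flattened doubles into word slots itself; the macro assembler can then apply the soft- or hard-float ABI on its own. Schematically, from the two call sites in this hunk:

    // Old: "two doubles count as 4 arguments".
    //   __ PrepareCallCFunction(4, t4);
    //   __ CallCFunction(ref, 4);
    // New: 0 integer arguments, 2 double arguments.
    //   __ PrepareCallCFunction(0, 2, t4);
    //   __ CallCFunction(ref, 0, 2);
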
 
@@ -1303,7 +1458,7 @@
     // If either operand is a JS object or an oddball value, then they are
     // not equal since their pointers are different.
     // There is no test for undetectability in strict equality.
-    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
     Label first_non_object;
     // Get the type of the first operand into a2 and compare it with
     // FIRST_SPEC_OBJECT_TYPE.
@@ -1313,8 +1468,8 @@
     // Return non-zero.
     Label return_not_equal;
     __ bind(&return_not_equal);
+    __ Ret(USE_DELAY_SLOT);
     __ li(v0, Operand(1));
-    __ Ret();
 
     __ bind(&first_non_object);
     // Check for oddballs: true, false, null, undefined.
@@ -1393,8 +1548,8 @@
 
   // Both are symbols. We already checked they weren't the same pointer
   // so they are not equal.
+  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(1));   // Non-zero indicates not equal.
-  __ Ret();
 
   __ bind(&object_test);
   __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
@@ -1409,8 +1564,8 @@
   __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
   __ and_(a0, a2, a3);
   __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
-  __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ xori(v0, a0, 1 << Map::kIsUndetectable);
 }
 
 
@@ -1473,9 +1628,7 @@
       __ JumpIfSmi(probe, not_found);
       __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
       __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
-      __ c(EQ, D, f12, f14);
-      __ bc1t(&load_result_from_cache);
-      __ nop();   // bc1t() requires explicit fill of branch delay slot.
+      __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
       __ Branch(not_found);
     } else {
       // Note that there is no cache check for non-FPU case, even though
@@ -1519,8 +1672,7 @@
 
   // Generate code to lookup number in the number string cache.
   GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
-  __ Addu(sp, sp, Operand(1 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(1);
 
   __ bind(&runtime);
   // Handle number to string in the runtime system if not found in the cache.
@@ -1542,8 +1694,8 @@
     __ JumpIfNotSmi(a2, &not_two_smis);
     __ sra(a1, a1, 1);
     __ sra(a0, a0, 1);
-    __ Subu(v0, a1, a0);
-    __ Ret();
+    __ Ret(USE_DELAY_SLOT);
+    __ subu(v0, a1, a0);
     __ bind(&not_two_smis);
   } else if (FLAG_debug_code) {
     __ Or(a2, a1, a0);
@@ -1591,22 +1743,20 @@
     __ li(t2, Operand(EQUAL));
 
     // Check if either rhs or lhs is NaN.
-    __ c(UN, D, f12, f14);
-    __ bc1t(&nan);
-    __ nop();
+    __ BranchF(NULL, &nan, eq, f12, f14);
 
     // Check if LESS condition is satisfied. If true, move conditionally
     // result to v0.
     __ c(OLT, D, f12, f14);
-    __ movt(v0, t0);
+    __ Movt(v0, t0);
     // Use the previous check to conditionally store to v0 the opposite condition
     // (GREATER). If rhs is equal to lhs, this will be corrected in next
     // check.
-    __ movf(v0, t1);
+    __ Movf(v0, t1);
     // Check if EQUAL condition is satisfied. If true, move conditionally
     // result to v0.
     __ c(EQ, D, f12, f14);
-    __ movt(v0, t2);
+    __ Movt(v0, t2);
 
     __ Ret();
 
@@ -1711,88 +1861,144 @@
 }
 
 
-// The stub returns zero for false, and a non-zero value for true.
+// The stub expects its argument in the tos_ register and returns its result in
+// it, too: zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub uses FPU instructions.
   CpuFeatures::Scope scope(FPU);
 
-  Label false_result;
-  Label not_heap_number;
-  Register scratch0 = t5.is(tos_) ? t3 : t5;
+  Label patch;
+  const Register map = t5.is(tos_) ? t3 : t5;
 
-  // undefined -> false
-  __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+  // undefined -> false.
+  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
 
-  // Boolean -> its value
-  __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
-  __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
-  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
-  // return true if the equal condition is satisfied.
-  __ Ret(eq, tos_, Operand(scratch0));
+  // Boolean -> its value.
+  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
+  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
 
-  // Smis: 0 -> false, all other -> true
-  __ And(scratch0, tos_, tos_);
-  __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
-  __ And(scratch0, tos_, Operand(kSmiTagMask));
-  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
-  // return true if the not equal condition is satisfied.
-  __ Ret(eq, scratch0, Operand(zero_reg));
+  // 'null' -> false.
+  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
 
-  // 'null' -> false
-  __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+  if (types_.Contains(SMI)) {
+    // Smis: 0 -> false, all other -> true
+    __ And(at, tos_, kSmiTagMask);
+    // tos_ contains the correct return value already
+    __ Ret(eq, at, Operand(zero_reg));
+  } else if (types_.NeedsMap()) {
+    // If we need a map later and have a Smi -> patch.
+    __ JumpIfSmi(tos_, &patch);
+  }
 
-  // HeapNumber => false if +0, -0, or NaN.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&not_heap_number, ne, scratch0, Operand(at));
+  if (types_.NeedsMap()) {
+    __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
 
-  __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
-  __ fcmp(f12, 0.0, UEQ);
+    if (types_.CanBeUndetectable()) {
+      __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+      __ And(at, at, Operand(1 << Map::kIsUndetectable));
+      // Undetectable -> false.
+      __ Movn(tos_, zero_reg, at);
+      __ Ret(ne, at, Operand(zero_reg));
+    }
+  }
 
-  // "tos_" is a register, and contains a non zero value by default.
-  // Hence we only need to overwrite "tos_" with zero to return false for
-  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
-  __ movt(tos_, zero_reg);
-  __ Ret();
+  if (types_.Contains(SPEC_OBJECT)) {
+    // Spec object -> true.
+    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+    // tos_ contains the correct non-zero return value already.
+    __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+  }
 
-  __ bind(&not_heap_number);
+  if (types_.Contains(STRING)) {
+    // String value -> false iff empty.
+    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+    Label skip;
+    __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
+    __ Ret(USE_DELAY_SLOT);  // The string length is OK as the return value.
+    __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+    __ bind(&skip);
+  }
 
-  // It can be an undetectable object.
-  // Undetectable => false.
-  __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
-  __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
-  __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
+  if (types_.Contains(HEAP_NUMBER)) {
+    // Heap number -> false iff +0, -0, or NaN.
+    Label not_heap_number;
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    __ Branch(&not_heap_number, ne, map, Operand(at));
+    Label zero_or_nan, number;
+    __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+    __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
+    // "tos_" is a register, and contains a non zero value by default.
+    // Hence we only need to overwrite "tos_" with zero to return false for
+    // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+    __ bind(&zero_or_nan);
+    __ mov(tos_, zero_reg);
+    __ bind(&number);
+    __ Ret();
+    __ bind(&not_heap_number);
+  }
 
-  // JavaScript object => true.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+  __ bind(&patch);
+  GenerateTypeTransition(masm);
+}
 
-  // "tos_" is a register and contains a non-zero value.
-  // Hence we implicitly return true if the greater than
-  // condition is satisfied.
-  __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-  // Check for string.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
-  // "tos_" is a register and contains a non-zero value.
-  // Hence we implicitly return true if the greater than
-  // condition is satisfied.
-  __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
+void ToBooleanStub::CheckOddball(MacroAssembler* masm,
+                                 Type type,
+                                 Heap::RootListIndex value,
+                                 bool result) {
+  if (types_.Contains(type)) {
+    // If we see an expected oddball, return its ToBoolean value tos_.
+    __ LoadRoot(at, value);
+    __ Subu(at, at, tos_);  // Equality check for the Movz below.
+    // The value of a root is never NULL, so we can avoid loading a non-null
+    // value into tos_ when we want to return 'true'.
+    if (!result) {
+      __ Movz(tos_, zero_reg, at);
+    }
+    __ Ret(eq, at, Operand(zero_reg));
+  }
+}
 
-  // String value => false iff empty, i.e., length is zero.
-  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
-  // If length is zero, "tos_" contains zero ==> false.
-  // If length is not zero, "tos_" contains a non-zero value ==> true.
-  __ Ret();
 
-  // Return 0 in "tos_" for false.
-  __ bind(&false_result);
-  __ mov(tos_, zero_reg);
+void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ Move(a3, tos_);
+  __ li(a2, Operand(Smi::FromInt(tos_.code())));
+  __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
+  __ Push(a3, a2, a1);
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
+      3,
+      1);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ MultiPush(kJSCallerSaved | ra.bit());
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(FPU);
+    __ MultiPushFPU(kCallerSavedFPU);
+  }
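+  // The C callback takes a single integer argument (the isolate address)
+  // and no floating-point arguments.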
+  const int argument_count = 1;
+  const int fp_argument_count = 0;
+  const Register scratch = a1;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+  __ li(a0, Operand(ExternalReference::isolate_address()));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(FPU);
+    __ MultiPopFPU(kCallerSavedFPU);
+  }
+
+  __ MultiPop(kJSCallerSaved | ra.bit());
   __ Ret();
 }
 
@@ -1884,8 +2090,8 @@
   __ Branch(slow, eq, t0, Operand(zero_reg));
 
   // Return '0 - value'.
-  __ Subu(v0, zero_reg, a0);
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ subu(v0, zero_reg, a0);
 }
 
 
@@ -1951,12 +2157,13 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(a0);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(a1, v0);
-    __ pop(a0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(a0);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(a1, v0);
+      __ pop(a0);
+    }
 
     __ bind(&heapnumber_allocated);
     __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
@@ -1998,13 +2205,14 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(v0);  // Push the heap number, not the untagged int32.
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(a2, v0);  // Move the new heap number into a2.
-    // Get the heap number into v0, now that the new heap number is in a2.
-    __ pop(v0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(v0);  // Push the heap number, not the untagged int32.
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(a2, v0);  // Move the new heap number into a2.
+      // Get the heap number into v0, now that the new heap number is in a2.
+      __ pop(v0);
+    }
 
     // Convert the heap number in v0 to an untagged integer in a1.
     // This can't go slow-case because it's the same number we already
@@ -2115,6 +2323,9 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
+  // Explicitly allow generation of nested stubs. It is safe here because
+  // generation code does not use any raw pointers.
+  AllowStubCallsScope allow_stub_calls(masm, true);
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -2210,8 +2421,8 @@
       // Negating it results in 'lt'.
       __ Branch(&skip, lt, scratch2, Operand(zero_reg));
       ASSERT(Smi::FromInt(0) == 0);
-      __ mov(v0, zero_reg);
-      __ Ret();  // Return smi 0 if the non-zero one was positive.
+      __ Ret(USE_DELAY_SLOT);
+      __ mov(v0, zero_reg);  // Return smi 0 if the non-zero one was positive.
       __ bind(&skip);
       // We fall through here if we multiplied a negative number with 0, because
       // that would mean we should produce -0.
@@ -2266,23 +2477,23 @@
       }
       break;
     case Token::BIT_OR:
-      __ Or(v0, left, Operand(right));
-      __ Ret();
+      __ Ret(USE_DELAY_SLOT);
+      __ or_(v0, left, right);
       break;
     case Token::BIT_AND:
-      __ And(v0, left, Operand(right));
-      __ Ret();
+      __ Ret(USE_DELAY_SLOT);
+      __ and_(v0, left, right);
       break;
     case Token::BIT_XOR:
-      __ Xor(v0, left, Operand(right));
-      __ Ret();
+      __ Ret(USE_DELAY_SLOT);
+      __ xor_(v0, left, right);
       break;
     case Token::SAR:
       // Remove tags from right operand.
       __ GetLeastBitsFromSmi(scratch1, right, 5);
       __ srav(scratch1, left, scratch1);
       // Smi tag result.
-      __ And(v0, scratch1, Operand(~kSmiTagMask));
+      __ And(v0, scratch1, ~kSmiTagMask);
       __ Ret();
       break;
     case Token::SHR:
@@ -2394,8 +2605,8 @@
         // kValueOffset. On MIPS this workaround is built into sdc1 so
         // there's no point in generating even more instructions.
         __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
+        __ Ret(USE_DELAY_SLOT);
         __ mov(v0, result);
-        __ Ret();
       } else {
         // Call the C function to handle the double operation.
         FloatingPointHelper::CallCCodeForDoubleOperation(masm,
@@ -2717,26 +2928,16 @@
           // Otherwise return a heap number if allowed, or jump to type
           // transition.
 
-          // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-          // On MIPS a lot of things cannot be implemented the same way so right
-          // now it makes a lot more sense to just do things manually.
-
-          // Save FCSR.
-          __ cfc1(scratch1, FCSR);
-          // Disable FPU exceptions.
-          __ ctc1(zero_reg, FCSR);
-          __ trunc_w_d(single_scratch, f10);
-          // Retrieve FCSR.
-          __ cfc1(scratch2, FCSR);
-          // Restore FCSR.
-          __ ctc1(scratch1, FCSR);
-
-          // Check for inexact conversion or exception.
-          __ And(scratch2, scratch2, kFCSRFlagMask);
+          Register except_flag = scratch2;
+          __ EmitFPUTruncate(kRoundToZero,
+                             single_scratch,
+                             f10,
+                             scratch1,
+                             except_flag);
 
           if (result_type_ <= BinaryOpIC::INT32) {
-            // If scratch2 != 0, result does not fit in a 32-bit integer.
-            __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+            // If except_flag != 0, result does not fit in a 32-bit integer.
+            __ Branch(&transition, ne, except_flag, Operand(zero_reg));
           }
 
           // Check if the result fits in a smi.
@@ -2929,9 +3130,9 @@
         __ Ret();
       } else {
         // Tail call that writes the int32 in a2 to the heap number in v0, using
-        // a3 and a1 as scratch. v0 is preserved and returned.
+        // a3 and a0 as scratch. v0 is preserved and returned.
         __ mov(a0, t1);
-        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
+        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
         __ TailCallStub(&stub);
       }
 
@@ -3225,10 +3426,12 @@
     __ lw(t0, MemOperand(cache_entry, 0));
     __ lw(t1, MemOperand(cache_entry, 4));
     __ lw(t2, MemOperand(cache_entry, 8));
-    __ Addu(cache_entry, cache_entry, 12);
     __ Branch(&calculate, ne, a2, Operand(t0));
     __ Branch(&calculate, ne, a3, Operand(t1));
     // Cache hit. Load result, cleanup and return.
+    Counters* counters = masm->isolate()->counters();
+    __ IncrementCounter(
+        counters->transcendental_cache_hit(), 1, scratch0, scratch1);
     if (tagged) {
       // Pop input value from stack and load result into v0.
       __ Drop(1);
@@ -3241,6 +3444,9 @@
   }  // if (CpuFeatures::IsSupported(FPU))
 
   __ bind(&calculate);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(
+      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
   if (tagged) {
     __ bind(&invalid_cache);
     __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
@@ -3259,13 +3465,13 @@
     // Register a0 holds precalculated cache entry address; preserve
     // it on the stack and pop it into register cache_entry after the
     // call.
-    __ push(cache_entry);
+    __ Push(cache_entry, a2, a3);
     GenerateCallCFunction(masm, scratch0);
     __ GetCFunctionDoubleResult(f4);
 
     // Try to update the cache. If we cannot allocate a
     // heap number, we return the result without updating.
-    __ pop(cache_entry);
+    __ Pop(cache_entry, a2, a3);
     __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
     __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
@@ -3274,8 +3480,8 @@
     __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
     __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
 
+    __ Ret(USE_DELAY_SLOT);
     __ mov(v0, cache_entry);
-    __ Ret();
 
     __ bind(&invalid_cache);
     // The cache is invalid. Call runtime which will recreate the
@@ -3283,10 +3489,11 @@
     __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
     __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
-    __ EnterInternalFrame();
-    __ push(a0);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(a0);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
     __ Ret();
 
@@ -3299,14 +3506,15 @@
 
     // We return the value in f4 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Allocate an aligned object larger than a HeapNumber.
-    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
-    __ li(scratch0, Operand(4 * kPointerSize));
-    __ push(scratch0);
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+      // Allocate an aligned object larger than a HeapNumber.
+      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+      __ li(scratch0, Operand(4 * kPointerSize));
+      __ push(scratch0);
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 }
@@ -3317,22 +3525,31 @@
   __ push(ra);
   __ PrepareCallCFunction(2, scratch);
   if (IsMipsSoftFloatABI) {
-    __ Move(v0, v1, f4);
+    __ Move(a0, a1, f4);
   } else {
     __ mov_d(f12, f4);
   }
+  AllowExternalCallThatCantCauseGC scope(masm);
+  Isolate* isolate = masm->isolate();
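+  // Each math function takes one double argument and no integer arguments,
+  // hence the (0, 1) argument counts passed to CallCFunction below.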
   switch (type_) {
     case TranscendentalCache::SIN:
       __ CallCFunction(
-          ExternalReference::math_sin_double_function(masm->isolate()), 2);
+          ExternalReference::math_sin_double_function(isolate),
+          0, 1);
       break;
     case TranscendentalCache::COS:
       __ CallCFunction(
-          ExternalReference::math_cos_double_function(masm->isolate()), 2);
+          ExternalReference::math_cos_double_function(isolate),
+          0, 1);
+      break;
+    case TranscendentalCache::TAN:
+      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
+          0, 1);
       break;
     case TranscendentalCache::LOG:
       __ CallCFunction(
-          ExternalReference::math_log_double_function(masm->isolate()), 2);
+          ExternalReference::math_log_double_function(isolate),
+          0, 1);
       break;
     default:
       UNIMPLEMENTED();
@@ -3347,6 +3564,7 @@
     // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -3360,106 +3578,224 @@
 }
 
 
+void InterruptStub::Generate(MacroAssembler* masm) {
+  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
 void MathPowStub::Generate(MacroAssembler* masm) {
-  Label call_runtime;
+  CpuFeatures::Scope fpu_scope(FPU);
+  const Register base = a1;
+  const Register exponent = a2;
+  const Register heapnumbermap = t1;
+  const Register heapnumber = v0;
+  const DoubleRegister double_base = f2;
+  const DoubleRegister double_exponent = f4;
+  const DoubleRegister double_result = f0;
+  const DoubleRegister double_scratch = f6;
+  const FPURegister single_scratch = f8;
+  const Register scratch = t5;
+  const Register scratch2 = t3;
 
-  if (CpuFeatures::IsSupported(FPU)) {
-    CpuFeatures::Scope scope(FPU);
-
-    Label base_not_smi;
-    Label exponent_not_smi;
-    Label convert_exponent;
-
-    const Register base = a0;
-    const Register exponent = a2;
-    const Register heapnumbermap = t1;
-    const Register heapnumber = s0;  // Callee-saved register.
-    const Register scratch = t2;
-    const Register scratch2 = t3;
-
-    // Alocate FP values in the ABI-parameter-passing regs.
-    const DoubleRegister double_base = f12;
-    const DoubleRegister double_exponent = f14;
-    const DoubleRegister double_result = f0;
-    const DoubleRegister double_scratch = f2;
-
-    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+  Label call_runtime, done, int_exponent;
+  if (exponent_type_ == ON_STACK) {
+    Label base_is_smi, unpack_exponent;
+    // The exponent and base are supplied as arguments on the stack.
+    // This can only happen if the stub is called from non-optimized code.
+    // Load input parameters from stack to double registers.
     __ lw(base, MemOperand(sp, 1 * kPointerSize));
     __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
 
-    // Convert base to double value and store it in f0.
-    __ JumpIfNotSmi(base, &base_not_smi);
-    // Base is a Smi. Untag and convert it.
-    __ SmiUntag(base);
-    __ mtc1(base, double_scratch);
-    __ cvt_d_w(double_base, double_scratch);
-    __ Branch(&convert_exponent);
+    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
 
-    __ bind(&base_not_smi);
+    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
     __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
     __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
-    // Base is a heapnumber. Load it into double register.
+
     __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+    __ jmp(&unpack_exponent);
 
-    __ bind(&convert_exponent);
-    __ JumpIfNotSmi(exponent, &exponent_not_smi);
-    __ SmiUntag(exponent);
+    __ bind(&base_is_smi);
+    __ mtc1(scratch, single_scratch);
+    __ cvt_d_w(double_base, single_scratch);
+    __ bind(&unpack_exponent);
 
-    // The base is in a double register and the exponent is
-    // an untagged smi. Allocate a heap number and call a
-    // C function for integer exponents. The register containing
-    // the heap number is callee-saved.
-    __ AllocateHeapNumber(heapnumber,
-                          scratch,
-                          scratch2,
-                          heapnumbermap,
-                          &call_runtime);
-    __ push(ra);
-    __ PrepareCallCFunction(3, scratch);
-    __ SetCallCDoubleArguments(double_base, exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_int_function(masm->isolate()), 3);
-    __ pop(ra);
-    __ GetCFunctionDoubleResult(double_result);
-    __ sdc1(double_result,
-            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
-    __ mov(v0, heapnumber);
-    __ DropAndRet(2 * kPointerSize);
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
 
-    __ bind(&exponent_not_smi);
     __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
     __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
-    // Exponent is a heapnumber. Load it into double register.
     __ ldc1(double_exponent,
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
+  } else if (exponent_type_ == TAGGED) {
+    // Base is already in double_base.
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
 
-    // The base and the exponent are in double registers.
-    // Allocate a heap number and call a C function for
-    // double exponents. The register containing
-    // the heap number is callee-saved.
-    __ AllocateHeapNumber(heapnumber,
-                          scratch,
-                          scratch2,
-                          heapnumbermap,
-                          &call_runtime);
-    __ push(ra);
-    __ PrepareCallCFunction(4, scratch);
-    // ABI (o32) for func(double a, double b): a in f12, b in f14.
-    ASSERT(double_base.is(f12));
-    ASSERT(double_exponent.is(f14));
-    __ SetCallCDoubleArguments(double_base, double_exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(masm->isolate()), 4);
-    __ pop(ra);
-    __ GetCFunctionDoubleResult(double_result);
-    __ sdc1(double_result,
-            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
-    __ mov(v0, heapnumber);
-    __ DropAndRet(2 * kPointerSize);
+    __ ldc1(double_exponent,
+            FieldMemOperand(exponent, HeapNumber::kValueOffset));
   }
 
-  __ bind(&call_runtime);
-  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+  if (exponent_type_ != INTEGER) {
+    Label int_exponent_convert;
+    // Detect integer exponents stored as double.
+    __ EmitFPUTruncate(kRoundToMinusInf,
+                       single_scratch,
+                       double_exponent,
+                       scratch,
+                       scratch2,
+                       kCheckForInexactConversion);
+    // scratch2 == 0 means there was no conversion error.
+    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
+
+    if (exponent_type_ == ON_STACK) {
+      // Detect square root case.  Crankshaft detects constant +/-0.5 at
+      // compile time and uses DoMathPowHalf instead.  We then skip this check
+      // for non-constant cases of +/-0.5 as these hardly occur.
+      Label not_plus_half;
+
+      // Test for 0.5.
+      __ Move(double_scratch, 0.5);
+      __ BranchF(USE_DELAY_SLOT,
+                 &not_plus_half,
+                 NULL,
+                 ne,
+                 double_exponent,
+                 double_scratch);
+      // double_scratch can be overwritten in the delay slot.
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+      __ Move(double_scratch, -V8_INFINITY);
+      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
+      __ neg_d(double_result, double_scratch);
+
+      // Add +0 to convert -0 to +0.
+      __ add_d(double_scratch, double_base, kDoubleRegZero);
+      __ sqrt_d(double_result, double_scratch);
+      __ jmp(&done);
+
+      __ bind(&not_plus_half);
+      __ Move(double_scratch, -0.5);
+      __ BranchF(USE_DELAY_SLOT,
+                 &call_runtime,
+                 NULL,
+                 ne,
+                 double_exponent,
+                 double_scratch);
+      // double_scratch can be overwritten in the delay slot.
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+      __ Move(double_scratch, -V8_INFINITY);
+      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
+      __ Move(double_result, kDoubleRegZero);
+
+      // Add +0 to convert -0 to +0.
+      __ add_d(double_scratch, double_base, kDoubleRegZero);
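+      // base^-0.5 == 1 / sqrt(base).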
+      __ Move(double_result, 1);
+      __ sqrt_d(double_scratch, double_scratch);
+      __ div_d(double_result, double_result, double_scratch);
+      __ jmp(&done);
+    }
+
+    __ push(ra);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ SetCallCDoubleArguments(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0, 2);
+    }
+    __ pop(ra);
+    __ GetCFunctionDoubleResult(double_result);
+    __ jmp(&done);
+
+    __ bind(&int_exponent_convert);
+    __ mfc1(scratch, single_scratch);
+  }
+
+  // Calculate power with integer exponent.
+  __ bind(&int_exponent);
+
+  // Get two copies of exponent in the registers scratch and exponent.
+  if (exponent_type_ == INTEGER) {
+    __ mov(scratch, exponent);
+  } else {
+    // Exponent has previously been stored into scratch as an untagged integer.
+    __ mov(exponent, scratch);
+  }
+
+  __ mov_d(double_scratch, double_base);  // Back up base.
+  __ Move(double_result, 1.0);
+
+  // Get absolute value of exponent.
+  Label positive_exponent;
+  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
+  __ Subu(scratch, zero_reg, scratch);
+  __ bind(&positive_exponent);
+
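+  // Compute base^|exponent| by square-and-multiply: whenever the low bit of
+  // the exponent is set, multiply the result by the current square, then
+  // square the base and shift the exponent right until it reaches zero.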
+  Label while_true, no_carry, loop_end;
+  __ bind(&while_true);
+
+  __ And(scratch2, scratch, 1);
+
+  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
+  __ mul_d(double_result, double_result, double_scratch);
+  __ bind(&no_carry);
+
+  __ sra(scratch, scratch, 1);
+
+  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
+  __ mul_d(double_scratch, double_scratch, double_scratch);
+
+  __ Branch(&while_true);
+
+  __ bind(&loop_end);
+
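+  // If the exponent was non-negative we are done.  Otherwise invert the
+  // result, since base^-n == 1 / base^n.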
+  __ Branch(&done, ge, exponent, Operand(zero_reg));
+  __ Move(double_scratch, 1.0);
+  __ div_d(double_result, double_scratch, double_result);
+  // Test whether result is zero.  Bail out to check for subnormal result.
+  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
+
+  // double_exponent may not contain the exponent value if the input was a
+  // smi.  We set it with the exponent value before bailing out.
+  __ mtc1(exponent, single_scratch);
+  __ cvt_d_w(double_exponent, single_scratch);
+
+  // Returning or bailing out.
+  Counters* counters = masm->isolate()->counters();
+  if (exponent_type_ == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+    // The stub is called from non-optimized code, which expects the result
+    // as a heap number in v0.
+    __ bind(&done);
+    __ AllocateHeapNumber(
+        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
+    __ sdc1(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    ASSERT(heapnumber.is(v0));
+    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+    __ DropAndRet(2);
+  } else {
+    __ push(ra);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ SetCallCDoubleArguments(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0, 2);
+    }
+    __ pop(ra);
+    __ GetCFunctionDoubleResult(double_result);
+
+    __ bind(&done);
+    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+    __ Ret();
+  }
 }
 
 
@@ -3468,14 +3804,34 @@
 }
 
 
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  __ Throw(v0);
+bool CEntryStub::IsPregenerated() {
+  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+          result_size_ == 1;
 }
 
 
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
-                                          UncatchableExceptionType type) {
-  __ ThrowUncatchable(type, v0);
+void CodeStub::GenerateStubsAheadOfTime() {
+  CEntryStub::GenerateAheadOfTime();
+  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  Handle<Code> code = save_doubles.GetCode();
+  code->set_is_pregenerated(true);
+  StoreBufferOverflowStub stub(kSaveFPRegs);
+  stub.GetCode()->set_is_pregenerated(true);
+  code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+  CEntryStub stub(1, kDontSaveFPRegs);
+  Handle<Code> code = stub.GetCode();
+  code->set_is_pregenerated(true);
 }
 
 
@@ -3490,16 +3846,17 @@
   // s1: pointer to the first argument          (C callee-saved)
   // s2: pointer to builtin function            (C callee-saved)
 
+  Isolate* isolate = masm->isolate();
+
   if (do_gc) {
     // Move result passed in v0 into a0 to call PerformGC.
     __ mov(a0, v0);
-    __ PrepareCallCFunction(1, a1);
-    __ CallCFunction(
-        ExternalReference::perform_gc_function(masm->isolate()), 1);
+    __ PrepareCallCFunction(1, 0, a1);
+    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
   }
 
   ExternalReference scope_depth =
-      ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
+      ExternalReference::heap_always_allocate_scope_depth(isolate);
   if (always_allocate) {
     __ li(a0, Operand(scope_depth));
     __ lw(a1, MemOperand(a0));
@@ -3507,9 +3864,10 @@
     __ sw(a1, MemOperand(a0));
   }
 
-  // Prepare arguments for C routine: a0 = argc, a1 = argv
+  // Prepare arguments for C routine.
+  // a0 = argc
   __ mov(a0, s0);
-  __ mov(a1, s1);
+  // a1 = argv (set in the delay slot after find_ra below).
 
   // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
   // also need to reserve the 4 argument slots on the stack.
@@ -3529,30 +3887,28 @@
     // coverage code can interfere with the proper calculation of ra.
     Label find_ra;
     masm->bal(&find_ra);  // bal exposes branch delay slot.
-    masm->nop();  // Branch delay slot nop.
+    masm->mov(a1, s1);
     masm->bind(&find_ra);
 
     // Adjust the value in ra to point to the correct return location, 2nd
     // instruction past the real call into C code (the jalr(t9)), and push it.
     // This is the return address of the exit frame.
-    const int kNumInstructionsToJump = 6;
+    const int kNumInstructionsToJump = 5;
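+    // Those five instructions are the Addu, sw, mov, and jalr below plus the
+    // addiu in the jalr's delay slot.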
     masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
     masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
-    masm->Subu(sp, sp, kCArgsSlotsSize);
+    // Stack space reservation moved to the branch delay slot below.
     // Stack is still aligned.
 
     // Call the C routine.
     masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
     masm->jalr(t9);
-    masm->nop();    // Branch delay slot nop.
+    // Set up sp in the delay slot.
+    masm->addiu(sp, sp, -kCArgsSlotsSize);
     // Make sure the stored 'ra' points to this position.
     ASSERT_EQ(kNumInstructionsToJump,
               masm->InstructionsGeneratedSince(&find_ra));
   }
 
-  // Restore stack (remove arg slots).
-  __ Addu(sp, sp, kCArgsSlotsSize);
-
   if (always_allocate) {
     // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
     __ li(a2, Operand(scope_depth));
@@ -3566,14 +3922,16 @@
   STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
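+  // v0 is a failure object exactly when its low kFailureTagMask bits are all
+  // set; adding 1 then clears those bits, which the andi below tests.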
   __ addiu(a2, v0, 1);
   __ andi(t0, a2, kFailureTagMask);
-  __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
+  __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
+  // Restore stack (remove arg slots) in branch delay slot.
+  __ addiu(sp, sp, kCArgsSlotsSize);
+
 
   // Exit C frame and return.
   // v0:v1: result
   // sp: stack pointer
   // fp: frame pointer
-  __ LeaveExitFrame(save_doubles_, s0);
-  __ Ret();
+  __ LeaveExitFrame(save_doubles_, s0, true);
 
   // Check if we should retry or throw exception.
   Label retry;
@@ -3584,22 +3942,25 @@
 
   // Special handling of out of memory exceptions.
   Failure* out_of_memory = Failure::OutOfMemoryException();
-  __ Branch(throw_out_of_memory_exception, eq,
-            v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+  __ Branch(USE_DELAY_SLOT,
+            throw_out_of_memory_exception,
+            eq,
+            v0,
+            Operand(reinterpret_cast<int32_t>(out_of_memory)));
+  // If we throw the OOM exception, the value of a3 doesn't matter.
+  // Any instruction that is not a jump can go in the delay slot.
 
   // Retrieve the pending exception and clear the variable.
-  __ li(t0,
-        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
-  __ lw(a3, MemOperand(t0));
+  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      masm->isolate())));
+                                      isolate)));
   __ lw(v0, MemOperand(t0));
   __ sw(a3, MemOperand(t0));
 
   // Special handling of termination exceptions which are uncatchable
   // by javascript code.
-  __ Branch(throw_termination_exception, eq,
-            v0, Operand(masm->isolate()->factory()->termination_exception()));
+  __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
+  __ Branch(throw_termination_exception, eq, v0, Operand(t0));
 
   // Handle normal exception.
   __ jmp(throw_normal_exception);
@@ -3611,8 +3972,9 @@
 
 void CEntryStub::Generate(MacroAssembler* masm) {
   // Called from JavaScript; parameters are on stack as if calling JS function
-  // a0: number of arguments including receiver
-  // a1: pointer to builtin function
+  // s0: number of arguments including receiver
+  // s1: size of arguments excluding receiver
+  // s2: pointer to builtin function
   // fp: frame pointer    (restored after C call)
   // sp: stack pointer    (restored as callee's sp after C call)
   // cp: current context  (C callee-saved)
@@ -3622,18 +3984,18 @@
   // this by performing a garbage collection and retrying the
   // builtin once.
 
+  // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
+  // The reason for this is that these arguments would need to be saved anyway
+  // so it's faster to set them up directly.
+  // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
+
   // Compute the argv pointer in a callee-saved register.
-  __ sll(s1, a0, kPointerSizeLog2);
   __ Addu(s1, sp, s1);
-  __ Subu(s1, s1, Operand(kPointerSize));
 
   // Enter the exit frame that transitions from JavaScript to C++.
+  FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(save_doubles_);
 
-  // Setup argc and the builtin function in callee-saved registers.
-  __ mov(s0, a0);
-  __ mov(s2, a1);
-
   // s0: number of arguments (C callee-saved)
   // s1: pointer to first argument (C callee-saved)
   // s2: pointer to builtin function (C callee-saved)
@@ -3669,23 +4031,38 @@
                true);
 
   __ bind(&throw_out_of_memory_exception);
-  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+  // Set external caught exception to false.
+  Isolate* isolate = masm->isolate();
+  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+                                    isolate);
+  __ li(a0, Operand(false, RelocInfo::NONE));
+  __ li(a2, Operand(external_caught));
+  __ sw(a0, MemOperand(a2));
+
+  // Set pending exception and v0 to out of memory exception.
+  Failure* out_of_memory = Failure::OutOfMemoryException();
+  __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                      isolate)));
+  __ sw(v0, MemOperand(a2));
+  // Fall through to the next label.
 
   __ bind(&throw_termination_exception);
-  GenerateThrowUncatchable(masm, TERMINATION);
+  __ ThrowUncatchable(v0);
 
   __ bind(&throw_normal_exception);
-  GenerateThrowTOS(masm);
+  __ Throw(v0);
 }
 
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  Label invoke, exit;
+  Label invoke, handler_entry, exit;
+  Isolate* isolate = masm->isolate();
 
   // Registers:
   // a0: entry address
   // a1: function
-  // a2: reveiver
+  // a2: receiver
   // a3: argc
   //
   // Stack:
@@ -3699,14 +4076,18 @@
     CpuFeatures::Scope scope(FPU);
     // Save callee-saved FPU registers.
     __ MultiPushFPU(kCalleeSavedFPU);
+    // Set up the reserved register for 0.0.
+    __ Move(kDoubleRegZero, 0.0);
   }
 
+
   // Load argv in s0 register.
   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
   if (CpuFeatures::IsSupported(FPU)) {
     offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
   }
 
+  __ InitializeRootRegister();
   __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
 
   // We build an EntryFrame.
@@ -3715,16 +4096,16 @@
   __ li(t2, Operand(Smi::FromInt(marker)));
   __ li(t1, Operand(Smi::FromInt(marker)));
   __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
-                                      masm->isolate())));
+                                      isolate)));
   __ lw(t0, MemOperand(t0));
   __ Push(t3, t2, t1, t0);
-  // Setup frame pointer for the frame to be pushed.
+  // Set up frame pointer for the frame to be pushed.
   __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
 
   // Registers:
   // a0: entry_address
   // a1: function
-  // a2: reveiver_pointer
+  // a2: receiver_pointer
   // a3: argc
   // s0: argv
   //
@@ -3739,8 +4120,7 @@
 
   // If this is the outermost JS call, set js_entry_sp value.
   Label non_outermost_js;
-  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
-                                masm->isolate());
+  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
   __ li(t1, Operand(ExternalReference(js_entry_sp)));
   __ lw(t2, MemOperand(t1));
   __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
@@ -3754,35 +4134,35 @@
   __ bind(&cont);
   __ push(t0);
 
-  // Call a faked try-block that does the invoke.
-  __ bal(&invoke);  // bal exposes branch delay slot.
-  __ nop();   // Branch delay slot nop.
-
-  // Caught exception: Store result (exception) in the pending
-  // exception field in the JSEnv and return a failure sentinel.
-  // Coming in here the fp will be invalid because the PushTryHandler below
-  // sets it to 0 to signal the existence of the JSEntry frame.
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ jmp(&invoke);
+  __ bind(&handler_entry);
+  handler_offset_ = handler_entry.pos();
+  // Caught exception: Store result (exception) in the pending exception
+  // field in the JSEnv and return a failure sentinel.  Coming in here the
+  // fp will be invalid because the PushTryHandler below sets it to 0 to
+  // signal the existence of the JSEntry frame.
   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      masm->isolate())));
+                                      isolate)));
   __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
   __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
   __ b(&exit);  // b exposes branch delay slot.
   __ nop();   // Branch delay slot nop.
 
-  // Invoke: Link this frame into the handler chain.
+  // Invoke: Link this frame into the handler chain.  There's only one
+  // handler block in this code object, so its index is 0.
   __ bind(&invoke);
-  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
   // If an exception not caught by another handler occurs, this handler
   // returns control to the code at handler_entry above, which
   // restores all kCalleeSaved registers (including cp and fp) to their
   // saved values before returning a failure to C.
 
   // Clear any pending exceptions.
-  __ li(t0,
-        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
-  __ lw(t1, MemOperand(t0));
+  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      masm->isolate())));
+                                      isolate)));
   __ sw(t1, MemOperand(t0));
 
   // Invoke the function by calling through JS entry trampoline builtin.
@@ -3792,7 +4172,7 @@
   // Registers:
   // a0: entry_address
   // a1: function
-  // a2: reveiver_pointer
+  // a2: receiver_pointer
   // a3: argc
   // s0: argv
   //
@@ -3805,7 +4185,7 @@
 
   if (is_construct) {
     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
-                                      masm->isolate());
+                                      isolate);
     __ li(t0, Operand(construct_entry));
   } else {
     ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
@@ -3824,7 +4204,9 @@
   // Check if the current stack frame is marked as the outermost JS frame.
   Label non_outermost_js_2;
   __ pop(t1);
-  __ Branch(&non_outermost_js_2, ne, t1,
+  __ Branch(&non_outermost_js_2,
+            ne,
+            t1,
             Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   __ li(t1, Operand(ExternalReference(js_entry_sp)));
   __ sw(zero_reg, MemOperand(t1));
@@ -3833,7 +4215,7 @@
   // Restore the top frame descriptors from the stack.
   __ pop(t1);
   __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
-                                      masm->isolate())));
+                                      isolate)));
   __ sw(t1, MemOperand(t0));
 
   // Reset the stack to the callee saved registers.
@@ -3857,11 +4239,10 @@
 // * object: a0 or at sp + 1 * kPointerSize.
 // * function: a1 or at sp.
 //
-// Inlined call site patching is a crankshaft-specific feature that is not
-// implemented on MIPS.
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register t0.
 void InstanceofStub::Generate(MacroAssembler* masm) {
-  // This is a crankshaft-specific feature that has not been implemented yet.
-  ASSERT(!HasCallSiteInlineCheck());
   // Call site inlining and patching implies arguments in registers.
   ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
   // ReturnTrueFalse is only implemented for inlined call sites.
@@ -3875,6 +4256,8 @@
   const Register inline_site = t5;
   const Register scratch = a2;
 
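+  // Distance from the patched (inlined) map check to the instructions that
+  // load the boolean result, used when patching the result sites below.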
+  const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
+
   Label slow, loop, is_instance, is_not_instance, not_js_object;
 
   if (!HasArgsInRegisters()) {
@@ -3890,10 +4273,10 @@
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     Label miss;
-    __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
-    __ Branch(&miss, ne, function, Operand(t1));
-    __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
-    __ Branch(&miss, ne, map, Operand(t1));
+    __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
+    __ Branch(&miss, ne, function, Operand(at));
+    __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
+    __ Branch(&miss, ne, map, Operand(at));
     __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
     __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -3901,7 +4284,7 @@
   }
 
   // Get the prototype of the function.
-  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(prototype, &slow);
@@ -3913,7 +4296,16 @@
     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    ASSERT(HasArgsInRegisters());
+    // Patch the (relocated) inlined map check.
+
+    // The offset was stored in the t0 safepoint slot.
+    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+    __ LoadFromSafepointRegisterSlot(scratch, t0);
+    __ Subu(inline_site, ra, scratch);
+    // Get the map location in scratch and patch it.
+    __ GetRelocatedValue(inline_site, scratch, v1);  // v1 used as scratch.
+    __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
   }
 
   // Register mapping: a3 is object map and t0 is function prototype.
@@ -3939,7 +4331,16 @@
     __ mov(v0, zero_reg);
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    // Patch the call site to return true.
+    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ PatchRelocatedValue(inline_site, scratch, v0);
+
+    if (!ReturnTrueFalseObject()) {
+      ASSERT_EQ(Smi::FromInt(0), 0);
+      __ mov(v0, zero_reg);
+    }
   }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -3948,8 +4349,17 @@
     __ li(v0, Operand(Smi::FromInt(1)));
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    // Patch the call site to return false.
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ PatchRelocatedValue(inline_site, scratch, v0);
+
+    if (!ReturnTrueFalseObject()) {
+      __ li(v0, Operand(Smi::FromInt(1)));
+    }
   }
+
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
   Label object_not_null, object_not_null_or_smi;
@@ -3961,8 +4371,10 @@
   __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
 
   // Null is not instance of anything.
-  __ Branch(&object_not_null, ne, scratch,
-      Operand(masm->isolate()->factory()->null_value()));
+  __ Branch(&object_not_null,
+            ne,
+            scratch,
+            Operand(masm->isolate()->factory()->null_value()));
   __ li(v0, Operand(Smi::FromInt(1)));
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -3986,10 +4398,11 @@
     }
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
-    __ EnterInternalFrame();
-    __ Push(a0, a1);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(a0, a1);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
     __ mov(a0, v0);
     __ LoadRoot(v0, Heap::kTrueValueRootIndex);
     __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
@@ -4066,8 +4479,10 @@
   Label runtime;
   __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
-  __ Branch(&runtime, ne,
-            a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ Branch(&runtime,
+            ne,
+            a2,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // Patch the arguments.length and the parameters pointer in the current frame.
   __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
@@ -4099,7 +4514,9 @@
   Label adaptor_frame, try_allocate;
   __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
-  __ Branch(&adaptor_frame, eq, a2,
+  __ Branch(&adaptor_frame,
+            eq,
+            a2,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // No adaptor, parameter count = argument count.
@@ -4178,7 +4595,7 @@
     __ sw(a3, FieldMemOperand(v0, i));
   }
 
-  // Setup the callee in-object property.
+  // Set up the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ lw(a3, MemOperand(sp, 2 * kPointerSize));
   const int kCalleeOffset = JSObject::kHeaderSize +
@@ -4191,7 +4608,7 @@
       Heap::kArgumentsLengthIndex * kPointerSize;
   __ sw(a2, FieldMemOperand(v0, kLengthOffset));
 
-  // Setup the elements pointer in the allocated arguments object.
+  // Set up the elements pointer in the allocated arguments object.
   // If we allocated a parameter map, t0 will point there, otherwise
   // it will point to the backing store.
   __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
@@ -4289,11 +4706,10 @@
   __ Branch(&arguments_loop, lt, t5, Operand(a2));
 
   // Return and remove the on-stack parameters.
-  __ Addu(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(3);
 
   // Do the runtime call to allocate the arguments object.
-  // a2 = argument count (taggged)
+  // a2 = argument count (tagged)
   __ bind(&runtime);
   __ sw(a2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
   __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -4368,7 +4784,7 @@
   // Get the parameters pointer from the stack.
   __ lw(a2, MemOperand(sp, 1 * kPointerSize));
 
-  // Setup the elements pointer in the allocated arguments object and
+  // Set up the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
   __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
   __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
@@ -4380,7 +4796,7 @@
 
   // Copy the fixed array slots.
   Label loop;
-  // Setup t0 to point to the first array slot.
+  // Set up t0 to point to the first array slot.
   __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ bind(&loop);
   // Pre-decrement a2 with kPointerSize on each iteration.
@@ -4395,8 +4811,7 @@
 
   // Return and remove the on-stack parameters.
   __ bind(&done);
-  __ Addu(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(3);
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
@@ -4411,10 +4826,6 @@
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
-  if (!FLAG_regexp_entry_native) {
-    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-    return;
-  }
 
   // Stack frame on entry.
   //  sp[0]: last_match_info (expected JSArray)
@@ -4427,6 +4838,8 @@
   static const int kSubjectOffset = 2 * kPointerSize;
   static const int kJSRegExpOffset = 3 * kPointerSize;
 
+  Isolate* isolate = masm->isolate();
+
   Label runtime, invoke_regexp;
 
   // Allocation of registers for this function. These are in callee save
@@ -4442,9 +4855,9 @@
   // Ensure that a RegExp stack is allocated.
   ExternalReference address_of_regexp_stack_memory_address =
       ExternalReference::address_of_regexp_stack_memory_address(
-          masm->isolate());
+          isolate);
   ExternalReference address_of_regexp_stack_memory_size =
-      ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
+      ExternalReference::address_of_regexp_stack_memory_size(isolate);
   __ li(a0, Operand(address_of_regexp_stack_memory_size));
   __ lw(a0, MemOperand(a0, 0));
   __ Branch(&runtime, eq, a0, Operand(zero_reg));
@@ -4508,8 +4921,7 @@
   // Check that the third argument is a positive smi less than the subject
   // string length. A negative value will be greater (unsigned comparison).
   __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
-  __ And(at, a0, Operand(kSmiTagMask));
-  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ JumpIfNotSmi(a0, &runtime);
   __ Branch(&runtime, ls, a3, Operand(a0));
 
   // a2: Number of capture registers
@@ -4525,7 +4937,7 @@
          FieldMemOperand(a0, JSArray::kElementsOffset));
   __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   __ Branch(&runtime, ne, a0, Operand(
-      masm->isolate()->factory()->fixed_array_map()));
+      isolate->factory()->fixed_array_map()));
   // Check that the last match info has space for the capture registers and the
   // additional information.
   __ lw(a0,
@@ -4542,25 +4954,38 @@
   Label seq_string;
   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
-  // First check for flat string.
-  __ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
+  // First check for flat string.  None of the following string type tests will
+  // succeed if subject is not a string or a short external string.
+  __ And(a1,
+         a0,
+         Operand(kIsNotStringMask |
+                 kStringRepresentationMask |
+                 kShortExternalStringMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ Branch(&seq_string, eq, a1, Operand(zero_reg));
 
   // subject: Subject string
   // a0: instance type of Subject string
   // regexp_data: RegExp data (FixedArray)
+  // a1: whether subject is a string and, if so, its string representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string its offset has to be taken into account.
-  Label cons_string, check_encoding;
+  Label cons_string, external_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
-  __ Branch(&runtime, eq, a1, Operand(kExternalStringTag));
+  __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
+
+  // Catch non-string subject or short external string.
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+  __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
 
   // String is sliced.
   __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
@@ -4580,7 +5005,7 @@
   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kSeqStringTag == 0);
   __ And(at, a0, Operand(kStringRepresentationMask));
-  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ Branch(&external_string, ne, at, Operand(zero_reg));
 
   __ bind(&seq_string);
   // subject: Subject string
@@ -4590,11 +5015,11 @@
   STATIC_ASSERT(kAsciiStringTag == 4);
   STATIC_ASSERT(kTwoByteStringTag == 0);
   // Find the code object based on the assumptions above.
-  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ascii.
+  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ASCII.
   __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
-  __ sra(a3, a0, 2);  // a3 is 1 for ascii, 0 for UC16 (usyed below).
+  __ sra(a3, a0, 2);  // a3 is 1 for ASCII, 0 for UC16 (used below).
   __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
-  __ movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
+  __ Movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
 
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
@@ -4616,7 +5041,7 @@
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
   // All checks done. Now push arguments for native regexp code.
-  __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
+  __ IncrementCounter(isolate->counters()->regexp_entry_native(),
                       1, a0, a2);
 
   // Isolates: note we add an additional parameter here (isolate pointer).
@@ -4656,13 +5081,12 @@
 
   // Argument 5: static offsets vector buffer.
   __ li(a0, Operand(
-        ExternalReference::address_of_static_offsets_vector(masm->isolate())));
+        ExternalReference::address_of_static_offsets_vector(isolate)));
   __ sw(a0, MemOperand(sp, 1 * kPointerSize));
 
   // For arguments 4 and 3 get string length, calculate start of string data
   // and calculate the shift of the index (0 for ASCII and 1 for two byte).
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
   // Load the length from the original subject string from the previous stack
   // frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4703,23 +5127,18 @@
   // Check the result.
 
   Label success;
-  __ Branch(&success, eq,
-            v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+  __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
   Label failure;
-  __ Branch(&failure, eq,
-            v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
   // If not exception it can only be retry. Handle that in the runtime system.
-  __ Branch(&runtime, ne,
-            v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
   // Result must now be exception. If there is no pending exception already a
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
-  __ li(a1, Operand(
-      ExternalReference::the_hole_value_location(masm->isolate())));
-  __ lw(a1, MemOperand(a1, 0));
+  __ li(a1, Operand(isolate->factory()->the_hole_value()));
   __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      masm->isolate())));
+                                      isolate)));
   __ lw(v0, MemOperand(a2, 0));
   __ Branch(&runtime, eq, v0, Operand(a1));
 
@@ -4730,16 +5149,15 @@
   Label termination_exception;
   __ Branch(&termination_exception, eq, v0, Operand(a0));
 
-  __ Throw(v0);  // Expects thrown value in v0.
+  __ Throw(v0);
 
   __ bind(&termination_exception);
-  __ ThrowUncatchable(TERMINATION, v0);  // Expects thrown value in v0.
+  __ ThrowUncatchable(v0);
 
   __ bind(&failure);
   // For failure and exception return null.
-  __ li(v0, Operand(masm->isolate()->factory()->null_value()));
-  __ Addu(sp, sp, Operand(4 * kPointerSize));
-  __ Ret();
+  __ li(v0, Operand(isolate->factory()->null_value()));
+  __ DropAndRet(4);
 
   // Process the result from the native regexp code.
   __ bind(&success);
@@ -4757,20 +5175,29 @@
   __ sw(a2, FieldMemOperand(last_match_info_elements,
                              RegExpImpl::kLastCaptureCountOffset));
   // Store last subject and last input.
-  __ mov(a3, last_match_info_elements);  // Moved up to reduce latency.
   __ sw(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastSubjectOffset));
-  __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
+  __ mov(a2, subject);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastSubjectOffset,
+                      a2,
+                      t3,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs);
   __ sw(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastInputOffset));
-  __ mov(a3, last_match_info_elements);
-  __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastInputOffset,
+                      subject,
+                      t3,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
-      ExternalReference::address_of_static_offsets_vector(masm->isolate());
+      ExternalReference::address_of_static_offsets_vector(isolate);
   __ li(a2, Operand(address_of_static_offsets_vector));
 
   // a1: number of capture registers
@@ -4791,14 +5218,36 @@
   __ sll(a3, a3, kSmiTagSize);  // Convert to Smi.
   __ sw(a3, MemOperand(a0, 0));
   __ Branch(&next_capture, USE_DELAY_SLOT);
-  __ addiu(a0, a0, kPointerSize);   // In branch delay slot.
+  __ addiu(a0, a0, kPointerSize);  // In branch delay slot.
 
   __ bind(&done);
 
   // Return last match info.
   __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
-  __ Addu(sp, sp, Operand(4 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(4);
+
+  // External string.  Short external strings have already been ruled out.
+  // a0: scratch
+  __ bind(&external_string);
+  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ And(at, a0, Operand(kIsIndirectStringMask));
+    __ Assert(eq,
+              "external string expected, but not found",
+              at,
+              Operand(zero_reg));
+  }
+  __ lw(subject,
+        FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ Subu(subject,
+          subject,
+          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+  __ jmp(&seq_string);
 
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
@@ -4852,11 +5301,11 @@
 
   // Set input, index and length fields from arguments.
   __ lw(a1, MemOperand(sp, kPointerSize * 0));
+  __ lw(a2, MemOperand(sp, kPointerSize * 1));
+  __ lw(t2, MemOperand(sp, kPointerSize * 2));
   __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
-  __ lw(a1, MemOperand(sp, kPointerSize * 1));
-  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
-  __ lw(a1, MemOperand(sp, kPointerSize * 2));
-  __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
+  __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
+  __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
 
   // Fill out the elements FixedArray.
   // v0: JSArray, tagged.
@@ -4887,16 +5336,59 @@
   __ addiu(a3, a3, kPointerSize);  // In branch delay slot.
 
   __ bind(&done);
-  __ Addu(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(3);
 
   __ bind(&slowcase);
   __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
 }
 
 
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+  // Cache the called function in a global property cell.  Cache states
+  // are uninitialized, monomorphic (indicated by a JSFunction), and
+  // megamorphic.
+  // a1 : the function to call
+  // a2 : cache cell for call target
+  Label done;
+
+  ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+            masm->isolate()->heap()->undefined_value());
+  ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
+            masm->isolate()->heap()->the_hole_value());
+
+  // Load the cache state into a3.
+  __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+
+  // A monomorphic cache hit or an already megamorphic state: invoke the
+  // function without changing the state.
+  __ Branch(&done, eq, a3, Operand(a1));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&done, eq, a3, Operand(at));
+
+  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+  // megamorphic.
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+  __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
+  // An uninitialized cache is patched with the function.
+  // Store a1 in the delay slot. This may or may not get overwritten depending
+  // on the result of the comparison.
+  __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+  // No need for a write barrier here - cells are rescanned.
+
+  // MegamorphicSentinel is an immortal immovable object (undefined) so no
+  // write-barrier is needed.
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+
+  __ bind(&done);
+}
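+// The cache-state transition above, as a hedged C++ sketch (the sentinel
+// identities follow the ASSERT_EQs at the top of the function):
+//   if (cell == function || cell == undefined) return;  // hit or megamorphic
+//   cell = (cell == the_hole) ? function   // uninitialized -> monomorphic
+//                             : undefined; // monomorphic miss -> megamorphic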
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  Label slow;
+  // a1 : the function to call
+  // a2 : cache cell for call target
+  Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
   // indicated by passing the hole as the receiver to the call
@@ -4910,19 +5402,15 @@
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
     __ Branch(&call, ne, t0, Operand(at));
     // Patch the receiver on the stack with the global receiver object.
-    __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
-    __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+    __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a2, MemOperand(sp, argc_ * kPointerSize));
     __ bind(&call);
   }
 
-  // Get the function to call from the stack.
-  // function, receiver [, arguments]
-  __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
   // Check that the function is really a JavaScript function.
   // a1: pushed function (to be verified)
-  __ JumpIfSmi(a1, &slow);
+  __ JumpIfSmi(a1, &non_function);
   // Get the map of the function object.
   __ GetObjectType(a1, a2, a2);
   __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
@@ -4950,10 +5438,24 @@
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  // Check for function proxy.
+  __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ push(a1);  // Put proxy as additional argument.
+  __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
+  __ li(a2, Operand(0, RelocInfo::NONE));
+  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+  __ SetCallKind(t1, CALL_AS_METHOD);
+  {
+    Handle<Code> adaptor =
+      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    __ Jump(adaptor, RelocInfo::CODE_TARGET);
+  }
+
   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
   // of the original receiver from the call site).
+  __ bind(&non_function);
   __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
-  __ li(a0, Operand(argc_));  // Setup the number of arguments.
+  __ li(a0, Operand(argc_));  // Set up the number of arguments.
   __ mov(a2, zero_reg);
   __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
   __ SetCallKind(t1, CALL_AS_METHOD);
@@ -4962,6 +5464,48 @@
 }
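+// Hedged summary of the dispatch generated above:
+//   smi                    -> non_function -> CALL_NON_FUNCTION builtin
+//   JS_FUNCTION_TYPE       -> invoked directly (fast path)
+//   JS_FUNCTION_PROXY_TYPE -> CALL_FUNCTION_PROXY via the adaptor trampoline
+//   anything else          -> CALL_NON_FUNCTION builtin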
 
 
+void CallConstructStub::Generate(MacroAssembler* masm) {
+  // a0 : number of arguments
+  // a1 : the function to call
+  // a2 : cache cell for call target
+  Label slow, non_function_call;
+
+  // Check that the function is not a smi.
+  __ JumpIfSmi(a1, &non_function_call);
+  // Check that the function is a JSFunction.
+  __ GetObjectType(a1, a3, a3);
+  __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+  if (RecordCallTarget()) {
+    GenerateRecordCallTarget(masm);
+  }
+
+  // Jump to the function-specific construct stub.
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
+  __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+
+  // a0: number of arguments
+  // a1: called object
+  // a3: object type
+  Label do_call;
+  __ bind(&slow);
+  __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
+  __ bind(&non_function_call);
+  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
+  // Set expected number of arguments to zero (not changing a0).
+  __ li(a2, Operand(0, RelocInfo::NONE));
+  __ SetCallKind(t1, CALL_AS_METHOD);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
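+// Construct-call dispatch above, as a hedged sketch:
+//   smi                    -> CALL_NON_FUNCTION_AS_CONSTRUCTOR builtin
+//   JS_FUNCTION_TYPE       -> jump to the function's construct stub
+//   JS_FUNCTION_PROXY_TYPE -> CALL_FUNCTION_PROXY_AS_CONSTRUCTOR builtin
+//   anything else          -> CALL_NON_FUNCTION_AS_CONSTRUCTOR builtin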
+
+
 // Unfortunately you have to run without snapshots to see most of these
 // names in the profile since most compare stubs end up in the snapshot.
 void CompareStub::PrintName(StringStream* stream) {
@@ -5008,7 +5552,6 @@
   Label got_char_code;
   Label sliced_string;
 
-  ASSERT(!t0.is(scratch_));
   ASSERT(!t0.is(index_));
   ASSERT(!t0.is(result_));
   ASSERT(!t0.is(object_));
@@ -5026,102 +5569,41 @@
   // If the index is non-smi trigger the non-smi case.
   __ JumpIfNotSmi(index_, &index_not_smi_);
 
-  // Put smi-tagged index into scratch register.
-  __ mov(scratch_, index_);
   __ bind(&got_smi_index_);
 
   // Check for index out of range.
   __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
-  __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
+  __ Branch(index_out_of_range_, ls, t0, Operand(index_));
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ And(t0, result_, Operand(kStringRepresentationMask));
-  __ Branch(&flat_string, eq, t0, Operand(zero_reg));
+  __ sra(index_, index_, kSmiTagSize);
 
-  // Handle non-flat strings.
-  __ And(result_, result_, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ Branch(&sliced_string, gt, result_, Operand(kExternalStringTag));
-  __ Branch(&call_runtime_, eq, result_, Operand(kExternalStringTag));
+  StringCharLoadGenerator::Generate(masm,
+                                    object_,
+                                    index_,
+                                    result_,
+                                    &call_runtime_);
 
-  // ConsString.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  Label assure_seq_string;
-  __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
-  __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
-  __ Branch(&call_runtime_, ne, result_, Operand(t0));
-
-  // Get the first of the two strings and load its instance type.
-  __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
-  __ jmp(&assure_seq_string);
-
-  // SlicedString, unpack and add offset.
-  __ bind(&sliced_string);
-  __ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
-  __ addu(scratch_, scratch_, result_);
-  __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
-
-  // Assure that we are dealing with a sequential string. Go to runtime if not.
-  __ bind(&assure_seq_string);
-  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
-  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
-  // Check that parent is not an external string. Go to runtime otherwise.
-  STATIC_ASSERT(kSeqStringTag == 0);
-
-  __ And(t0, result_, Operand(kStringRepresentationMask));
-  __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
-
-  // Check for 1-byte or 2-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ And(t0, result_, Operand(kStringEncodingMask));
-  __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
-
-  // 2-byte string.
-  // Load the 2-byte character code into the result register. We can
-  // add without shifting since the smi tag size is the log2 of the
-  // number of bytes in a two-byte character.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
-  __ Addu(scratch_, object_, Operand(scratch_));
-  __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
-  __ Branch(&got_char_code);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-
-  __ srl(t0, scratch_, kSmiTagSize);
-  __ Addu(scratch_, object_, t0);
-
-  __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
-
-  __ bind(&got_char_code);
   __ sll(result_, result_, kSmiTagSize);
   __ bind(&exit_);
 }
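+// StringCharLoadGenerator::Generate is assumed to subsume the hand-written
+// walk deleted above, roughly:
+//   while (string is a cons or slice) {
+//     slice:     index += offset; string = parent;
+//     flat cons: string = first;  // second part must be the empty string
+//   }
+//   load byte (ASCII) or halfword (two-byte), else bail out to call_runtime_.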
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   // Index is not a smi.
   __ bind(&index_not_smi_);
   // If index is a heap number, try converting it to an integer.
   __ CheckMap(index_,
-              scratch_,
+              result_,
               Heap::kHeapNumberMapRootIndex,
               index_not_number_,
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   // Consumed by runtime conversion function:
-  __ Push(object_, index_, index_);
+  __ Push(object_, index_);
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
   } else {
@@ -5133,16 +5615,14 @@
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
 
-  __ Move(scratch_, v0);
-
-  __ pop(index_);
+  __ Move(index_, v0);
   __ pop(object_);
   // Reload the instance type.
   __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
-  __ JumpIfNotSmi(scratch_, index_out_of_range_);
+  __ JumpIfNotSmi(index_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ Branch(&got_smi_index_);
 
@@ -5151,6 +5631,7 @@
   // is too complex (e.g., when the string needs to be flattened).
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
+  __ sll(index_, index_, kSmiTagSize);
   __ Push(object_, index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
 
@@ -5194,7 +5675,8 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -5220,76 +5702,13 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
 
 
-class StringHelper : public AllStatic {
- public:
-  // Generate code for copying characters using a simple loop. This should only
-  // be used in places where the number of characters is small and the
-  // additional setup and checking in GenerateCopyCharactersLong adds too much
-  // overhead. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharacters(MacroAssembler* masm,
-                                     Register dest,
-                                     Register src,
-                                     Register count,
-                                     Register scratch,
-                                     bool ascii);
-
-  // Generate code for copying a large number of characters. This function
-  // is allowed to spend extra time setting up conditions to make copying
-  // faster. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharactersLong(MacroAssembler* masm,
-                                         Register dest,
-                                         Register src,
-                                         Register count,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Register scratch4,
-                                         Register scratch5,
-                                         int flags);
-
-
-  // Probe the symbol table for a two character string. If the string is
-  // not found by probing a jump to the label not_found is performed. This jump
-  // does not guarantee that the string is not in the symbol table. If the
-  // string is found the code falls through with the string in register r0.
-  // Contents of both c1 and c2 registers are modified. At the exit c1 is
-  // guaranteed to contain halfword with low and high bytes equal to
-  // initial contents of c1 and c2 respectively.
-  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
-                                                   Register c1,
-                                                   Register c2,
-                                                   Register scratch1,
-                                                   Register scratch2,
-                                                   Register scratch3,
-                                                   Register scratch4,
-                                                   Register scratch5,
-                                                   Label* not_found);
-
-  // Generate string hash.
-  static void GenerateHashInit(MacroAssembler* masm,
-                               Register hash,
-                               Register character);
-
-  static void GenerateHashAddCharacter(MacroAssembler* masm,
-                                       Register hash,
-                                       Register character);
-
-  static void GenerateHashGetHash(MacroAssembler* masm,
-                                  Register hash);
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                           Register dest,
                                           Register src,
@@ -5465,10 +5884,8 @@
   __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
   __ Or(c1, c1, scratch1);
   __ bind(&tmp);
-  __ Branch(not_found,
-            Uless_equal,
-            scratch,
-            Operand(static_cast<int>('9' - '0')));
+  __ Branch(
+      not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
 
   __ bind(&not_array_index);
   // Calculate the two character string hash.
@@ -5540,10 +5957,10 @@
     __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
 
     __ Branch(not_found, eq, undefined, Operand(candidate));
-    // Must be null (deleted entry).
+    // Must be the hole (deleted entry).
     if (FLAG_debug_code) {
-      __ LoadRoot(scratch, Heap::kNullValueRootIndex);
-      __ Assert(eq, "oddball in symbol table is not undefined or null",
+      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+      __ Assert(eq, "oddball in symbol table is not undefined or the hole",
           scratch, Operand(candidate));
     }
     __ jmp(&next_probe[i]);
@@ -5587,7 +6004,7 @@
   __ sll(at, hash, 10);
   __ addu(hash, hash, at);
   // hash ^= hash >> 6;
-  __ sra(at, hash, 6);
+  __ srl(at, hash, 6);
   __ xor_(hash, hash, at);
 }
 
@@ -5601,7 +6018,7 @@
   __ sll(at, hash, 10);
   __ addu(hash, hash, at);
   // hash ^= hash >> 6;
-  __ sra(at, hash, 6);
+  __ srl(at, hash, 6);
   __ xor_(hash, hash, at);
 }
 
@@ -5612,20 +6029,23 @@
   __ sll(at, hash, 3);
   __ addu(hash, hash, at);
   // hash ^= hash >> 11;
-  __ sra(at, hash, 11);
+  __ srl(at, hash, 11);
   __ xor_(hash, hash, at);
   // hash += hash << 15;
   __ sll(at, hash, 15);
   __ addu(hash, hash, at);
 
+  __ li(at, Operand(String::kHashBitMask));
+  __ and_(hash, hash, at);
+
   // if (hash == 0) hash = 27;
-  __ ori(at, zero_reg, 27);
-  __ movz(hash, at, hash);
+  __ ori(at, zero_reg, StringHasher::kZeroHash);
+  __ Movz(hash, at, hash);
 }
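+// Taken together, GenerateHashInit/AddCharacter/GetHash now compute (hedged
+// sketch; uint32_t stands in for the untagged register value, and the
+// unsigned >> matches the srl fix above):
+//   for each character c: hash += c; hash += hash << 10; hash ^= hash >> 6;
+//   hash += hash << 3; hash ^= hash >> 11; hash += hash << 15;
+//   hash &= String::kHashBitMask;
+//   if (hash == 0) hash = StringHasher::kZeroHash;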
 
 
 void SubStringStub::Generate(MacroAssembler* masm) {
-  Label sub_string_runtime;
+  Label runtime;
   // Stack frame on entry.
   //  ra: return address
   //  sp[0]: to
@@ -5643,53 +6063,31 @@
   static const int kFromOffset = 1 * kPointerSize;
   static const int kStringOffset = 2 * kPointerSize;
 
-  Register to = t2;
-  Register from = t3;
-
-  // Check bounds and smi-ness.
-  __ lw(to, MemOperand(sp, kToOffset));
-  __ lw(from, MemOperand(sp, kFromOffset));
+  __ lw(a2, MemOperand(sp, kToOffset));
+  __ lw(a3, MemOperand(sp, kFromOffset));
   STATIC_ASSERT(kFromOffset == kToOffset + 4);
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
 
-  __ JumpIfNotSmi(from, &sub_string_runtime);
-  __ JumpIfNotSmi(to, &sub_string_runtime);
+  // Utilize delay slots: SmiUntag doesn't emit a jump, and everything else
+  // here is safe to execute in a delay slot.
+  __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
+  __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
+  // Both a2 and a3 are untagged integers.
 
-  __ sra(a3, from, kSmiTagSize);  // Remove smi tag.
-  __ sra(t5, to, kSmiTagSize);  // Remove smi tag.
+  __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.
 
-  // a3: from index (untagged smi)
-  // t5: to index (untagged smi)
+  __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
+  __ Subu(a2, a2, a3);
 
-  __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg));  // From < 0.
-
-  __ subu(a2, t5, a3);
-  __ Branch(&sub_string_runtime, gt, a3, Operand(t5));  // Fail if from > to.
-
-  // Special handling of sub-strings of length 1 and 2. One character strings
-  // are handled in the runtime system (looked up in the single character
-  // cache). Two character strings are looked for in the symbol cache in
-  // generated code.
-  __ Branch(&sub_string_runtime, lt, a2, Operand(2));
-
-  // Both to and from are smis.
-
-  // a2: result string length
-  // a3: from index (untagged smi)
-  // t2: (a.k.a. to): to (smi)
-  // t3: (a.k.a. from): from offset (smi)
-  // t5: to index (untagged smi)
-
-  // Make sure first argument is a sequential (or flat) string.
+  // Make sure first argument is a string.
   __ lw(v0, MemOperand(sp, kStringOffset));
-  __ Branch(&sub_string_runtime, eq, v0, Operand(kSmiTagMask));
-
+  __ JumpIfSmi(v0, &runtime);
   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  __ And(t4, v0, Operand(kIsNotStringMask));
+  __ And(t0, a1, Operand(kIsNotStringMask));
 
-  __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
+  __ Branch(&runtime, ne, t0, Operand(zero_reg));
 
   // Short-cut for the case of trivial substring.
   Label return_v0;
@@ -5699,74 +6097,16 @@
   __ sra(t0, t0, 1);
   __ Branch(&return_v0, eq, a2, Operand(t0));
 
-  Label create_slice;
-  if (FLAG_string_slices) {
-    __ Branch(&create_slice, ge, a2, Operand(SlicedString::kMinLength));
-  }
-
-  // v0: original string
-  // a1: instance type
-  // a2: result string length
-  // a3: from index (untagged smi)
-  // t2: (a.k.a. to): to (smi)
-  // t3: (a.k.a. from): from offset (smi)
-  // t5: to index (untagged smi)
-
-  Label seq_string;
-  __ And(t0, a1, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  STATIC_ASSERT(kConsStringTag < kSlicedStringTag);
-
-  // Slices and external strings go to runtime.
-  __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
-
-  // Sequential strings are handled directly.
-  __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
-
-  // Cons string. Try to recurse (once) on the first substring.
-  // (This adds a little more generality than necessary to handle flattened
-  // cons strings, but not much).
-  __ lw(v0, FieldMemOperand(v0, ConsString::kFirstOffset));
-  __ lw(t0, FieldMemOperand(v0, HeapObject::kMapOffset));
-  __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kSeqStringTag == 0);
-  // Cons, slices and external strings go to runtime.
-  __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
-
-  // Definitly a sequential string.
-  __ bind(&seq_string);
-
-  // v0: original string
-  // a1: instance type
-  // a2: result string length
-  // a3: from index (untagged smi)
-  // t2: (a.k.a. to): to (smi)
-  // t3: (a.k.a. from): from offset (smi)
-  // t5: to index (untagged smi)
-
-  __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
-  __ Branch(&sub_string_runtime, lt, t0, Operand(to));  // Fail if to > length.
-  to = no_reg;
-
-  // v0: original string or left hand side of the original cons string.
-  // a1: instance type
-  // a2: result string length
-  // a3: from index (untagged smi)
-  // t3: (a.k.a. from): from offset (smi)
-  // t5: to index (untagged smi)
-
-  // Check for flat ASCII string.
-  Label non_ascii_flat;
-  STATIC_ASSERT(kTwoByteStringTag == 0);
-
-  __ And(t4, a1, Operand(kStringEncodingMask));
-  __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
 
   Label result_longer_than_two;
-  __ Branch(&result_longer_than_two, gt, a2, Operand(2));
+  // Check for special case of two character ASCII string, in which case
+  // we do a lookup in the symbol table first.
+  __ li(t0, 2);
+  __ Branch(&result_longer_than_two, gt, a2, Operand(t0));
+  __ Branch(&runtime, lt, a2, Operand(t0));
 
-  // Sub string of length 2 requested.
+  __ JumpIfInstanceTypeIsNotSequentialAscii(a1, a1, &runtime);
+
   // Get the two characters forming the sub string.
   __ Addu(v0, v0, Operand(a3));
   __ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
@@ -5776,31 +6116,126 @@
   Label make_two_character_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
       masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
-  Counters* counters = masm->isolate()->counters();
   __ jmp(&return_v0);
 
   // a2: result string length.
   // a3: two characters combined into halfword in little endian byte order.
   __ bind(&make_two_character_string);
-  __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
+  __ AllocateAsciiString(v0, a2, t0, t1, t4, &runtime);
   __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
   __ jmp(&return_v0);
 
   __ bind(&result_longer_than_two);
 
-  // Locate 'from' character of string.
-  __ Addu(t1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ sra(t4, from, 1);
-  __ Addu(t1, t1, t4);
+  // Deal with different string types: update the index if necessary
+  // and put the underlying string into t1.
+  // v0: original string
+  // a1: instance type
+  // a2: length
+  // a3: from index (untagged)
+  Label underlying_unpacked, sliced_string, seq_or_external_string;
+  // If the string is not indirect, it can only be sequential or external.
+  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+  STATIC_ASSERT(kIsIndirectStringMask != 0);
+  __ And(t0, a1, Operand(kIsIndirectStringMask));
+  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
+  // t0 is used as a scratch register and can be overwritten in either case.
+  __ And(t0, a1, Operand(kSlicedNotConsMask));
+  __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
+  // Cons string.  Check whether it is flat, then fetch first part.
+  __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
+  __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
+  __ Branch(&runtime, ne, t1, Operand(t0));
+  __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
+  // Update instance type.
+  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+  __ jmp(&underlying_unpacked);
 
-  // Allocate the result.
-  __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
+  __ bind(&sliced_string);
+  // Sliced string.  Fetch parent and correct start index by offset.
+  __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
+  __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
+  __ sra(t0, t0, 1);  // Untag the slice offset.
+  __ Addu(a3, a3, t0);  // Add offset to index.
+  // Update instance type.
+  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+  __ jmp(&underlying_unpacked);
 
-  // v0: result string
-  // a2: result string length
-  // a3: from index (untagged smi)
-  // t1: first character of substring to copy
-  // t3: (a.k.a. from): from offset (smi)
+  __ bind(&seq_or_external_string);
+  // Sequential or external string.  Just move string to the expected register.
+  __ mov(t1, v0);
+
+  __ bind(&underlying_unpacked);
+
+  if (FLAG_string_slices) {
+    Label copy_routine;
+    // t1: underlying subject string
+    // a1: instance type of underlying subject string
+    // a2: length
+    // a3: adjusted start index (untagged)
+    // Short slice.  Copy instead of slicing.
+    __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
+    // Allocate new sliced string.  At this point we do not reload the instance
+    // type including the string encoding because we simply rely on the info
+    // provided by the original string.  It does not matter if the original
+    // string's encoding is wrong because we always have to recheck encoding of
+    // the newly created string's parent anyway due to externalized strings.
+    Label two_byte_slice, set_slice_header;
+    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+    __ And(t0, a1, Operand(kStringEncodingMask));
+    __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
+    __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
+    __ jmp(&set_slice_header);
+    __ bind(&two_byte_slice);
+    __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
+    __ bind(&set_slice_header);
+    __ sll(a3, a3, 1);
+    __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
+    __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
+    __ jmp(&return_v0);
+
+    __ bind(&copy_routine);
+  }
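+  // Path choice in this stub, summarized (hedged):
+  //   length <  2                        -> runtime (1-char cache lives there)
+  //   length == 2                        -> symbol-table probe / fresh string
+  //   length >= SlicedString::kMinLength -> O(1) sliced string (above)
+  //   otherwise                          -> copy into a new sequential string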
+
+  // t1: underlying subject string
+  // a1: instance type of underlying subject string
+  // a2: length
+  // a3: adjusted start index (untagged)
+  Label two_byte_sequential, sequential_string, allocate_result;
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(t0, a1, Operand(kExternalStringTag));
+  __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
+
+  // Handle external string.
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ And(t0, a1, Operand(kShortExternalStringTag));
+  __ Branch(&runtime, ne, t0, Operand(zero_reg));
+  __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
+  // t1 already points to the first character of underlying string.
+  __ jmp(&allocate_result);
+
+  __ bind(&sequential_string);
+  // Locate first character of underlying subject string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+  __ bind(&allocate_result);
+  // Check the encoding of the underlying string and allocate the result.
+  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+  __ And(t0, a1, Operand(kStringEncodingMask));
+  __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
+
+  // Allocate and copy the resulting ASCII string.
+  __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
+
+  // Locate first character of substring to copy.
+  __ Addu(t1, t1, a3);
+
   // Locate first character of result.
   __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
 
@@ -5813,30 +6248,17 @@
       masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
   __ jmp(&return_v0);
 
-  __ bind(&non_ascii_flat);
-  // a2: result string length
-  // t1: string
-  // t3: (a.k.a. from): from offset (smi)
-  // Check for flat two byte string.
+  // Allocate and copy the resulting two-byte string.
+  __ bind(&two_byte_sequential);
+  __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
 
-  // Locate 'from' character of string.
-  __ Addu(t1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // As "from" is a smi it is 2 times the value which matches the size of a two
-  // byte character.
+  // Locate first character of substring to copy.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ Addu(t1, t1, Operand(from));
-
-  // Allocate the result.
-  __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
-
-  // v0: result string
-  // a2: result string length
-  // t1: first character of substring to copy
+  __ sll(t0, a3, 1);
+  __ Addu(t1, t1, t0);
   // Locate first character of result.
   __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
 
-  from = no_reg;
-
   // v0: result string.
   // a1: first character of result.
   // a2: result length.
@@ -5844,77 +6266,14 @@
   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   StringHelper::GenerateCopyCharactersLong(
       masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
-  __ jmp(&return_v0);
-
-  if (FLAG_string_slices) {
-    __ bind(&create_slice);
-    // v0: original string
-    // a1: instance type
-    // a2: length
-    // a3: from index (untagged smi)
-    // t2 (a.k.a. to): to (smi)
-    // t3 (a.k.a. from): from offset (smi)
-    Label allocate_slice, sliced_string, seq_string;
-    STATIC_ASSERT(kSeqStringTag == 0);
-    __ And(t4, a1, Operand(kStringRepresentationMask));
-    __ Branch(&seq_string, eq, t4, Operand(zero_reg));
-    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-    STATIC_ASSERT(kIsIndirectStringMask != 0);
-    __ And(t4, a1, Operand(kIsIndirectStringMask));
-    // External string.  Jump to runtime.
-    __ Branch(&sub_string_runtime, eq, t4, Operand(zero_reg));
-
-    __ And(t4, a1, Operand(kSlicedNotConsMask));
-    __ Branch(&sliced_string, ne, t4, Operand(zero_reg));
-    // Cons string.  Check whether it is flat, then fetch first part.
-    __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
-    __ LoadRoot(t5, Heap::kEmptyStringRootIndex);
-    __ Branch(&sub_string_runtime, ne, t1, Operand(t5));
-    __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
-    __ jmp(&allocate_slice);
-
-    __ bind(&sliced_string);
-    // Sliced string.  Fetch parent and correct start index by offset.
-    __ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset));
-    __ addu(t3, t3, t1);
-    __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
-    __ jmp(&allocate_slice);
-
-    __ bind(&seq_string);
-    // Sequential string.  Just move string to the right register.
-    __ mov(t1, v0);
-
-    __ bind(&allocate_slice);
-    // a1: instance type of original string
-    // a2: length
-    // t1: underlying subject string
-    // t3 (a.k.a. from): from offset (smi)
-    // Allocate new sliced string.  At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string.  It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyways due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ And(t4, a1, Operand(kStringEncodingMask));
-    __ Branch(&two_byte_slice, eq, t4, Operand(zero_reg));
-    __ AllocateAsciiSlicedString(v0, a2, a3, t0, &sub_string_runtime);
-    __ jmp(&set_slice_header);
-    __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(v0, a2, a3, t0, &sub_string_runtime);
-    __ bind(&set_slice_header);
-    __ sw(t3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
-    __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
-  }
 
   __ bind(&return_v0);
+  Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
-  __ Addu(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(3);
 
   // Just jump to runtime to create the sub string.
-  __ bind(&sub_string_runtime);
+  __ bind(&runtime);
   __ TailCallRuntime(Runtime::kSubString, 3, 1);
 }
 
@@ -5971,7 +6330,7 @@
   __ Subu(scratch3, scratch1, Operand(scratch2));
   Register length_delta = scratch3;
   __ slt(scratch4, scratch2, scratch1);
-  __ movn(scratch1, scratch2, scratch4);
+  __ Movn(scratch1, scratch2, scratch4);
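+  // Movn is a conditional move; with the slt above it computes, in C terms:
+  //   min_length = (scratch2 < scratch1) ? scratch2 : scratch1;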
   Register min_length = scratch1;
   STATIC_ASSERT(kSmiTag == 0);
   __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
@@ -6053,8 +6412,7 @@
   STATIC_ASSERT(kSmiTag == 0);
   __ li(v0, Operand(Smi::FromInt(EQUAL)));
   __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
-  __ Addu(sp, sp, Operand(2 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(2);
 
   __ bind(&not_same);
 
@@ -6072,7 +6430,7 @@
 
 
 void StringAddStub::Generate(MacroAssembler* masm) {
-  Label string_add_runtime, call_builtin;
+  Label call_runtime, call_builtin;
   Builtins::JavaScript builtin_id = Builtins::ADD;
 
   Counters* counters = masm->isolate()->counters();
@@ -6087,7 +6445,7 @@
 
   // Make sure that both arguments are strings if not known in advance.
   if (flags_ == NO_STRING_ADD_FLAGS) {
-    __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
+    __ JumpIfEitherSmi(a0, a1, &call_runtime);
     // Load instance types.
     __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
     __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
@@ -6097,7 +6455,7 @@
     // If either is not a string, go to runtime.
     __ Or(t4, t0, Operand(t1));
     __ And(t4, t4, Operand(kIsNotStringMask));
-    __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+    __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
   } else {
     // Here at least one of the arguments is definitely a string.
     // We convert the one that is not known to be a string.
@@ -6129,15 +6487,14 @@
     __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
     __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
     __ mov(v0, a0);       // Assume we'll return first string (from a0).
-    __ movz(v0, a1, a2);  // If first is empty, return second (from a1).
+    __ Movz(v0, a1, a2);  // If first is empty, return second (from a1).
     __ slt(t4, zero_reg, a2);   // if (a2 > 0) t4 = 1.
     __ slt(t5, zero_reg, a3);   // if (a3 > 0) t5 = 1.
     __ and_(t4, t4, t5);        // Branch if both strings were non-empty.
     __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
 
     __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
-    __ Addu(sp, sp, Operand(2 * kPointerSize));
-    __ Ret();
+    __ DropAndRet(2);
 
     __ bind(&strings_not_empty);
   }
@@ -6170,7 +6527,7 @@
     __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
   }
   __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
-                                                 &string_add_runtime);
+                                                 &call_runtime);
 
   // Get the two characters forming the sub string.
   __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
@@ -6180,10 +6537,9 @@
   // just allocate a new one.
   Label make_two_character_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
+      masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
-  __ Addu(sp, sp, Operand(2 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(2);
 
   __ bind(&make_two_character_string);
   // Resulting string has length 2 and first chars of two strings
@@ -6192,21 +6548,19 @@
   // halfword store instruction (which assumes that processor is
   // in a little endian mode).
   __ li(t2, Operand(2));
-  __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
+  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
   __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
-  __ Addu(sp, sp, Operand(2 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(2);
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ Branch(&string_add_flat_result, lt, t2,
-           Operand(String::kMinNonFlatLength));
+  __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
   // Handle exceptionally long strings in the runtime system.
   STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
   ASSERT(IsPowerOf2(String::kMaxLength + 1));
   // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
-  __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
+  __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
 
   // If result is not supposed to be flat, allocate a cons string object.
   // If both strings are ASCII the result is an ASCII cons string.
@@ -6218,22 +6572,20 @@
   }
   Label non_ascii, allocated, ascii_data;
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  // Branch to non_ascii if either string-encoding field is zero (non-ascii).
+  // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
   __ And(t4, t0, Operand(t1));
   __ And(t4, t4, Operand(kStringEncodingMask));
   __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
 
   // Allocate an ASCII cons string.
   __ bind(&ascii_data);
-  __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
+  __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
-  __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
-  __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
-  __ mov(v0, t3);
+  __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
+  __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
-  __ Addu(sp, sp, Operand(2 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(2);
 
   __ bind(&non_ascii);
   // At least one of the strings is two-byte. Check whether it happens
@@ -6251,11 +6603,13 @@
   __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
 
   // Allocate a two byte cons string.
-  __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
+  __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
   __ Branch(&allocated);
 
-  // Handle creating a flat result. First check that both strings are
-  // sequential and that they have the same encoding.
+  // We cannot encounter sliced strings or cons strings here since:
+  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
+  // Handle creating a flat result from either external or sequential strings.
+  // Locate the first characters' locations.
   // a0: first string
   // a1: second string
   // a2: length of first string
@@ -6263,6 +6617,7 @@
   // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   // t2: sum of lengths.
+  Label first_prepared, second_prepared;
   __ bind(&string_add_flat_result);
   if (flags_ != NO_STRING_ADD_FLAGS) {
     __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
@@ -6270,101 +6625,86 @@
     __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
     __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
   }
-  // Check that both strings are sequential, meaning that we
-  // branch to runtime if either string tag is non-zero.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ Or(t4, t0, Operand(t1));
-  __ And(t4, t4, Operand(kStringRepresentationMask));
-  __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+  // Check whether both strings have the same encoding.
+  __ Xor(t3, t0, Operand(t1));
+  __ And(t3, t3, Operand(kStringEncodingMask));
+  __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
 
-  // Now check if both strings have the same encoding (ASCII/Two-byte).
-  // a0: first string
-  // a1: second string
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(t4, t0, Operand(kStringRepresentationMask));
+
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  Label skip_first_add;
+  __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
+  __ Branch(USE_DELAY_SLOT, &first_prepared);
+  __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+  __ bind(&skip_first_add);
+  // External string: rule out short external string and load string resource.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ And(t4, t0, Operand(kShortExternalStringMask));
+  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
+  __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
+  __ bind(&first_prepared);
+
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(t4, t1, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  Label skip_second_add;
+  __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
+  __ Branch(USE_DELAY_SLOT, &second_prepared);
+  __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+  __ bind(&skip_second_add);
+  // External string: rule out short external string and load string resource.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ And(t4, t1, Operand(kShortExternalStringMask));
+  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
+  __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
+  __ bind(&second_prepared);
+
+  Label non_ascii_string_add_flat_result;
+  // t3: first character of first string
+  // a1: first character of second string
   // a2: length of first string
   // a3: length of second string
-  // t0: first string instance type
-  // t1: second string instance type
   // t2: sum of lengths.
-  Label non_ascii_string_add_flat_result;
-  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
-  __ xor_(t3, t1, t0);
-  __ And(t3, t3, Operand(kStringEncodingMask));
-  __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
-  // And see if it's ASCII (0) or two-byte (1).
-  __ And(t3, t0, Operand(kStringEncodingMask));
-  __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
+  // Both strings have the same encoding.
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ And(t4, t1, Operand(kStringEncodingMask));
+  __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
 
-  // Both strings are sequential ASCII strings. We also know that they are
-  // short (since the sum of the lengths is less than kMinNonFlatLength).
-  // t2: length of resulting flat string
-  __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
-  // Locate first character of result.
-  __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // Locate first character of first argument.
-  __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // a0: first character of first string.
-  // a1: second string.
+  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
+  __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // v0: result string.
+  // t3: first character of first string.
+  // a1: first character of second string
   // a2: length of first string.
   // a3: length of second string.
   // t2: first character of result.
-  // t3: result string.
-  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
 
-  // Load second argument and locate first character.
-  __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // a1: first character of second string.
-  // a3: length of second string.
+  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
   // t2: next character of result.
-  // t3: result string.
   StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
-  __ mov(v0, t3);
   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
-  __ Addu(sp, sp, Operand(2 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(2);
 
   __ bind(&non_ascii_string_add_flat_result);
-  // Both strings are sequential two byte strings.
-  // a0: first string.
-  // a1: second string.
-  // a2: length of first string.
-  // a3: length of second string.
-  // t2: sum of length of strings.
-  __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
-  // a0: first string.
-  // a1: second string.
-  // a2: length of first string.
-  // a3: length of second string.
-  // t3: result string.
-
-  // Locate first character of result.
-  __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // Locate first character of first argument.
-  __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  // a0: first character of first string.
-  // a1: second string.
+  __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
+  __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // v0: result string.
+  // t3: first character of first string.
+  // a1: first character of second string.
   // a2: length of first string.
   // a3: length of second string.
   // t2: first character of result.
-  // t3: result string.
-  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
-
-  // Locate first character of second argument.
-  __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  // a1: first character of second string.
-  // a3: length of second string.
-  // t2: next character of result (after copy of first string).
-  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
+  // t2: next character of result.
   StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
 
-  __ mov(v0, t3);
   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
-  __ Addu(sp, sp, Operand(2 * kPointerSize));
-  __ Ret();
+  __ DropAndRet(2);
 
   // Just jump to runtime to add the two strings.
-  __ bind(&string_add_runtime);
+  __ bind(&call_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 
   if (call_builtin.is_linked()) {
@@ -6446,15 +6786,15 @@
   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
 
   Label generic_stub;
-  Label unordered;
+  Label unordered, maybe_undefined1, maybe_undefined2;
   Label miss;
   __ And(a2, a1, Operand(a0));
   __ JumpIfSmi(a2, &generic_stub);
 
   __ GetObjectType(a0, a2, a2);
-  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+  __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
   __ GetObjectType(a1, a2, a2);
-  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+  __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
 
   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or FPU is unsupported.
@@ -6467,28 +6807,18 @@
     __ Subu(a2, a0, Operand(kHeapObjectTag));
     __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
 
-    Label fpu_eq, fpu_lt, fpu_gt;
-    // Compare operands (test if unordered).
-    __ c(UN, D, f0, f2);
-    // Don't base result on status bits when a NaN is involved.
-    __ bc1t(&unordered);
-    __ nop();
+    // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
+    Label fpu_eq, fpu_lt;
+    // Test if equal, and also handle the unordered/NaN case.
+    __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
 
-    // Test if equal.
-    __ c(EQ, D, f0, f2);
-    __ bc1t(&fpu_eq);
-    __ nop();
+    // Test if less (unordered case is already handled).
+    __ BranchF(&fpu_lt, NULL, lt, f0, f2);
 
-    // Test if unordered or less (unordered case is already handled).
-    __ c(ULT, D, f0, f2);
-    __ bc1t(&fpu_lt);
-    __ nop();
+    // Otherwise it's greater, so just fall through and return.
+    __ li(v0, Operand(GREATER));
+    __ Ret();
 
-    // Otherwise it's greater.
-    __ bc1f(&fpu_gt);
-    __ nop();
-
-    // Return a result of -1, 0, or 1.
     __ bind(&fpu_eq);
     __ li(v0, Operand(EQUAL));
     __ Ret();
@@ -6496,18 +6826,29 @@
     __ bind(&fpu_lt);
     __ li(v0, Operand(LESS));
     __ Ret();
-
-    __ bind(&fpu_gt);
-    __ li(v0, Operand(GREATER));
-    __ Ret();
-
-    __ bind(&unordered);
   }
 
+  __ bind(&unordered);
+
   CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
   __ bind(&generic_stub);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
+  __ bind(&maybe_undefined1);
+  if (Token::IsOrderedRelationalCompareOp(op_)) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(&miss, ne, a0, Operand(at));
+    __ GetObjectType(a1, a2, a2);
+    __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
+    __ jmp(&unordered);
+  }
+
+  __ bind(&maybe_undefined2);
+  if (Token::IsOrderedRelationalCompareOp(op_)) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(&unordered, eq, a1, Operand(at));
+  }
+
   __ bind(&miss);
   GenerateMiss(masm);
 }
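+// The FPU fast path above now computes, in effect (NaN operands instead take
+// the &unordered exit to the generic CompareStub):
+//   result = (a == b) ? EQUAL : (a < b) ? LESS : GREATER;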
@@ -6555,6 +6896,8 @@
   ASSERT(state_ == CompareIC::STRINGS);
   Label miss;
 
+  bool equality = Token::IsEqualityOp(op_);
+
   // Registers containing left and right operands respectively.
   Register left = a1;
   Register right = a0;
@@ -6582,41 +6925,52 @@
   Label left_ne_right;
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
-  __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
+  __ Branch(&left_ne_right, ne, left, Operand(right));
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, zero_reg);  // In the delay slot.
-  __ Ret();
   __ bind(&left_ne_right);
 
   // Handle not identical strings.
 
   // Check that both strings are symbols. If they are, we're done
   // because we already know they are not identical.
-  ASSERT(GetCondition() == eq);
-  STATIC_ASSERT(kSymbolTag != 0);
-  __ And(tmp3, tmp1, Operand(tmp2));
-  __ And(tmp5, tmp3, Operand(kIsSymbolMask));
-  Label is_symbol;
-  __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
-  __ mov(v0, a0);  // In the delay slot.
-  // Make sure a0 is non-zero. At this point input operands are
-  // guaranteed to be non-zero.
-  ASSERT(right.is(a0));
-  __ Ret();
-  __ bind(&is_symbol);
+  if (equality) {
+    ASSERT(GetCondition() == eq);
+    STATIC_ASSERT(kSymbolTag != 0);
+    __ And(tmp3, tmp1, Operand(tmp2));
+    __ And(tmp5, tmp3, Operand(kIsSymbolMask));
+    Label is_symbol;
+    __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
+    // Make sure a0 is non-zero. At this point input operands are
+    // guaranteed to be non-zero.
+    ASSERT(right.is(a0));
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(v0, a0);  // In the delay slot.
+    __ bind(&is_symbol);
+  }
 
   // Check that both strings are sequential ASCII.
   Label runtime;
-  __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
-                                                  &runtime);
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
+      tmp1, tmp2, tmp3, tmp4, &runtime);
 
   // Compare flat ASCII strings. Returns when done.
-  StringCompareStub::GenerateFlatAsciiStringEquals(
-      masm, left, right, tmp1, tmp2, tmp3);
+  if (equality) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(
+        masm, left, right, tmp1, tmp2, tmp3);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(
+        masm, left, right, tmp1, tmp2, tmp3, tmp4);
+  }
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
   __ Push(left, right);
-  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+  if (equality) {
+    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+  } else {
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+  }
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -6635,33 +6989,48 @@
   __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
 
   ASSERT(GetCondition() == eq);
-  __ Subu(v0, a0, Operand(a1));
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ subu(v0, a0, a1);
 
   __ bind(&miss);
   GenerateMiss(masm);
 }
 
 
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  __ Push(a1, a0);
-  __ push(ra);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+  Label miss;
+  __ And(a2, a1, a0);
+  __ JumpIfSmi(a2, &miss);
+  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a2, Operand(known_map_));
+  __ Branch(&miss, ne, a3, Operand(known_map_));
 
-  // Call the runtime system in a fresh internal frame.
-  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
-                                             masm->isolate());
-  __ EnterInternalFrame();
-  __ Push(a1, a0);
-  __ li(t0, Operand(Smi::FromInt(op_)));
-  __ push(t0);
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
-  // Compute the entry point of the rewritten stub.
-  __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  // Restore registers.
-  __ pop(ra);
-  __ pop(a0);
-  __ pop(a1);
+  __ Ret(USE_DELAY_SLOT);
+  __ subu(v0, a0, a1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+  {
+    // Call the runtime system in a fresh internal frame.
+    ExternalReference miss =
+        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a0);
+    __ push(ra);
+    __ Push(a1, a0);
+    __ li(t0, Operand(Smi::FromInt(op_)));
+    __ addiu(sp, sp, -kPointerSize);
+    __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
+    __ sw(t0, MemOperand(sp));  // In the delay slot.
+    // Compute the entry point of the rewritten stub.
+    __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Restore registers.
+    __ Pop(a1, a0, ra);
+  }
   __ Jump(a2);
 }
 
@@ -6672,7 +7041,7 @@
   // The saved ra is after the reserved stack space for the 4 args.
   __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
 
-  if (FLAG_debug_code && EnableSlowAsserts()) {
+  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
     // In case of an error the return address may point to a memory area
     // filled with kZapValue by the GC.
     // Dereference the address and check for this.
@@ -6713,8 +7082,10 @@
   // Push return address (accessible to GC through exit frame pc).
   // This spot for ra was reserved in EnterExitFrame.
   masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
-  masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
-                    RelocInfo::CODE_TARGET), true);
+  masm->li(ra,
+           Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+                   RelocInfo::CODE_TARGET),
+           CONSTANT_SIZE);
   // Call the function.
   masm->Jump(t9);
   // Make sure the stored 'ra' points to this position.
@@ -6722,19 +7093,18 @@
 }
 
 
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register receiver,
-    Register properties,
-    String* name,
-    Register scratch0) {
-// If names of slots in range from 1 to kProbes - 1 for the hash value are
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register receiver,
+                                                        Register properties,
+                                                        Handle<String> name,
+                                                        Register scratch0) {
+  // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
   // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
+  // (their names are the hole value).
   for (int i = 0; i < kInlinedProbes; i++) {
     // scratch0 points to properties hash.
     // Compute the masked index: (hash + i + i * i) & mask.
@@ -6743,20 +7113,17 @@
     __ lw(index, FieldMemOperand(properties, kCapacityOffset));
     __ Subu(index, index, Operand(1));
     __ And(index, index, Operand(
-         Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+        Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
 
     // Scale the index by multiplying by the entry size.
     ASSERT(StringDictionary::kEntrySize == 3);
-    // index *= 3.
-    __ mov(at, index);
-    __ sll(index, index, 1);
+    __ sll(at, index, 1);
     __ Addu(index, index, at);
 
     Register entity_name = scratch0;
     // Having undefined at this place means the name is not contained.
     ASSERT_EQ(kSmiTagSize, 1);
     Register tmp = properties;
-
     __ sll(scratch0, index, 1);
     __ Addu(tmp, properties, scratch0);
     __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
@@ -6766,9 +7133,15 @@
     __ Branch(done, eq, entity_name, Operand(tmp));
 
     if (i != kInlinedProbes - 1) {
+      // Load the hole ready for use below:
+      __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
+
       // Stop if found the property.
       __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
 
+      Label the_hole;
+      __ Branch(&the_hole, eq, entity_name, Operand(tmp));
+
       // Check if the entry name is not a symbol.
       __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
       __ lbu(entity_name,
@@ -6776,6 +7149,8 @@
       __ And(scratch0, entity_name, Operand(kIsSymbolMask));
       __ Branch(miss, eq, scratch0, Operand(zero_reg));
 
+      __ bind(&the_hole);
+
       // Restore the properties.
       __ lw(properties,
             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
@@ -6784,19 +7159,18 @@
 
   const int spill_mask =
       (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
-       a2.bit() | a1.bit() | a0.bit());
+       a2.bit() | a1.bit() | a0.bit() | v0.bit());
 
   __ MultiPush(spill_mask);
   __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ li(a1, Operand(Handle<String>(name)));
   StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
-  MaybeObject* result = masm->TryCallStub(&stub);
-  if (result->IsFailure()) return result;
+  __ CallStub(&stub);
+  __ mov(at, v0);
   __ MultiPop(spill_mask);
 
-  __ Branch(done, eq, v0, Operand(zero_reg));
-  __ Branch(miss, ne, v0, Operand(zero_reg));
-  return result;
+  __ Branch(done, eq, at, Operand(zero_reg));
+  __ Branch(miss, ne, at, Operand(zero_reg));
 }
 
 
@@ -6811,6 +7185,11 @@
                                                         Register name,
                                                         Register scratch1,
                                                         Register scratch2) {
+  ASSERT(!elements.is(scratch1));
+  ASSERT(!elements.is(scratch2));
+  ASSERT(!name.is(scratch1));
+  ASSERT(!name.is(scratch2));
+
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
 
@@ -6841,8 +7220,7 @@
     ASSERT(StringDictionary::kEntrySize == 3);
     // scratch2 = scratch2 * 3.
 
-    __ mov(at, scratch2);
-    __ sll(scratch2, scratch2, 1);
+    __ sll(at, scratch2, 1);
     __ Addu(scratch2, scratch2, at);
 
     // Check if the key is identical to the name.
@@ -6854,23 +7232,32 @@
 
   const int spill_mask =
       (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
-       a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
+       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
       ~(scratch1.bit() | scratch2.bit());
 
   __ MultiPush(spill_mask);
-  __ Move(a0, elements);
-  __ Move(a1, name);
+  if (name.is(a0)) {
+    ASSERT(!elements.is(a1));
+    __ Move(a1, name);
+    __ Move(a0, elements);
+  } else {
+    __ Move(a0, elements);
+    __ Move(a1, name);
+  }
   StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
   __ CallStub(&stub);
   __ mov(scratch2, a2);
+  __ mov(at, v0);
   __ MultiPop(spill_mask);
 
-  __ Branch(done, ne, v0, Operand(zero_reg));
-  __ Branch(miss, eq, v0, Operand(zero_reg));
+  __ Branch(done, ne, at, Operand(zero_reg));
+  __ Branch(miss, eq, at, Operand(zero_reg));
 }
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Registers:
   //  result: StringDictionary to probe
   //  a1: key
@@ -6950,17 +7337,354 @@
   // treated as a lookup success. For positive lookup probing failure
   // should be treated as lookup failure.
   if (mode_ == POSITIVE_LOOKUP) {
+    __ Ret(USE_DELAY_SLOT);
     __ mov(result, zero_reg);
-    __ Ret();
   }
 
   __ bind(&in_dictionary);
+  __ Ret(USE_DELAY_SLOT);
   __ li(result, 1);
-  __ Ret();
 
   __ bind(&not_in_dictionary);
+  __ Ret(USE_DELAY_SLOT);
   __ mov(result, zero_reg);
+}
+
+
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // Used in RegExpExecStub.
+  { s2, s0, t3, EMIT_REMEMBERED_SET },
+  { s2, a2, t3, EMIT_REMEMBERED_SET },
+  // Used in CompileArrayPushCall.
+  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
+  // Also used in KeyedStoreIC::GenerateGeneric.
+  { a3, t0, t1, EMIT_REMEMBERED_SET },
+  // Used in CompileStoreGlobal.
+  { t0, a1, a2, OMIT_REMEMBERED_SET },
+  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { a1, a2, a3, EMIT_REMEMBERED_SET },
+  { a3, a2, a1, EMIT_REMEMBERED_SET },
+  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { a2, a1, a3, EMIT_REMEMBERED_SET },
+  { a3, a1, a2, EMIT_REMEMBERED_SET },
+  // KeyedStoreStubCompiler::GenerateStoreFastElement.
+  { a3, a2, t0, EMIT_REMEMBERED_SET },
+  { a2, a3, t0, EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // and ElementsTransitionGenerator::GenerateDoubleToObject
+  { a2, a3, t5, EMIT_REMEMBERED_SET },
+  { a2, a3, t5, OMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateDoubleToObject
+  { t2, a2, a0, EMIT_REMEMBERED_SET },
+  { a2, t2, t5, EMIT_REMEMBERED_SET },
+  // StoreArrayLiteralElementStub::Generate
+  { t1, a0, t2, EMIT_REMEMBERED_SET },
+  // Null termination.
+  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return false;
+}
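[Editorial note: IsPregenerated is a linear scan over the sentinel-terminated kAheadOfTime table: a stub counts as pregenerated only if its exact register assignment and remembered-set action appear there, with kDontSaveFPRegs. The same pattern in miniature, with a hypothetical Register type standing in for V8's:]

    #include <cstdio>

    // Hypothetical stand-ins for V8's Register / RememberedSetAction.
    struct Register {
      int code;
      bool is(Register o) const { return code == o.code; }
    };
    enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };

    struct Entry { Register object, value, address; RememberedSetAction action; };

    static const Register no_reg = { -1 };
    static const Register a1 = { 5 }, a2 = { 6 }, a3 = { 7 };

    static Entry kAheadOfTime[] = {
      { a1, a2, a3, EMIT_REMEMBERED_SET },
      { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET },  // sentinel
    };

    bool IsPregenerated(Register object, Register value, Register address,
                        RememberedSetAction action) {
      for (Entry* e = kAheadOfTime; !e->object.is(no_reg); e++) {
        if (object.is(e->object) && value.is(e->value) &&
            address.is(e->address) && action == e->action) {
          return true;
        }
      }
      return false;
    }

    int main() {
      std::printf("%d\n", IsPregenerated(a1, a2, a3, EMIT_REMEMBERED_SET));  // 1
      std::printf("%d\n", IsPregenerated(a3, a2, a1, EMIT_REMEMBERED_SET));  // 0
      return 0;
    }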
+
+
+bool StoreBufferOverflowStub::IsPregenerated() {
+  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+// Takes the input in 3 registers: address_, value_ and object_.  A pointer to
+// the value has just been written into the object; now this stub makes sure
+// we keep the GC informed.  The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two branch+nop instructions are generated with labels so as to
+  // get the offset fixed up correctly by the bind(Label*) call.  We patch it
+  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
+  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
+  // incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
+  __ nop();
+  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
+  __ nop();
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  }
   __ Ret();
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+
+  PatchBranchIntoNop(masm, 0);
+  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
+}
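[Editorial note: the "branch or nop" trick works because `bne zero_reg, zero_reg, offset` can never be taken (it acts as a nop) while `beq zero_reg, zero_reg, offset` is always taken; the patcher rewrites only the opcode field and preserves the 16-bit offset, which is what PatchBranchIntoNop/PatchNopIntoBranch in the header below do. A sketch of that word-level rewrite, using the field layout from constants-mips.h; the BEQ/BNE opcode values 0x04/0x05 are the standard MIPS encodings, stated here as an assumption:]

    #include <cstdint>
    #include <cstdio>

    const int kOpcodeShift = 26, kRsShift = 21, kRtShift = 16;
    const uint32_t kImm16Mask = 0xffff;
    const uint32_t BEQ = 0x04u << kOpcodeShift;  // assumed standard encodings
    const uint32_t BNE = 0x05u << kOpcodeShift;
    const uint32_t zero_reg = 0;

    uint32_t PatchBranchIntoNop(uint32_t instr) {
      uint32_t offset = instr & kImm16Mask;      // keep the branch target
      return BNE | (zero_reg << kRsShift) | (zero_reg << kRtShift) | offset;
    }

    uint32_t PatchNopIntoBranch(uint32_t instr) {
      uint32_t offset = instr & kImm16Mask;
      return BEQ | (zero_reg << kRsShift) | (zero_reg << kRtShift) | offset;
    }

    int main() {
      uint32_t instr = BEQ | 0x0042;        // beq zero, zero: always taken
      instr = PatchBranchIntoNop(instr);    // bne zero, zero: never taken
      std::printf("opcode now: 0x%02x, offset kept: 0x%04x\n",
                  static_cast<unsigned>(instr >> kOpcodeShift),
                  static_cast<unsigned>(instr & kImm16Mask));
      return 0;
    }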
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     ne,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  Register address =
+      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+  ASSERT(!address.is(regs_.object()));
+  ASSERT(!address.is(a0));
+  __ Move(address, regs_.address());
+  __ Move(a0, regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ Move(a1, address);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ lw(a1, MemOperand(address, 0));
+  }
+  __ li(a2, Operand(ExternalReference::isolate_address()));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_scratch;
+
+  // Let's look at the color of the object:  If it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     eq,
+                     &ensure_not_white);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     eq,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need extra registers for this, so we push the object and the address
+  // register temporarily.
+  __ Push(regs_.object(), regs_.address());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    regs_.address(),  // Scratch.
+                    &need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : element value to store
+  //  -- a1    : array literal
+  //  -- a2    : map of array literal
+  //  -- a3    : element index as smi
+  //  -- t0    : array literal index in function as smi
+  // -----------------------------------
+
+  Label element_done;
+  Label double_elements;
+  Label smi_element;
+  Label slow_elements;
+  Label fast_elements;
+
+  __ CheckFastElements(a2, t1, &double_elements);
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+  __ JumpIfSmi(a0, &smi_element);
+  __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
+
+  // Storing into the array literal requires an elements transition. Call
+  // into the runtime.
+  __ bind(&slow_elements);
+  __ Push(a1, a3, a0);
+  __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
+  __ Push(t1, t0);
+  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+  __ bind(&fast_elements);
+  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t2, t1, t2);
+  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sw(a0, MemOperand(t2, 0));
+  // Update the write barrier for the array store.
+  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
+  __ bind(&smi_element);
+  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t2, t1, t2);
+  __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+
+  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+  __ bind(&double_elements);
+  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
+                                 &slow_elements);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
 }
 
 
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index aa224bc..e0954d8 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -59,6 +59,25 @@
 };
 
 
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+      : save_doubles_(save_fp) { }
+
+  void Generate(MacroAssembler* masm);
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  SaveFPRegsMode save_doubles_;
+
+  Major MajorKey() { return StoreBufferOverflow; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
 class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
@@ -118,7 +137,7 @@
     return UnaryOpIC::ToState(operand_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_unary_op_type(operand_type_);
   }
 };
@@ -217,7 +236,7 @@
     return BinaryOpIC::ToState(operands_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_binary_op_type(operands_type_);
     code->set_binary_op_result_type(result_type_);
   }
@@ -226,6 +245,70 @@
 };
 
 
+class StringHelper : public AllStatic {
+ public:
+  // Generate code for copying characters using a simple loop. This should only
+  // be used in places where the number of characters is small and the
+  // additional setup and checking in GenerateCopyCharactersLong adds too much
+  // overhead. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharacters(MacroAssembler* masm,
+                                     Register dest,
+                                     Register src,
+                                     Register count,
+                                     Register scratch,
+                                     bool ascii);
+
+  // Generate code for copying a large number of characters. This function
+  // is allowed to spend extra time setting up conditions to make copying
+  // faster. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharactersLong(MacroAssembler* masm,
+                                         Register dest,
+                                         Register src,
+                                         Register count,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Register scratch4,
+                                         Register scratch5,
+                                         int flags);
+
+
+  // Probe the symbol table for a two-character string. If the string is
+  // not found by probing, a jump to the label not_found is performed. This
+  // jump does not guarantee that the string is not in the symbol table. If
+  // the string is found, the code falls through with the string in register
+  // r0. The contents of both the c1 and c2 registers are modified. At the
+  // exit, c1 is guaranteed to contain a halfword with the low and high bytes
+  // equal to the initial contents of c1 and c2 respectively.
+  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                   Register c1,
+                                                   Register c2,
+                                                   Register scratch1,
+                                                   Register scratch2,
+                                                   Register scratch3,
+                                                   Register scratch4,
+                                                   Register scratch5,
+                                                   Label* not_found);
+
+  // Generate string hash.
+  static void GenerateHashInit(MacroAssembler* masm,
+                               Register hash,
+                               Register character);
+
+  static void GenerateHashAddCharacter(MacroAssembler* masm,
+                                       Register hash,
+                                       Register character);
+
+  static void GenerateHashGetHash(MacroAssembler* masm,
+                                  Register hash);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
 // Flag that indicates how to generate code for the stub StringAddStub.
 enum StringAddFlags {
   NO_STRING_ADD_FLAGS = 0,
@@ -324,7 +407,15 @@
       : the_int_(the_int),
         the_heap_number_(the_heap_number),
         scratch_(scratch),
-        sign_(scratch2) { }
+        sign_(scratch2) {
+    ASSERT(IntRegisterBits::is_valid(the_int_.code()));
+    ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
+    ASSERT(ScratchRegisterBits::is_valid(scratch_.code()));
+    ASSERT(SignRegisterBits::is_valid(sign_.code()));
+  }
+
+  bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
 
  private:
   Register the_int_;
@@ -336,13 +427,15 @@
   class IntRegisterBits: public BitField<int, 0, 4> {};
   class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
   class ScratchRegisterBits: public BitField<int, 8, 4> {};
+  class SignRegisterBits: public BitField<int, 12, 4> {};
 
   Major MajorKey() { return WriteInt32ToHeapNumber; }
   int MinorKey() {
     // Encode the parameters in a unique 16 bit value.
     return IntRegisterBits::encode(the_int_.code())
            | HeapNumberRegisterBits::encode(the_heap_number_.code())
-           | ScratchRegisterBits::encode(scratch_.code());
+           | ScratchRegisterBits::encode(scratch_.code())
+           | SignRegisterBits::encode(sign_.code());
   }
 
   void Generate(MacroAssembler* masm);
@@ -375,6 +468,208 @@
 };
 
 
+class RecordWriteStub: public CodeStub {
+ public:
+  RecordWriteStub(Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : object_(object),
+        value_(value),
+        address_(address),
+        remembered_set_action_(remembered_set_action),
+        save_fp_regs_mode_(fp_mode),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+  }
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
+    masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
+        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
+    ASSERT(Assembler::IsBne(masm->instr_at(pos)));
+  }
+
+  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
+    masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
+        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
+    ASSERT(Assembler::IsBeq(masm->instr_at(pos)));
+  }
+
+  static Mode GetMode(Code* stub) {
+    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+                                                   2 * Assembler::kInstrSize);
+
+    if (Assembler::IsBeq(first_instruction)) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(Assembler::IsBne(first_instruction));
+
+    if (Assembler::IsBeq(second_instruction)) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(Assembler::IsBne(second_instruction));
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    MacroAssembler masm(NULL,
+                        stub->instruction_start(),
+                        stub->instruction_size());
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        PatchBranchIntoNop(&masm, 0);
+        PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, 0);
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 4 * Assembler::kInstrSize);
+  }
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers.  The input is
+  // two registers that must be preserved and one scratch register provided by
+  // the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      // We don't have to save scratch0_ because it was given to us as
+      // a scratch register.
+      masm->push(scratch1_);
+    }
+
+    void Restore(MacroAssembler* masm) {
+      masm->pop(scratch1_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.  The scratch registers
+    // will be restored by other means so we don't bother pushing them here.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(FPU);
+        masm->MultiPushFPU(kCallerSavedFPU);
+      }
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(FPU);
+        masm->MultiPopFPU(kCallerSavedFPU);
+      }
+      masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+
+    Register GetRegThatIsNotOneOf(Register r1,
+                                  Register r2,
+                                  Register r3) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+        Register candidate = Register::FromAllocationIndex(i);
+        if (candidate.is(r1)) continue;
+        if (candidate.is(r2)) continue;
+        if (candidate.is(r3)) continue;
+        return candidate;
+      }
+      UNREACHABLE();
+      return no_reg;
+    }
+    friend class RecordWriteStub;
+  };
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+        ValueBits::encode(value_.code()) |
+        AddressBits::encode(address_.code()) |
+        RememberedSetActionBits::encode(remembered_set_action_) |
+        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+  }
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  class ObjectBits: public BitField<int, 0, 5> {};
+  class ValueBits: public BitField<int, 5, 5> {};
+  class AddressBits: public BitField<int, 10, 5> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
+
+  Register object_;
+  Register value_;
+  Register address_;
+  RememberedSetAction remembered_set_action_;
+  SaveFPRegsMode save_fp_regs_mode_;
+  Label slow_;
+  RegisterAllocation regs_;
+};
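[Editorial note: MinorKey packs five fields into one int via the BitField helpers declared just above: 5 bits for each of the three register codes, then one bit each for the remembered-set action and the FP-regs mode. A self-contained sketch of how such an encode/decode pair works; this is a simplified version, V8's own template lives elsewhere in the tree:]

    #include <cstdio>

    // Simplified version of the BitField<Type, shift, size> pattern.
    template <class T, int shift, int size>
    struct BitField {
      static const unsigned kMask = ((1u << size) - 1) << shift;
      static unsigned encode(T value) {
        return static_cast<unsigned>(value) << shift;
      }
      static T decode(unsigned v) {
        return static_cast<T>((v & kMask) >> shift);
      }
    };

    typedef BitField<int, 0, 5>  ObjectBits;
    typedef BitField<int, 5, 5>  ValueBits;
    typedef BitField<int, 10, 5> AddressBits;

    int main() {
      unsigned key = ObjectBits::encode(6)    // e.g. a register code
                   | ValueBits::encode(4)
                   | AddressBits::encode(7);
      std::printf("object=%d value=%d address=%d\n",
                  ObjectBits::decode(key), ValueBits::decode(key),
                  AddressBits::decode(key));  // 6 4 7
      return 0;
    }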
+
+
 // Enter C code from generated RegExp code in a way that allows
 // the C code to fix the return address in case of a GC.
 // Currently only needed on ARM and MIPS.
@@ -561,14 +856,13 @@
 
   void Generate(MacroAssembler* masm);
 
-  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register receiver,
-      Register properties,
-      String* name,
-      Register scratch0);
+  static void GenerateNegativeLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register receiver,
+                                     Register properties,
+                                     Handle<String> name,
+                                     Register scratch0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -578,6 +872,8 @@
                                      Register r0,
                                      Register r1);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -590,7 +886,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
+  Major MajorKey() { return StringDictionaryLookup; }
 
   int MinorKey() {
     return LookupModeBits::encode(mode_);
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 4400b64..9acccdc 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,22 +30,423 @@
 #if defined(V8_TARGET_ARCH_MIPS)
 
 #include "codegen.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
+
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
+  switch (type) {
+    case TranscendentalCache::SIN: return &sin;
+    case TranscendentalCache::COS: return &cos;
+    case TranscendentalCache::TAN: return &tan;
+    case TranscendentalCache::LOG: return &log;
+    default: UNIMPLEMENTED();
+  }
+  return NULL;
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+  return &sqrt;
+}
+
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterInternalFrame();
+  masm->EnterFrame(StackFrame::INTERNAL);
+  ASSERT(!masm->has_frame());
+  masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveInternalFrame();
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  ASSERT(masm->has_frame());
+  masm->set_has_frame(false);
 }
 
+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : target map, scratch for subsequent call
+  //  -- t0    : scratch (elements)
+  // -----------------------------------
+  // Set transitioned map.
+  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ RecordWriteField(a2,
+                      HeapObject::kMapOffset,
+                      a3,
+                      t5,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : target map, scratch for subsequent call
+  //  -- t0    : scratch (elements)
+  // -----------------------------------
+  Label loop, entry, convert_hole, gc_required, only_change_map, done;
+  bool fpu_supported = CpuFeatures::IsSupported(FPU);
+
+  Register scratch = t6;
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+  __ Branch(&only_change_map, eq, at, Operand(t0));
+
+  __ push(ra);
+  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  // t0: source FixedArray
+  // t1: number of elements (smi-tagged)
+
+  // Allocate new FixedDoubleArray.
+  __ sll(scratch, t1, 2);
+  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
+  __ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+  // t2: destination FixedDoubleArray, not tagged as heap object
+  // Set destination FixedDoubleArray's length and map.
+  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
+  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
+  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+  // Update receiver's map.
+
+  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ RecordWriteField(a2,
+                      HeapObject::kMapOffset,
+                      a3,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created FixedDoubleArray.
+  __ Addu(a3, t2, Operand(kHeapObjectTag));
+  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ RecordWriteField(a2,
+                      JSObject::kElementsOffset,
+                      a3,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+
+  // Prepare for conversion loop.
+  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
+  __ sll(t2, t1, 2);
+  __ Addu(t2, t2, t3);
+  __ li(t0, Operand(kHoleNanLower32));
+  __ li(t1, Operand(kHoleNanUpper32));
+  // t0: kHoleNanLower32
+  // t1: kHoleNanUpper32
+  // t2: end of destination FixedDoubleArray, not tagged
+  // t3: begin of FixedDoubleArray element fields, not tagged
+
+  if (!fpu_supported) __ Push(a1, a0);
+
+  __ Branch(&entry);
+
+  __ bind(&only_change_map);
+  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ RecordWriteField(a2,
+                      HeapObject::kMapOffset,
+                      a3,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ Branch(&done);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ pop(ra);
+  __ Branch(fail);
+
+  // Convert and copy elements.
+  __ bind(&loop);
+  __ lw(t5, MemOperand(a3));
+  __ Addu(a3, a3, kIntSize);
+  // t5: current element
+  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
+
+  // Normal smi, convert to double and store.
+  if (fpu_supported) {
+    CpuFeatures::Scope scope(FPU);
+    __ mtc1(t5, f0);
+    __ cvt_d_w(f0, f0);
+    __ sdc1(f0, MemOperand(t3));
+    __ Addu(t3, t3, kDoubleSize);
+  } else {
+    FloatingPointHelper::ConvertIntToDouble(masm,
+                                            t5,
+                                            FloatingPointHelper::kCoreRegisters,
+                                            f0,
+                                            a0,
+                                            a1,
+                                            t7,
+                                            f0);
+    __ sw(a0, MemOperand(t3));  // mantissa
+    __ sw(a1, MemOperand(t3, kIntSize));  // exponent
+    __ Addu(t3, t3, kDoubleSize);
+  }
+  __ Branch(&entry);
+
+  // Hole found, store the-hole NaN.
+  __ bind(&convert_hole);
+  if (FLAG_debug_code) {
+    // Restore a "smi-untagged" heap object.
+    __ SmiTag(t5);
+    __ Or(t5, t5, Operand(1));
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Assert(eq, "object found in smi-only array", at, Operand(t5));
+  }
+  __ sw(t0, MemOperand(t3));  // mantissa
+  __ sw(t1, MemOperand(t3, kIntSize));  // exponent
+  __ Addu(t3, t3, kDoubleSize);
+
+  __ bind(&entry);
+  __ Branch(&loop, lt, t3, Operand(t2));
+
+  if (!fpu_supported) __ Pop(a1, a0);
+  __ pop(ra);
+  __ bind(&done);
+}
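[Editorial note: the conversion loop relies on two representation facts: a smi carries its payload above a one-bit tag, so UntagAndJumpIfNotSmi arithmetic-shifts before cvt_d_w converts; and the hole is written as a canonical NaN whose two words are kHoleNanUpper32/kHoleNanLower32. A host-side sketch of the same transformation; the concrete hole NaN bit pattern below is an assumption for illustration, V8 defines the real constants elsewhere:]

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Assumed values for illustration only (any NaN-space pattern works here).
    const uint32_t kHoleNanUpper32 = 0x7ff7ffff;
    const uint32_t kHoleNanLower32 = 0xffffffff;

    uint64_t ConvertSlot(int32_t tagged, bool is_hole) {
      if (!is_hole) {
        // Smi payload sits above a 1-bit tag: an arithmetic shift untags it,
        // mirroring UntagAndJumpIfNotSmi + cvt_d_w above.
        double d = static_cast<double>(tagged >> 1);
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        return bits;
      }
      return (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
    }

    int main() {
      uint64_t bits = ConvertSlot(7 << 1, false);  // tagged smi 7
      double d;
      std::memcpy(&d, &bits, sizeof d);
      std::printf("smi 7 -> %.1f\n", d);           // 7.0
      bits = ConvertSlot(0, true);
      std::printf("hole upper word: 0x%08x\n",
                  static_cast<uint32_t>(bits >> 32));
      return 0;
    }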
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : target map, scratch for subsequent call
+  //  -- t0    : scratch (elements)
+  // -----------------------------------
+  Label entry, loop, convert_hole, gc_required, only_change_map;
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+  __ Branch(&only_change_map, eq, at, Operand(t0));
+
+  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+
+  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  // t0: source FixedArray
+  // t1: number of elements (smi-tagged)
+
+  // Allocate new FixedArray.
+  __ sll(a0, t1, 1);
+  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
+  __ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+  // t2: destination FixedArray, not tagged as heap object
+  // Set destination FixedDoubleArray's length and map.
+  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
+  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
+  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+
+  // Prepare for conversion loop.
+  __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
+  __ Addu(t2, t2, Operand(kHeapObjectTag));
+  __ sll(t1, t1, 1);
+  __ Addu(t1, a3, t1);
+  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
+  // Using offset addresses.
+  // a3: begin of destination FixedArray element fields, not tagged
+  // t0: begin of source FixedDoubleArray element fields, not tagged, +4
+  // t1: end of destination FixedArray, not tagged
+  // t2: destination FixedArray
+  // t3: the-hole pointer
+  // t5: heap number map
+  __ Branch(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+
+  __ Branch(fail);
+
+  __ bind(&loop);
+  __ lw(a1, MemOperand(t0));
+  __ Addu(t0, t0, kDoubleSize);
+  // a1: current element's upper 32 bit
+  // t0: address of next element's upper 32 bit
+  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
+
+  // Non-hole double, copy value into a heap number.
+  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
+  // a2: new heap number
+  __ lw(a0, MemOperand(t0, -12));
+  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
+  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
+  __ mov(a0, a3);
+  __ sw(a2, MemOperand(a3));
+  __ Addu(a3, a3, kIntSize);
+  __ RecordWrite(t2,
+                 a0,
+                 a2,
+                 kRAHasBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ Branch(&entry);
+
+  // Replace the-hole NaN with the-hole pointer.
+  __ bind(&convert_hole);
+  __ sw(t3, MemOperand(a3));
+  __ Addu(a3, a3, kIntSize);
+
+  __ bind(&entry);
+  __ Branch(&loop, lt, a3, Operand(t1));
+
+  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
+  // Replace receiver's backing store with newly created and filled FixedArray.
+  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ RecordWriteField(a2,
+                      JSObject::kElementsOffset,
+                      t2,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(ra);
+
+  __ bind(&only_change_map);
+  // Update receiver's map.
+  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ RecordWriteField(a2,
+                      HeapObject::kMapOffset,
+                      a3,
+                      t5,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
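[Editorial note: GenerateDoubleToObject goes the other way. Each 64-bit slot is recognized as the hole by loading and comparing only its upper 32 bits against kHoleNanUpper32; holes become the-hole pointers and every other double is boxed into a freshly allocated heap number. A sketch of that classification step, reusing the assumed hole constant from the sketch above:]

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    const uint32_t kHoleNanUpper32 = 0x7ff7ffff;  // assumption, as above

    int main() {
      std::vector<uint64_t> slots = {
        0x4008000000000000ull,  // the double 3.0
        (static_cast<uint64_t>(kHoleNanUpper32) << 32) | 0xffffffffull,
      };
      for (uint64_t raw : slots) {
        // One load of the upper word is enough to recognize the hole.
        uint32_t upper = static_cast<uint32_t>(raw >> 32);
        if (upper == kHoleNanUpper32) {
          std::printf("hole  -> store the-hole pointer\n");
        } else {
          std::printf("value -> allocate HeapNumber, store its pointer\n");
        }
      }
      return 0;
    }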
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+                                       Register string,
+                                       Register index,
+                                       Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ And(at, result, Operand(kIsIndirectStringMask));
+  __ Branch(&check_sequential, eq, at, Operand(zero_reg));
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ And(at, result, Operand(kSlicedNotConsMask));
+  __ Branch(&cons_string, eq, at, Operand(zero_reg));
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ sra(at, result, kSmiTagSize);
+  __ Addu(index, index, at);
+  __ jmp(&indirect_string_loaded);
+
+  // Handle cons strings.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ LoadRoot(at, Heap::kEmptyStringRootIndex);
+  __ Branch(call_runtime, ne, result, Operand(at));
+  // Get the first of the two strings and load its instance type.
+  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label external_string, check_encoding;
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(at, result, Operand(kStringRepresentationMask));
+  __ Branch(&external_string, ne, at, Operand(zero_reg));
+
+  // Prepare sequential strings
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ Addu(string,
+          string,
+          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+  __ jmp(&check_encoding);
+
+  // Handle external strings.
+  __ bind(&external_string);
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ And(at, result, Operand(kIsIndirectStringMask));
+    __ Assert(eq, "external string expected, but not found",
+        at, Operand(zero_reg));
+  }
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ And(at, result, Operand(kShortExternalStringMask));
+  __ Branch(call_runtime, ne, at, Operand(zero_reg));
+  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+  Label ascii, done;
+  __ bind(&check_encoding);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ And(at, result, Operand(kStringEncodingMask));
+  __ Branch(&ascii, ne, at, Operand(zero_reg));
+  // Two-byte string.
+  __ sll(at, index, 1);
+  __ Addu(at, string, at);
+  __ lhu(result, MemOperand(at));
+  __ jmp(&done);
+  __ bind(&ascii);
+  // Ascii string.
+  __ Addu(at, string, index);
+  __ lbu(result, MemOperand(at));
+  __ bind(&done);
+}
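[Editorial note: StringCharLoadGenerator::Generate is a type dispatch on the instance-type byte. Indirect strings are peeled first: slices add their stored offset and redirect to the parent, while cons strings are only handled when the second half is the empty string. Then sequential versus external decides where the character bytes live, and the encoding bit selects a halfword versus byte load. A compact C++ model of that decision tree; the shapes here are simplified stand-ins for V8's tag masks:]

    #include <cstdio>
    #include <string>

    // Simplified stand-ins for V8's string shapes.
    struct Str {
      enum Shape { SEQ, CONS, SLICED } shape;
      std::string data;       // sequential/external payload
      const Str* first;       // cons: first half (second half assumed empty)
      const Str* parent;      // slice: underlying string
      int offset;             // slice: start offset into parent
    };

    int CharAt(const Str* s, int index) {
      // Peel indirect strings first, as the generated code does.
      while (s->shape == Str::CONS || s->shape == Str::SLICED) {
        if (s->shape == Str::SLICED) {
          index += s->offset;  // slices add their offset and redirect
          s = s->parent;
        } else {
          s = s->first;        // flat cons: second half is the empty string
        }
      }
      // Sequential (and, analogously, external) strings expose raw storage;
      // the encoding bit would pick a byte vs. halfword load here.
      return static_cast<unsigned char>(s->data[index]);
    }

    int main() {
      Str base = { Str::SEQ, "hello world", nullptr, nullptr, 0 };
      Str slice = { Str::SLICED, "", nullptr, &base, 6 };
      std::printf("%c\n", CharAt(&slice, 0));  // 'w'
      return 0;
    }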
+
+#undef __
 
 } }  // namespace v8::internal
 
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index a8de9c8..e704c4f 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -31,7 +31,6 @@
 
 
 #include "ast.h"
-#include "code-stubs-mips.h"
 #include "ic-inl.h"
 
 namespace v8 {
@@ -71,26 +70,26 @@
                               int pos,
                               bool right_here = false);
 
-  // Constants related to patching of inlined load/store.
-  static int GetInlinedKeyedLoadInstructionsAfterPatch() {
-    // This is in correlation with the padding in MacroAssembler::Abort.
-    return FLAG_debug_code ? 45 : 20;
-  }
-
-  static const int kInlinedKeyedStoreInstructionsAfterPatch = 13;
-
-  static int GetInlinedNamedStoreInstructionsAfterPatch() {
-    ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
-    // Magic number 5: instruction count after patched map load:
-    //  li: 2 (liu & ori), Branch : 2 (bne & nop), sw : 1
-    return Isolate::Current()->inlined_write_barrier_size() + 5;
-  }
-
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
 
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index d0a7af5..7d654f6 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -302,7 +302,7 @@
           return kRegisterType;
       };
       break;
-    // 16 bits Immediate type instructions. eg: addi dest, src, imm16.
+    // 16-bit immediate type instructions. e.g.: addi dest, src, imm16.
     case REGIMM:
     case BEQ:
     case BNE:
@@ -337,7 +337,7 @@
     case SWC1:
     case SDC1:
       return kImmediateType;
-    // 26 bits immediate type instructions. eg: j imm26.
+    // 26-bit immediate type instructions. e.g.: j imm26.
     case J:
     case JAL:
       return kJumpType;
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index d76ae59..fd04722 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -39,24 +39,33 @@
 
 #define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
 
+enum ArchVariants {
+  kMips32r2,
+  kMips32r1,
+  kLoongson
+};
 
 #ifdef _MIPS_ARCH_MIPS32R2
-  #define mips32r2 1
+  static const ArchVariants kArchVariant = kMips32r2;
+#elif _MIPS_ARCH_LOONGSON
+// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
+// which predates (and is a subset of) the mips32r2 and r1 architectures.
+  static const ArchVariants kArchVariant = kLoongson;
 #else
-  #define mips32r2 0
+  static const ArchVariants kArchVariant = kMips32r1;
 #endif
 
 
 #if(defined(__mips_hard_float) && __mips_hard_float != 0)
 // Use floating-point coprocessor instructions. This flag is raised when
 // -mhard-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = false;
+const bool IsMipsSoftFloatABI = false;
 #elif(defined(__mips_soft_float) && __mips_soft_float != 0)
 // Not using floating-point coprocessor instructions. This flag is raised when
 // -msoft-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = true;
+const bool IsMipsSoftFloatABI = true;
 #else
-static const bool IsMipsSoftFloatABI = true;
+const bool IsMipsSoftFloatABI = true;
 #endif
 
 
@@ -74,46 +83,45 @@
 // Registers and FPURegisters.
 
 // Number of general purpose registers.
-static const int kNumRegisters = 32;
-static const int kInvalidRegister = -1;
+const int kNumRegisters = 32;
+const int kInvalidRegister = -1;
 
 // Number of registers with HI, LO, and pc.
-static const int kNumSimuRegisters = 35;
+const int kNumSimuRegisters = 35;
 
 // In the simulator, the PC register is simulated as the 34th register.
-static const int kPCRegister = 34;
+const int kPCRegister = 34;
 
 // Number coprocessor registers.
-static const int kNumFPURegisters = 32;
-static const int kInvalidFPURegister = -1;
+const int kNumFPURegisters = 32;
+const int kInvalidFPURegister = -1;
 
 // FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
-static const int kFCSRRegister = 31;
-static const int kInvalidFPUControlRegister = -1;
-static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
 
 // FCSR constants.
-static const uint32_t kFCSRInexactFlagBit = 2;
-static const uint32_t kFCSRUnderflowFlagBit = 3;
-static const uint32_t kFCSROverflowFlagBit = 4;
-static const uint32_t kFCSRDivideByZeroFlagBit = 5;
-static const uint32_t kFCSRInvalidOpFlagBit = 6;
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
 
-static const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
-static const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
-static const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
-static const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
-static const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
 
-static const uint32_t kFCSRFlagMask =
+const uint32_t kFCSRFlagMask =
     kFCSRInexactFlagMask |
     kFCSRUnderflowFlagMask |
     kFCSROverflowFlagMask |
     kFCSRDivideByZeroFlagMask |
     kFCSRInvalidOpFlagMask;
 
-static const uint32_t kFCSRExceptionFlagMask =
-    kFCSRFlagMask ^ kFCSRInexactFlagMask;
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
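+// For illustration: kFCSRFlagMask evaluates to 0x7c (FCSR bits 2..6) and
+// kFCSRExceptionFlagMask to 0x78 (every flag except the inexact flag).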
 
 // Helper functions for converting between register numbers and names.
 class Registers {
@@ -126,7 +134,7 @@
 
   struct RegisterAlias {
     int reg;
-    const char *name;
+    const char* name;
   };
 
   static const int32_t kMaxValue = 0x7fffffff;
@@ -148,7 +156,7 @@
 
   struct RegisterAlias {
     int creg;
-    const char *name;
+    const char* name;
   };
 
  private:
@@ -177,67 +185,66 @@
 //   instructions (see Assembler::stop()).
 // - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
 //   debugger.
-static const uint32_t kMaxWatchpointCode = 31;
-static const uint32_t kMaxStopCode = 127;
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
 STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
 
 
 // ----- Fields offset and length.
-static const int kOpcodeShift   = 26;
-static const int kOpcodeBits    = 6;
-static const int kRsShift       = 21;
-static const int kRsBits        = 5;
-static const int kRtShift       = 16;
-static const int kRtBits        = 5;
-static const int kRdShift       = 11;
-static const int kRdBits        = 5;
-static const int kSaShift       = 6;
-static const int kSaBits        = 5;
-static const int kFunctionShift = 0;
-static const int kFunctionBits  = 6;
-static const int kLuiShift      = 16;
+const int kOpcodeShift   = 26;
+const int kOpcodeBits    = 6;
+const int kRsShift       = 21;
+const int kRsBits        = 5;
+const int kRtShift       = 16;
+const int kRtBits        = 5;
+const int kRdShift       = 11;
+const int kRdBits        = 5;
+const int kSaShift       = 6;
+const int kSaBits        = 5;
+const int kFunctionShift = 0;
+const int kFunctionBits  = 6;
+const int kLuiShift      = 16;
 
-static const int kImm16Shift = 0;
-static const int kImm16Bits  = 16;
-static const int kImm26Shift = 0;
-static const int kImm26Bits  = 26;
-static const int kImm28Shift = 0;
-static const int kImm28Bits  = 28;
+const int kImm16Shift = 0;
+const int kImm16Bits  = 16;
+const int kImm26Shift = 0;
+const int kImm26Bits  = 26;
+const int kImm28Shift = 0;
+const int kImm28Bits  = 28;
 
 // In branches and jumps immediate fields point to words, not bytes,
 // and are therefore shifted by 2.
-static const int kImmFieldShift = 2;
+const int kImmFieldShift = 2;
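+// For example, an encoded branch offset of 0x10 corresponds to a byte offset
+// of 0x10 << kImmFieldShift == 0x40.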
 
-static const int kFsShift       = 11;
-static const int kFsBits        = 5;
-static const int kFtShift       = 16;
-static const int kFtBits        = 5;
-static const int kFdShift       = 6;
-static const int kFdBits        = 5;
-static const int kFCccShift     = 8;
-static const int kFCccBits      = 3;
-static const int kFBccShift     = 18;
-static const int kFBccBits      = 3;
-static const int kFBtrueShift   = 16;
-static const int kFBtrueBits    = 1;
+const int kFsShift       = 11;
+const int kFsBits        = 5;
+const int kFtShift       = 16;
+const int kFtBits        = 5;
+const int kFdShift       = 6;
+const int kFdBits        = 5;
+const int kFCccShift     = 8;
+const int kFCccBits      = 3;
+const int kFBccShift     = 18;
+const int kFBccBits      = 3;
+const int kFBtrueShift   = 16;
+const int kFBtrueBits    = 1;
 
 // ----- Miscellaneous useful masks.
 // Instruction bit masks.
-static const int  kOpcodeMask   = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
-static const int  kImm16Mask    = ((1 << kImm16Bits) - 1) << kImm16Shift;
-static const int  kImm26Mask    = ((1 << kImm26Bits) - 1) << kImm26Shift;
-static const int  kImm28Mask    = ((1 << kImm28Bits) - 1) << kImm28Shift;
-static const int  kRsFieldMask  = ((1 << kRsBits) - 1) << kRsShift;
-static const int  kRtFieldMask  = ((1 << kRtBits) - 1) << kRtShift;
-static const int  kRdFieldMask  = ((1 << kRdBits) - 1) << kRdShift;
-static const int  kSaFieldMask  = ((1 << kSaBits) - 1) << kSaShift;
-static const int  kFunctionFieldMask =
-    ((1 << kFunctionBits) - 1) << kFunctionShift;
+const int  kOpcodeMask   = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
+const int  kImm16Mask    = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int  kImm26Mask    = ((1 << kImm26Bits) - 1) << kImm26Shift;
+const int  kImm28Mask    = ((1 << kImm28Bits) - 1) << kImm28Shift;
+const int  kRsFieldMask  = ((1 << kRsBits) - 1) << kRsShift;
+const int  kRtFieldMask  = ((1 << kRtBits) - 1) << kRtShift;
+const int  kRdFieldMask  = ((1 << kRdBits) - 1) << kRdShift;
+const int  kSaFieldMask  = ((1 << kSaBits) - 1) << kSaShift;
+const int  kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
 // Misc masks.
-static const int  kHiMask       =   0xffff << 16;
-static const int  kLoMask       =   0xffff;
-static const int  kSignMask     =   0x80000000;
-static const int  kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
+const int  kHiMask       =   0xffff << 16;
+const int  kLoMask       =   0xffff;
+const int  kSignMask     =   0x80000000;
+const int  kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
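+// kJumpAddrMask evaluates to 0x0fffffff: j/jal replace only the low 28 bits
+// of the PC, so jump targets keep the upper 4 bits of the delay-slot address.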
 
 // ----- MIPS Opcodes and Function Fields.
 // We use this presentation to stay close to the table representation in
@@ -529,7 +536,7 @@
   kRoundToMinusInf = RM
 };
 
-static const uint32_t kFPURoundingModeMask = 3 << 0;
+const uint32_t kFPURoundingModeMask = 3 << 0;
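+// The mask covers the two low FCSR bits, which select one of the four
+// rounding modes (RN, RZ, RP, or RM) from the enum above.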
 
 enum CheckForInexactConversion {
   kCheckForInexactConversion,
@@ -749,7 +756,7 @@
 
   // Say if the instruction should not be used in a branch delay slot.
   bool IsForbiddenInBranchDelay() const;
-  // Say if the instruction 'links'. eg: jal, bal.
+  // Say if the instruction 'links'. e.g. jal, bal.
   bool IsLinkingInstruction() const;
   // Say if the instruction is a break or a trap.
   bool IsTrap() const;
@@ -772,18 +779,18 @@
 // MIPS assembly various constants.
 
 // C/C++ argument slots size.
-static const int kCArgSlotCount = 4;
-static const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
+const int kCArgSlotCount = 4;
+const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
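+// Under the MIPS o32 calling convention the caller reserves these four slots
+// (16 bytes) for a0..a3 even when the arguments are passed in registers.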
 // JS argument slots size.
-static const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
+const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
 // Assembly builtins argument slots size.
-static const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
+const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
 
-static const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
+const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
 
-static const int kDoubleAlignmentBits = 3;
-static const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
-static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
+const int kDoubleAlignmentBits = 3;
+const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
+const int kDoubleAlignmentMask = kDoubleAlignment - 1;
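+// A minimal usage sketch: an address is double-aligned when
+// (addr & kDoubleAlignmentMask) == 0; it can be rounded up with
+// (addr + kDoubleAlignmentMask) & ~kDoubleAlignmentMask.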
 
 
 } }   // namespace v8::internal
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
index 26e95fb..93ebeda 100644
--- a/src/mips/cpu-mips.cc
+++ b/src/mips/cpu-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,7 +47,7 @@
 namespace internal {
 
 
-void CPU::Setup() {
+void CPU::SetUp() {
   CpuFeatures::Probe();
 }
 
@@ -64,15 +64,19 @@
   }
 
 #if !defined (USE_SIMULATOR)
+#if defined(ANDROID)
+  // Bionic cacheflush can typically run in userland, avoiding a kernel call.
+  char* end = reinterpret_cast<char*>(start) + size;
+  cacheflush(
+      reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
+#else  // ANDROID
   int res;
-
   // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
   res = syscall(__NR_cacheflush, start, size, ICACHE);
-
   if (res) {
     V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
   }
-
+#endif  // ANDROID
 #else  // USE_SIMULATOR.
   // Not generating mips instructions for C-code. This means that we are
   // building a mips emulator based target.  We should notify the simulator
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index e323c50..83f5f50 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -124,55 +124,58 @@
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                           RegList object_regs,
                                           RegList non_object_regs) {
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Store the registers containing live values on the expression stack to
-  // make sure that these are correctly updated during GC. Non object values
-  // are stored as a smi causing it to be untouched by GC.
-  ASSERT((object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((object_regs & non_object_regs) == 0);
-  if ((object_regs | non_object_regs) != 0) {
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((non_object_regs & (1 << r)) != 0) {
-        if (FLAG_debug_code) {
-          __ And(at, reg, 0xc0000000);
-          __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non object values
+    // are stored as a smi causing it to be untouched by GC.
+    ASSERT((object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((object_regs & non_object_regs) == 0);
+    if ((object_regs | non_object_regs) != 0) {
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = { r };
+        if ((non_object_regs & (1 << r)) != 0) {
+          if (FLAG_debug_code) {
+            __ And(at, reg, 0xc0000000);
+            __ Assert(
+                eq, "Unable to encode value as smi", at, Operand(zero_reg));
+          }
+          __ sll(reg, reg, kSmiTagSize);
         }
-        __ sll(reg, reg, kSmiTagSize);
       }
+      __ MultiPush(object_regs | non_object_regs);
     }
-    __ MultiPush(object_regs | non_object_regs);
-  }
 
 #ifdef DEBUG
-  __ RecordComment("// Calling from debug break to runtime - come in - over");
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ mov(a0, zero_reg);  // No arguments.
-  __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+    __ PrepareCEntryArgs(0);  // No arguments.
+    __ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
 
-  CEntryStub ceb(1);
-  __ CallStub(&ceb);
+    CEntryStub ceb(1);
+    __ CallStub(&ceb);
 
-  // Restore the register values from the expression stack.
-  if ((object_regs | non_object_regs) != 0) {
-    __ MultiPop(object_regs | non_object_regs);
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((non_object_regs & (1 << r)) != 0) {
-        __ srl(reg, reg, kSmiTagSize);
-      }
-      if (FLAG_debug_code &&
-          (((object_regs |non_object_regs) & (1 << r)) == 0)) {
-        __ li(reg, kDebugZapValue);
+    // Restore the register values from the expression stack.
+    if ((object_regs | non_object_regs) != 0) {
+      __ MultiPop(object_regs | non_object_regs);
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = { r };
+        if ((non_object_regs & (1 << r)) != 0) {
+          __ srl(reg, reg, kSmiTagSize);
+        }
+        if (FLAG_debug_code &&
+            (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+          __ li(reg, kDebugZapValue);
+        }
       }
     }
-  }
 
-  __ LeaveInternalFrame();
+    // Leave the internal frame.
+  }
 
   // Now that the break point has been handled, resume normal execution by
   // jumping to the target address intended by the caller and that was
@@ -240,14 +243,6 @@
 }
 
 
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
-  // Calling convention for construct call (from builtins-mips.cc).
-  //  -- a0     : number of arguments (not smi)
-  //  -- a1     : constructor function
-  Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
-}
-
-
 void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
   // In places other than IC call sites it is expected that v0 is TOS which
   // is an object - this is not generally the case so this should be used with
@@ -256,11 +251,43 @@
 }
 
 
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+  // Register state for CallFunctionStub (from code-stubs-mips.cc).
   // ----------- S t a t e -------------
-  //  No registers used on entry.
+  //  -- a1 : function
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, 0, 0);
+  Generate_DebugBreakCallHelper(masm, a1.bit(), 0);
+}
+
+
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+  // Register state for CallFunctionStub (from code-stubs-mips.cc).
+  // ----------- S t a t e -------------
+  //  -- a1 : function
+  //  -- a2 : cache cell for call target
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), 0);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+  // Calling convention for CallConstructStub (from code-stubs-mips.cc).
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments (not smi)
+  //  -- a1     : constructor function
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
+}
+
+
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+  // Calling convention for CallConstructStub (from code-stubs-mips.cc).
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments (not smi)
+  //  -- a1     : constructor function
+  //  -- a2     : cache cell for call target
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), a0.bit());
 }
 
 
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 18b6231..51c2e46 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -32,65 +32,961 @@
 #include "full-codegen.h"
 #include "safepoint-table.h"
 
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
 namespace v8 {
 namespace internal {
 
 
-const int Deoptimizer::table_entry_size_ = 10;
-
-
 int Deoptimizer::patch_size() {
-  const int kCallInstructionSizeInWords = 3;
+  const int kCallInstructionSizeInWords = 4;
   return kCallInstructionSizeInWords * Assembler::kInstrSize;
 }
 
 
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
-  UNIMPLEMENTED();
+  HandleScope scope;
+  AssertNoAllocation no_allocation;
+
+  if (!function->IsOptimized()) return;
+
+  // Get the optimized code.
+  Code* code = function->code();
+  Address code_start_address = code->instruction_start();
+
+  // Invalidate the relocation information, as it will become invalid by the
+  // code patching below, and is not needed any more.
+  code->InvalidateRelocation();
+
+  // For each LLazyBailout instruction insert a call to the corresponding
+  // deoptimization entry.
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+#ifdef DEBUG
+  Address prev_call_address = NULL;
+#endif
+  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+    if (deopt_data->Pc(i)->value() == -1) continue;
+    Address call_address = code_start_address + deopt_data->Pc(i)->value();
+    Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+    int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
+                                                      RelocInfo::NONE);
+    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+    ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
+    ASSERT(call_size_in_bytes <= patch_size());
+    CodePatcher patcher(call_address, call_size_in_words);
+    patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+    ASSERT(prev_call_address == NULL ||
+           call_address >= prev_call_address + patch_size());
+    ASSERT(call_address + patch_size() <= code->instruction_end());
+
+#ifdef DEBUG
+    prev_call_address = call_address;
+#endif
+  }
+
+  Isolate* isolate = code->GetIsolate();
+
+  // Add the deoptimizing code to the list.
+  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+  DeoptimizerData* data = isolate->deoptimizer_data();
+  node->set_next(data->deoptimizing_code_list_);
+  data->deoptimizing_code_list_ = node;
+
+  // We might be in the middle of incremental marking with compaction.
+  // Tell collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
+  // Set the code for the function to non-optimized version.
+  function->ReplaceCode(function->shared()->code());
+
+  if (FLAG_trace_deopt) {
+    PrintF("[forced deoptimization: ");
+    function->PrintName();
+    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+#ifdef DEBUG
+    if (FLAG_print_code) {
+      code->PrintLn();
+    }
+#endif
+  }
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+                                        Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
-  UNIMPLEMENTED();
+  const int kInstrSize = Assembler::kInstrSize;
+  // This structure comes from FullCodeGenerator::EmitStackCheck.
+  // The call of the stack guard check has the following form:
+  // sltu at, sp, t0 / slt at, a3, zero_reg (for count-based interrupts)
+  // beq at, zero_reg, ok
+  // lui t9, <stack guard address> upper
+  // ori t9, <stack guard address> lower
+  // jalr t9
+  // nop
+  // ----- pc_after points here
+
+  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+
+  // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
+  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+  patcher.masm()->addiu(at, zero_reg, 1);
+
+  // Replace the stack check address in the load-immediate (lui/ori pair)
+  // with the entry address of the replacement code.
+  ASSERT(reinterpret_cast<uint32_t>(
+      Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+      reinterpret_cast<uint32_t>(check_code->entry()));
+  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+                                   replacement_code->entry());
+
+  // We patched the code to the following form:
+  // addiu at, zero_reg, 1
+  // beq at, zero_reg, ok  ;; Not changed
+  // lui t9, <on-stack replacement address> upper
+  // ori t9, <on-stack replacement address> lower
+  // jalr t9  ;; Not changed
+  // nop  ;; Not changed
+  // ----- pc_after points here
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
 }
 
 
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+                                         Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
-  UNIMPLEMENTED();
+  // Exact opposite of the function above.
+  const int kInstrSize = Assembler::kInstrSize;
+  ASSERT(Assembler::IsAddImmediate(
+      Assembler::instr_at(pc_after - 6 * kInstrSize)));
+  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+
+  // Restore the sltu instruction so beq can be taken again.
+  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+  if (FLAG_count_based_interrupts) {
+    patcher.masm()->slt(at, a3, zero_reg);
+  } else {
+    patcher.masm()->sltu(at, sp, t0);
+  }
+
+  // Replace the on-stack replacement address in the load-immediate (lui/ori
+  // pair) with the entry address of the normal stack-check code.
+  ASSERT(reinterpret_cast<uint32_t>(
+      Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+      reinterpret_cast<uint32_t>(replacement_code->entry()));
+  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+                                   check_code->entry());
+
+  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 4 * kInstrSize, check_code);
+}
+
+
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+  ByteArray* translations = data->TranslationByteArray();
+  int length = data->DeoptCount();
+  for (int i = 0; i < length; i++) {
+    if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+      TranslationIterator it(translations, data->TranslationIndex(i)->value());
+      int value = it.Next();
+      ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+      // Read the number of frames.
+      value = it.Next();
+      if (value == 1) return i;
+    }
+  }
+  UNREACHABLE();
+  return -1;
 }
 
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
-  UNIMPLEMENTED();
+  DeoptimizationInputData* data = DeoptimizationInputData::cast(
+      optimized_code_->deoptimization_data());
+  unsigned ast_id = data->OsrAstId()->value();
+
+  int bailout_id = LookupBailoutId(data, ast_id);
+  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
+  ByteArray* translations = data->TranslationByteArray();
+
+  TranslationIterator iterator(translations, translation_index);
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator.Next());
+  ASSERT(Translation::BEGIN == opcode);
+  USE(opcode);
+  int count = iterator.Next();
+  iterator.Skip(1);  // Drop JS frame count.
+  ASSERT(count == 1);
+  USE(count);
+
+  opcode = static_cast<Translation::Opcode>(iterator.Next());
+  USE(opcode);
+  ASSERT(Translation::JS_FRAME == opcode);
+  unsigned node_id = iterator.Next();
+  USE(node_id);
+  ASSERT(node_id == ast_id);
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
+  USE(function);
+  ASSERT(function == function_);
+  unsigned height = iterator.Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  USE(height_in_bytes);
+
+  unsigned fixed_size = ComputeFixedSize(function_);
+  unsigned input_frame_size = input_->GetFrameSize();
+  ASSERT(fixed_size + height_in_bytes == input_frame_size);
+
+  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
+  unsigned outgoing_size = outgoing_height * kPointerSize;
+  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
+  ASSERT(outgoing_size == 0);  // OSR does not happen in the middle of a call.
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
+    PrintF(" => node=%u, frame=%d->%d]\n",
+           ast_id,
+           input_frame_size,
+           output_frame_size);
+  }
+
+  // There's only one output frame in the OSR case.
+  output_count_ = 1;
+  output_ = new FrameDescription*[1];
+  output_[0] = new(output_frame_size) FrameDescription(
+      output_frame_size, function_);
+  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
+
+  // Clear the incoming parameters in the optimized frame to avoid
+  // confusing the garbage collector.
+  unsigned output_offset = output_frame_size - kPointerSize;
+  int parameter_count = function_->shared()->formal_parameter_count() + 1;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_[0]->SetFrameSlot(output_offset, 0);
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the incoming parameters. This may overwrite some of the
+  // incoming argument slots we've just cleared.
+  int input_offset = input_frame_size - kPointerSize;
+  bool ok = true;
+  int limit = input_offset - (parameter_count * kPointerSize);
+  while (ok && input_offset > limit) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Set them up explicitly.
+  for (int i = StandardFrameConstants::kCallerPCOffset;
+       ok && i >= StandardFrameConstants::kMarkerOffset;
+       i -= kPointerSize) {
+    uint32_t input_value = input_->GetFrameSlot(input_offset);
+    if (FLAG_trace_osr) {
+      const char* name = "UNKNOWN";
+      switch (i) {
+        case StandardFrameConstants::kCallerPCOffset:
+          name = "caller's pc";
+          break;
+        case StandardFrameConstants::kCallerFPOffset:
+          name = "fp";
+          break;
+        case StandardFrameConstants::kContextOffset:
+          name = "context";
+          break;
+        case StandardFrameConstants::kMarkerOffset:
+          name = "function";
+          break;
+      }
+      PrintF("    [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
+             output_offset,
+             input_value,
+             input_offset,
+             name);
+    }
+
+    output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+    input_offset -= kPointerSize;
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the rest of the frame.
+  while (ok && input_offset >= 0) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // If translation of any command failed, continue using the input frame.
+  if (!ok) {
+    delete output_[0];
+    output_[0] = input_;
+    output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
+  } else {
+    // Set up the frame pointer and the context pointer.
+    output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
+    output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
+
+    unsigned pc_offset = data->OsrPcOffset()->value();
+    uint32_t pc = reinterpret_cast<uint32_t>(
+        optimized_code_->entry() + pc_offset);
+    output_[0]->SetPc(pc);
+  }
+  Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
+  output_[0]->SetContinuation(
+      reinterpret_cast<uint32_t>(continuation->entry()));
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+           ok ? "finished" : "aborted",
+           reinterpret_cast<intptr_t>(function));
+    function->PrintName();
+    PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
+  }
 }
 
 
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
-                                 int frame_index) {
-  UNIMPLEMENTED();
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+                                                 int frame_index) {
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
+  }
+
+  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+  // An arguments adaptor frame cannot be the topmost or bottommost frame.
+  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address of the frame is computed from the previous
+  // frame's top and this frame's size.
+  uint32_t top_address;
+  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = height;
+  unsigned output_offset = output_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+
+  // Read caller's PC from the previous frame.
+  output_offset -= kPointerSize;
+  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+  output_frame->SetFrameSlot(output_offset, callers_pc);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, callers_pc);
+  }
+
+  // Read caller's FP from the previous frame, and set this frame's FP.
+  output_offset -= kPointerSize;
+  intptr_t value = output_[frame_index - 1]->GetFp();
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  output_frame->SetFp(fp_value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // A marker value is used in place of the context.
+  output_offset -= kPointerSize;
+  intptr_t context = reinterpret_cast<intptr_t>(
+      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  output_frame->SetFrameSlot(output_offset, context);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
+           top_address + output_offset, output_offset, context);
+  }
+
+  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(function);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Number of incoming arguments.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+           top_address + output_offset, output_offset, value, height - 1);
+  }
+
+  ASSERT(0 == output_offset);
+
+  Builtins* builtins = isolate_->builtins();
+  Code* adaptor_trampoline =
+      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+  uint32_t pc = reinterpret_cast<uint32_t>(
+      adaptor_trampoline->instruction_start() +
+      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+  output_frame->SetPc(pc);
 }
 
 
+void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
+                                              int frame_index) {
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating construct stub => height=%d\n", height_in_bytes);
+  }
+
+  unsigned fixed_frame_size = 7 * kPointerSize;
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+  output_frame->SetFrameType(StackFrame::CONSTRUCT);
+
+  // A construct stub frame cannot be the topmost or bottommost frame.
+  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address of the frame is computed from the previous
+  // frame's top and this frame's size.
+  uint32_t top_address;
+  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = height;
+  unsigned output_offset = output_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+
+  // Read caller's PC from the previous frame.
+  output_offset -= kPointerSize;
+  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+  output_frame->SetFrameSlot(output_offset, callers_pc);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, callers_pc);
+  }
+
+  // Read caller's FP from the previous frame, and set this frame's FP.
+  output_offset -= kPointerSize;
+  intptr_t value = output_[frame_index - 1]->GetFp();
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  output_frame->SetFp(fp_value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // The context can be gotten from the previous frame.
+  output_offset -= kPointerSize;
+  value = output_[frame_index - 1]->GetContext();
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // A marker value is used in place of the function.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Number of incoming arguments.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+           top_address + output_offset, output_offset, value, height - 1);
+  }
+
+  // Constructor function being invoked by the stub.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(function);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The newly allocated object was passed as receiver in the artificial
+  // constructor stub environment created by HEnvironment::CopyForInlining().
+  output_offset -= kPointerSize;
+  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  ASSERT(0 == output_offset);
+
+  Builtins* builtins = isolate_->builtins();
+  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
+  uint32_t pc = reinterpret_cast<uint32_t>(
+      construct_stub->instruction_start() +
+      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
+  output_frame->SetPc(pc);
+}
+
+
+// This code is very similar to ia32/arm code, but relies on register names
+// (fp, sp) and how the frame is laid out.
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+                                   int frame_index) {
+  // Read the ast node id, function, and frame height for this output frame.
+  int node_id = iterator->Next();
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating ");
+    function->PrintName();
+    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+  }
+
+  // The 'fixed' part of the frame consists of the incoming parameters and
+  // the part described by JavaScriptFrameConstants.
+  unsigned fixed_frame_size = ComputeFixedSize(function);
+  unsigned input_frame_size = input_->GetFrameSize();
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
+
+  bool is_bottommost = (0 == frame_index);
+  bool is_topmost = (output_count_ - 1 == frame_index);
+  ASSERT(frame_index >= 0 && frame_index < output_count_);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address for the bottommost output frame can be computed from
+  // the input frame pointer and the output frame's height.  For all
+  // subsequent output frames, it can be computed from the previous one's
+  // top address and the current frame's size.
+  uint32_t top_address;
+  if (is_bottommost) {
+    // 2 = context and function in the frame.
+    top_address =
+        input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
+  } else {
+    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  }
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = function->shared()->formal_parameter_count() + 1;
+  unsigned output_offset = output_frame_size;
+  unsigned input_offset = input_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  input_offset -= (parameter_count * kPointerSize);
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Synthesize their values and set them up
+  // explicitly.
+  //
+  // The caller's pc for the bottommost output frame is the same as in the
+  // input frame.  For all subsequent output frames, it can be read from the
+  // previous one.  This frame's pc can be computed from the non-optimized
+  // function code and AST id of the bailout.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  intptr_t value;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetPc();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The caller's frame pointer for the bottommost output frame is the same
+  // as in the input frame.  For all subsequent output frames, it can be
+  // read from the previous one.  Also compute and set this frame's frame
+  // pointer.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetFp();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
+  output_frame->SetFp(fp_value);
+  if (is_topmost) {
+    output_frame->SetRegister(fp.code(), fp_value);
+  }
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // For the bottommost output frame the context can be gotten from the input
+  // frame. For all subsequent output frames it can be gotten from the function
+  // so long as we don't inline functions that need local contexts.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = reinterpret_cast<intptr_t>(function->context());
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  output_frame->SetContext(value);
+  if (is_topmost) output_frame->SetRegister(cp.code(), value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The function was mentioned explicitly in the BEGIN_FRAME.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(function);
+  // The function for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Translate the rest of the frame.
+  for (unsigned i = 0; i < height; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  ASSERT(0 == output_offset);
+
+  // Compute this frame's PC, state, and continuation.
+  Code* non_optimized_code = function->shared()->code();
+  FixedArray* raw_data = non_optimized_code->deoptimization_data();
+  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+  Address start = non_optimized_code->instruction_start();
+  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
+  output_frame->SetPc(pc_value);
+
+  FullCodeGenerator::State state =
+      FullCodeGenerator::StateField::decode(pc_and_state);
+  output_frame->SetState(Smi::FromInt(state));
+
+
+  // Set the continuation for the topmost frame.
+  if (is_topmost && bailout_type_ != DEBUGGER) {
+    Builtins* builtins = isolate_->builtins();
+    Code* continuation = (bailout_type_ == EAGER)
+        ? builtins->builtin(Builtins::kNotifyDeoptimized)
+        : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+    output_frame->SetContinuation(
+        reinterpret_cast<uint32_t>(continuation->entry()));
+  }
+}
+
 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  UNIMPLEMENTED();
+  // Set the register values. The values are not important as there are no
+  // callee-saved registers in JavaScript frames, so all registers are
+  // spilled. Registers fp and sp are, however, set to the correct values.
+
+  for (int i = 0; i < Register::kNumRegisters; i++) {
+    input_->SetRegister(i, i * 4);
+  }
+  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+    input_->SetDoubleRegister(i, 0.0);
+  }
+
+  // Fill the frame content from the actual data on the frame.
+  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+  }
 }
 
 
+#define __ masm()->
+
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
 void Deoptimizer::EntryGenerator::Generate() {
-  UNIMPLEMENTED();
+  GeneratePrologue();
+
+  Isolate* isolate = masm()->isolate();
+
+  CpuFeatures::Scope scope(FPU);
+  // Unlike on ARM, we don't save all the registers, just the useful ones.
+  // For the rest, there are gaps on the stack, so the offsets remain the same.
+  const int kNumberOfRegisters = Register::kNumRegisters;
+
+  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+  RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+  const int kDoubleRegsSize =
+      kDoubleSize * FPURegister::kNumAllocatableRegisters;
+
+  // Save all FPU registers before messing with them.
+  __ Subu(sp, sp, Operand(kDoubleRegsSize));
+  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
+    FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ sdc1(fpu_reg, MemOperand(sp, offset));
+  }
+
+  // Push saved_regs (needed to populate FrameDescription::registers_).
+  // Leave gaps for other registers.
+  __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
+  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+    if ((saved_regs & (1 << i)) != 0) {
+      __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
+    }
+  }
+
+  const int kSavedRegistersAreaSize =
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+  // Get the bailout id from the stack.
+  __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
+
+  // If possible, get the address of the location in the code object (a3)
+  // (the return address for lazy deoptimization) and compute the fp-to-sp
+  // delta in register t0.
+  if (type() == EAGER) {
+    __ mov(a3, zero_reg);
+    // Correct one word for bailout id.
+    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else if (type() == OSR) {
+    __ mov(a3, ra);
+    // Correct one word for bailout id.
+    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else {
+    __ mov(a3, ra);
+    // Correct two words for bailout id and return address.
+    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+  }
+
+  __ Subu(t0, fp, t0);
+
+  // Allocate a new deoptimizer object.
+  // Pass four arguments in a0 to a3 and the fifth and sixth on the stack.
+  __ PrepareCallCFunction(6, t1);
+  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ li(a1, Operand(type()));  // bailout type,
+  // a2: bailout id already loaded.
+  // a3: code address or 0 already loaded.
+  __ sw(t0, CFunctionArgumentOperand(5));  // Fp-to-sp delta.
+  __ li(t1, Operand(ExternalReference::isolate_address()));
+  __ sw(t1, CFunctionArgumentOperand(6));  // Isolate.
+  // Call Deoptimizer::New().
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  }
+
+  // Preserve "deoptimizer" object in register v0 and get the input
+  // frame descriptor pointer to a1 (deoptimizer->input_);
+  // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
+  __ mov(a0, v0);
+  __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
+
+  // Copy core registers into FrameDescription::registers_[kNumRegisters].
+  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    if ((saved_regs & (1 << i)) != 0) {
+      __ lw(a2, MemOperand(sp, i * kPointerSize));
+      __ sw(a2, MemOperand(a1, offset));
+    } else if (FLAG_debug_code) {
+      __ li(a2, kDebugZapValue);
+      __ sw(a2, MemOperand(a1, offset));
+    }
+  }
+
+  // Copy FPU registers to
+  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+    __ ldc1(f0, MemOperand(sp, src_offset));
+    __ sdc1(f0, MemOperand(a1, dst_offset));
+  }
+
+  // Remove the bailout id, the return address (if present), and the saved
+  // registers from the stack.
+  if (type() == EAGER || type() == OSR) {
+    __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else {
+    __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+  }
+
+  // Compute a pointer to the unwinding limit in register a2; that is
+  // the first stack slot not part of the input frame.
+  __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+  __ Addu(a2, a2, sp);
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
+  Label pop_loop;
+  __ bind(&pop_loop);
+  __ pop(t0);
+  __ sw(t0, MemOperand(a3, 0));
+  __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
+  __ addiu(a3, a3, sizeof(uint32_t));  // In delay slot.
+
+  // Compute the output frame in the deoptimizer.
+  __ push(a0);  // Preserve deoptimizer object across call.
+  // a0: deoptimizer object; a1: scratch.
+  __ PrepareCallCFunction(1, a1);
+  // Call Deoptimizer::ComputeOutputFrames().
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate), 1);
+  }
+  __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).
+
+  // Replace the current (input) frame with the output frames.
+  Label outer_push_loop, inner_push_loop;
+  // Outer loop state: a0 = current "FrameDescription** output_",
+  // a1 = one past the last FrameDescription**.
+  __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+  __ lw(a0, MemOperand(a0, Deoptimizer::output_offset()));  // a0 is output_.
+  __ sll(a1, a1, kPointerSizeLog2);  // Count to offset.
+  __ addu(a1, a0, a1);  // a1 = one past the last FrameDescription**.
+  __ bind(&outer_push_loop);
+  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+  __ lw(a2, MemOperand(a0, 0));  // output_[ix]
+  __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+  __ bind(&inner_push_loop);
+  __ Subu(a3, a3, Operand(sizeof(uint32_t)));
+  __ Addu(t2, a2, Operand(a3));
+  __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
+  __ push(t3);
+  __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+  __ Addu(a0, a0, Operand(kPointerSize));
+  __ Branch(&outer_push_loop, lt, a0, Operand(a1));
+
+
+  // Push state, pc, and continuation from the last output frame.
+  if (type() != OSR) {
+    __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
+    __ push(t2);
+  }
+
+  __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
+  __ push(t2);
+  __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
+  __ push(t2);
+
+
+  // Technically, restoring 'at' should work unless zero_reg is also
+  // restored, but it's safer to check for this.
+  ASSERT(!(at.bit() & restored_regs));
+  // Restore the registers from the last output frame.
+  __ mov(at, a2);
+  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    if ((restored_regs & (1 << i)) != 0) {
+      __ lw(ToRegister(i), MemOperand(at, offset));
+    }
+  }
+
+  __ InitializeRootRegister();
+
+  __ pop(at);  // Get continuation, leave pc on stack.
+  __ pop(ra);
+  __ Jump(at);
+  __ stop("Unreachable.");
 }
 
 
+// Maximum size of a table entry generated below.
+const int Deoptimizer::table_entry_size_ = 9 * Assembler::kInstrSize;
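+// That is 36 bytes per entry; GeneratePrologue() pads each entry with nops
+// so that entry i starts at offset i * table_entry_size_ from the table start.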
+
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
-  UNIMPLEMENTED();
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+
+  // Create a sequence of deoptimization entries. Note that any
+  // registers may be still live.
+  Label table_start;
+  __ bind(&table_start);
+  for (int i = 0; i < count(); i++) {
+    Label start;
+    __ bind(&start);
+    if (type() != EAGER) {
+      // Emulate ia32 like call by pushing return address to stack.
+      __ addiu(sp, sp, -2 * kPointerSize);
+      __ sw(ra, MemOperand(sp, 1 * kPointerSize));
+    } else {
+      __ addiu(sp, sp, -1 * kPointerSize);
+    }
+    // Jump over the remaining deopt entries (including this one).
+    // This code is always reached by calling Jump, which puts the target
+    // (label start) into t9.
+    const int remaining_entries = (count() - i) * table_entry_size_;
+    __ Addu(t9, t9, remaining_entries);
+    // 'at' was clobbered so we can only load the current entry value here.
+    __ li(at, i);
+    __ jr(t9);  // Expose delay slot.
+    __ sw(at, MemOperand(sp, 0 * kPointerSize));  // In the delay slot.
+
+    // Pad the rest of the code.
+    while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
+      __ nop();
+    }
+
+    ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
+  }
+
+  ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
+      count() * table_entry_size_);
 }
 
+#undef __
+
 
 } }  // namespace v8::internal
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index fde0c58..1d40c2c 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -515,7 +515,7 @@
               Format(instr, "cvt.w.d 'fd, 'fs");
               break;
             case CVT_L_D: {
-              if (mips32r2) {
+              if (kArchVariant == kMips32r2) {
                 Format(instr, "cvt.l.d 'fd, 'fs");
               } else {
                 Unknown(instr);
@@ -526,7 +526,7 @@
               Format(instr, "trunc.w.d 'fd, 'fs");
               break;
             case TRUNC_L_D: {
-              if (mips32r2) {
+              if (kArchVariant == kMips32r2) {
                 Format(instr, "trunc.l.d 'fd, 'fs");
               } else {
                 Unknown(instr);
@@ -592,7 +592,7 @@
         case L:
           switch (instr->FunctionFieldRaw()) {
             case CVT_D_L: {
-              if (mips32r2) {
+              if (kArchVariant == kMips32r2) {
                 Format(instr, "cvt.d.l 'fd, 'fs");
               } else {
                 Unknown(instr);
@@ -600,7 +600,7 @@
               break;
             }
             case CVT_S_L: {
-              if (mips32r2) {
+              if (kArchVariant == kMips32r2) {
                 Format(instr, "cvt.s.l 'fd, 'fs");
               } else {
                 Unknown(instr);
@@ -636,7 +636,7 @@
           if (instr->RsValue() == 0) {
             Format(instr, "srl     'rd, 'rt, 'sa");
           } else {
-            if (mips32r2) {
+            if (kArchVariant == kMips32r2) {
               Format(instr, "rotr    'rd, 'rt, 'sa");
             } else {
               Unknown(instr);
@@ -653,7 +653,7 @@
           if (instr->SaValue() == 0) {
             Format(instr, "srlv    'rd, 'rt, 'rs");
           } else {
-            if (mips32r2) {
+            if (kArchVariant == kMips32r2) {
               Format(instr, "rotrv   'rd, 'rt, 'rs");
             } else {
               Unknown(instr);
@@ -770,7 +770,7 @@
     case SPECIAL3:
       switch (instr->FunctionFieldRaw()) {
         case INS: {
-          if (mips32r2) {
+          if (kArchVariant == kMips32r2) {
             Format(instr, "ins     'rt, 'rs, 'sa, 'ss2");
           } else {
             Unknown(instr);
@@ -778,7 +778,7 @@
           break;
         }
         case EXT: {
-          if (mips32r2) {
+          if (kArchVariant == kMips32r2) {
             Format(instr, "ext     'rt, 'rs, 'sa, 'ss1");
           } else {
             Unknown(instr);
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 2c83893..2ed358a 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -36,9 +36,9 @@
 // Register lists.
 // Note that the bit values must match those used in actual instruction
 // encoding.
-static const int kNumRegs = 32;
+const int kNumRegs = 32;
 
-static const RegList kJSCallerSaved =
+const RegList kJSCallerSaved =
   1 << 2  |  // v0
   1 << 3  |  // v1
   1 << 4  |  // a0
@@ -54,7 +54,7 @@
   1 << 14 |  // t6
   1 << 15;   // t7
 
-static const int kNumJSCallerSaved = 14;
+const int kNumJSCallerSaved = 14;
 
 
 // Return the code of the n-th caller-saved register available to JavaScript
@@ -63,7 +63,7 @@
 
 
 // Callee-saved registers preserved when switching from C to JavaScript.
-static const RegList kCalleeSaved =
+const RegList kCalleeSaved =
   1 << 16 |  // s0
   1 << 17 |  // s1
   1 << 18 |  // s2
@@ -74,9 +74,9 @@
   1 << 23 |  // s7 (cp in Javascript code)
   1 << 30;   // fp/s8
 
-static const int kNumCalleeSaved = 9;
+const int kNumCalleeSaved = 9;
 
-static const RegList kCalleeSavedFPU =
+const RegList kCalleeSavedFPU =
   1 << 20 |  // f20
   1 << 22 |  // f22
   1 << 24 |  // f24
@@ -84,23 +84,37 @@
   1 << 28 |  // f28
   1 << 30;   // f30
 
-static const int kNumCalleeSavedFPU = 6;
+const int kNumCalleeSavedFPU = 6;
+
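+// Caller-saved FPU registers: the even-numbered double registers f0..f18,
+// which are not preserved across calls in the MIPS O32 ABI.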
+const RegList kCallerSavedFPU =
+  1 << 0  |  // f0
+  1 << 2  |  // f2
+  1 << 4  |  // f4
+  1 << 6  |  // f6
+  1 << 8  |  // f8
+  1 << 10 |  // f10
+  1 << 12 |  // f12
+  1 << 14 |  // f14
+  1 << 16 |  // f16
+  1 << 18;   // f18
+
+
 // Number of registers for which space is reserved in safepoints. Must be a
 // multiple of 8.
-static const int kNumSafepointRegisters = 24;
+const int kNumSafepointRegisters = 24;
 
 // Define the list of registers actually saved at safepoints.
 // Note that the number of saved registers may be smaller than the reserved
 // space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-static const int kNumSafepointSavedRegisters =
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters =
     kNumJSCallerSaved + kNumCalleeSaved;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
-static const int kUndefIndex = -1;
+const int kUndefIndex = -1;
 // Map with indexes on the stack that correspond to codes of saved registers.
-static const int kSafepointRegisterStackIndexMap[kNumRegs] = {
+const int kSafepointRegisterStackIndexMap[kNumRegs] = {
   kUndefIndex,  // zero_reg
   kUndefIndex,  // at
   0,   // v0
@@ -140,13 +154,13 @@
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset    = 0 * kPointerSize;
-  static const int kStateOffset   = 1 * kPointerSize;
-  static const int kContextOffset = 2 * kPointerSize;
-  static const int kFPOffset      = 3 * kPointerSize;
-  static const int kPCOffset      = 4 * kPointerSize;
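+  // A handler occupies five slots: the next handler, the handler's code
+  // object, its state, the saved context, and the saved frame pointer.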
+  static const int kNextOffset     = 0 * kPointerSize;
+  static const int kCodeOffset     = 1 * kPointerSize;
+  static const int kStateOffset    = 2 * kPointerSize;
+  static const int kContextOffset  = 3 * kPointerSize;
+  static const int kFPOffset       = 4 * kPointerSize;
 
-  static const int kSize = kPCOffset + kPointerSize;
+  static const int kSize = kFPOffset + kPointerSize;
 };
 
 
@@ -181,6 +195,9 @@
 
 class StandardFrameConstants : public AllStatic {
  public:
+  // Fixed part of the frame consists of return address, caller fp,
+  // context and function.
+  static const int kFixedFrameSize    =  4 * kPointerSize;
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
@@ -216,6 +233,8 @@
 class ArgumentsAdaptorFrameConstants : public AllStatic {
  public:
   static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
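+  // An adaptor frame adds one slot for the argument count on top of the
+  // fixed frame (return address, caller fp, context and function).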
+  static const int kFrameSize =
+      StandardFrameConstants::kFixedFrameSize + kPointerSize;
 };
 
 
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 9a210c4..657bee8 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,11 +42,13 @@
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
+#include "isolate-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "stub-cache.h"
 
 #include "mips/code-stubs-mips.h"
+#include "mips/macro-assembler-mips.h"
 
 namespace v8 {
 namespace internal {
@@ -54,17 +56,14 @@
 #define __ ACCESS_MASM(masm_)
 
 
-static unsigned GetPropertyId(Property* property) {
-  return property->id();
-}
-
-
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
 // method EmitPatchInfo to record a marker back to the patchable code. This
-// marker is a andi at, rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16
-// bit immediate value is used) is the delta from the pc to the first
+// marker is an andi zero_reg, rx, #yyyy instruction, and rx * 0x0000ffff + yyyy
+// (raw 16 bit immediate value is used) is the delta from the pc to the first
 // instruction of the patchable code.
+// The marker instruction is effectively a NOP (dest is zero_reg) and will
+// never be emitted by normal code.
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -103,7 +102,7 @@
     if (patch_site_.is_bound()) {
       int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
       Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
-      __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+      __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
 #ifdef DEBUG
       info_emitted_ = true;
 #endif
@@ -121,13 +120,20 @@
 };
 
 
+// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
+int FullCodeGenerator::self_optimization_header_size() {
+  UNREACHABLE();
+  return 10 * Instruction::kInstrSize;
+}
+
+
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right.  The actual
 // argument count matches the formal parameter count expected by the
 // function.
 //
 // The live registers are:
-//   o a1: the JS function object being called (ie, ourselves)
+//   o a1: the JS function object being called (i.e. ourselves)
 //   o cp: our context
 //   o fp: our caller's frame pointer
 //   o sp: stack pointer
@@ -135,10 +141,12 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-mips.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
-  ASSERT(info_ == NULL);
-  info_ = info;
-  scope_ = info->scope();
+void FullCodeGenerator::Generate() {
+  CompilationInfo* info = info_;
+  handler_table_ =
+      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+  profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -153,7 +161,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). t1 is zero for method calls and non-zero for
   // function calls.
-  if (info->is_strict_mode() || info->is_native()) {
+  if (!info->is_classic_mode() || info->is_native()) {
     Label ok;
     __ Branch(&ok, eq, t1, Operand(zero_reg));
     int receiver_offset = info->scope()->num_parameters() * kPointerSize;
@@ -162,6 +170,11 @@
     __ bind(&ok);
   }
 
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   int locals_count = info->scope()->num_stack_slots();
 
   __ Push(ra, fp, cp, a1);
@@ -207,14 +220,12 @@
         // Load parameter from stack.
         __ lw(a0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        __ li(a1, Operand(Context::SlotOffset(var->index())));
-        __ addu(a2, cp, a1);
-        __ sw(a0, MemOperand(a2, 0));
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have to use two more registers to avoid
-        // clobbering cp.
-        __ mov(a2, cp);
-        __ RecordWrite(a2, a1, a3);
+        MemOperand target = ContextOperand(cp, var->index());
+        __ sw(a0, target);
+
+        // Update the write barrier.
+        __ RecordWriteContextSlot(
+            cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
       }
     }
   }
@@ -242,7 +253,7 @@
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
     ArgumentsAccessStub::Type type;
-    if (is_strict_mode()) {
+    if (!is_classic_mode()) {
       type = ArgumentsAccessStub::NEW_STRICT;
     } else if (function()->has_duplicate_parameters()) {
       type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -271,8 +282,11 @@
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        int ignored = 0;
-        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+        VariableProxy* proxy = scope()->function();
+        ASSERT(proxy->var()->mode() == CONST ||
+               proxy->var()->mode() == CONST_HARMONY);
+        ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
+        EmitDeclaration(proxy, proxy->var()->mode(), NULL);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -309,18 +323,74 @@
 }
 
 
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
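+  // The counter is a Smi stored in a JSGlobalPropertyCell; the decremented
+  // value is left in a3 so callers can branch on it going negative.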
+  __ li(a2, Operand(profiling_counter_));
+  __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+  __ Subu(a3, a3, Operand(Smi::FromInt(delta)));
+  __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+  int reset_value = FLAG_interrupt_budget;
+  if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
+    // Self-optimization is a one-off thing: if it fails, don't try again.
+    reset_value = Smi::kMaxValue;
+  }
+  if (isolate()->IsDebuggerActive()) {
+    // Detect debug break requests as soon as possible.
+    reset_value = 10;
+  }
+  __ li(a2, Operand(profiling_counter_));
+  __ li(a3, Operand(Smi::FromInt(reset_value)));
+  __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+static const int kMaxBackEdgeWeight = 127;
+static const int kBackEdgeDistanceDivisor = 142;
+
+
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+                                       Label* back_edge_target) {
+  // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
+  // to make sure it is constant. Branch may emit a skip-or-jump sequence
+  // instead of the normal Branch. It seems that the "skip" part of that
+  // sequence is about as long as this Branch would be so it is safe to ignore
+  // that.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
-  __ LoadRoot(t0, Heap::kStackLimitRootIndex);
-  __ Branch(&ok, hs, sp, Operand(t0));
-  StackCheckStub stub;
+  if (FLAG_count_based_interrupts) {
+    int weight = 1;
+    if (FLAG_weighted_back_edges) {
+      ASSERT(back_edge_target->is_bound());
+      int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+      weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kBackEdgeDistanceDivisor));
+    }
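+    // The decrement grows with the size of the loop body, so larger loops
+    // consume the interrupt budget in fewer back edges.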
+    EmitProfilingCounterDecrement(weight);
+    __ slt(at, a3, zero_reg);
+    __ beq(at, zero_reg, &ok);
+    // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+    InterruptStub stub;
+    __ CallStub(&stub);
+  } else {
+    __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+    __ sltu(at, sp, t0);
+    __ beq(at, zero_reg, &ok);
+    // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+    StackCheckStub stub;
+    __ CallStub(&stub);
+  }
   // Record a mapping of this PC offset to the OSR id.  This is used to find
   // the AST id from the unoptimized code in order to use it as a key into
   // the deoptimization input data found in the optimized code.
   RecordStackCheck(stmt->OsrEntryId());
+  if (FLAG_count_based_interrupts) {
+    EmitProfilingCounterReset();
+  }
 
-  __ CallStub(&stub);
   __ bind(&ok);
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
@@ -342,6 +412,32 @@
       __ push(v0);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
+    if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
+      // Pretend that the exit is a backwards jump to the entry.
+      int weight = 1;
+      if (info_->ShouldSelfOptimize()) {
+        weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+      } else if (FLAG_weighted_back_edges) {
+        int distance = masm_->pc_offset();
+        weight = Min(kMaxBackEdgeWeight,
+                     Max(1, distance / kBackEdgeDistanceDivisor));
+      }
+      EmitProfilingCounterDecrement(weight);
+      Label ok;
+      __ Branch(&ok, ge, a3, Operand(zero_reg));
+      __ push(v0);
+      if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
+        __ lw(a2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+        __ push(a2);
+        __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
+      } else {
+        InterruptStub stub;
+        __ CallStub(&stub);
+      }
+      __ pop(v0);
+      EmitProfilingCounterReset();
+      __ bind(&ok);
+    }
 
 #ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
@@ -393,7 +489,7 @@
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -416,7 +512,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -451,7 +547,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -510,7 +606,7 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -577,7 +673,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -670,15 +766,17 @@
   __ sw(src, location);
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
-    __ RecordWrite(scratch0,
-                   Operand(Context::SlotOffset(var->index())),
-                   scratch1,
-                   src);
+    __ RecordWriteContextSlot(scratch0,
+                              location.offset(),
+                              src,
+                              scratch1,
+                              kRAHasBeenSaved,
+                              kDontSaveFPRegs);
   }
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -689,13 +787,7 @@
 
   Label skip;
   if (should_normalize) __ Branch(&skip);
-
-  ForwardBailoutStack* current = forward_bailout_stack_;
-  while (current != NULL) {
-    PrepareForBailout(current->expr(), state);
-    current = current->parent();
-  }
-
+  PrepareForBailout(expr, TOS_REG);
   if (should_normalize) {
     __ LoadRoot(t0, Heap::kTrueValueRootIndex);
     Split(eq, a0, Operand(t0), if_true, if_false, NULL);
@@ -705,16 +797,17 @@
 
 
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        Variable::Mode mode,
-                                        FunctionLiteral* function,
-                                        int* global_count) {
+                                        VariableMode mode,
+                                        FunctionLiteral* function) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++(*global_count);
+      ++global_count_;
       break;
 
     case Variable::PARAMETER:
@@ -723,7 +816,7 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ sw(result_register(), StackOperand(variable));
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
           Comment cmnt(masm_, "[ Declaration");
           __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
           __ sw(t0, StackOperand(variable));
@@ -750,10 +843,16 @@
         __ sw(result_register(), ContextOperand(cp, variable->index()));
         int offset = Context::SlotOffset(variable->index());
         // We know that we have written a function, which is not a smi.
-        __ mov(a1, cp);
-        __ RecordWrite(a1, Operand(offset), a2, result_register());
+        __ RecordWriteContextSlot(cp,
+                                  offset,
+                                  result_register(),
+                                  a2,
+                                  kRAHasBeenSaved,
+                                  kDontSaveFPRegs,
+                                  EMIT_REMEMBERED_SET,
+                                  OMIT_SMI_CHECK);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
           Comment cmnt(masm_, "[ Declaration");
           __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
           __ sw(at, ContextOperand(cp, variable->index()));
@@ -765,11 +864,13 @@
     case Variable::LOOKUP: {
       Comment cmnt(masm_, "[ Declaration");
       __ li(a2, Operand(variable->name()));
-      // Declaration nodes are always introduced in one of three modes.
-      ASSERT(mode == Variable::VAR ||
-             mode == Variable::CONST ||
-             mode == Variable::LET);
-      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      // Declaration nodes are always introduced in one of four modes.
+      ASSERT(mode == VAR ||
+             mode == CONST ||
+             mode == CONST_HARMONY ||
+             mode == LET);
+      PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
+        ? READ_ONLY : NONE;
       __ li(a1, Operand(Smi::FromInt(attr)));
       // Push initial value, if any.
       // Note: For variables we must not push an initial value (such as
@@ -779,7 +880,7 @@
         __ Push(cp, a2, a1);
         // Push initial value for function declaration.
         VisitForStackValue(function);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
           __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
           __ Push(cp, a2, a1, a0);
       } else {
@@ -794,9 +895,6 @@
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
-
-
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
@@ -859,7 +957,7 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
     patch_site.EmitPatchInfo();
 
     __ Branch(&next_test, ne, v0, Operand(zero_reg));
@@ -909,7 +1007,8 @@
   Register null_value = t1;
   __ LoadRoot(null_value, Heap::kNullValueRootIndex);
   __ Branch(&exit, eq, a0, Operand(null_value));
-
+  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+  __ mov(a0, v0);
   // Convert the object to a JS object.
   Label convert, done_convert;
   __ JumpIfSmi(a0, &convert);
@@ -922,48 +1021,17 @@
   __ bind(&done_convert);
   __ push(a0);
 
+  // Check for proxies.
+  Label call_runtime;
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&call_runtime, le, a1, Operand(LAST_JS_PROXY_TYPE));
+
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  Label next, call_runtime;
-  // Preload a couple of values used in the loop.
-  Register  empty_fixed_array_value = t2;
-  __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
-  Register empty_descriptor_array_value = t3;
-  __ LoadRoot(empty_descriptor_array_value,
-              Heap::kEmptyDescriptorArrayRootIndex);
-  __ mov(a1, a0);
-  __ bind(&next);
-
-  // Check that there are no elements.  Register a1 contains the
-  // current JS object we've reached through the prototype chain.
-  __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
-  __ Branch(&call_runtime, ne, a2, Operand(empty_fixed_array_value));
-
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in a2 for the subsequent
-  // prototype load.
-  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
-  __ lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
-  __ JumpIfSmi(a3, &call_runtime);
-
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors (a3).  This is the case if the next enumeration
-  // index field does not contain a smi.
-  __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
-  __ JumpIfSmi(a3, &call_runtime);
-
-  // For all objects but the receiver, check that the cache is empty.
-  Label check_prototype;
-  __ Branch(&check_prototype, eq, a1, Operand(a0));
-  __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ Branch(&call_runtime, ne, a3, Operand(empty_fixed_array_value));
-
-  // Load the prototype from the map and loop if non-null.
-  __ bind(&check_prototype);
-  __ lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
-  __ Branch(&next, ne, a1, Operand(null_value));
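+  // CheckEnumCache walks the prototype chain and jumps to call_runtime if a
+  // valid enum cache cannot be guaranteed for every object on the chain.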
+  __ CheckEnumCache(null_value, &call_runtime);
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
@@ -991,7 +1059,7 @@
   __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset));
   __ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
-  // Setup the four remaining stack slots.
+  // Set up the four remaining stack slots.
   __ push(v0);  // Map.
   __ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset));
   __ li(a0, Operand(Smi::FromInt(0)));
@@ -1000,14 +1068,32 @@
   __ jmp(&loop);
 
   // We got a fixed array in register v0. Iterate through that.
+  Label non_proxy;
   __ bind(&fixed_array);
-  __ li(a1, Operand(Smi::FromInt(0)));  // Map (0) - force slow check.
-  __ Push(a1, v0);
+
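+  // The feedback cell is created holding the fast-case marker; executing
+  // this slow (fixed array) path rewrites it to the slow-case marker so the
+  // optimizing compiler will not assume the fast case.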
+  Handle<JSGlobalPropertyCell> cell =
+      isolate()->factory()->NewJSGlobalPropertyCell(
+          Handle<Object>(
+              Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+  RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+  __ LoadHeapObject(a1, cell);
+  __ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
+  __ sw(a2, FieldMemOperand(a1, JSGlobalPropertyCell::kValueOffset));
+
+  __ li(a1, Operand(Smi::FromInt(1)));  // Smi indicates slow check
+  __ lw(a2, MemOperand(sp, 0 * kPointerSize));  // Get enumerated object
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ GetObjectType(a2, a3, a3);
+  __ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE));
+  __ li(a1, Operand(Smi::FromInt(0)));  // Zero indicates proxy
+  __ bind(&non_proxy);
+  __ Push(a1, v0);  // Smi and array
   __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
   __ li(a0, Operand(Smi::FromInt(0)));
   __ Push(a1, a0);  // Fixed array length (as smi) and initial index.
 
   // Generate code for doing the condition check.
+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
   // Load the current count to a0, load the length to a1.
   __ lw(a0, MemOperand(sp, 0 * kPointerSize));
@@ -1021,17 +1107,22 @@
   __ addu(t0, a2, t0);  // Array base + scaled (smi) index.
   __ lw(a3, MemOperand(t0));  // Current entry.
 
-  // Get the expected map from the stack or a zero map in the
+  // Get the expected map from the stack or a smi in the
   // permanent slow case into register a2.
   __ lw(a2, MemOperand(sp, 3 * kPointerSize));
 
   // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
+  // If not, we may have to filter the key.
   Label update_each;
   __ lw(a1, MemOperand(sp, 4 * kPointerSize));
   __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
   __ Branch(&update_each, eq, t0, Operand(a2));
 
+  // For proxies, no filtering is done.
+  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+  ASSERT_EQ(Smi::FromInt(0), 0);
+  __ Branch(&update_each, eq, a2, Operand(zero_reg));
+
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
   // just skip it.
@@ -1047,7 +1138,7 @@
   __ mov(result_register(), a3);
   // Perform the assignment as if via '='.
   { EffectContext context(this);
-    EmitAssignment(stmt->each(), stmt->AssignmentId());
+    EmitAssignment(stmt->each());
   }
 
   // Generate code for the body of the loop.
@@ -1060,7 +1151,7 @@
   __ Addu(a0, a0, Operand(Smi::FromInt(1)));
   __ push(a0);
 
-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &loop);
   __ Branch(&loop);
 
   // Remove the pointers stored on the stack.
@@ -1068,6 +1159,7 @@
   __ Drop(5);
 
   // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1086,7 +1178,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(info->language_mode());
     __ li(a0, Operand(info));
     __ push(a0);
     __ CallStub(&stub);
@@ -1117,7 +1209,7 @@
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
         __ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1129,7 +1221,7 @@
     }
     // If no outer scope calls eval, we do not need to check more
     // context extensions.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
     s = s->outer_scope();
   }
 
@@ -1158,7 +1250,7 @@
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ Call(ic, mode);
+  CallIC(ic, mode);
 }
 
 
@@ -1171,7 +1263,7 @@
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
         __ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1201,17 +1293,26 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+  if (var->mode() == DYNAMIC_GLOBAL) {
     EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ Branch(done);
-  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+  } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == Variable::CONST) {
+    if (local->mode() == CONST ||
+        local->mode() == CONST_HARMONY ||
+        local->mode() == LET) {
       __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
       __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
-      __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-      __ movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
+      if (local->mode() == CONST) {
+        __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+        __ Movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
+      } else {  // LET || CONST_HARMONY
+        __ Branch(done, ne, at, Operand(zero_reg));
+        __ li(a0, Operand(var->name()));
+        __ push(a0);
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      }
     }
     __ Branch(done);
   }
@@ -1233,7 +1334,7 @@
       __ lw(a0, GlobalObjectOperand());
       __ li(a2, Operand(var->name()));
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+      CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(v0);
       break;
     }
@@ -1244,26 +1345,66 @@
       Comment cmnt(masm_, var->IsContextSlot()
                               ? "Context variable"
                               : "Stack variable");
-      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
-        context()->Plug(var);
-      } else {
-        // Let and const need a read barrier.
-        GetVar(v0, var);
-        __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-        __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
-        if (var->mode() == Variable::LET) {
-          Label done;
-          __ Branch(&done, ne, at, Operand(zero_reg));
-          __ li(a0, Operand(var->name()));
-          __ push(a0);
-          __ CallRuntime(Runtime::kThrowReferenceError, 1);
-          __ bind(&done);
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP.
+        // always holds.
+        ASSERT(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is
+        // located in the source after the initializer of the variable.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        bool skip_init_check;
+        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+          skip_init_check = false;
         } else {
-          __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-          __ movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
+          // Check that we always have valid source position.
+          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          skip_init_check = var->mode() != CONST &&
+              var->initializer_position() < proxy->position();
         }
-        context()->Plug(v0);
+
+        if (!skip_init_check) {
+          // Let and const need a read barrier.
+          GetVar(v0, var);
+          __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+          __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
+          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            Label done;
+            __ Branch(&done, ne, at, Operand(zero_reg));
+            __ li(a0, Operand(var->name()));
+            __ push(a0);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+            __ bind(&done);
+          } else {
+            // Uninitialized const bindings outside of harmony mode are
+            // unholed: the hole reads as undefined.
+            ASSERT(var->mode() == CONST);
+            __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+            __ Movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
+          }
+          context()->Plug(v0);
+          break;
+        }
       }
+      context()->Plug(var);
       break;
     }
 
@@ -1335,12 +1476,23 @@
 }
 
 
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
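+  // A missing accessor is pushed as null so the caller always passes a
+  // complete (getter, setter) pair to the runtime.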
+  if (expression == NULL) {
+    __ LoadRoot(a1, Heap::kNullValueRootIndex);
+    __ push(a1);
+  } else {
+    VisitForStackValue(expression);
+  }
+}
+
+
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
+  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ lw(a3, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
   __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
   __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
-  __ li(a1, Operand(expr->constant_properties()));
+  __ li(a1, Operand(constant_properties));
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1349,10 +1501,15 @@
       : ObjectLiteral::kNoFlags;
   __ li(a0, Operand(Smi::FromInt(flags)));
   __ Push(a3, a2, a1, a0);
+  int properties_count = constant_properties->length() / 2;
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    __ CallStub(&stub);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1364,6 +1521,7 @@
   // marked expressions, no store code is emitted.
   expr->CalculateEmitStore();
 
+  AccessorTable accessor_table(isolate()->zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
     ObjectLiteral::Property* property = expr->properties()->at(i);
     if (property->IsCompileTimeValue()) continue;
@@ -1387,10 +1545,10 @@
             __ mov(a0, result_register());
             __ li(a2, Operand(key->handle()));
             __ lw(a1, MemOperand(sp));
-            Handle<Code> ic = is_strict_mode()
-                ? isolate()->builtins()->StoreIC_Initialize_Strict()
-                : isolate()->builtins()->StoreIC_Initialize();
-            __ Call(ic, RelocInfo::CODE_TARGET, key->id());
+            Handle<Code> ic = is_classic_mode()
+                ? isolate()->builtins()->StoreIC_Initialize()
+                : isolate()->builtins()->StoreIC_Initialize_Strict();
+            CallIC(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1413,21 +1571,29 @@
         }
         break;
       case ObjectLiteral::Property::GETTER:
+        accessor_table.lookup(key)->second->getter = value;
+        break;
       case ObjectLiteral::Property::SETTER:
-        // Duplicate receiver on stack.
-        __ lw(a0, MemOperand(sp));
-        __ push(a0);
-        VisitForStackValue(key);
-        __ li(a1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
-                           Smi::FromInt(1) :
-                           Smi::FromInt(0)));
-        __ push(a1);
-        VisitForStackValue(value);
-        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        accessor_table.lookup(key)->second->setter = value;
         break;
     }
   }
 
+  // Emit code to define accessors, using only a single call to the runtime for
+  // each pair of corresponding getters and setters.
+  for (AccessorTable::Iterator it = accessor_table.begin();
+       it != accessor_table.end();
+       ++it) {
+    __ lw(a0, MemOperand(sp));  // Duplicate receiver.
+    __ push(a0);
+    VisitForStackValue(it->first);
+    EmitAccessor(it->second->getter);
+    EmitAccessor(it->second->setter);
+    __ li(a0, Operand(Smi::FromInt(NONE)));
+    __ push(a0);
+    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+  }
+
   if (expr->has_function()) {
     ASSERT(result_saved);
     __ lw(a0, MemOperand(sp));
@@ -1448,13 +1614,22 @@
 
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
+
+  Handle<FixedArray> constant_elements = expr->constant_elements();
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(constant_elements->get(1)));
+
   __ mov(a0, result_register());
   __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
   __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
-  __ li(a1, Operand(expr->constant_elements()));
+  __ li(a1, Operand(constant_elements));
   __ Push(a3, a2, a1);
-  if (expr->constant_elements()->map() ==
+  if (has_fast_elements && constant_elements_values->map() ==
       isolate()->heap()->fixed_cow_array_map()) {
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1466,8 +1641,13 @@
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           FLAG_smi_only_arrays);
+    FastCloneShallowArrayStub::Mode mode = has_fast_elements
+      ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+      : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
     __ CallStub(&stub);
   }
 
@@ -1488,21 +1668,30 @@
       __ push(v0);
       result_saved = true;
     }
+
     VisitForAccumulatorValue(subexpr);
 
-    // Store the subexpression value in the array's elements.
-    __ lw(a1, MemOperand(sp));  // Copy of array literal.
-    __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset));
-    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-    __ sw(result_register(), FieldMemOperand(a1, offset));
-
-    // Update the write barrier for the array store with v0 as the scratch
-    // register.
-    __ RecordWrite(a1, Operand(offset), a2, result_register());
+    if (constant_elements_kind == FAST_ELEMENTS) {
+      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+      __ lw(t2, MemOperand(sp));  // Copy of array literal.
+      __ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
+      __ sw(result_register(), FieldMemOperand(a1, offset));
+      // Update the write barrier for the array store.
+      __ RecordWriteField(a1, offset, result_register(), a2,
+                          kRAHasBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+    } else {
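+      // Literals that are not known to have fast elements go through the
+      // stub, which handles elements-kind transitions and write barriers.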
+      __ lw(a1, MemOperand(sp));  // Copy of array literal.
+      __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
+      __ li(a3, Operand(Smi::FromInt(i)));
+      __ li(t0, Operand(Smi::FromInt(expr->literal_index())));
+      __ mov(a0, result_register());
+      StoreArrayLiteralElementStub stub;
+      __ CallStub(&stub);
+    }
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1632,7 +1821,7 @@
   __ li(a2, Operand(key->handle()));
   // Call load IC. It has arguments receiver and property name a0 and a2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1641,7 +1830,7 @@
   __ mov(a0, result_register());
   // Call keyed load IC. It has arguments key and receiver in a0 and a1.
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1669,7 +1858,7 @@
 
   __ bind(&stub_call);
   BinaryOpStub stub(op, mode);
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   __ jmp(&done);
 
@@ -1752,13 +1941,13 @@
   __ pop(a1);
   BinaryOpStub stub(op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
@@ -1790,10 +1979,10 @@
       __ mov(a1, result_register());
       __ pop(a0);  // Restore value.
       __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
-      __ Call(ic);
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
+      CallIC(ic);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1803,14 +1992,13 @@
       __ mov(a1, result_register());
       __ pop(a2);
       __ pop(a0);  // Restore value.
-      Handle<Code> ic = is_strict_mode()
-        ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-        : isolate()->builtins()->KeyedStoreIC_Initialize();
-      __ Call(ic);
+      Handle<Code> ic = is_classic_mode()
+        ? isolate()->builtins()->KeyedStoreIC_Initialize()
+        : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      CallIC(ic);
       break;
     }
   }
-  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
   context()->Plug(v0);
 }
 
@@ -1822,10 +2010,10 @@
     __ mov(a0, result_register());
     __ li(a2, Operand(var->name()));
     __ lw(a1, GlobalObjectOperand());
-    Handle<Code> ic = is_strict_mode()
-        ? isolate()->builtins()->StoreIC_Initialize_Strict()
-        : isolate()->builtins()->StoreIC_Initialize();
-    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    Handle<Code> ic = is_classic_mode()
+        ? isolate()->builtins()->StoreIC_Initialize()
+        : isolate()->builtins()->StoreIC_Initialize_Strict();
+    CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
     // Const initializers need a write barrier.
@@ -1850,12 +2038,12 @@
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
 
-  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+  } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
     if (var->IsLookupSlot()) {
       __ push(v0);  // Value.
       __ li(a1, Operand(var->name()));
-      __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+      __ li(a0, Operand(Smi::FromInt(language_mode())));
       __ Push(cp, a1, a0);  // Context, name, language mode.
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     } else {
@@ -1875,12 +2063,14 @@
         // RecordWrite may destroy all its register arguments.
         __ mov(a3, result_register());
         int offset = Context::SlotOffset(var->index());
-        __ RecordWrite(a1, Operand(offset), a2, a3);
+        __ RecordWriteContextSlot(
+            a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
       }
     }
 
-  } else if (var->mode() != Variable::CONST) {
-    // Assignment to var or initializing assignment to let.
+  } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+    // Assignment to var or initializing assignment to let/const
+    // in harmony mode.
     if (var->IsStackAllocated() || var->IsContextSlot()) {
       MemOperand location = VarOperand(var, a1);
       if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1893,13 +2083,15 @@
       __ sw(v0, location);
       if (var->IsContextSlot()) {
         __ mov(a3, v0);
-        __ RecordWrite(a1, Operand(Context::SlotOffset(var->index())), a2, a3);
+        int offset = Context::SlotOffset(var->index());
+        __ RecordWriteContextSlot(
+            a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
       }
     } else {
       ASSERT(var->IsLookupSlot());
       __ push(v0);  // Value.
       __ li(a1, Operand(var->name()));
-      __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+      __ li(a0, Operand(Smi::FromInt(language_mode())));
       __ Push(cp, a1, a0);  // Context, name, language mode.
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
@@ -1937,10 +2129,10 @@
     __ pop(a1);
   }
 
-  Handle<Code> ic = is_strict_mode()
-        ? isolate()->builtins()->StoreIC_Initialize_Strict()
-        : isolate()->builtins()->StoreIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+  Handle<Code> ic = is_classic_mode()
+        ? isolate()->builtins()->StoreIC_Initialize()
+        : isolate()->builtins()->StoreIC_Initialize_Strict();
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1989,10 +2181,10 @@
     __ pop(a2);
   }
 
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize()
+      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2027,6 +2219,14 @@
 }
 
 
+void FullCodeGenerator::CallIC(Handle<Code> code,
+                               RelocInfo::Mode rmode,
+                               unsigned ast_id) {
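+  // Every IC call site goes through this wrapper so ic_total_count_ stays
+  // in sync with the calls actually emitted.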
+  ic_total_count_++;
+  __ Call(code, rmode, ast_id);
+}
+
+
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
@@ -2044,7 +2244,7 @@
   // Call the IC initialization code.
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-  __ Call(ic, mode, expr->id());
+  CallIC(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2077,7 +2277,7 @@
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
   __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize));  // Key.
-  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2097,6 +2297,7 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   CallFunctionStub stub(arg_count, flags);
+  __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2105,8 +2306,7 @@
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
-                                                      int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
@@ -2115,22 +2315,20 @@
   }
   __ push(a1);
 
-  // Push the receiver of the enclosing function and do runtime call.
+  // Push the receiver of the enclosing function.
   int receiver_offset = 2 + info_->scope()->num_parameters();
   __ lw(a1, MemOperand(fp, receiver_offset * kPointerSize));
   __ push(a1);
-  // Push the strict mode flag. In harmony mode every eval call
-  // is a strict mode eval call.
-  StrictModeFlag strict_mode = strict_mode_flag();
-  if (FLAG_harmony_block_scoping) {
-    strict_mode = kStrictMode;
-  }
-  __ li(a1, Operand(Smi::FromInt(strict_mode)));
+  // Push the language mode.
+  __ li(a1, Operand(Smi::FromInt(language_mode())));
   __ push(a1);
 
-  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
-                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
-                 : Runtime::kResolvePossiblyDirectEval, 4);
+  // Push the start position of the scope the call resides in.
+  __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
+  __ push(a1);
+
+  // Do the runtime call.
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
 }
 
 
@@ -2164,28 +2362,11 @@
         VisitForStackValue(args->at(i));
       }
 
-      // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly
-      // in generated code. If we succeed, there is no need to perform a
-      // context lookup in the runtime system.
-      Label done;
-      Variable* var = proxy->var();
-      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
-        Label slow;
-        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
-        // Push the function and resolve eval.
-        __ push(v0);
-        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
-        __ jmp(&done);
-        __ bind(&slow);
-      }
-
       // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
       __ push(a1);
-      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      __ bind(&done);
+      EmitResolvePossiblyDirectEval(arg_count);
 
       // The runtime call returns a pair of values in v0 (function) and
       // v1 (receiver). Touch up the stack with the right values.
@@ -2195,6 +2376,7 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+    __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
@@ -2301,14 +2483,29 @@
   __ li(a0, Operand(arg_count));
   __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
 
-  Handle<Code> construct_builtin =
-      isolate()->builtins()->JSConstructCall();
-  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+  // Record call targets in unoptimized code, but not in the snapshot.
+  CallFunctionFlags flags;
+  if (!Serializer::enabled()) {
+    flags = RECORD_CALL_TARGET;
+    Handle<Object> uninitialized =
+       TypeFeedbackCells::UninitializedSentinel(isolate());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+    RecordTypeFeedbackCell(expr->id(), cell);
+    __ li(a2, Operand(cell));
+  } else {
+    flags = NO_CALL_FUNCTION_FLAGS;
+  }
+
+  CallConstructStub stub(flags);
+  __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2320,7 +2517,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ And(t0, v0, Operand(kSmiTagMask));
   Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
 
@@ -2328,7 +2525,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2340,7 +2538,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
   Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
 
@@ -2348,7 +2546,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2370,7 +2569,7 @@
   __ Branch(if_false, ne, at, Operand(zero_reg));
   __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
   __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
         if_true, if_false, fall_through);
 
@@ -2378,7 +2577,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2392,7 +2592,7 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
         if_true, if_false, fall_through);
 
@@ -2400,7 +2600,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2416,7 +2617,7 @@
   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
   __ And(at, a1, Operand(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2424,8 +2625,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2475,7 +2676,7 @@
   Label entry, loop;
   // The use of t2 to store the valueOf symbol assumes that it is not otherwise
   // used in the loop below.
-  __ li(t2, Operand(FACTORY->value_of_symbol()));
+  __ LoadRoot(t2, Heap::kvalue_of_symbolRootIndex);
   __ jmp(&entry);
   __ bind(&loop);
   __ lw(a3, MemOperand(t0, 0));
@@ -2501,12 +2702,13 @@
   __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2520,7 +2722,7 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a2);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
   __ Branch(if_false);
 
@@ -2528,7 +2730,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2542,7 +2745,7 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, a1, Operand(JS_ARRAY_TYPE),
         if_true, if_false, fall_through);
 
@@ -2550,7 +2753,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2564,15 +2768,15 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2594,7 +2798,7 @@
   // Check the marker in the calling frame.
   __ bind(&check_frame_marker);
   __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
         if_true, if_false, fall_through);
 
@@ -2602,7 +2806,8 @@
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2617,14 +2822,15 @@
                          &if_true, &if_false, &fall_through);
 
   __ pop(a1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in a1 and the formal
@@ -2638,9 +2844,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
   Label exit;
   // Get the number of formal parameters.
   __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2660,7 +2865,8 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2671,18 +2877,23 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, and one of them is at
+  // either end of the type range for JS object types. Saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ GetObjectType(v0, v0, a1);  // Map is now in v0.
   __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-  __ Branch(&function, ge, a1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-  // Check if the constructor in the map is a function.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+  // Check if the constructor in the map is a JS function.
   __ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
   __ GetObjectType(v0, a1, a1);
   __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
@@ -2714,7 +2925,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2722,6 +2933,7 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
@@ -2735,9 +2947,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
 
@@ -2760,10 +2971,10 @@
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
   if (CpuFeatures::IsSupported(FPU)) {
     __ PrepareCallCFunction(1, a0);
-    __ li(a0, Operand(ExternalReference::isolate_address()));
+    __ lw(a0, ContextOperand(cp, Context::GLOBAL_INDEX));
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
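+    // The C function is now handed the global context rather than the
+    // isolate address, since the random number state is presumably kept
+    // per global context.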
     __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
-
     CpuFeatures::Scope scope(FPU);
     // 0x41300000 is the top half of 1.0 x 2^20 as a double.
     __ li(a1, Operand(0x41300000));
@@ -2778,7 +2989,8 @@
   } else {
     __ PrepareCallCFunction(2, a0);
     __ mov(a0, s0);
-    __ li(a1, Operand(ExternalReference::isolate_address()));
+    __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
+    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalContextOffset));
     __ CallCFunction(
         ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
   }
@@ -2787,9 +2999,10 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2799,9 +3012,10 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2812,7 +3026,8 @@
 }
 
 
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -2831,18 +3046,70 @@
 }
 
 
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
-  // Load the arguments on the stack and call the runtime function.
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-  MathPowStub stub;
-  __ CallStub(&stub);
+  ASSERT_NE(NULL, args->at(1)->AsLiteral());
+  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label runtime, done;
+  Register object = v0;
+  Register result = v0;
+  Register scratch0 = t5;
+  Register scratch1 = a1;
+
+#ifdef DEBUG
+  __ AbortIfSmi(object);
+  __ GetObjectType(object, scratch1, scratch1);
+  __ Assert(eq, "Trying to get date field from non-date.",
+      scratch1, Operand(JS_DATE_TYPE));
+#endif
+
+  if (index->value() == 0) {
+    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
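+      // Cached date fields are only valid while the object's cache stamp
+      // matches the isolate's current date cache stamp; on a mismatch,
+      // branch to the runtime call below.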
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ li(scratch1, Operand(stamp));
+      __ lw(scratch1, MemOperand(scratch1));
+      __ lw(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
+      __ Branch(&runtime, ne, scratch1, Operand(scratch0));
+      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
+                                            kPointerSize * index->value()));
+      __ jmp(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2, scratch1);
+    __ li(a1, Operand(index));
+    __ Move(a0, object);
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ bind(&done);
+  }
+
   context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+  // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 2);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
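+  // With FPU support the stub computes the power in generated code;
+  // otherwise fall back to the runtime implementation.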
+  if (CpuFeatures::IsSupported(FPU)) {
+    MathPowStub stub(MathPowStub::ON_STACK);
+    __ CallStub(&stub);
+  } else {
+    __ CallRuntime(Runtime::kMath_pow, 2);
+  }
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
@@ -2861,14 +3128,17 @@
   __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
-  __ RecordWrite(a1, Operand(JSValue::kValueOffset - kHeapObjectTag), a2, a3);
+  __ mov(a2, v0);
+  __ RecordWriteField(
+      a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
@@ -2880,7 +3150,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2898,7 +3169,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2907,7 +3179,6 @@
 
   Register object = a1;
   Register index = a0;
-  Register scratch = a2;
   Register result = v0;
 
   __ pop(object);
@@ -2917,7 +3188,6 @@
   Label done;
   StringCharCodeAtGenerator generator(object,
                                       index,
-                                      scratch,
                                       result,
                                       &need_conversion,
                                       &need_conversion,
@@ -2946,7 +3216,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2955,8 +3226,7 @@
 
   Register object = a1;
   Register index = a0;
-  Register scratch1 = a2;
-  Register scratch2 = a3;
+  Register scratch = a3;
   Register result = v0;
 
   __ pop(object);
@@ -2966,8 +3236,7 @@
   Label done;
   StringCharAtGenerator generator(object,
                                   index,
-                                  scratch1,
-                                  scratch2,
+                                  scratch,
                                   result,
                                   &need_conversion,
                                   &need_conversion,
@@ -2996,9 +3265,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
-
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3008,7 +3277,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -3020,10 +3290,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
@@ -3032,10 +3303,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
@@ -3044,10 +3316,24 @@
 }
 
 
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
@@ -3056,8 +3342,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
   // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3065,7 +3352,8 @@
 }
 
 
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3074,18 +3362,31 @@
   }
   VisitForAccumulatorValue(args->last());  // Function.
 
+  // Check for proxy.
+  Label proxy, done;
+  __ GetObjectType(v0, a1, a1);
+  __ Branch(&proxy, eq, a1, Operand(JS_FUNCTION_PROXY_TYPE));
+
   // InvokeFunction requires the function in a1. Move it in there.
   __ mov(a1, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(a1, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ jmp(&done);
+
+  __ bind(&proxy);
+  __ push(v0);
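+  // Pass the proxy as the last argument to Runtime::kCall.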
+  __ CallRuntime(Runtime::kCall, args->length());
+  __ bind(&done);
+
   context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3095,7 +3396,8 @@
 }
 
 
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3154,16 +3456,31 @@
   __ sw(scratch1, MemOperand(index2, 0));
   __ sw(scratch2, MemOperand(index1, 0));
 
-  Label new_space;
-  __ InNewSpace(elements, scratch1, eq, &new_space);
+  Label no_remembered_set;
+  __ CheckPageFlag(elements,
+                   scratch1,
+                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                   ne,
+                   &no_remembered_set);
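+  // Pages already marked SCAN_ON_SCAVENGE are scanned in their entirety
+  // during a scavenge, so no explicit remembered set entries are needed
+  // for stores into them.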
   // Possible optimization: do a check that both values are Smis
   // (or them and test against Smi mask).
 
-  __ mov(scratch1, elements);
-  __ RecordWriteHelper(elements, index1, scratch2);
-  __ RecordWriteHelper(scratch1, index2, scratch2);  // scratch1 holds elements.
+  // We are swapping two objects in an array and the incremental marker never
+  // pauses in the middle of scanning a single object.  Therefore the
+  // incremental marker is not disturbed, so we don't need to call the
+  // RecordWrite stub that notifies the incremental marker.
+  __ RememberedSetHelper(elements,
+                         index1,
+                         scratch2,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
+  __ RememberedSetHelper(elements,
+                         index2,
+                         scratch2,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
 
-  __ bind(&new_space);
+  __ bind(&no_remembered_set);
   // We are done. Drop elements from the stack, and return undefined.
   __ Drop(3);
   __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
@@ -3177,7 +3494,8 @@
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3230,7 +3548,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   Register right = v0;
@@ -3246,8 +3565,7 @@
   __ Branch(&ok, eq, left, Operand(right));
   // Fail if either is a non-HeapObject.
   __ And(tmp, left, Operand(right));
-  __ And(at, tmp, Operand(kSmiTagMask));
-  __ Branch(&fail, eq, at, Operand(zero_reg));
+  __ JumpIfSmi(tmp, &fail);
   __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
   __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
   __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
@@ -3267,7 +3585,8 @@
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
@@ -3280,14 +3599,15 @@
   __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
   __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3302,12 +3622,12 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       empty_separator_loop, one_char_separator_loop,
       one_char_separator_loop_entry, long_separator_loop;
-
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(0));
@@ -3475,7 +3795,7 @@
 
   // One-character separator case.
   __ bind(&one_char_separator);
-  // Replace separator with its ascii character value.
+  // Replace separator with its ASCII character value.
   __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
   // Jump into the loop after the code that copies the separator, so the first
   // element is not preceded by a separator.
@@ -3486,7 +3806,7 @@
   //   result_pos: the position to which we are currently copying characters.
   //   element: Current array element.
   //   elements_end: Array end.
-  //   separator: Single separator ascii char (in lower byte).
+  //   separator: Single separator ASCII char (in lower byte).
 
   // Copy the separator character to the result.
   __ sb(separator, MemOperand(result_pos));
@@ -3571,7 +3891,7 @@
     RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
         isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-    __ Call(ic, mode, expr->id());
+    CallIC(ic, mode, expr->id());
     // Restore context register.
     __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3592,7 +3912,9 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+            ? kNonStrictMode : kStrictMode;
+        __ li(a1, Operand(Smi::FromInt(strict_mode_flag)));
         __ push(a1);
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
         context()->Plug(v0);
@@ -3600,7 +3922,7 @@
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
           __ lw(a2, GlobalObjectOperand());
           __ li(a1, Operand(var->name()));
@@ -3643,18 +3965,35 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
       } else {
-        Label materialize_true, materialize_false;
-        Label* if_true = NULL;
-        Label* if_false = NULL;
-        Label* fall_through = NULL;
-
-        // Notice that the labels are swapped.
-        context()->PrepareTest(&materialize_true, &materialize_false,
-                               &if_false, &if_true, &fall_through);
-        if (context()->IsTest()) ForwardBailoutToChild(expr);
-        VisitForControl(expr->expression(), if_true, if_false, fall_through);
-        context()->Plug(if_false, if_true);  // Labels swapped.
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+        if (context()->IsStackValue()) __ push(v0);
+        __ jmp(&done);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+        if (context()->IsStackValue()) __ push(v0);
+        __ bind(&done);
       }
       break;
     }
@@ -3708,7 +4047,7 @@
   VisitForAccumulatorValue(expr->expression());
   SetSourcePosition(expr->position());
   __ mov(a0, result_register());
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   context()->Plug(v0);
 }
 
@@ -3819,7 +4158,7 @@
   SetSourcePosition(expr->position());
 
   BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -3849,10 +4188,10 @@
       __ mov(a0, result_register());  // Value.
       __ li(a2, Operand(prop->key()->AsLiteral()->handle()));  // Name.
       __ pop(a1);  // Receiver.
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
-      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3867,10 +4206,10 @@
       __ mov(a0, result_register());  // Value.
       __ pop(a1);  // Key.
       __ pop(a2);  // Receiver.
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
-      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3896,7 +4235,7 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    __ Call(ic);
+    CallIC(ic);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(v0);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -3916,19 +4255,24 @@
     context()->Plug(v0);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInCurrentContext(expr);
+    VisitInDuplicateContext(expr);
   }
 }
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check,
-                                                 Label* if_true,
-                                                 Label* if_false,
-                                                 Label* fall_through) {
+                                                 Expression* sub_expr,
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(expr);
+    VisitForTypeofValue(sub_expr);
   }
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(v0, if_true);
@@ -3964,10 +4308,11 @@
     Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(v0, if_false);
-    __ GetObjectType(v0, a1, v0);  // Leave map in a1.
-    Split(ge, v0, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE),
-        if_true, if_false, fall_through);
-
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ GetObjectType(v0, v0, a1);
+    __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
+    Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
+          if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(v0, if_false);
     if (!FLAG_harmony_typeof) {
@@ -3986,18 +4331,7 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
-                                                    Label* if_true,
-                                                    Label* if_false,
-                                                    Label* fall_through) {
-  VisitForAccumulatorValue(expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -4005,9 +4339,12 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4015,20 +4352,13 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
-    context()->Plug(if_true, if_false);
-    return;
-  }
-
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(t0, Heap::kTrueValueRootIndex);
       Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
       break;
@@ -4037,7 +4367,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       // The stub returns 0 for true.
       Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
       break;
@@ -4050,36 +4380,26 @@
         case Token::EQ_STRICT:
         case Token::EQ:
           cc = eq;
-          __ mov(a0, result_register());
-          __ pop(a1);
           break;
         case Token::LT:
           cc = lt;
-          __ mov(a0, result_register());
-          __ pop(a1);
           break;
         case Token::GT:
-          // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cc = lt;
-          __ mov(a1, result_register());
-          __ pop(a0);
+          cc = gt;
          break;
         case Token::LTE:
-          // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cc = ge;
-          __ mov(a1, result_register());
-          __ pop(a0);
+          cc = le;
           break;
         case Token::GTE:
           cc = ge;
-          __ mov(a0, result_register());
-          __ pop(a1);
           break;
         case Token::IN:
         case Token::INSTANCEOF:
         default:
           UNREACHABLE();
       }
+      __ mov(a0, result_register());
+      __ pop(a1);
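+      // The left operand is now in a1 and the right operand in a0, so the
+      // condition can be applied directly without swapping operands.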
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4093,9 +4413,9 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
     }
   }
@@ -4106,8 +4426,9 @@
 }
 
 
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
-  Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4115,18 +4436,23 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(expr->expression());
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  VisitForAccumulatorValue(sub_expr);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Heap::RootListIndex nil_value = nil == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
   __ mov(a0, result_register());
-  __ LoadRoot(a1, Heap::kNullValueRootIndex);
-  if (expr->is_strict()) {
+  __ LoadRoot(a1, nil_value);
+  if (expr->op() == Token::EQ_STRICT) {
     Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
   } else {
+    Heap::RootListIndex other_nil_value = nil == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
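+    // In a non-strict comparison null and undefined compare equal to each
+    // other, so the other nil value has to be checked as well.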
     __ Branch(if_true, eq, a0, Operand(a1));
-    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(a1, other_nil_value);
     __ Branch(if_true, eq, a0, Operand(a1));
-    __ And(at, a0, Operand(kSmiTagMask));
-    __ Branch(if_false, eq, at, Operand(zero_reg));
+    __ JumpIfSmi(a0, if_false);
     // It can be an undetectable object.
     __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
     __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index a76c215..2c4da1a 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -210,7 +210,8 @@
 
   // Update the write barrier. Make sure not to clobber the value.
   __ mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1);
+  __ RecordWrite(
+      elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
 }
 
 
@@ -383,10 +384,10 @@
 
 // The generated code does not accept smi keys.
 // The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                          int argc,
-                                          Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                               int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- a1    : receiver
   //  -- a2    : name
@@ -396,11 +397,11 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          MONOMORPHIC,
-                                         extra_ic_state,
+                                         extra_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, a1, a2, a3, t0, t1);
+      masm, flags, a1, a2, a3, t0, t1, t2);
 
   // If the stub cache probing failed, the receiver might be a value.
   // For value objects, we use the map of the prototype objects for
@@ -436,7 +437,7 @@
   // Probe the stub cache for the value object.
   __ bind(&probe);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, a1, a2, a3, t0, t1);
+      masm, flags, a1, a2, a3, t0, t1, t2);
 
   __ bind(&miss);
 }
@@ -462,7 +463,7 @@
 }
 
 
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -485,10 +486,10 @@
 }
 
 
-static void GenerateCallMiss(MacroAssembler* masm,
-                             int argc,
-                             IC::UtilityId id,
-                             Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+                              int argc,
+                              IC::UtilityId id,
+                              Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -504,29 +505,29 @@
   // Get the receiver of the function from the stack.
   __ lw(a3, MemOperand(sp, argc*kPointerSize));
 
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
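+    // The frame is entered here and left again when the scope closes.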
 
-  // Push the receiver and the name of the function.
-  __ Push(a3, a2);
+    // Push the receiver and the name of the function.
+    __ Push(a3, a2);
 
-  // Call the entry.
-  __ li(a0, Operand(2));
-  __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
+    // Call the entry.
+    __ PrepareCEntryArgs(2);
+    __ PrepareCEntryFunction(ExternalReference(IC_Utility(id), isolate));
 
-  CEntryStub stub(1);
-  __ CallStub(&stub);
+    CEntryStub stub(1);
+    __ CallStub(&stub);
 
-  // Move result to a1 and leave the internal frame.
-  __ mov(a1, v0);
-  __ LeaveInternalFrame();
+    // Move result to a1 and leave the internal frame.
+    __ mov(a1, v0);
+  }
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
   if (id == IC::kCallIC_Miss) {
     Label invoke, global;
     __ lw(a2, MemOperand(sp, argc * kPointerSize));
-    __ andi(t0, a2, kSmiTagMask);
-    __ Branch(&invoke, eq, t0, Operand(zero_reg));
+    __ JumpIfSmi(a2, &invoke);
     __ GetObjectType(a2, a3, a3);
     __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
     __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
@@ -538,7 +539,7 @@
     __ bind(&invoke);
   }
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   ParameterCount actual(argc);
@@ -550,18 +551,6 @@
 }
 
 
-void CallIC::GenerateMiss(MacroAssembler* masm,
-                          int argc,
-                          Code::ExtraICState extra_ic_state) {
-  // ----------- S t a t e -------------
-  //  -- a2    : name
-  //  -- ra    : return address
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
-}
-
-
 void CallIC::GenerateMegamorphic(MacroAssembler* masm,
                                  int argc,
                                  Code::ExtraICState extra_ic_state) {
@@ -577,27 +566,6 @@
 }
 
 
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  //  -- a2    : name
-  //  -- ra    : return address
-  // -----------------------------------
-
-  GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  //  -- a2    : name
-  //  -- ra    : return address
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- a2    : name
@@ -649,12 +617,13 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
-  __ EnterInternalFrame();
-  __ push(a2);  // Save the key.
-  __ Push(a1, a2);  // Pass the receiver and the key.
-  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-  __ pop(a2);  // Restore the key.
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(a2);  // Save the key.
+    __ Push(a1, a2);  // Pass the receiver and the key.
+    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+    __ pop(a2);  // Restore the key.
+  }
   __ mov(a1, v0);
   __ jmp(&do_call);
 
@@ -713,7 +682,7 @@
   __ JumpIfSmi(a2, &miss);
   __ IsObjectJSStringType(a2, a0, &miss);
 
-  GenerateCallNormal(masm, argc);
+  CallICBase::GenerateNormal(masm, argc);
   __ bind(&miss);
   GenerateMiss(masm, argc);
 }
@@ -733,7 +702,7 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, a0, a2, a3, t0, t1);
+      masm, flags, a0, a2, a3, t0, t1, t2);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
@@ -789,8 +758,6 @@
                                                 Register scratch3,
                                                 Label* unmapped_case,
                                                 Label* slow_case) {
-  Heap* heap = masm->isolate()->heap();
-
   // Check that the receiver is a JSObject. Because of the map check
   // later, we do not need to check for interceptors or whether it
   // requires access checks.
@@ -804,10 +771,12 @@
   __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
 
   // Load the elements into scratch1 and check its map.
-  Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
   __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
-  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
+  __ CheckMap(scratch1,
+              scratch2,
+              Heap::kNonStrictArgumentsElementsMapRootIndex,
+              slow_case,
+              DONT_DO_SMI_CHECK);
   // Check if element is in the range of mapped arguments. If not, jump
   // to the unmapped lookup with the parameter map in scratch1.
   __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
@@ -819,7 +788,7 @@
       FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
 
   __ li(scratch3, Operand(kPointerSize >> 1));
-  __ mul(scratch3, key, scratch3);
+  __ Mul(scratch3, key, scratch3);
   __ Addu(scratch3, scratch3, Operand(kOffset));
 
   __ Addu(scratch2, scratch1, scratch3);
@@ -832,7 +801,7 @@
   // map in scratch1).
   __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
   __ li(scratch3, Operand(kPointerSize >> 1));
-  __ mul(scratch3, scratch2, scratch3);
+  __ Mul(scratch3, scratch2, scratch3);
   __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
   __ Addu(scratch2, scratch1, scratch3);
   return MemOperand(scratch2);
@@ -851,13 +820,15 @@
   const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
   Register backing_store = parameter_map;
   __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
-  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
-  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+  __ CheckMap(backing_store,
+              scratch,
+              Heap::kFixedArrayMapRootIndex,
+              slow_case,
               DONT_DO_SMI_CHECK);
   __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
   __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
   __ li(scratch, Operand(kPointerSize >> 1));
-  __ mul(scratch, key, scratch);
+  __ Mul(scratch, key, scratch);
   __ Addu(scratch,
           scratch,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -875,8 +846,8 @@
   Label slow, notin;
   MemOperand mapped_location =
       GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
+  __ Ret(USE_DELAY_SLOT);
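+  // The load below executes in the branch delay slot of the return.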
   __ lw(v0, mapped_location);
-  __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in a2.
   MemOperand unmapped_location =
@@ -884,8 +855,8 @@
   __ lw(a2, unmapped_location);
   __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
   __ Branch(&slow, eq, a2, Operand(a3));
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a2);
-  __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
 }
@@ -899,21 +870,26 @@
   //  -- lr     : return address
   // -----------------------------------
   Label slow, notin;
+  // The store address is returned in the register part of the
+  // mapped_location MemOperand.
   MemOperand mapped_location =
       GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
   __ sw(a0, mapped_location);
-  // Verify mapped_location MemOperand is register, with no offset.
+  __ mov(t5, a0);
   ASSERT_EQ(mapped_location.offset(), 0);
-  __ RecordWrite(a3, mapped_location.rm(), t5);
+  __ RecordWrite(a3, mapped_location.rm(), t5,
+                 kRAHasNotBeenSaved, kDontSaveFPRegs);
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in a3.
+  // The store address is returned in the register part of the
+  // unmapped_location MemOperand.
   MemOperand unmapped_location =
       GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
   __ sw(a0, unmapped_location);
+  __ mov(t5, a0);
   ASSERT_EQ(unmapped_location.offset(), 0);
-  __ RecordWrite(a3, unmapped_location.rm(), t5);
+  __ RecordWrite(a3, unmapped_location.rm(), t5,
+                 kRAHasNotBeenSaved, kDontSaveFPRegs);
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
   __ bind(&slow);
@@ -1059,19 +1035,32 @@
   __ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset));
   __ sra(at, t0, String::kHashShift);
   __ xor_(a3, a3, at);
-  __ And(a3, a3, Operand(KeyedLookupCache::kCapacityMask));
+  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+  __ And(a3, a3, Operand(mask));
 
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
   ExternalReference cache_keys =
       ExternalReference::keyed_lookup_cache_keys(isolate);
   __ li(t0, Operand(cache_keys));
   __ sll(at, a3, kPointerSizeLog2 + 1);
   __ addu(t0, t0, at);
-  __ lw(t1, MemOperand(t0));  // Move t0 to symbol.
-  __ Addu(t0, t0, Operand(kPointerSize));
+
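+  // Each cache bucket holds kEntriesPerBucket (map, symbol) key pairs.
+  // Probe all but the last entry with explicit hit checks; a miss on the
+  // last entry goes to the slow case.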
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
+    __ Branch(&try_next_entry, ne, a2, Operand(t1));
+    __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
+    __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1));
+    __ bind(&try_next_entry);
+  }
+
+  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
   __ Branch(&slow, ne, a2, Operand(t1));
-  __ lw(t1, MemOperand(t0));
+  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
   __ Branch(&slow, ne, a0, Operand(t1));
 
   // Get field offset.
@@ -1081,15 +1070,24 @@
   // a3     : lookup cache index
   ExternalReference cache_field_offsets =
       ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-  __ li(t0, Operand(cache_field_offsets));
-  __ sll(at, a3, kPointerSizeLog2);
-  __ addu(at, t0, at);
-  __ lw(t1, MemOperand(at));
-  __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
-  __ Subu(t1, t1, t2);
-  __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    __ li(t0, Operand(cache_field_offsets));
+    __ sll(at, a3, kPointerSizeLog2);
+    __ addu(at, t0, at);
+    __ lw(t1, MemOperand(at, kPointerSize * i));
+    __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
+    __ Subu(t1, t1, t2);
+    __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+    if (i != 0) {
+      __ Branch(&load_in_object_property);
+    }
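+    // The entry for i == 0 falls through into the in-object property load.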
+  }
 
   // Load in-object property.
+  __ bind(&load_in_object_property);
   __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
   __ addu(t2, t2, t1);  // Index from start of object.
   __ Subu(a1, a1, Operand(kHeapObjectTag));  // Remove the heap tag.
@@ -1150,14 +1148,12 @@
 
   Register receiver = a1;
   Register index = a0;
-  Register scratch1 = a2;
-  Register scratch2 = a3;
+  Register scratch = a3;
   Register result = v0;
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
-                                          scratch1,
-                                          scratch2,
+                                          scratch,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -1201,109 +1197,193 @@
   //  -- a2     : receiver
   //  -- ra     : return address
   // -----------------------------------
-
-  Label slow, fast, array, extra, exit;
+  Label slow, array, extra, check_if_double_array;
+  Label fast_object_with_map_check, fast_object_without_map_check;
+  Label fast_double_with_map_check, fast_double_without_map_check;
+  Label transition_smi_elements, finish_object_store, non_double_value;
+  Label transition_double_elements;
 
   // Register usage.
   Register value = a0;
   Register key = a1;
   Register receiver = a2;
-  Register elements = a3;  // Elements array of the receiver.
-  // t0 is used as ip in the arm version.
-  // t3-t4 are used as temporaries.
+  Register receiver_map = a3;
+  Register elements_map = t2;
+  Register elements = t3;  // Elements array of the receiver.
+  // t0 and t1 are used as general scratch registers.
 
   // Check that the key is a smi.
   __ JumpIfNotSmi(key, &slow);
   // Check that the object isn't a smi.
   __ JumpIfSmi(receiver, &slow);
-
   // Get the map of the object.
-  __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
+  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
   __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
   __ Branch(&slow, ne, t0, Operand(zero_reg));
   // Check if the object is a JS array or not.
-  __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
-
-  __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
+  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
   // Check that the object is some kind of JSObject.
-  __ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE));
-  __ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE));
-  __ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
 
   // Object case: Check key against length in the elements array.
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check that the object is in fast mode and writable.
-  __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
-  __ Branch(&slow, ne, t3, Operand(t0));
   // Check array bounds. Both the key and the length of FixedArray are smis.
   __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&fast, lo, key, Operand(t0));
-  // Fall thru to slow if un-tagged index >= length.
+  __ Branch(&fast_object_with_map_check, lo, key, Operand(t0));
 
   // Slow case, handle jump to runtime.
   __ bind(&slow);
-
   // Entry registers are intact.
   // a0: value.
   // a1: key.
   // a2: receiver.
-
   GenerateRuntimeSetProperty(masm, strict_mode);
 
   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one
   // element to the array by writing to array[array.length].
-
   __ bind(&extra);
+  // The key and array length registers are still intact from the compare above.
   // Only support writing to array[array.length].
   __ Branch(&slow, ne, key, Operand(t0));
   // Check for room in the elements backing store.
   // Both the key and the length of FixedArray are smis.
   __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
   __ Branch(&slow, hs, key, Operand(t0));
-  // Calculate key + 1 as smi.
-  STATIC_ASSERT(0 == kSmiTag);
-  __ Addu(t3, key, Operand(Smi::FromInt(1)));
-  __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Branch(&fast);
+  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ Branch(
+      &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
 
+  // Calculate key + 1 as smi.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Addu(t0, key, Operand(Smi::FromInt(1)));
+  __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Branch(&fast_object_without_map_check);
+
+  __ bind(&check_if_double_array);
+  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+  // Add 1 to key, and go to common element store code for doubles.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Addu(t0, key, Operand(Smi::FromInt(1)));
+  __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ jmp(&fast_double_without_map_check);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
   // is the length is always a smi.
-
   __ bind(&array);
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
-  __ Branch(&slow, ne, t3, Operand(t0));
 
   // Check the key against the length in the array.
   __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
   __ Branch(&extra, hs, key, Operand(t0));
   // Fall through to fast case.
 
-  __ bind(&fast);
-  // Fast case, store the value to the elements backing store.
-  __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t4, t4, Operand(t1));
-  __ sw(value, MemOperand(t4));
-  // Skip write barrier if the written value is a smi.
-  __ JumpIfSmi(value, &exit);
+  __ bind(&fast_object_with_map_check);
+  Register scratch_value = t0;
+  Register address = t1;
+  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ Branch(&fast_double_with_map_check,
+            ne,
+            elements_map,
+            Heap::kFixedArrayMapRootIndex);
+  __ bind(&fast_object_without_map_check);
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+  // It's irrelevant whether the array is smi-only when writing a smi.
+  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(address, address, scratch_value);
+  __ sw(value, MemOperand(address));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, value);
 
+  __ bind(&non_smi_value);
+  // Escape to elements kind transition case.
+  __ CheckFastObjectElements(receiver_map, scratch_value,
+                             &transition_smi_elements);
+  // Fast elements array, store the value to the elements backing store.
+  __ bind(&finish_object_store);
+  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(address, address, scratch_value);
+  __ sw(value, MemOperand(address));
   // Update write barrier for the elements array address.
-  __ Subu(t3, t4, Operand(elements));
-
-  __ RecordWrite(elements, Operand(t3), t4, t5);
-  __ bind(&exit);
-
-  __ mov(v0, a0);  // Return the value written.
+  __ mov(v0, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements,
+                 address,
+                 value,
+                 kRAHasNotBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
   __ Ret();
+
+  __ bind(&fast_double_with_map_check);
+  // Check for fast double array case. If this fails, call through to the
+  // runtime.
+  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value,
+                                 key,
+                                 receiver,
+                                 elements,
+                                 a3,
+                                 t0,
+                                 t1,
+                                 t2,
+                                 &transition_double_elements);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, value);
+
+  __ bind(&transition_smi_elements);
+  // Transition the array appropriately depending on the value type.
+  __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  __ Branch(&non_double_value, ne, t0, Operand(at));
+
+  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_DOUBLE_ELEMENTS,
+                                         receiver_map,
+                                         t0,
+                                         &slow);
+  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3.
+  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         receiver_map,
+                                         t0,
+                                         &slow);
+  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3.
+  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         receiver_map,
+                                         t0,
+                                         &slow);
+  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3.
+  ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
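+  // The three transitions above walk the one-way elements-kind lattice
+  // FAST_SMI_ONLY_ELEMENTS -> FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS. A
+  // minimal sketch of that decision (hypothetical helper, not V8 API):
+  //
+  //   static ElementsKind KindAfterStore(ElementsKind kind, bool is_smi,
+  //                                      bool is_heap_number) {
+  //     if (is_smi) return kind;  // Smis are valid in every fast kind.
+  //     if (kind == FAST_SMI_ONLY_ELEMENTS)
+  //       return is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
+  //     if (kind == FAST_DOUBLE_ELEMENTS && !is_heap_number)
+  //       return FAST_ELEMENTS;
+  //     return kind;  // FAST_ELEMENTS can hold any value.
+  //   }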
 }
 
 
@@ -1382,6 +1462,47 @@
 }
 
 
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- a2     : receiver
+  //  -- a3     : target map
+  //  -- ra     : return address
+  // -----------------------------------
+  // Must return the modified receiver in v0.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(v0, a2);
+    __ bind(&fail);
+  }
+
+  __ push(a2);
+  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+    MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- a2     : receiver
+  //  -- a3     : target map
+  //  -- ra     : return address
+  // -----------------------------------
+  // Must return the modified receiver in v0.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(v0, a2);
+    __ bind(&fail);
+  }
+
+  __ push(a2);
+  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
@@ -1395,7 +1516,7 @@
   Code::Flags flags =
       Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, a1, a2, a3, t0, t1);
+      masm, flags, a1, a2, a3, t0, t1, t2);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
@@ -1426,11 +1547,10 @@
   //  -- ra    : return address
   // -----------------------------------
   //
-  // This accepts as a receiver anything JSObject::SetElementsLength accepts
-  // (currently anything except for external and pixel arrays which means
-  // anything with elements of FixedArray type.), but currently is restricted
-  // to JSArray.
-  // Value must be a number, but only smis are accepted as the most common case.
+  // This accepts as a receiver anything JSArray::SetElementsLength accepts
+  // (currently anything except for external arrays which means anything with
+  // elements of FixedArray type).  Value must be a number, but only smis are
+  // accepted as the most common case.
 
   Label miss;
 
@@ -1452,6 +1572,13 @@
   __ GetObjectType(scratch, scratch, scratch);
   __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
 
+  // Check that the array has fast properties, otherwise the length
+  // property might have been redefined.
+  __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
+  __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&miss, eq, scratch, Operand(at));
+
   // Check that value is a smi.
   __ JumpIfNotSmi(value, &miss);
 
@@ -1521,11 +1648,9 @@
     case Token::LT:
       return lt;
     case Token::GT:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return lt;
+      return gt;
     case Token::LTE:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return ge;
+      return le;
     case Token::GTE:
       return ge;
     default:
@@ -1545,6 +1670,9 @@
     rewritten = stub.GetCode();
   } else {
     ICCompareStub stub(op_, state);
+    if (state == KNOWN_OBJECTS) {
+      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+    }
     rewritten = stub.GetCode();
   }
   set_target(*rewritten);
@@ -1572,7 +1700,8 @@
  // If the instruction following the call is not an andi at, rx, #yyy, nothing
  // was inlined.
   Instr instr = Assembler::instr_at(andi_instruction_address);
-  if (!Assembler::IsAndImmediate(instr)) {
+  if (!(Assembler::IsAndImmediate(instr) &&
+        Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
     return;
   }
 
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
new file mode 100644
index 0000000..6628d8e
--- /dev/null
+++ b/src/mips/lithium-codegen-mips.cc
@@ -0,0 +1,5073 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "mips/lithium-codegen-mips.h"
+#include "mips/lithium-gap-resolver-mips.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deopt_mode_(mode) { }
+  virtual ~SafepointGenerator() { }
+
+  virtual void BeforeCall(int call_size) const { }
+
+  virtual void AfterCall() const {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  HPhase phase("Z_Code generation", chunk());
+  ASSERT(is_unused());
+  status_ = GENERATING;
+  CpuFeatures::Scope scope(FPU);
+
+  CodeStub::GenerateFPStubs();
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  ASSERT(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
+    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+void LCodeGen::Comment(const char* format, ...) {
+  if (!FLAG_code_comments) return;
+  char buffer[4 * KB];
+  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+  va_list arguments;
+  va_start(arguments, format);
+  builder.AddFormattedList(format, arguments);
+  va_end(arguments);
+
+  // Copy the string before recording it in the assembler to avoid
+  // issues when the stack allocated buffer goes out of scope.
+  size_t length = builder.position();
+  Vector<char> copy = Vector<char>::New(length + 1);
+  memcpy(copy.start(), builder.Finalize(), copy.length());
+  masm()->RecordComment(copy.start());
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  ASSERT(is_generating());
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ stop("stop_at");
+  }
+#endif
+
+  // a1: Callee's JS function.
+  // cp: Callee's context.
+  // fp: Caller's frame pointer.
+  // ra: Caller's pc.
+
+  // Strict mode functions and builtins need to replace the receiver
+  // with undefined when called as functions (without an explicit
+  // receiver object). t1 is zero for method calls and non-zero for
+  // function calls.
+  if (!info_->is_classic_mode() || info_->is_native()) {
+    Label ok;
+    __ Branch(&ok, eq, t1, Operand(zero_reg));
+
+    int receiver_offset = scope()->num_parameters() * kPointerSize;
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ sw(a2, MemOperand(sp, receiver_offset));
+    __ bind(&ok);
+  }
+
+  __ Push(ra, fp, cp, a1);
+  __ Addu(fp, sp, Operand(2 * kPointerSize));  // Adj. FP to point to saved FP.
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
+      __ li(a0, Operand(slots));
+      __ li(a2, Operand(kSlotsZapValue));
+      Label loop;
+      __ bind(&loop);
+      __ push(a2);
+      __ Subu(a0, a0, 1);
+      __ Branch(&loop, ne, a0, Operand(zero_reg));
+    } else {
+      __ Subu(sp, sp, Operand(slots * kPointerSize));
+    }
+  }
+
+  // Possibly allocate a local context.
+  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    Comment(";;; Allocate local context");
+    // Argument to NewContext is the function, which is in a1.
+    __ push(a1);
+    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(heap_slots);
+      __ CallStub(&stub);
+    } else {
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
+    }
+    RecordSafepoint(Safepoint::kNoLazyDeopt);
+    // Context is returned in both v0 and cp.  It replaces the context
+    // passed to us.  It's saved on the stack and kept live in cp.
+    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ lw(a0, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextOperand(cp, var->index());
+        __ sw(a0, target);
+        // Update the write barrier. This clobbers a3 and a0.
+        __ RecordWriteContextSlot(
+            cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  // Trace the call.
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+  EnsureSpaceForLazyDeopt();
+  return !is_aborted();
+}
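+// Frame layout established by GeneratePrologue above (a sketch, byte
+// offsets for MIPS32 with kPointerSize == 4):
+//   [fp + 4]  saved ra
+//   [fp + 0]  caller's fp
+//   [fp - 4]  context (cp)
+//   [fp - 8]  function (a1)
+//   [fp - 12] first spill slot, matching ToMemOperand() below.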
+
+
+bool LCodeGen::GenerateBody() {
+  ASSERT(is_generating());
+  bool emit_instructions = true;
+  for (current_instruction_ = 0;
+       !is_aborted() && current_instruction_ < instructions_->length();
+       current_instruction_++) {
+    LInstruction* instr = instructions_->at(current_instruction_);
+    if (instr->IsLabel()) {
+      LLabel* label = LLabel::cast(instr);
+      emit_instructions = !label->HasReplacement();
+    }
+
+    if (emit_instructions) {
+      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+      instr->CompileToNative(this);
+    }
+  }
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  ASSERT(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+      __ bind(code->entry());
+      Comment(";;; Deferred code @%d: %s.",
+              code->instruction_index(),
+              code->instr()->Mnemonic());
+      code->Generate();
+      __ jmp(code->exit());
+    }
+  }
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+  // TODO(plind): not clear that this will have an advantage for MIPS.
+  // Skipping it for now. Raised issue #100 for this.
+  Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
+  return false;
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  ASSERT(is_done());
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::FromAllocationIndex(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+  return DoubleRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  ASSERT(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+  if (op->IsRegister()) {
+    return ToRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
+    } else if (r.IsDouble()) {
+      Abort("EmitLoadRegister: Unsupported double immediate.");
+    } else {
+      ASSERT(r.IsTagged());
+      if (literal->IsSmi()) {
+        __ li(scratch, Operand(literal));
+      } else {
+        __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
+      }
+    }
+    return scratch;
+  } else if (op->IsStackSlot() || op->IsArgument()) {
+    __ lw(scratch, ToMemOperand(op));
+    return scratch;
+  }
+  UNREACHABLE();
+  return scratch;
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  ASSERT(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+                                                FloatRegister flt_scratch,
+                                                DoubleRegister dbl_scratch) {
+  if (op->IsDoubleRegister()) {
+    return ToDoubleRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
+      __ mtc1(at, flt_scratch);
+      __ cvt_d_w(dbl_scratch, flt_scratch);
+      return dbl_scratch;
+    } else if (r.IsDouble()) {
+      Abort("unsupported double immediate");
+    } else if (r.IsTagged()) {
+      Abort("unsupported tagged immediate");
+    }
+  } else if (op->IsStackSlot() || op->IsArgument()) {
+    MemOperand mem_op = ToMemOperand(op);
+    __ ldc1(dbl_scratch, mem_op);
+    return dbl_scratch;
+  }
+  UNREACHABLE();
+  return dbl_scratch;
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  Handle<Object> literal = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+  return literal;
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
+}
+
+
+int LCodeGen::ToInteger32(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
+  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
+      value->Number());
+  return static_cast<int32_t>(value->Number());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  return value->Number();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      return Operand(static_cast<int32_t>(literal->Number()));
+    } else if (r.IsDouble()) {
+      Abort("ToOperand Unsupported double immediate.");
+    }
+    ASSERT(r.IsTagged());
+    return Operand(literal);
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort("ToOperand IsDoubleRegister unimplemented");
+    return Operand(0);
+  }
+  // Stack slots not implemented, use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand(0);
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  ASSERT(!op->IsRegister());
+  ASSERT(!op->IsDoubleRegister());
+  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, and
+    // context in the fixed part of the frame.
+    return MemOperand(fp, -(index + 3) * kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address.
+    return MemOperand(fp, -(index - 1) * kPointerSize);
+  }
+}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+  ASSERT(op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, context,
+    // and the first word of the double in the fixed part of the frame.
+    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address and the first word of
+    // the double.
+    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
+  }
+}
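+// Worked example of the slot arithmetic above, assuming kPointerSize == 4:
+//   spill slot, index 0:   MemOperand(fp, -(0 + 3) * 4)  == [fp - 12]
+//   parameter,  index -1:  MemOperand(fp, -(-1 - 1) * 4) == [fp + 8]
+// ToHighMemOperand() simply adds kPointerSize to address the upper word of
+// a double stored in two consecutive slots.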
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->values()->length();
+  // The output frame height does not include the parameters.
+  int height = translation_size - environment->parameter_count();
+
+  WriteTranslation(environment->outer(), translation);
+  int closure_id = DefineDeoptimizationLiteral(environment->closure());
+  switch (environment->frame_type()) {
+    case JS_FUNCTION:
+      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+      break;
+    case JS_CONSTRUCT:
+      translation->BeginConstructStubFrame(closure_id, translation_size);
+      break;
+    case ARGUMENTS_ADAPTOR:
+      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    // spilled_registers_ and spilled_double_registers_ are either
+    // both NULL or both set.
+    if (environment->spilled_registers() != NULL && value != NULL) {
+      if (value->IsRegister() &&
+          environment->spilled_registers()[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        AddToTranslation(translation,
+                         environment->spilled_registers()[value->index()],
+                         environment->HasTaggedValueAt(i));
+      } else if (
+          value->IsDoubleRegister() &&
+          environment->spilled_double_registers()[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        AddToTranslation(
+            translation,
+            environment->spilled_double_registers()[value->index()],
+            false);
+      }
+    }
+
+    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+  }
+}
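+// Example of the recursion above (counts only): for an environment holding
+// 5 values of which 2 are incoming parameters, translation_size is 5 and
+// height is 3. Outer environments are written first, so the outermost frame
+// always leads the translation.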
+
+
+void LCodeGen::AddToTranslation(Translation* translation,
+                                LOperand* op,
+                                bool is_tagged) {
+  if (op == NULL) {
+    // TODO(twuerthinger): Introduce marker operands to indicate that this value
+    // is not present and must be reconstructed by the deoptimizer. Currently
+    // this is only used for the arguments object.
+    translation->StoreArgumentsObject();
+  } else if (op->IsStackSlot()) {
+    if (is_tagged) {
+      translation->StoreStackSlot(op->index());
+    } else {
+      translation->StoreInt32StackSlot(op->index());
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsArgument()) {
+    ASSERT(is_tagged);
+    int src_index = GetStackSlotCount() + op->index();
+    translation->StoreStackSlot(src_index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(literal);
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  ASSERT(instr != NULL);
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  __ Call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr) {
+  ASSERT(instr != NULL);
+  LPointerMap* pointers = instr->pointer_map();
+  ASSERT(pointers != NULL);
+  RecordPosition(pointers->position());
+
+  __ CallRuntime(function, num_arguments);
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr) {
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count);
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc,
+                            LEnvironment* environment,
+                            Register src1,
+                            const Operand& src2) {
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  ASSERT(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  if (entry == NULL) {
+    Abort("bailout was not prepared");
+    return;
+  }
+
+  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on MIPS.
+
+  if (FLAG_deopt_every_n_times == 1 &&
+      info_->shared_info()->opt_count() == id) {
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+    return;
+  }
+
+  if (FLAG_trap_on_deopt) {
+    Label skip;
+    if (cc != al) {
+      __ Branch(&skip, NegateCondition(cc), src1, src2);
+    }
+    __ stop("trap_on_deopt");
+    __ bind(&skip);
+  }
+
+  // TODO(plind): The ARM port is a little different here, due to its
+  // deopt jump table, which is not used on MIPS yet.
+  __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+  Handle<DeoptimizationInputData> data =
+      factory()->NewDeoptimizationInputData(length, TENURED);
+
+  Handle<ByteArray> translations = translations_.CreateByteArray();
+  data->SetTranslationByteArray(*translations);
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+  Handle<FixedArray> literals =
+      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+  for (int i = 0; i < deoptimization_literals_.length(); i++) {
+    literals->set(i, *deoptimization_literals_[i]);
+  }
+  data->SetLiteralArray(*literals);
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, Smi::FromInt(env->ast_id()));
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+    data->SetPc(i, Smi::FromInt(env->pc_offset()));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal);
+  return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  ASSERT(deoptimization_literals_.length() == 0);
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures =
+      chunk()->inlined_closures();
+
+  for (int i = 0, length = inlined_closures->length();
+       i < length;
+       i++) {
+    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  }
+
+  inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    Safepoint::DeoptMode deopt_mode) {
+  ASSERT(expected_safepoint_kind_ == kind);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+      kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
+    }
+  }
+  if (kind & Safepoint::kWithRegisters) {
+    // Register cp always contains a pointer to the context.
+    safepoint.DefinePointerRegister(cp);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+  LPointerMap empty_pointers(RelocInfo::kNoPosition);
+  RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+    LPointerMap* pointers,
+    int arguments,
+    Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordPosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  if (label->is_loop_header()) {
+    Comment(";;; B%d - LOOP entry", label->block_id());
+  } else {
+    Comment(";;; B%d", label->block_id());
+  }
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpConstructResult: {
+      RegExpConstructResultStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::NumberToString: {
+      NumberToStringStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringAdd: {
+      StringAddStub stub(NO_STRING_ADD_FLAGS);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringCompare: {
+      StringCompareStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::TranscendentalCache: {
+      __ lw(a0, MemOperand(sp, 0));
+      TranscendentalCacheStub stub(instr->transcendental_type(),
+                                   TranscendentalCacheStub::TAGGED);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  Register scratch = scratch0();
+  const Register left = ToRegister(instr->InputAt(0));
+  const Register result = ToRegister(instr->result());
+
+  Label done;
+
+  if (instr->hydrogen()->HasPowerOf2Divisor()) {
+    Register scratch = scratch0();
+    ASSERT(!left.is(scratch));
+    __ mov(scratch, left);
+    int32_t p2constant = HConstant::cast(
+        instr->hydrogen()->right())->Integer32Value();
+    ASSERT(p2constant != 0);
+    // Result always takes the sign of the dividend (left).
+    p2constant = abs(p2constant);
+
+    Label positive_dividend;
+    __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
+    __ subu(result, zero_reg, left);
+    __ And(result, result, p2constant - 1);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+    }
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ subu(result, zero_reg, result);
+    __ bind(&positive_dividend);
+    __ And(result, scratch, p2constant - 1);
+  } else {
+    // div runs in the background while we check for special cases.
+    Register right = EmitLoadRegister(instr->InputAt(1), scratch);
+    __ div(left, right);
+
+    // Check for x % 0.
+    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+      DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+    }
+
+    __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
+    __ mfhi(result);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+    }
+  }
+  __ bind(&done);
+}
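+// What the power-of-2 branch above computes, as a sketch (hypothetical
+// helper name, not generated code):
+//   static int32_t ModPowerOf2(int32_t left, int32_t p2constant) {
+//     int32_t mask = p2constant - 1;          // p2constant == abs(divisor).
+//     return left >= 0 ? (left & mask)        // e.g.  13 % 8 ->  5
+//                      : -((-left) & mask);   // e.g. -13 % 8 -> -5
+//   }
+// The result keeps the dividend's sign, matching JS '%' semantics.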
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+  const Register left = ToRegister(instr->InputAt(0));
+  const Register right = ToRegister(instr->InputAt(1));
+  const Register result = ToRegister(instr->result());
+
+  // On MIPS div is asynchronous - it will run in the background while we
+  // check for special cases.
+  __ div(left, right);
+
+  // Check for x / 0.
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label left_not_zero;
+    __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
+    DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (-kMinInt / -1).
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    Label left_not_min_int;
+    __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
+    DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
+    __ bind(&left_not_min_int);
+  }
+
+  __ mfhi(result);
+  DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+  __ mflo(result);
+}
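+// Note on the final check above: the optimized code only handles exact
+// integer quotients, so a non-zero remainder (read from hi via mfhi)
+// deoptimizes; e.g. 7 / 2 bails out and the fractional result is produced
+// by deoptimized code instead.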
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+  // Note that result may alias left.
+  Register left = ToRegister(instr->InputAt(0));
+  LOperand* right_op = instr->InputAt(1);
+
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  bool bailout_on_minus_zero =
+    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+  if (right_op->IsConstantOperand() && !can_overflow) {
+    // Use optimized code for specific constants.
+    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+    if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately below.
+      // If the constant is negative and left is zero, the result should be -0.
+      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+    }
+
+    switch (constant) {
+      case -1:
+        __ Subu(result, zero_reg, left);
+        break;
+      case 0:
+        if (bailout_on_minus_zero) {
+          // If left is strictly negative and the constant is zero, the
+          // result is -0. Deoptimize if required, otherwise return 0.
+          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
+        }
+        __ mov(result, zero_reg);
+        break;
+      case 1:
+        // Nothing to do.
+        __ Move(result, left);
+        break;
+      default:
+        // Multiplying by powers of two and powers of two plus or minus
+        // one can be done faster with shifted operands.
+        // For other constants we emit standard code.
+        int32_t mask = constant >> 31;
+        uint32_t constant_abs = (constant + mask) ^ mask;
+
+        if (IsPowerOf2(constant_abs) ||
+            IsPowerOf2(constant_abs - 1) ||
+            IsPowerOf2(constant_abs + 1)) {
+          if (IsPowerOf2(constant_abs)) {
+            int32_t shift = WhichPowerOf2(constant_abs);
+            __ sll(result, left, shift);
+          } else if (IsPowerOf2(constant_abs - 1)) {
+            int32_t shift = WhichPowerOf2(constant_abs - 1);
+            __ sll(result, left, shift);
+            __ Addu(result, result, left);
+          } else if (IsPowerOf2(constant_abs + 1)) {
+            int32_t shift = WhichPowerOf2(constant_abs + 1);
+            __ sll(result, left, shift);
+            __ Subu(result, result, left);
+          }
+
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) {
+            __ Subu(result, zero_reg, result);
+          }
+
+        } else {
+          // Generate standard code.
+          __ li(at, constant);
+          __ Mul(result, left, at);
+        }
+    }
+
+  } else {
+    Register right = EmitLoadRegister(right_op, scratch);
+    if (bailout_on_minus_zero) {
+      __ Or(ToRegister(instr->TempAt(0)), left, right);
+    }
+
+    if (can_overflow) {
+      // hi:lo = left * right.
+      __ mult(left, right);
+      __ mfhi(scratch);
+      __ mflo(result);
+      __ sra(at, result, 31);
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+    } else {
+      __ Mul(result, left, right);
+    }
+
+    if (bailout_on_minus_zero) {
+      // Bail out if the result is supposed to be negative zero.
+      Label done;
+      __ Branch(&done, ne, result, Operand(zero_reg));
+      DeoptimizeIf(lt,
+                   instr->environment(),
+                   ToRegister(instr->TempAt(0)),
+                   Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
+}
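+// Examples of the strength reduction above (constants are illustrative):
+//   x * 8  ->  sll result, x, 3
+//   x * 5  ->  sll result, x, 2;  addu result, result, x
+//   x * 7  ->  sll result, x, 3;  subu result, result, x
+//   x * -5 ->  as x * 5, then subu result, zero_reg, result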
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left_op = instr->InputAt(0);
+  LOperand* right_op = instr->InputAt(1);
+  ASSERT(left_op->IsRegister());
+  Register left = ToRegister(left_op);
+  Register result = ToRegister(instr->result());
+  Operand right(no_reg);
+
+  if (right_op->IsStackSlot() || right_op->IsArgument()) {
+    right = Operand(EmitLoadRegister(right_op, at));
+  } else {
+    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+    right = ToOperand(right_op);
+  }
+
+  switch (instr->op()) {
+    case Token::BIT_AND:
+      __ And(result, left, right);
+      break;
+    case Token::BIT_OR:
+      __ Or(result, left, right);
+      break;
+    case Token::BIT_XOR:
+      __ Xor(result, left, right);
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+  // result may alias either of them.
+  LOperand* right_op = instr->InputAt(1);
+  Register left = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  if (right_op->IsRegister()) {
+    // No need to mask the right operand on MIPS, it is built into the variable
+    // shift instructions.
+    switch (instr->op()) {
+      case Token::SAR:
+        __ srav(result, left, ToRegister(right_op));
+        break;
+      case Token::SHR:
+        __ srlv(result, left, ToRegister(right_op));
+        if (instr->can_deopt()) {
+          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+        }
+        break;
+      case Token::SHL:
+        __ sllv(result, left, ToRegister(right_op));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    // Mask the right_op operand.
+    int value = ToInteger32(LConstantOperand::cast(right_op));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sra(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ srl(result, left, shift_count);
+        } else {
+          if (instr->can_deopt()) {
+            __ And(at, left, Operand(0x80000000));
+            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+          }
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          __ sll(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
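+// The 0x1F mask above mirrors JS shift semantics, where only the low five
+// bits of the shift count are used: a constant count of 33 behaves as 1.
+// The SHR deopt guards the case where a logical shift leaves the sign bit
+// set (only possible for a shift count of zero), since such a value is not
+// representable as an int32.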
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    if (right->IsStackSlot() || right->IsArgument()) {
+      Register right_reg = EmitLoadRegister(right, at);
+      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
+    } else {
+      ASSERT(right->IsRegister() || right->IsConstantOperand());
+      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
+    }
+  } else {  // can_overflow.
+    Register overflow = scratch0();
+    Register scratch = scratch1();
+    if (right->IsStackSlot() ||
+        right->IsArgument() ||
+        right->IsConstantOperand()) {
+      Register right_reg = EmitLoadRegister(right, scratch);
+      __ SubuAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 right_reg,
+                                 overflow);  // Reg at also used as scratch.
+    } else {
+      ASSERT(right->IsRegister());
+      // The overflow check macros do not support constant operands, so the
+      // IsConstantOperand case is handled by the previous if clause.
+      __ SubuAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 ToRegister(right),
+                                 overflow);  // Reg at also used as scratch.
+    }
+    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+  }
+}
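+// The overflow convention used above, as a sketch: SubuAndCheckForOverflow
+// leaves a negative value in 'overflow' iff the subtraction wrapped, so
+// DeoptimizeIf(lt, ..., overflow, Operand(zero_reg)) bails out exactly on
+// wrap-around, e.g. kMinInt - 1. DoAddI below follows the same pattern.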
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  ASSERT(instr->result()->IsDoubleRegister());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  double v = instr->value();
+  __ Move(result, v);
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  Handle<Object> value = instr->value();
+  if (value->IsSmi()) {
+    __ li(ToRegister(instr->result()), Operand(value));
+  } else {
+    __ LoadHeapObject(ToRegister(instr->result()),
+                      Handle<HeapObject>::cast(value));
+  }
+}
+
+
+void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register array = ToRegister(instr->InputAt(0));
+  __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
+}
+
+
+void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register array = ToRegister(instr->InputAt(0));
+  __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
+}
+
+
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+  Register result = ToRegister(instr->result());
+  Register input = ToRegister(instr->InputAt(0));
+
+  // Load map into |result|.
+  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
+  // Load the map's "bit field 2" into |result|. We only need the first byte,
+  // but the following bit field extraction takes care of that anyway.
+  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+}
+
+
+void LCodeGen::DoValueOf(LValueOf* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->TempAt(0));
+  Label done;
+
+  // If the object is a smi return the object.
+  __ Move(result, input);
+  __ JumpIfSmi(input, &done);
+
+  // If the object is not a value type, return the object.
+  __ GetObjectType(input, map, map);
+  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
+  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDateField(LDateField* instr) {
+  Register object = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->TempAt(0));
+  Smi* index = instr->index();
+  Label runtime, done;
+  ASSERT(object.is(a0));
+  ASSERT(result.is(v0));
+  ASSERT(!scratch.is(scratch0()));
+  ASSERT(!scratch.is(object));
+
+#ifdef DEBUG
+  __ AbortIfSmi(object);
+  __ GetObjectType(object, scratch, scratch);
+  __ Assert(eq, "Trying to get date field from non-date.",
+      scratch, Operand(JS_DATE_TYPE));
+#endif
+
+  if (index->value() == 0) {
+    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ li(scratch, Operand(stamp));
+      __ lw(scratch, MemOperand(scratch));
+      __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
+      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
+      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
+                                            kPointerSize * index->value()));
+      __ jmp(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2, scratch);
+    __ li(a1, Operand(index));
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  __ Nor(result, zero_reg, Operand(input));
+}
+
+
+void LCodeGen::DoThrow(LThrow* instr) {
+  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+  __ push(input_reg);
+  CallRuntime(Runtime::kThrow, 1, instr);
+
+  if (FLAG_debug_code) {
+    __ stop("Unreachable code.");
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    if (right->IsStackSlot() || right->IsArgument()) {
+      Register right_reg = EmitLoadRegister(right, at);
+      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
+    } else {
+      ASSERT(right->IsRegister() || right->IsConstantOperand());
+      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
+    }
+  } else {  // can_overflow.
+    Register overflow = scratch0();
+    Register scratch = scratch1();
+    if (right->IsStackSlot() ||
+        right->IsArgument() ||
+        right->IsConstantOperand()) {
+      Register right_reg = EmitLoadRegister(right, scratch);
+      __ AdduAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 right_reg,
+                                 overflow);  // Reg at also used as scratch.
+    } else {
+      ASSERT(right->IsRegister());
+      // The overflow-check macros do not support constant operands, so the
+      // IsConstantOperand case is handled by the previous clause instead.
+      __ AdduAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 ToRegister(right),
+                                 overflow);  // Reg at also used as scratch.
+    }
+    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+  }
+}
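+
+// Illustrative note: AdduAndCheckForOverflow leaves a negative value in
+// |overflow| iff the signed addition overflowed, which is why the deopt
+// above tests "lt" against zero_reg. One standard branch-free computation
+// of such a flag (assuming two's complement) is
+//   overflow = (left ^ result) & (right ^ result);  // sign set on overflow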
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
+  DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  switch (instr->op()) {
+    case Token::ADD:
+      __ add_d(result, left, right);
+      break;
+    case Token::SUB:
+      __ sub_d(result, left, right);
+      break;
+    case Token::MUL:
+      __ mul_d(result, left, right);
+      break;
+    case Token::DIV:
+      __ div_d(result, left, right);
+      break;
+    case Token::MOD: {
+      // Save a0-a3 on the stack.
+      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+      __ MultiPush(saved_regs);
+
+      __ PrepareCallCFunction(0, 2, scratch0());
+      __ SetCallCDoubleArguments(left, right);
+      __ CallCFunction(
+          ExternalReference::double_fp_operation(Token::MOD, isolate()),
+          0, 2);
+      // Move the result into the double result register.
+      __ GetCFunctionDoubleResult(result);
+
+      // Restore the saved registers.
+      __ MultiPop(saved_regs);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
+  ASSERT(ToRegister(instr->InputAt(1)).is(a0));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  // Other architectures use a nop here to signal that there is no inlined
+  // patchable code. MIPS does not need the nop, since our marker
+  // instruction (andi zero_reg) never occurs in normal code.
+}
+
+
+int LCodeGen::GetNextEmittedBlock(int block) {
+  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
+    LLabel* label = chunk_->GetLabel(i);
+    if (!label->HasReplacement()) return i;
+  }
+  return -1;
+}
+
+
+void LCodeGen::EmitBranch(int left_block, int right_block,
+                          Condition cc, Register src1, const Operand& src2) {
+  int next_block = GetNextEmittedBlock(current_block_);
+  right_block = chunk_->LookupDestination(right_block);
+  left_block = chunk_->LookupDestination(left_block);
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ Branch(chunk_->GetAssemblyLabel(right_block),
+              NegateCondition(cc), src1, src2);
+  } else if (right_block == next_block) {
+    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+  } else {
+    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+    __ Branch(chunk_->GetAssemblyLabel(right_block));
+  }
+}
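+
+// Illustrative example (comment only): when one target is the next block in
+// emission order, a single branch suffices and the other case falls through:
+//   EmitBranch(B_true, B_false, eq, a0, Operand(zero_reg));
+//   // B_true == next_block  =>  emit only
+//   //   Branch(label(B_false), ne, a0, Operand(zero_reg));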
+
+
+void LCodeGen::EmitBranchF(int left_block, int right_block,
+                           Condition cc, FPURegister src1, FPURegister src2) {
+  int next_block = GetNextEmittedBlock(current_block_);
+  right_block = chunk_->LookupDestination(right_block);
+  left_block = chunk_->LookupDestination(left_block);
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
+               NegateCondition(cc), src1, src2);
+  } else if (right_block == next_block) {
+    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+  } else {
+    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+    __ Branch(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsInteger32()) {
+    Register reg = ToRegister(instr->InputAt(0));
+    EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+  } else if (r.IsDouble()) {
+    DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
+    // Test the double value. Zero and NaN are false.
+    EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
+  } else {
+    ASSERT(r.IsTagged());
+    Register reg = ToRegister(instr->InputAt(0));
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      __ LoadRoot(at, Heap::kTrueValueRootIndex);
+      EmitBranch(true_block, false_block, eq, reg, Operand(at));
+    } else if (type.IsSmi()) {
+      EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+    } else {
+      Label* true_label = chunk_->GetAssemblyLabel(true_block);
+      Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+        __ Branch(false_label, eq, reg, Operand(at));
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ LoadRoot(at, Heap::kTrueValueRootIndex);
+        __ Branch(true_label, eq, reg, Operand(at));
+        __ LoadRoot(at, Heap::kFalseValueRootIndex);
+        __ Branch(false_label, eq, reg, Operand(at));
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ LoadRoot(at, Heap::kNullValueRootIndex);
+        __ Branch(false_label, eq, reg, Operand(at));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ Branch(false_label, eq, reg, Operand(zero_reg));
+        __ JumpIfSmi(reg, true_label);
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ And(at, reg, Operand(kSmiTagMask));
+        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+      }
+
+      const Register map = scratch0();
+      if (expected.NeedsMap()) {
+        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+          __ And(at, at, Operand(1 << Map::kIsUndetectable));
+          __ Branch(false_label, ne, at, Operand(zero_reg));
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
+        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
+        __ Branch(true_label, ne, at, Operand(zero_reg));
+        __ Branch(false_label);
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        DoubleRegister dbl_scratch = double_scratch0();
+        Label not_heap_number;
+        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+        __ Branch(&not_heap_number, ne, map, Operand(at));
+        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+        __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
+        // Falls through if dbl_scratch == 0.
+        __ Branch(false_label);
+        __ bind(&not_heap_number);
+      }
+
+      // We've seen something for the first time -> deopt.
+      DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+    }
+  }
+}
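+
+// In JS terms (illustrative): the tagged case above implements ToBoolean for
+// "if (x)". undefined, null, false, +/-0, NaN and "" are falsy; every other
+// value, including any non-undetectable object, is truthy. A value whose
+// type was never recorded in the feedback triggers a deopt instead.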
+
+
+void LCodeGen::EmitGoto(int block) {
+  block = chunk_->LookupDestination(block);
+  int next_block = GetNextEmittedBlock(current_block_);
+  if (block != next_block) {
+    __ jmp(chunk_->GetAssemblyLabel(block));
+  }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = kNoCondition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? lo : lt;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? hi : gt;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? ls : le;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? hs : ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  Condition cond = TokenToCondition(instr->op(), false);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block =
+      EvalComparison(instr->op(), left_val, right_val) ? true_block
+                                                       : false_block;
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Compare left and right as doubles and load the
+      // resulting flags into the normal status register.
+      FPURegister left_reg = ToDoubleRegister(left);
+      FPURegister right_reg = ToDoubleRegister(right);
+
+      // If a NaN is involved, i.e. the result is unordered,
+      // jump to false block label.
+      __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
+                 left_reg, right_reg);
+
+      EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
+    } else {
+      Register cmp_left;
+      Operand cmp_right = Operand(0);
+
+      if (right->IsConstantOperand()) {
+        cmp_left = ToRegister(left);
+        cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
+      } else if (left->IsConstantOperand()) {
+        cmp_left = ToRegister(right);
+        cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
+        // We transposed the operands. Reverse the condition.
+        cond = ReverseCondition(cond);
+      } else {
+        cmp_left = ToRegister(left);
+        cmp_right = Operand(ToRegister(right));
+      }
+
+      EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
+    }
+  }
+}
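+
+// Illustrative example: for "1 < x" the constant arrives as the left
+// operand, so the operands are swapped to "x > 1" and the condition is
+// reversed (lt -> gt) rather than negated (lt -> ge).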
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  Register right = ToRegister(instr->InputAt(1));
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  EmitBranch(true_block, false_block, eq, left, Operand(right));
+}
+
+
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  EmitBranch(true_block, false_block, eq, left,
+             Operand(instr->hydrogen()->right()));
+}
+
+
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->InputAt(0));
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  // If the expression is known to be untagged or a smi, then it's definitely
+  // not null, and it cannot be an undetectable object.
+  if (instr->hydrogen()->representation().IsSpecialization() ||
+      instr->hydrogen()->type().IsSmi()) {
+    EmitGoto(false_block);
+    return;
+  }
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ LoadRoot(at, nil_value);
+  if (instr->kind() == kStrictEquality) {
+    EmitBranch(true_block, false_block, eq, reg, Operand(at));
+  } else {
+    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
+    Label* true_label = chunk_->GetAssemblyLabel(true_block);
+    Label* false_label = chunk_->GetAssemblyLabel(false_block);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
+    __ LoadRoot(at, other_nil_value);  // In the delay slot.
+    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
+    __ JumpIfSmi(reg, false_label);  // In the delay slot.
+    // Check for undetectable objects by looking in the bit field in
+    // the map. The object has already been smi checked.
+    __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ And(scratch, scratch, 1 << Map::kIsUndetectable);
+    EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
+  }
+}
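+
+// Note on USE_DELAY_SLOT (illustrative): on MIPS the instruction after a
+// branch executes whether or not the branch is taken, so an instruction
+// placed in the delay slot must be harmless on the taken path; here the
+// slot only loads a root or starts a check that the fall-through needs.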
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+                                 Register temp1,
+                                 Register temp2,
+                                 Label* is_not_object,
+                                 Label* is_object) {
+  __ JumpIfSmi(input, is_not_object);
+
+  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+  __ Branch(is_object, eq, input, Operand(temp2));
+
+  // Load map.
+  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined.
+  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
+  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
+  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
+
+  // Load instance type and check that it is in object type range.
+  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
+  __ Branch(is_not_object,
+            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+
+  return le;
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp1 = ToRegister(instr->TempAt(0));
+  Register temp2 = scratch0();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond =
+      EmitIsObject(reg, temp1, temp2, false_label, true_label);
+
+  EmitBranch(true_block, false_block, true_cond, temp2,
+             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string) {
+  __ JumpIfSmi(input, is_not_string);
+  __ GetObjectType(input, temp1, temp1);
+
+  return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp1 = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond =
+      EmitIsString(reg, temp1, false_label);
+
+  EmitBranch(true_block, false_block, true_cond, temp1,
+             Operand(FIRST_NONSTRING_TYPE));
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+  __ And(at, input_reg, kSmiTagMask);
+  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
+  EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = ComputeCompareCondition(op);
+
+  EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  ASSERT(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return hs;
+  if (from == FIRST_TYPE) return ls;
+  UNREACHABLE();
+  return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->InputAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  __ JumpIfSmi(input, false_label);
+
+  __ GetObjectType(input, scratch, scratch);
+  EmitBranch(true_block,
+             false_block,
+             BranchCondition(instr->hydrogen()),
+             scratch,
+             Operand(TestType(instr->hydrogen())));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  if (FLAG_debug_code) {
+    __ AbortIfNotString(input);
+  }
+
+  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ lw(scratch,
+         FieldMemOperand(input, String::kHashFieldOffset));
+  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
+  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+}
+
+
+// Branches to a label or falls through with the result left in the temp
+// registers (MIPS has no condition flags). Trashes the temp registers, but
+// not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  ASSERT(!input.is(temp));
+  ASSERT(!input.is(temp2));
+  ASSERT(!temp.is(temp2));
+
+  __ JumpIfSmi(input, is_false);
+
+  if (class_name->IsEqualTo(CStrVector("Function"))) {
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+
+    __ GetObjectType(input, temp, temp2);
+    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+  } else {
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do a signed compare with the width of the type range.
+    __ GetObjectType(input, temp, temp2);
+    __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  }
+
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+  // Check if the constructor in the map is a function.
+  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ GetObjectType(temp, temp2, temp2);
+  if (class_name->IsEqualTo(CStrVector("Object"))) {
+    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
+  } else {
+    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(temp, FieldMemOperand(temp,
+                               SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is a symbol because it's a literal.
+  // The name in the constructor is a symbol because of the way the context is
+  // booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are symbols it is sufficient to use an identity
+  // comparison.
+
+  // End with the address of this class_name instance in temp register.
+  // On MIPS, the caller must do the comparison with the Handle<String>
+  // class_name.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = scratch0();
+  Register temp2 = ToRegister(instr->TempAt(0));
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+
+  EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+  int true_block = instr->true_block_id();
+  int false_block = instr->false_block_id();
+
+  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+  EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  Label true_label, done;
+  ASSERT(ToRegister(instr->InputAt(0)).is(a0));  // Object is in a0.
+  ASSERT(ToRegister(instr->InputAt(1)).is(a1));  // Function is in a1.
+  Register result = ToRegister(instr->result());
+  ASSERT(result.is(v0));
+
+  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
+  __ Branch(&true_label, eq, result, Operand(zero_reg));
+  __ li(result, Operand(factory()->false_value()));
+  __ Branch(&done);
+  __ bind(&true_label);
+  __ li(result, Operand(factory()->true_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+   public:
+    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+                                  LInstanceOfKnownGlobal* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+    }
+    virtual LInstruction* instr() { return instr_; }
+    Label* map_check() { return &map_check_; }
+
+   private:
+    LInstanceOfKnownGlobal* instr_;
+    Label map_check_;
+  };
+
+  DeferredInstanceOfKnownGlobal* deferred =
+      new DeferredInstanceOfKnownGlobal(this, instr);
+
+  Label done, false_result;
+  Register object = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+  Register result = ToRegister(instr->result());
+
+  ASSERT(object.is(a0));
+  ASSERT(result.is(v0));
+
+  // A Smi is not instance of anything.
+  __ JumpIfSmi(object, &false_result);
+
+  // This is the inlined call site instanceof cache. The two occurrences of
+  // the hole value will be patched to the last map/result pair generated by
+  // the instanceof stub.
+  Label cache_miss;
+  Register map = temp;
+  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
+
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  __ bind(deferred->map_check());  // Label for calculating code patching.
+  // We use Factory::the_hole_value() on purpose instead of loading from the
+  // root array to force relocation to be able to later patch with
+  // the cached map.
+  Handle<JSGlobalPropertyCell> cell =
+      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+  __ li(at, Operand(Handle<Object>(cell)));
+  __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+  __ Branch(&cache_miss, ne, map, Operand(at));
+  // We use Factory::the_hole_value() on purpose instead of loading from the
+  // root array to force relocation to be able to later patch
+  // with true or false.
+  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
+  __ Branch(&done);
+
+  // The inlined call site cache did not match. Check null and string before
+  // calling the deferred code.
+  __ bind(&cache_miss);
+  // Null is not instance of anything.
+  __ LoadRoot(temp, Heap::kNullValueRootIndex);
+  __ Branch(&false_result, eq, object, Operand(temp));
+
+  // String values are not instances of anything.
+  Condition cc = __ IsObjectStringType(object, temp, temp);
+  __ Branch(&false_result, cc, temp, Operand(zero_reg));
+
+  // Go to the deferred code.
+  __ Branch(deferred->entry());
+
+  __ bind(&false_result);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+  // Here result has either true or false. Deferred code also produces true or
+  // false object.
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                               Label* map_check) {
+  Register result = ToRegister(instr->result());
+  ASSERT(result.is(v0));
+
+  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kArgsInRegisters);
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kCallSiteInlineCheck);
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kReturnTrueFalseObject);
+  InstanceofStub stub(flags);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+  // Get the temp register reserved by the instruction. This needs to be t0,
+  // since its slot in the pushed safepoint register area is used to
+  // communicate the offset to the location of the map check.
+  Register temp = ToRegister(instr->TempAt(0));
+  ASSERT(temp.is(t0));
+  __ LoadHeapObject(InstanceofStub::right(), instr->function());
+  static const int kAdditionalDelta = 7;
+  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
+  Label before_push_delta;
+  __ bind(&before_push_delta);
+  {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
+    __ StoreToSafepointRegisterSlot(temp, temp);
+  }
+  CallCodeGeneric(stub.GetCode(),
+                  RelocInfo::CODE_TARGET,
+                  instr,
+                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LEnvironment* env = instr->deoptimization_environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+  // Put the result value into the result register slot and
+  // restore all registers.
+  __ StoreToSafepointRegisterSlot(result, result);
+}
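+
+// Illustrative sketch (assuming InstanceofStub's inline-check scheme): the
+// delta stored in t0's safepoint slot lets the stub locate the inlined map
+// check at a fixed distance back from the call site, roughly
+//   patch_site = return_address - delta;
+// so it can later patch the hole-valued cell and result with the cached
+// map and a true/false object.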
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // On MIPS there is no need for a "no inlined smi code" marker (nop).
+
+  Condition condition = ComputeCompareCondition(op);
+  // A minor optimization that relies on LoadRoot always emitting one
+  // instruction.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+  Label done;
+  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
+  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+  ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in v0.
+    __ push(v0);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+  __ mov(sp, fp);
+  __ Pop(ra, fp);
+  __ Addu(sp, sp, Operand(sp_delta));
+  __ Jump(ra);
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+  Register result = ToRegister(instr->result());
+  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
+  __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+  }
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(a0));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  __ li(a2, Operand(instr->name()));
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
+                                             : RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, mode, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+  Register value = ToRegister(instr->value());
+  Register cell = scratch0();
+
+  // Load the cell.
+  __ li(cell, Operand(instr->hydrogen()->cell()));
+
+  // If the cell we are storing to contains the hole it could have
+  // been deleted from the property dictionary. In that case, we need
+  // to update the property details in the property dictionary to mark
+  // it as no longer deleted.
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    // We use a temp to check the payload.
+    Register payload = ToRegister(instr->TempAt(0));
+    __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
+  }
+
+  // Store the value.
+  __ sw(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+  // Cells are always rescanned, so no write barrier here.
+}
+
+
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(a1));
+  ASSERT(ToRegister(instr->value()).is(a0));
+
+  __ li(a2, Operand(instr->name()));
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+
+  __ lw(result, ContextOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+    } else {
+      Label is_not_hole;
+      __ Branch(&is_not_hole, ne, result, Operand(at));
+      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+      __ bind(&is_not_hole);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
+  MemOperand target = ContextOperand(context, instr->slot_index());
+
+  Label skip_assignment;
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ lw(scratch, target);
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
+    } else {
+      __ Branch(&skip_assignment, ne, scratch, Operand(at));
+    }
+  }
+
+  __ sw(value, target);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context,
+                              target.offset(),
+                              value,
+                              scratch0(),
+                              kRAHasBeenSaved,
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
+  }
+
+  __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  Register object = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  if (instr->hydrogen()->is_in_object()) {
+    __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
+  } else {
+    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
+  }
+}
+
+
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+                                               Register object,
+                                               Handle<Map> type,
+                                               Handle<String> name) {
+  LookupResult lookup(isolate());
+  type->LookupInDescriptors(NULL, *name, &lookup);
+  ASSERT(lookup.IsFound() &&
+         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+  if (lookup.type() == FIELD) {
+    int index = lookup.GetLocalFieldIndexFromMap(*type);
+    int offset = index * kPointerSize;
+    if (index < 0) {
+      // Negative property indices are in-object properties, indexed
+      // from the end of the fixed part of the object.
+      __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
+    } else {
+      // Non-negative property indices are in the properties array.
+      __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+      __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
+    }
+  } else {
+    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+    __ LoadHeapObject(result, function);
+  }
+}
+
+
+void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
+  Register object = ToRegister(instr->object());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  int map_count = instr->hydrogen()->types()->length();
+  Handle<String> name = instr->hydrogen()->name();
+  if (map_count == 0) {
+    ASSERT(instr->hydrogen()->need_generic());
+    __ li(a2, Operand(name));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    Label done;
+    __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+    for (int i = 0; i < map_count - 1; ++i) {
+      Handle<Map> map = instr->hydrogen()->types()->at(i);
+      Label next;
+      __ Branch(&next, ne, scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ Branch(&done);
+      __ bind(&next);
+    }
+    Handle<Map> map = instr->hydrogen()->types()->last();
+    if (instr->hydrogen()->need_generic()) {
+      Label generic;
+      __ Branch(&generic, ne, scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ Branch(&done);
+      __ bind(&generic);
+      __ li(a2, Operand(name));
+      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+      CallCode(ic, RelocInfo::CODE_TARGET, instr);
+    } else {
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(a0));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  // Name is always in a2.
+  __ li(a2, Operand(instr->name()));
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register scratch = scratch0();
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+
+  // Check that the function really is a function. Load map into the
+  // result register.
+  __ GetObjectType(function, result, scratch);
+  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
+
+  // Make sure that the function has an instance prototype.
+  Label non_instance;
+  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
+
+  // Get the prototype or initial map from the function.
+  __ lw(result,
+         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ GetObjectType(result, scratch, scratch);
+  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+  // Get the prototype from the initial map.
+  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
+  __ Branch(&done);
+
+  // Non-instance prototype: Fetch prototype from constructor field
+  // in initial map.
+  __ bind(&non_instance);
+  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+  // All done.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadElements(LLoadElements* instr) {
+  Register result = ToRegister(instr->result());
+  Register input = ToRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+
+  __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
+  if (FLAG_debug_code) {
+    Label done, fail;
+    __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
+    __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);  // In the delay slot.
+    __ Branch(&done, eq, scratch, Operand(at));
+    // |scratch| still contains |input|'s map.
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
+    __ Ext(scratch, scratch, Map::kElementsKindShift,
+           Map::kElementsKindBitCount);
+    __ Branch(&done, eq, scratch,
+              Operand(FAST_ELEMENTS));
+    __ Branch(&fail, lt, scratch,
+              Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+    __ Branch(&done, le, scratch,
+              Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+    __ bind(&fail);
+    __ Abort("Check for fast or external elements failed.");
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadExternalArrayPointer(
+    LLoadExternalArrayPointer* instr) {
+  Register to_reg = ToRegister(instr->result());
+  Register from_reg = ToRegister(instr->InputAt(0));
+  __ lw(to_reg, FieldMemOperand(from_reg,
+                                ExternalArray::kExternalPointerOffset));
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register length = ToRegister(instr->length());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+
+  // Bail out if index is not a valid argument index. Using an unsigned
+  // comparison gets the negative-index check for free.
+
+  // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(),
+  // as is done on ARM. It would save us an instruction.
+  DeoptimizeIf(ls, instr->environment(), length, Operand(index));
+
+  // There are two words between the frame pointer and the last argument.
+  // Subtracting index from length accounts for one of them; adding one
+  // accounts for the other.
+  __ subu(length, length, index);
+  __ Addu(length, length, Operand(1));
+  __ sll(length, length, kPointerSizeLog2);
+  __ Addu(at, arguments, Operand(length));
+  __ lw(result, MemOperand(at, 0));
+}
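+
+// Illustrative: the subu/Addu/sll/lw sequence above computes
+//   result = *(arguments + (length - index + 1) * kPointerSize)
+// so argument 0 lives farthest from |arguments|, and the "+ 1" covers the
+// second word between the frame pointer and the last argument.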
+
+
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register key = EmitLoadRegister(instr->key(), scratch0());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Load the result.
+  __ sll(scratch, key, kPointerSizeLog2);  // Key indexes words.
+  __ addu(scratch, elements, scratch);
+  __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+
+  // Check for the hole value.
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+    LLoadKeyedFastDoubleElement* instr) {
+  Register elements = ToRegister(instr->elements());
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  Register key = no_reg;
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  Register scratch = scratch0();
+
+  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+
+  if (key_is_constant) {
+    __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
+            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  } else {
+    __ sll(scratch, key, shift_size);
+    __ Addu(elements, elements, Operand(scratch));
+    __ Addu(elements, elements,
+            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  }
+
+  __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+  DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+
+  __ ldc1(result, MemOperand(elements));
+}
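+
+// Illustrative note: the hole in a fast double array is a NaN with a
+// distinguished bit pattern. The lw above reads the upper 32 bits of the
+// element (offset sizeof(kHoleNanLower32) on little-endian), and comparing
+// them against kHoleNanUpper32 is enough to detect the hole.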
+
+
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+    LLoadKeyedSpecializedArrayElement* instr) {
+  Register external_pointer = ToRegister(instr->external_pointer());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ElementsKindToShiftSize(elements_kind);
+
+  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    FPURegister result = ToDoubleRegister(instr->result());
+    if (key_is_constant) {
+      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+    } else {
+      __ sll(scratch0(), key, shift_size);
+      __ Addu(scratch0(), scratch0(), external_pointer);
+    }
+
+    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+      __ lwc1(result, MemOperand(scratch0()));
+      __ cvt_d_s(result, result);
+    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS.
+      __ ldc1(result, MemOperand(scratch0()));
+    }
+  } else {
+    Register result = ToRegister(instr->result());
+    Register scratch = scratch0();
+    MemOperand mem_operand(zero_reg);
+    if (key_is_constant) {
+      mem_operand = MemOperand(external_pointer,
+                               constant_key * (1 << shift_size));
+    } else {
+      __ sll(scratch, key, shift_size);
+      __ Addu(scratch, scratch, external_pointer);
+      mem_operand = MemOperand(scratch);
+    }
+    switch (elements_kind) {
+      case EXTERNAL_BYTE_ELEMENTS:
+        __ lb(result, mem_operand);
+        break;
+      case EXTERNAL_PIXEL_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+        __ lbu(result, mem_operand);
+        break;
+      case EXTERNAL_SHORT_ELEMENTS:
+        __ lh(result, mem_operand);
+        break;
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+        __ lhu(result, mem_operand);
+        break;
+      case EXTERNAL_INT_ELEMENTS:
+        __ lw(result, mem_operand);
+        break;
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+        __ lw(result, mem_operand);
+        // TODO(danno): we could be more clever here, perhaps having a
+        // special version of the stub that detects if the overflow case
+        // actually happens, and generates code that returns a double rather
+        // than an int.
+        DeoptimizeIf(Ugreater_equal, instr->environment(),
+            result, Operand(0x80000000));
+        break;
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(a1));
+  ASSERT(ToRegister(instr->key()).is(a0));
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register scratch = scratch0();
+  Register temp = scratch1();
+  Register result = ToRegister(instr->result());
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label done, adapted;
+  __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Result is the frame pointer for the frame if not adapted and for the real
+  // frame below the adaptor frame if adapted.
+  __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
+  __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register elem = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
+  __ Branch(&done, eq, fp, Operand(elem));
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(result,
+        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register scratch = scratch0();
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, receiver_ok;
+
+  // Do not transform the receiver to object for strict mode
+  // functions.
+  __ lw(scratch,
+         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(scratch,
+         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+
+  // Do not transform the receiver to object for builtins.
+  int32_t strict_mode_function_mask =
+      1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
+  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
+  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+  __ Branch(&global_object, eq, receiver, Operand(scratch));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Branch(&global_object, eq, receiver, Operand(scratch));
+
+  // Deoptimize if the receiver is not a JS object.
+  __ And(scratch, receiver, Operand(kSmiTagMask));
+  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+
+  __ GetObjectType(receiver, scratch, scratch);
+  DeoptimizeIf(lt, instr->environment(),
+               scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
+  __ Branch(&receiver_ok);
+
+  __ bind(&global_object);
+  __ lw(receiver, GlobalObjectOperand());
+  __ lw(receiver,
+         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+  __ bind(&receiver_ok);
+}
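+
+// In JS terms (illustrative): f.apply(null, args) gives a sloppy-mode,
+// non-native f the global receiver as |this|, while strict-mode functions
+// and builtins see null unchanged; the mask test above selects the path.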
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  ASSERT(receiver.is(a0));  // Used for parameter count.
+  ASSERT(function.is(a1));  // Required by InvokeFunction.
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ push(receiver);
+  __ Move(receiver, length);
+  // The arguments start one pointer size past the elements pointer.
+  __ Addu(elements, elements, Operand(1 * kPointerSize));
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
+  __ sll(scratch, length, 2);
+  __ bind(&loop);
+  __ Addu(scratch, elements, scratch);
+  __ lw(scratch, MemOperand(scratch));
+  __ push(scratch);
+  __ Subu(length, length, Operand(1));
+  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
+  __ sll(scratch, length, 2);
+
+  __ bind(&invoke);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  // The number of arguments is stored in receiver which is a0, as expected
+  // by InvokeFunction.
+  ParameterCount actual(receiver);
+  __ InvokeFunction(function, actual, CALL_FUNCTION,
+                    safepoint_generator, CALL_AS_METHOD);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->InputAt(0);
+  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+    Abort("DoPushArgument not implemented for double type.");
+  } else {
+    Register argument_reg = EmitLoadRegister(argument, at);
+    __ push(argument_reg);
+  }
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadHeapObject(result, instr->hydrogen()->closure());
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, cp);
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ lw(result,
+        MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
+  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
+  // The context is the first argument.
+  __ Push(cp, scratch0(), scratch1());
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+}
+
+
+void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+  Register global = ToRegister(instr->global());
+  Register result = ToRegister(instr->result());
+  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int arity,
+                                 LInstruction* instr,
+                                 CallKind call_kind) {
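+  // A direct call can skip the arguments adaptor frame when the callee
+  // does not require argument adaptation or when the actual argument
+  // count matches its formal parameter count.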
+  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+      function->shared()->formal_parameter_count() == arity;
+
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+
+  if (can_invoke_directly) {
+    __ LoadHeapObject(a1, function);
+    // Change context if needed.
+    bool change_context =
+        (info()->closure()->context() != function->context()) ||
+        scope()->contains_with() ||
+        (scope()->num_heap_slots() > 0);
+    if (change_context) {
+      __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    }
+
+    // Set a0 to the arguments count if adaptation is not needed. Assumes
+    // that a0 is available to write to at this point.
+    if (!function->NeedsArgumentsAdaption()) {
+      __ li(a0, Operand(arity));
+    }
+
+    // Invoke function.
+    __ SetCallKind(t1, call_kind);
+    __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+    __ Call(at);
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+  }
+
+  // Restore context.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  __ mov(a0, v0);
+  CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Deoptimize if not a heap number.
+  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+
+  Label done;
+  Register exponent = scratch0();
+  scratch = no_reg;
+  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it.
+  __ Move(result, input);
+  __ And(at, exponent, Operand(HeapNumber::kSignMask));
+  __ Branch(&done, eq, at, Operand(zero_reg));
+
+  // Input is negative. Reverse its sign.
+  // Preserve the value of all registers.
+  {
+    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+    // Registers were saved at the safepoint, so we can use
+    // many scratch registers.
+    Register tmp1 = input.is(a1) ? a0 : a1;
+    Register tmp2 = input.is(a2) ? a0 : a2;
+    Register tmp3 = input.is(a3) ? a0 : a3;
+    Register tmp4 = input.is(t0) ? a0 : t0;
+
+    // exponent: floating point exponent value.
+
+    Label allocated, slow;
+    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+    __ Branch(&allocated);
+
+    // Slow case: Call the runtime system to do the number allocation.
+    __ bind(&slow);
+
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+    // Set the pointer to the new heap number in tmp.
+    if (!tmp1.is(v0))
+      __ mov(tmp1, v0);
+    // Restore input_reg after call to runtime.
+    __ LoadFromSafepointRegisterSlot(input, input);
+    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+    __ bind(&allocated);
+    // exponent: floating point exponent value.
+    // tmp1: allocated heap number.
+    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
+    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+    __ StoreToSafepointRegisterSlot(tmp1, result);
+  }
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  Label done;
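+  // The mov below executes in the branch delay slot, so result is set
+  // whether or not the branch is taken.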
+  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
+  __ mov(result, input);
+  ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
+  __ subu(result, zero_reg, input);
+  // Overflow if result is still negative, i.e. 0x80000000.
+  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+                                    LUnaryMathOperation* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LUnaryMathOperation* instr_;
+  };
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsDouble()) {
+    FPURegister input = ToDoubleRegister(instr->InputAt(0));
+    FPURegister result = ToDoubleRegister(instr->result());
+    __ abs_d(result, input);
+  } else if (r.IsInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else {
+    // Representation is tagged.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input = ToRegister(instr->InputAt(0));
+    // Smi check.
+    __ JumpIfNotSmi(input, deferred->entry());
+    // If smi, handle it directly.
+    EmitIntegerMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  FPURegister single_scratch = double_scratch0().low();
+  Register scratch1 = scratch0();
+  Register except_flag = ToRegister(instr->TempAt(0));
+
+  __ EmitFPUTruncate(kRoundToMinusInf,
+                     single_scratch,
+                     input,
+                     scratch1,
+                     except_flag);
+
+  // Deopt if the operation did not succeed.
+  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+  // Load the result.
+  __ mfc1(result, single_scratch);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    Label done;
+    __ Branch(&done, ne, result, Operand(zero_reg));
+    __ mfc1(scratch1, input.high());
+    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  Label done, check_sign_on_zero;
+
+  // Extract exponent bits.
+  __ mfc1(result, input.high());
+  __ Ext(scratch,
+         result,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+
+  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
+  Label skip1;
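+  // An exponent of at most kExponentBias - 2 means an unbiased exponent
+  // of at most -2, i.e. |input| < 0.5.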
+  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
+  __ mov(result, zero_reg);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ Branch(&check_sign_on_zero);
+  } else {
+    __ Branch(&done);
+  }
+  __ bind(&skip1);
+
+  // The following conversion will not work with numbers
+  // outside of ]-2^32, 2^32[.
+  DeoptimizeIf(ge, instr->environment(), scratch,
+               Operand(HeapNumber::kExponentBias + 32));
+
+  // Save the original sign for later comparison.
+  __ And(scratch, result, Operand(HeapNumber::kSignMask));
+
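+  // Rounding is implemented as floor(input + 0.5); the sign comparison
+  // below detects sums that crossed zero, for which the result is -0.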
+  __ Move(double_scratch0(), 0.5);
+  __ add_d(double_scratch0(), input, double_scratch0());
+
+  // Check the sign of the result: if the sign changed, the input
+  // value was in [-0.5, 0[ and the result should be -0.
+  __ mfc1(result, double_scratch0().high());
+  __ Xor(result, result, Operand(scratch));
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // The ARM version uses 'mi' here; the MIPS equivalent is 'lt'.
+    DeoptimizeIf(lt, instr->environment(), result,
+                 Operand(zero_reg));
+  } else {
+    Label skip2;
+    // The ARM version uses 'mi' here; the MIPS equivalent is 'lt',
+    // which negated gives the 'ge' branch below.
+    __ Branch(&skip2, ge, result, Operand(zero_reg));
+    __ mov(result, zero_reg);
+    __ Branch(&done);
+    __ bind(&skip2);
+  }
+
+  Register except_flag = scratch;
+
+  __ EmitFPUTruncate(kRoundToMinusInf,
+                     double_scratch0().low(),
+                     double_scratch0(),
+                     result,
+                     except_flag);
+
+  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+  __ mfc1(result, double_scratch0().low());
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    __ Branch(&done, ne, result, Operand(zero_reg));
+    __ bind(&check_sign_on_zero);
+    __ mfc1(scratch, input.high());
+    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ sqrt_d(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
+
+  ASSERT(!input.is(result));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done;
+  __ Move(temp, -V8_INFINITY);
+  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
+  // Set result to +Infinity in the delay slot.
+  // result is overwritten below if the branch is not taken.
+  __ neg_d(result, temp);
+
+  // Add +0 to convert -0 to +0.
+  __ add_d(result, input, kDoubleRegZero);
+  __ sqrt_d(result, result);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+         ToDoubleRegister(instr->InputAt(1)).is(f4));
+  ASSERT(!instr->InputAt(1)->IsRegister() ||
+         ToRegister(instr->InputAt(1)).is(a2));
+  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
+  ASSERT(ToDoubleRegister(instr->result()).is(f0));
+
+  if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(a2, &no_deopt);
+    __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
+    // Load the heap number map into 'at' for the comparison below;
+    // deoptimize unless the exponent is a heap number.
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
+    __ bind(&no_deopt);
+    MathPowStub stub(MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    ASSERT(exponent_type.IsDouble());
+    MathPowStub stub(MathPowStub::DOUBLE);
+    __ CallStub(&stub);
+  }
+}
+
+
+void LCodeGen::DoRandom(LRandom* instr) {
+  class DeferredDoRandom: public LDeferredCode {
+   public:
+    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LRandom* instr_;
+  };
+
+  DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+  // Having marked this instruction as a call we can use any
+  // registers.
+  ASSERT(ToDoubleRegister(instr->result()).is(f0));
+  ASSERT(ToRegister(instr->InputAt(0)).is(a0));
+
+  static const int kSeedSize = sizeof(uint32_t);
+  STATIC_ASSERT(kPointerSize == kSeedSize);
+
+  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+  static const int kRandomSeedOffset =
+      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
+  __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
+  // a2: FixedArray of the global context's random seeds
+
+  // Load state[0].
+  __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
+  __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
+  // Load state[1].
+  __ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
+  // a1: state[0].
+  // a0: state[1].
+
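+  // The two updates below are 16-bit multiply-with-carry generators;
+  // their outputs are combined into a single 32-bit random bit pattern.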
+  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
+  __ And(a3, a1, Operand(0xFFFF));
+  __ li(t0, Operand(18273));
+  __ mul(a3, a3, t0);
+  __ srl(a1, a1, 16);
+  __ Addu(a1, a3, a1);
+  // Save state[0].
+  __ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
+
+  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
+  __ And(a3, a0, Operand(0xFFFF));
+  __ li(t0, Operand(36969));
+  __ mul(a3, a3, t0);
+  __ srl(a0, a0, 16);
+  __ Addu(a0, a3, a0);
+  // Save state[1].
+  __ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
+
+  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
+  __ And(a0, a0, Operand(0x3FFFF));
+  __ sll(a1, a1, 14);
+  __ Addu(v0, a0, a1);
+
+  __ bind(deferred->exit());
+
+  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+  __ li(a2, Operand(0x41300000));
+  // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
+  __ Move(f12, v0, a2);
+  // Move 0x4130000000000000 to FPU.
+  __ Move(f14, zero_reg, a2);
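+  // f12 = 2^20 + v0 * 2^-32 and f14 = 2^20, so the difference is
+  // v0 * 2^-32, uniformly distributed in [0, 1).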
+  // Subtract to get the result.
+  __ sub_d(f0, f12, f14);
+}
+
+
+void LCodeGen::DoDeferredRandom(LRandom* instr) {
+  __ PrepareCallCFunction(1, scratch0());
+  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+  // Return value is in v0.
+}
+
+
+void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathAbs:
+      DoMathAbs(instr);
+      break;
+    case kMathFloor:
+      DoMathFloor(instr);
+      break;
+    case kMathRound:
+      DoMathRound(instr);
+      break;
+    case kMathSqrt:
+      DoMathSqrt(instr);
+      break;
+    case kMathPowHalf:
+      DoMathPowHalf(instr);
+      break;
+    case kMathCos:
+      DoMathCos(instr);
+      break;
+    case kMathSin:
+      DoMathSin(instr);
+      break;
+    case kMathTan:
+      DoMathTan(instr);
+      break;
+    case kMathLog:
+      DoMathLog(instr);
+      break;
+    default:
+      Abort("Unimplemented type of LUnaryMathOperation.");
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(a1));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallNamed(LCallNamed* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
+  __ li(a2, Operand(instr->name()));
+  CallCode(ic, mode, instr);
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(a1));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
+  __ li(a2, Operand(instr->name()));
+  CallCode(ic, mode, instr);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+  __ li(a0, Operand(instr->arity()));
+  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Register object = ToRegister(instr->object());
+  Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
+  int offset = instr->offset();
+
+  ASSERT(!object.is(value));
+
+  if (!instr->transition().is_null()) {
+    __ li(scratch, Operand(instr->transition()));
+    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  }
+
+  // Do the store.
+  HType type = instr->hydrogen()->value()->type();
+  SmiCheck check_needed =
+      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+  if (instr->is_in_object()) {
+    __ sw(value, FieldMemOperand(object, offset));
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
+      // Update the write barrier for the object for in-object properties.
+      __ RecordWriteField(object,
+                          offset,
+                          value,
+                          scratch,
+                          kRAHasBeenSaved,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
+    }
+  } else {
+    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    __ sw(value, FieldMemOperand(scratch, offset));
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
+      // Update the write barrier for the properties array.
+      // object is used as a scratch register.
+      __ RecordWriteField(scratch,
+                          offset,
+                          value,
+                          object,
+                          kRAHasBeenSaved,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(a1));
+  ASSERT(ToRegister(instr->value()).is(a0));
+
+  // Name is always in a2.
+  __ li(a2, Operand(instr->name()));
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  DeoptimizeIf(hs,
+               instr->environment(),
+               ToRegister(instr->index()),
+               Operand(ToRegister(instr->length())));
+}
+
+
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->object());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+  Register scratch = scratch0();
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    int offset =
+        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+    __ sw(value, FieldMemOperand(elements, offset));
+  } else {
+    __ sll(scratch, key, kPointerSizeLog2);
+    __ addu(scratch, elements, scratch);
+    __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  }
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    // Compute address of modified element and store it into key register.
+    __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   kRAHasBeenSaved,
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+    LStoreKeyedFastDoubleElement* instr) {
+  DoubleRegister value = ToDoubleRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = no_reg;
+  Register scratch = scratch0();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  Label not_nan;
+
+  // Calculate the effective address of the slot in the array to store the
+  // double value.
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  if (key_is_constant) {
+    __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
+            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  } else {
+    __ sll(scratch, key, shift_size);
+    __ Addu(scratch, elements, Operand(scratch));
+    __ Addu(scratch, scratch,
+            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  }
+
+  Label is_nan;
+  // Check for NaN. All NaNs must be canonicalized.
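+  // Comparing value with itself signals unordered exactly when value is
+  // NaN.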
+  __ BranchF(NULL, &is_nan, eq, value, value);
+  __ Branch(&not_nan);
+
+  // Load the canonical NaN only if the comparison above signalled
+  // unordered, i.e. the value is NaN.
+  __ bind(&is_nan);
+  __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+
+  __ bind(&not_nan);
+  __ sdc1(value, MemOperand(scratch));
+}
+
+
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+    LStoreKeyedSpecializedArrayElement* instr) {
+  Register external_pointer = ToRegister(instr->external_pointer());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ElementsKindToShiftSize(elements_kind);
+
+  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    FPURegister value(ToDoubleRegister(instr->value()));
+    if (key_is_constant) {
+      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+    } else {
+      __ sll(scratch0(), key, shift_size);
+      __ Addu(scratch0(), scratch0(), external_pointer);
+    }
+
+    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+      __ cvt_s_d(double_scratch0(), value);
+      __ swc1(double_scratch0(), MemOperand(scratch0()));
+    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+      __ sdc1(value, MemOperand(scratch0()));
+    }
+  } else {
+    Register value(ToRegister(instr->value()));
+    MemOperand mem_operand(zero_reg);
+    Register scratch = scratch0();
+    if (key_is_constant) {
+      mem_operand = MemOperand(external_pointer,
+                               constant_key * (1 << shift_size));
+    } else {
+      __ sll(scratch, key, shift_size);
+      __ Addu(scratch, scratch, external_pointer);
+      mem_operand = MemOperand(scratch);
+    }
+    switch (elements_kind) {
+      case EXTERNAL_PIXEL_ELEMENTS:
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+        __ sb(value, mem_operand);
+        break;
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+        __ sh(value, mem_operand);
+        break;
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+        __ sw(value, mem_operand);
+        break;
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(a2));
+  ASSERT(ToRegister(instr->key()).is(a1));
+  ASSERT(ToRegister(instr->value()).is(a0));
+
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register new_map_reg = ToRegister(instr->new_map_reg());
+  Register scratch = scratch0();
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = from_map->elements_kind();
+  ElementsKind to_kind = to_map->elements_kind();
+
+  __ mov(ToRegister(instr->result()), object_reg);
+
+  Label not_applicable;
+  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
+
+  __ li(new_map_reg, Operand(to_map));
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    // Write barrier.
+    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+                        scratch, kRAHasBeenSaved, kDontSaveFPRegs);
+  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+      to_kind == FAST_DOUBLE_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(a2));
+    ASSERT(new_map_reg.is(a3));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+             RelocInfo::CODE_TARGET, instr);
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(a2));
+    ASSERT(new_map_reg.is(a3));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+             RelocInfo::CODE_TARGET, instr);
+  } else {
+    UNREACHABLE();
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  __ push(ToRegister(instr->left()));
+  __ push(ToRegister(instr->right()));
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt: public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new DeferredStringCharCodeAt(this, instr);
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  if (instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
+    __ push(scratch);
+  } else {
+    Register index = ToRegister(instr->index());
+    __ SmiTag(index);
+    __ push(index);
+  }
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+  if (FLAG_debug_code) {
+    __ AbortIfNotSmi(v0);
+  }
+  __ SmiUntag(v0);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode: public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new DeferredStringCharFromCode(this, instr);
+
+  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  ASSERT(!char_code.is(result));
+
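+  // Fast path: look the character up in the single character string
+  // cache; an undefined entry means the string is not cached and must be
+  // created in deferred code.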
+  __ Branch(deferred->entry(), hi,
+            char_code, Operand(String::kMaxAsciiCharCode));
+  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+  __ sll(scratch, char_code, kPointerSizeLog2);
+  __ Addu(result, result, scratch);
+  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Branch(deferred->entry(), eq, result, Operand(scratch));
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ SmiTag(char_code);
+  __ push(char_code);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringLength(LStringLength* instr) {
+  Register string = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  __ lw(result, FieldMemOperand(string, String::kLengthOffset));
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  ASSERT(output->IsDoubleRegister());
+  FPURegister single_scratch = double_scratch0().low();
+  if (input->IsStackSlot()) {
+    Register scratch = scratch0();
+    __ lw(scratch, ToMemOperand(input));
+    __ mtc1(scratch, single_scratch);
+  } else {
+    __ mtc1(ToRegister(input), single_scratch);
+  }
+  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI: public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LNumberTagI* instr_;
+  };
+
+  Register src = ToRegister(instr->InputAt(0));
+  Register dst = ToRegister(instr->result());
+  Register overflow = scratch0();
+
+  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+  __ SmiTagCheckOverflow(dst, src, overflow);
+  __ BranchOnOverflow(deferred->entry(), overflow);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+  Label slow;
+  Register src = ToRegister(instr->InputAt(0));
+  Register dst = ToRegister(instr->result());
+  FPURegister dbl_scratch = double_scratch0();
+
+  // Preserve the value of all registers.
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+  // There was overflow, so bits 30 and 31 of the original integer
+  // disagree. Try to allocate a heap number in new space and store
+  // the value in there. If that fails, call the runtime system.
+  Label done;
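+  // If dst and src alias, src now holds the (overflowed) smi-tagged
+  // value: untag it and flip the sign bit to recover the original
+  // 32-bit integer.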
+  if (dst.is(src)) {
+    __ SmiUntag(src, dst);
+    __ Xor(src, src, Operand(0x80000000));
+  }
+  __ mtc1(src, dbl_scratch);
+  __ cvt_d_w(dbl_scratch, dbl_scratch);
+  if (FLAG_inline_new) {
+    __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
+    __ Move(dst, t1);
+    __ Branch(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  // TODO(3095996): Put a valid pointer value in the stack slot where the result
+  // register is stored, as this register is in the pointer map, but contains an
+  // integer value.
+  __ StoreToSafepointRegisterSlot(zero_reg, dst);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  __ Move(dst, v0);
+
+  // Done. Put the value in dbl_scratch into the value of the allocated heap
+  // number.
+  __ bind(&done);
+  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+  __ StoreToSafepointRegisterSlot(dst, dst);
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD: public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->TempAt(0));
+  Register temp2 = ToRegister(instr->TempAt(1));
+
+  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+  } else {
+    __ Branch(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ mov(reg, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  __ StoreToSafepointRegisterSlot(v0, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  if (instr->needs_check()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    // If the input is a HeapObject, the value of scratch won't be zero.
+    __ And(scratch, input, Operand(kHeapObjectTag));
+    __ SmiUntag(result, input);
+    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+  } else {
+    __ SmiUntag(result, input);
+  }
+}
+
+
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+                                DoubleRegister result_reg,
+                                bool deoptimize_on_undefined,
+                                bool deoptimize_on_minus_zero,
+                                LEnvironment* env) {
+  Register scratch = scratch0();
+
+  Label load_smi, done;
+
+  // Smi check.
+  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+
+  // Heap number map check.
+  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  if (deoptimize_on_undefined) {
+    DeoptimizeIf(ne, env, scratch, Operand(at));
+  } else {
+    Label heap_number;
+    __ Branch(&heap_number, eq, scratch, Operand(at));
+
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    DeoptimizeIf(ne, env, input_reg, Operand(at));
+
+    // Convert undefined to NaN.
+    __ LoadRoot(at, Heap::kNanValueRootIndex);
+    __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+    __ Branch(&done);
+
+    __ bind(&heap_number);
+  }
+  // Heap number to double register conversion.
+  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+  if (deoptimize_on_minus_zero) {
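+    // -0.0 is the only value with a zero low word and a high word equal
+    // to kSignMask.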
+    __ mfc1(at, result_reg.low());
+    __ Branch(&done, ne, at, Operand(zero_reg));
+    __ mfc1(scratch, result_reg.high());
+    DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+  }
+  __ Branch(&done);
+
+  // Smi to double register conversion
+  __ bind(&load_smi);
+  // scratch: untagged value of input_reg
+  __ mtc1(scratch, result_reg);
+  __ cvt_d_w(result_reg, result_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Register input_reg = ToRegister(instr->InputAt(0));
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
+  DoubleRegister double_scratch = double_scratch0();
+  FPURegister single_scratch = double_scratch.low();
+
+  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+  Label done;
+
+  // The input is a tagged HeapObject.
+  // Heap number map check.
+  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  // The heap number map in 'at' and the map in scratch1 are used by the
+  // tests in both clauses of the if below.
+
+  if (instr->truncating()) {
+    Register scratch3 = ToRegister(instr->TempAt(1));
+    DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
+    ASSERT(!scratch3.is(input_reg) &&
+           !scratch3.is(scratch1) &&
+           !scratch3.is(scratch2));
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
+    Label heap_number;
+    __ Branch(&heap_number, eq, scratch1, Operand(at));  // HeapNumber map?
+    // Check for undefined. Undefined is converted to zero for truncating
+    // conversions.
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
+    ASSERT(ToRegister(instr->result()).is(input_reg));
+    __ mov(input_reg, zero_reg);
+    __ Branch(&done);
+
+    __ bind(&heap_number);
+    __ ldc1(double_scratch2,
+            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    __ EmitECMATruncate(input_reg,
+                        double_scratch2,
+                        single_scratch,
+                        scratch1,
+                        scratch2,
+                        scratch3);
+  } else {
+    // Deoptimize if we don't have a heap number.
+    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
+
+    // Load the double value.
+    __ ldc1(double_scratch,
+            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_scratch,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    // Deopt if the operation did not succeed.
+    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+    // Load the result.
+    __ mfc1(input_reg, single_scratch);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ Branch(&done, ne, input_reg, Operand(zero_reg));
+
+      __ mfc1(scratch1, double_scratch.high());
+      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister());
+  ASSERT(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+
+  // Let the deferred code handle the HeapObject case.
+  __ JumpIfNotSmi(input_reg, deferred->entry());
+
+  // Smi to int32 conversion.
+  __ SmiUntag(input_reg);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  DoubleRegister result_reg = ToDoubleRegister(result);
+
+  EmitNumberUntagD(input_reg, result_reg,
+                   instr->hydrogen()->deoptimize_on_undefined(),
+                   instr->hydrogen()->deoptimize_on_minus_zero(),
+                   instr->environment());
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
+  DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
+  DoubleRegister double_scratch = double_scratch0();
+  FPURegister single_scratch = double_scratch0().low();
+
+  if (instr->truncating()) {
+    Register scratch3 = ToRegister(instr->TempAt(1));
+    __ EmitECMATruncate(result_reg,
+                        double_input,
+                        single_scratch,
+                        scratch1,
+                        scratch2,
+                        scratch3);
+  } else {
+    Register except_flag = scratch2;
+
+    __ EmitFPUTruncate(kRoundToMinusInf,
+                       single_scratch,
+                       double_input,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    // Deopt if the operation did not succeed (except_flag != 0).
+    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+    // Load the result.
+    __ mfc1(result_reg, single_scratch);
+  }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->InputAt(0);
+  __ And(at, ToRegister(input), Operand(kSmiTagMask));
+  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  LOperand* input = instr->InputAt(0);
+  __ And(at, ToRegister(input), Operand(kSmiTagMask));
+  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+
+  __ GetObjectType(input, scratch, scratch);
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    // If there is only one type in the interval check for equality.
+    if (first == last) {
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
+    } else {
+      DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (IsPowerOf2(mask)) {
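+      // With a single-bit mask the tag is either zero or the mask itself,
+      // so testing the masked bits against zero suffices.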
+      ASSERT(tag == 0 || IsPowerOf2(tag));
+      __ And(at, scratch, mask);
+      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
+          at, Operand(zero_reg));
+    } else {
+      __ And(scratch, scratch, Operand(mask));
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+  Register reg = ToRegister(instr->value());
+  Handle<JSFunction> target = instr->hydrogen()->target();
+  if (isolate()->heap()->InNewSpace(*target)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(target);
+    __ li(at, Operand(Handle<Object>(cell)));
+    __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+    DeoptimizeIf(ne, instr->environment(), reg,
+                 Operand(at));
+  } else {
+    DeoptimizeIf(ne, instr->environment(), reg,
+                 Operand(target));
+  }
+}
+
+
+void LCodeGen::DoCheckMapCommon(Register reg,
+                                Register scratch,
+                                Handle<Map> map,
+                                CompareMapMode mode,
+                                LEnvironment* env) {
+  Label success;
+  __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
+  DeoptimizeIf(al, env);
+  __ bind(&success);
+}
+
+
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
+  Register scratch = scratch0();
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister());
+  Register reg = ToRegister(input);
+  Handle<Map> map = instr->hydrogen()->map();
+  DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
+                   instr->environment());
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register unclamped_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register scratch = scratch0();
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+  Label is_smi, done, heap_number;
+
+  // Both smi and heap number cases are handled.
+  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
+
+  // Check for heap number
+  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  DeoptimizeIf(ne, instr->environment(), input_reg,
+               Operand(factory()->undefined_value()));
+  __ mov(result_reg, zero_reg);
+  __ jmp(&done);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
+                                             HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+  __ jmp(&done);
+
+  __ bind(&is_smi);
+  __ ClampUint8(result_reg, scratch);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  Register temp1 = ToRegister(instr->TempAt(0));
+  Register temp2 = ToRegister(instr->TempAt(1));
+
+  Handle<JSObject> holder = instr->holder();
+  Handle<JSObject> current_prototype = instr->prototype();
+
+  // Load prototype object.
+  __ LoadHeapObject(temp1, current_prototype);
+
+  // Check prototype maps up to the holder.
+  while (!current_prototype.is_identical_to(holder)) {
+    DoCheckMapCommon(temp1, temp2,
+                     Handle<Map>(current_prototype->map()),
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+    current_prototype =
+        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
+    // Load next prototype object.
+    __ LoadHeapObject(temp1, current_prototype);
+  }
+
+  // Check the holder map.
+  DoCheckMapCommon(temp1, temp2,
+                   Handle<Map>(current_prototype->map()),
+                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+}
+
+
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+  class DeferredAllocateObject: public LDeferredCode {
+   public:
+    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocateObject* instr_;
+  };
+
+  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->TempAt(0));
+  Register scratch2 = ToRegister(instr->TempAt(1));
+  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+  Handle<Map> initial_map(constructor->initial_map());
+  int instance_size = initial_map->instance_size();
+  ASSERT(initial_map->pre_allocated_property_fields() +
+         initial_map->unused_property_fields() -
+         initial_map->inobject_properties() == 0);
+
+  // Allocate memory for the object.  The initial map might change when
+  // the constructor's prototype changes, but instance size and property
+  // counts remain unchanged (if slack tracking finished).
+  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
+  __ AllocateInNewSpace(instance_size,
+                        result,
+                        scratch,
+                        scratch2,
+                        deferred->entry(),
+                        TAG_OBJECT);
+
+  // Load the initial map.
+  Register map = scratch;
+  __ LoadHeapObject(map, constructor);
+  __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Initialize map and fields of the newly allocated object.
+  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+  __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
+  __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
+  if (initial_map->inobject_properties() != 0) {
+    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+    for (int i = 0; i < initial_map->inobject_properties(); i++) {
+      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+      __ sw(scratch, FieldMemOperand(result, property_offset));
+    }
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+  Register result = ToRegister(instr->result());
+  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ LoadHeapObject(a0, constructor);
+  __ push(a0);
+  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  Heap* heap = isolate()->heap();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate_elements_kind();
+
+  // Deopt if the array literal boilerplate ElementsKind is of a type different
+  // than the expected one. The check isn't necessary if the boilerplate has
+  // already been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
+    // Load map into a2.
+    __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+    // Load the map's "bit field 2".
+    __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
+    DeoptimizeIf(ne,
+                 instr->environment(),
+                 a2,
+                 Operand(boilerplate_elements_kind));
+  }
+  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+  // Boilerplate already exists, constant elements are never accessed.
+  // Pass an empty fixed array.
+  __ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
+  __ Push(a3, a2, a1);
+
+  // Pick the right runtime function or stub to call.
+  int length = instr->hydrogen()->length();
+  if (instr->hydrogen()->IsCopyOnWrite()) {
+    ASSERT(instr->hydrogen()->depth() == 1);
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+  } else {
+    FastCloneShallowArrayStub::Mode mode =
+        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+                            Register result,
+                            Register source,
+                            int* offset) {
+  ASSERT(!source.is(a2));
+  ASSERT(!result.is(a2));
+
+  // Only elements backing stores for non-COW arrays need to be copied.
+  Handle<FixedArrayBase> elements(object->elements());
+  bool has_elements = elements->length() > 0 &&
+      elements->map() != isolate()->heap()->fixed_cow_array_map();
+
+  // Increase the offset so that subsequent objects end up right after
+  // this object and its backing store.
+  int object_offset = *offset;
+  int object_size = object->map()->instance_size();
+  int elements_offset = *offset + object_size;
+  int elements_size = has_elements ? elements->Size() : 0;
+  *offset += object_size + elements_size;
+
+  // Copy object header.
+  ASSERT(object->properties()->length() == 0);
+  int inobject_properties = object->map()->inobject_properties();
+  int header_size = object_size - inobject_properties * kPointerSize;
+  for (int i = 0; i < header_size; i += kPointerSize) {
+    if (has_elements && i == JSObject::kElementsOffset) {
+      __ Addu(a2, result, Operand(elements_offset));
+    } else {
+      __ lw(a2, FieldMemOperand(source, i));
+    }
+    __ sw(a2, FieldMemOperand(result, object_offset + i));
+  }
+
+  // Copy in-object properties.
+  for (int i = 0; i < inobject_properties; i++) {
+    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
+    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      __ Addu(a2, result, Operand(*offset));
+      __ sw(a2, FieldMemOperand(result, total_offset));
+      __ LoadHeapObject(source, value_object);
+      EmitDeepCopy(value_object, result, source, offset);
+    } else if (value->IsHeapObject()) {
+      __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
+      __ sw(a2, FieldMemOperand(result, total_offset));
+    } else {
+      __ li(a2, Operand(value));
+      __ sw(a2, FieldMemOperand(result, total_offset));
+    }
+  }
+
+  if (has_elements) {
+    // Copy elements backing store header.
+    __ LoadHeapObject(source, elements);
+    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
+      __ lw(a2, FieldMemOperand(source, i));
+      __ sw(a2, FieldMemOperand(result, elements_offset + i));
+    }
+
+    // Copy elements backing store content.
+    int elements_length = elements->length();
+    if (elements->IsFixedDoubleArray()) {
+      Handle<FixedDoubleArray> double_array =
+          Handle<FixedDoubleArray>::cast(elements);
+      for (int i = 0; i < elements_length; i++) {
+        int64_t value = double_array->get_representation(i);
+        // Only little-endian mode is supported: the low word is stored first.
+        int32_t value_low = value & 0xFFFFFFFF;
+        int32_t value_high = value >> 32;
+        int total_offset =
+            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+        __ li(a2, Operand(value_low));
+        __ sw(a2, FieldMemOperand(result, total_offset));
+        __ li(a2, Operand(value_high));
+        __ sw(a2, FieldMemOperand(result, total_offset + 4));
+      }
+    } else if (elements->IsFixedArray()) {
+      for (int i = 0; i < elements_length; i++) {
+        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+        Handle<Object> value = JSObject::GetElement(object, i);
+        if (value->IsJSObject()) {
+          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+          __ Addu(a2, result, Operand(*offset));
+          __ sw(a2, FieldMemOperand(result, total_offset));
+          __ LoadHeapObject(source, value_object);
+          EmitDeepCopy(value_object, result, source, offset);
+        } else if (value->IsHeapObject()) {
+          __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
+          __ sw(a2, FieldMemOperand(result, total_offset));
+        } else {
+          __ li(a2, Operand(value));
+          __ sw(a2, FieldMemOperand(result, total_offset));
+        }
+      }
+    } else {
+      UNREACHABLE();
+    }
+  }
+}
+
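+// A sketch of how the double-element copy above derives its two 32-bit
+// immediates: the raw IEEE-754 bit pattern is split into low and high words,
+// and the low word lands at the lower address because, as noted, only
+// little-endian mode is supported. Plain C++ model, not V8 internals:
+//
+//   void SplitDoubleBits(int64_t bits, int32_t* low, int32_t* high) {
+//     *low = static_cast<int32_t>(bits & 0xFFFFFFFF);   // offset + 0
+//     *high = static_cast<int32_t>(bits >> 32);         // offset + 4
+//   }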
+
+void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
+  int size = instr->hydrogen()->total_size();
+
+  // Allocate all objects that are part of the literal in one big
+  // allocation. This avoids multiple limit checks.
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ li(a0, Operand(Smi::FromInt(size)));
+  __ push(a0);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+  __ bind(&allocated);
+  int offset = 0;
+  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
+  EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
+  ASSERT_EQ(size, offset);
+}
+
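+// DoFastLiteral makes one allocation for the whole literal graph; the offset
+// bookkeeping in EmitDeepCopy can be sketched as below (a simplified model,
+// assuming each object is its instance size plus an optional elements store):
+//
+//   int ClaimChunk(int object_size, int elements_size, int* offset) {
+//     int object_offset = *offset;             // this object starts here
+//     *offset += object_size + elements_size;  // children follow it
+//     return object_offset;
+//   }
+//
+// ASSERT_EQ(size, offset) above then verifies that the deep copy consumed
+// exactly the precomputed total_size().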
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  Handle<FixedArray> literals(instr->environment()->closure()->literals());
+  Handle<FixedArray> constant_properties =
+      instr->hydrogen()->constant_properties();
+
+  // Set up the parameters to the stub/runtime call.
+  __ LoadHeapObject(t0, literals);
+  __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ li(a2, Operand(constant_properties));
+  int flags = instr->hydrogen()->fast_elements()
+      ? ObjectLiteral::kFastElements
+      : ObjectLiteral::kNoFlags;
+  __ li(a1, Operand(Smi::FromInt(flags)));
+  __ Push(t0, a3, a2, a1);
+
+  // Pick the right runtime function or stub to call.
+  int properties_count = constant_properties->length() / 2;
+  if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  ASSERT(ToRegister(instr->InputAt(0)).is(a0));
+  ASSERT(ToRegister(instr->result()).is(v0));
+  __ push(a0);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  Label materialized;
+  // Registers will be used as follows:
+  // a3 = JS function.
+  // t3 = literals array.
+  // a1 = regexp literal.
+  // a0 = regexp literal clone.
+  // a2 and t0-t2 are used as temporaries.
+  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  int literal_offset = FixedArray::kHeaderSize +
+      instr->hydrogen()->literal_index() * kPointerSize;
+  __ lw(a1, FieldMemOperand(t3, literal_offset));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&materialized, ne, a1, Operand(at));
+
+  // Create the regexp literal using the runtime function.
+  // The result will be in v0.
+  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ li(t1, Operand(instr->hydrogen()->pattern()));
+  __ li(t0, Operand(instr->hydrogen()->flags()));
+  __ Push(t3, t2, t1, t0);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+  __ mov(a1, v0);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+
+  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ li(a0, Operand(Smi::FromInt(size)));
+  __ Push(a1, a0);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+  __ pop(a1);
+
+  __ bind(&allocated);
+  // Copy the content into the newly allocated memory.
+  // (Unroll copy loop once for better throughput).
+  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+    __ lw(a3, FieldMemOperand(a1, i));
+    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
+    __ sw(a3, FieldMemOperand(v0, i));
+    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
+  }
+  if ((size % (2 * kPointerSize)) != 0) {
+    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
+    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
+  }
+}
+
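+// The copy loop above moves two words per iteration and handles a possible
+// odd trailing word separately. A plain C++ model (assuming kPointerSize is
+// 4, as on MIPS32):
+//
+//   void CopyWords(const int32_t* src, int32_t* dst, int size) {
+//     int i = 0;
+//     for (; i < size - 4; i += 8) {       // two words per iteration
+//       dst[i / 4] = src[i / 4];
+//       dst[i / 4 + 1] = src[i / 4 + 1];
+//     }
+//     if ((size % 8) != 0) {               // odd trailing word, if any
+//       dst[(size - 4) / 4] = src[(size - 4) / 4];
+//     }
+//   }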
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning.
+  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
+  bool pretenure = instr->hydrogen()->pretenure();
+  if (!pretenure && shared_info->num_literals() == 0) {
+    FastNewClosureStub stub(shared_info->language_mode());
+    __ li(a1, Operand(shared_info));
+    __ push(a1);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ li(a2, Operand(shared_info));
+    __ li(a1, Operand(pretenure
+                       ? factory()->true_value()
+                       : factory()->false_value()));
+    __ Push(cp, a2, a1);
+    CallRuntime(Runtime::kNewClosure, 3, instr);
+  }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  Register input = ToRegister(instr->InputAt(0));
+  __ push(input);
+  CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Register cmp1 = no_reg;
+  Operand cmp2 = Operand(no_reg);
+
+  Condition final_branch_condition = EmitTypeofIs(true_label,
+                                                  false_label,
+                                                  input,
+                                                  instr->type_literal(),
+                                                  cmp1,
+                                                  cmp2);
+
+  ASSERT(cmp1.is_valid());
+  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
+
+  if (final_branch_condition != kNoCondition) {
+    EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
+  }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name,
+                                 Register& cmp1,
+                                 Operand& cmp2) {
+  // This function makes heavy use of branch delay slots. They are used to
+  // load values that are always usable without depending on the type of the
+  // input register.
+  Condition final_branch_condition = kNoCondition;
+  Register scratch = scratch0();
+  if (type_name->Equals(heap()->number_symbol())) {
+    __ JumpIfSmi(input, true_label);
+    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    cmp1 = input;
+    cmp2 = Operand(at);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(heap()->string_symbol())) {
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, input, scratch);
+    __ Branch(USE_DELAY_SLOT, false_label,
+              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
+    // input is an object, so it is safe to load its map's bit field even if
+    // we take the other branch.
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    cmp1 = at;
+    cmp2 = Operand(zero_reg);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(heap()->boolean_symbol())) {
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    cmp1 = at;
+    cmp2 = Operand(input);
+    final_branch_condition = eq;
+
+  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    cmp1 = at;
+    cmp2 = Operand(input);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(heap()->undefined_symbol())) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    // The first instruction of JumpIfSmi is an And - it is safe in the delay
+    // slot.
+    __ JumpIfSmi(input, false_label);
+    // Check for undetectable objects => true.
+    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    cmp1 = at;
+    cmp2 = Operand(zero_reg);
+    final_branch_condition = ne;
+
+  } else if (type_name->Equals(heap()->function_symbol())) {
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, scratch, input);
+    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
+    cmp1 = input;
+    cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(heap()->object_symbol())) {
+    __ JumpIfSmi(input, false_label);
+    if (!FLAG_harmony_typeof) {
+      __ LoadRoot(at, Heap::kNullValueRootIndex);
+      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    }
+    // input is an object, it is safe to use GetObjectType in the delay slot.
+    __ GetObjectType(input, input, scratch);
+    __ Branch(USE_DELAY_SLOT, false_label,
+              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    // Still an object, so the InstanceType can be loaded.
+    __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
+    __ Branch(USE_DELAY_SLOT, false_label,
+              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    // Still an object, so the BitField can be loaded.
+    // Check for undetectable objects => false.
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    cmp1 = at;
+    cmp2 = Operand(zero_reg);
+    final_branch_condition = eq;
+
+  } else {
+    cmp1 = at;
+    cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
+    __ Branch(false_label);
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+  Register temp1 = ToRegister(instr->TempAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  EmitIsConstructCall(temp1, scratch0());
+
+  EmitBranch(true_block, false_block, eq, temp1,
+             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
+  ASSERT(!temp1.is(temp2));
+  // Get the frame pointer for the calling frame.
+  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+  __ Branch(&check_frame_marker, ne, temp2,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt() {
+  // Ensure that we have enough space after the previous lazy-bailout
+  // instruction for patching the code here.
+  int current_pc = masm()->pc_offset();
+  int patch_size = Deoptimizer::patch_size();
+  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+    while (padding_size > 0) {
+      __ nop();
+      padding_size -= Assembler::kInstrSize;
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
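+// The padding computation in EnsureSpaceForLazyDeopt, as a standalone sketch
+// (kInstrSize == 4 is an assumption matching the fixed-width MIPS encoding):
+// enough nops are emitted so that at least patch_size bytes separate two
+// consecutive lazy-deopt sites.
+//
+//   int NopsNeeded(int current_pc, int last_lazy_deopt_pc, int patch_size) {
+//     int padding = last_lazy_deopt_pc + patch_size - current_pc;
+//     return padding > 0 ? padding / 4 : 0;  // one nop per instruction
+//   }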
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  EnsureSpaceForLazyDeopt();
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
+  Register object = ToRegister(instr->object());
+  Register key = ToRegister(instr->key());
+  Register strict = scratch0();
+  __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
+  __ Push(object, key, strict);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoIn(LIn* instr) {
+  Register obj = ToRegister(instr->object());
+  Register key = ToRegister(instr->key());
+  __ Push(key, obj);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck: public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LStackCheck* instr_;
+  };
+
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(&done, hs, sp, Operand(at));
+    StackCheckStub stub;
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    EnsureSpaceForLazyDeopt();
+    __ bind(&done);
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+  } else {
+    ASSERT(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new DeferredStackCheck(this, instr);
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
+    EnsureSpaceForLazyDeopt();
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting the call and the safepoint
+    // in the deferred code.
+  }
+}
+
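+// The function-entry fast path above reduces to a single unsigned compare
+// (hs is unsigned greater-or-equal). A plain C++ model of the test:
+//
+//   bool StackCheckPasses(uintptr_t sp, uintptr_t stack_limit) {
+//     return sp >= stack_limit;  // branch to `done`; else call the stub
+//   }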
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+                                   instr->SpilledDoubleRegisterArray());
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  ASSERT(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  ASSERT(osr_pc_offset_ == -1);
+  osr_pc_offset_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  Register result = ToRegister(instr->result());
+  Register object = ToRegister(instr->object());
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  DeoptimizeIf(eq, instr->environment(), object, Operand(at));
+
+  Register null_value = t1;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
+
+  __ And(at, object, kSmiTagMask);
+  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ GetObjectType(object, a1, a1);
+  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
+
+  Label use_cache, call_runtime;
+  ASSERT(object.is(a0));
+  __ CheckEnumCache(null_value, &call_runtime);
+
+  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ Branch(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(object);
+  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  ASSERT(result.is(v0));
+  __ LoadRoot(at, Heap::kMetaMapRootIndex);
+  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  __ LoadInstanceDescriptors(map, result);
+  __ lw(result,
+        FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
+  __ lw(result,
+        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  Register map = ToRegister(instr->map());
+  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  Label out_of_object, done;
+  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
+  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.
+
+  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
+  __ Addu(scratch, object, scratch);
+  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+  __ Branch(&done);
+
+  __ bind(&out_of_object);
+  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // For out-of-object properties, the index equals -(property_index + 1).
+  __ Subu(scratch, result, scratch);
+  __ lw(result, FieldMemOperand(scratch,
+                                FixedArray::kHeaderSize - kPointerSize));
+  __ bind(&done);
+}
+
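+// A model of the field-index encoding decoded by DoLoadFieldByIndex
+// (illustrative only): a non-negative index selects in-object field `index`,
+// while a negative index selects a slot of the out-of-object properties
+// array:
+//
+//   int PropertiesArraySlot(int index) {
+//     ASSERT(index < 0);  // out-of-object case only
+//     return -index - 1;
+//   }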
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 2aec684..b508256 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,35 +29,421 @@
 #define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
 
 #include "mips/lithium-mips.h"
-
+#include "mips/lithium-gap-resolver-mips.h"
 #include "deoptimizer.h"
 #include "safepoint-table.h"
 #include "scopes.h"
 
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
 class LDeferredCode;
+class SafepointGenerator;
 
 class LCodeGen BASE_EMBEDDED {
  public:
-  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : chunk_(chunk),
+        masm_(assembler),
+        info_(info),
+        current_block_(-1),
+        current_instruction_(-1),
+        instructions_(chunk->instructions()),
+        deoptimizations_(4),
+        deopt_jump_table_(4),
+        deoptimization_literals_(8),
+        inlined_function_count_(0),
+        scope_(info->scope()),
+        status_(UNUSED),
+        deferred_(8),
+        osr_pc_offset_(-1),
+        last_lazy_deopt_pc_(0),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+
+  // Simple accessors.
+  MacroAssembler* masm() const { return masm_; }
+  CompilationInfo* info() const { return info_; }
+  Isolate* isolate() const { return info_->isolate(); }
+  Factory* factory() const { return isolate()->factory(); }
+  Heap* heap() const { return isolate()->heap(); }
+
+  // Support for converting LOperands to assembler types.
+  // LOperand must be a register.
+  Register ToRegister(LOperand* op) const;
+
+  // LOperand is loaded into scratch, unless already a register.
+  Register EmitLoadRegister(LOperand* op, Register scratch);
+
+  // LOperand must be a double register.
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+  // LOperand is loaded into dbl_scratch, unless already a double register.
+  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+                                        FloatRegister flt_scratch,
+                                        DoubleRegister dbl_scratch);
+  int ToInteger32(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  MemOperand ToMemOperand(LOperand* op) const;
+  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+  MemOperand ToHighMemOperand(LOperand* op) const;
+
+  bool IsInteger32(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
 
   // Try to generate code for the entire chunk, but it may fail if the
   // chunk contains constructs we cannot handle. Returns true if the
   // code generation attempt succeeded.
-  bool GenerateCode() {
-    UNIMPLEMENTED();
-    return false;
-  }
+  bool GenerateCode();
 
   // Finish the code by setting stack height, safepoint, and bailout
   // information on it.
-  void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+  void FinishCode(Handle<Code> code);
+
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+  void DoDeferredNumberTagI(LNumberTagI* instr);
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredRandom(LRandom* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocateObject(LAllocateObject* instr);
+  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                       Label* map_check);
+
+  void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
+                        CompareMapMode mode, LEnvironment* env);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    GENERATING,
+    DONE,
+    ABORTED
+  };
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_generating() const { return status_ == GENERATING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  StrictModeFlag strict_mode_flag() const {
+    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
+  }
+
+  LChunk* chunk() const { return chunk_; }
+  Scope* scope() const { return scope_; }
+  HGraph* graph() const { return chunk_->graph(); }
+
+  Register scratch0() { return kLithiumScratchReg; }
+  Register scratch1() { return kLithiumScratchReg2; }
+  DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
+
+  int GetNextEmittedBlock(int block);
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
+
+  void Abort(const char* format, ...);
+  void Comment(const char* format, ...);
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  bool GeneratePrologue();
+  bool GenerateBody();
+  bool GenerateDeferredCode();
+  bool GenerateDeoptJumpTable();
+  bool GenerateSafepointTable();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr);
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in a1.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int arity,
+                         LInstruction* instr,
+                         CallKind call_kind);
+
+  void LoadHeapObject(Register result, Handle<HeapObject> object);
+
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
+  void DeoptimizeIf(Condition cc,
+                    LEnvironment* environment,
+                    Register src1 = zero_reg,
+                    const Operand& src2 = Operand(zero_reg));
+
+  void AddToTranslation(Translation* translation,
+                        LOperand* op,
+                        bool is_tagged);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+  Register ToRegister(int index) const;
+  DoubleRegister ToDoubleRegister(int index) const;
+
+  // Specific math operations - used from DoUnaryMathOperation.
+  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
+  void DoMathAbs(LUnaryMathOperation* instr);
+  void DoMathFloor(LUnaryMathOperation* instr);
+  void DoMathRound(LUnaryMathOperation* instr);
+  void DoMathSqrt(LUnaryMathOperation* instr);
+  void DoMathPowHalf(LUnaryMathOperation* instr);
+  void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathTan(LUnaryMathOperation* instr);
+  void DoMathCos(LUnaryMathOperation* instr);
+  void DoMathSin(LUnaryMathOperation* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+                                              int arguments,
+                                              Safepoint::DeoptMode mode);
+  void RecordPosition(int position);
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block);
+  void EmitBranch(int left_block,
+                  int right_block,
+                  Condition cc,
+                  Register src1,
+                  const Operand& src2);
+  void EmitBranchF(int left_block,
+                   int right_block,
+                   Condition cc,
+                   FPURegister src1,
+                   FPURegister src2);
+  void EmitCmpI(LOperand* left, LOperand* right);
+  void EmitNumberUntagD(Register input,
+                        DoubleRegister result,
+                        bool deoptimize_on_undefined,
+                        bool deoptimize_on_minus_zero,
+                        LEnvironment* env);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  // Returns, in cmp1 and cmp2, the register and operand to be used in the
+  // Branch instruction that follows EmitTypeofIs.
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name,
+                         Register& cmp1,
+                         Operand& cmp2);
+
+  // Emits optimized code for %_IsObject(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsObject(Register input,
+                         Register temp1,
+                         Register temp2,
+                         Label* is_not_object,
+                         Label* is_object);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string);
+
+  // Emits optimized code for %_IsConstructCall().
+  // Caller should branch on equal condition.
+  void EmitIsConstructCall(Register temp1, Register temp2);
+
+  void EmitLoadFieldOrConstantFunction(Register result,
+                                       Register object,
+                                       Handle<Map> type,
+                                       Handle<String> name);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset);
+
+  struct JumpTableEntry {
+    explicit inline JumpTableEntry(Address entry)
+        : label(),
+          address(entry) { }
+    Label label;
+    Address address;
+  };
+
+  void EnsureSpaceForLazyDeopt();
+
+  LChunk* const chunk_;
+  MacroAssembler* const masm_;
+  CompilationInfo* const info_;
+
+  int current_block_;
+  int current_instruction_;
+  const ZoneList<LInstruction*>* instructions_;
+  ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<JumpTableEntry> deopt_jump_table_;
+  ZoneList<Handle<Object> > deoptimization_literals_;
+  int inlined_function_count_;
+  Scope* const scope_;
+  Status status_;
+  TranslationBuffer translations_;
+  ZoneList<LDeferredCode*> deferred_;
+  int osr_pc_offset_;
+  int last_lazy_deopt_pc_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope BASE_EMBEDDED {
+   public:
+    PushSafepointRegistersScope(LCodeGen* codegen,
+                                Safepoint::Kind kind)
+        : codegen_(codegen) {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = kind;
+
+      switch (codegen_->expected_safepoint_kind_) {
+        case Safepoint::kWithRegisters:
+          codegen_->masm_->PushSafepointRegisters();
+          break;
+        case Safepoint::kWithRegistersAndDoubles:
+          codegen_->masm_->PushSafepointRegistersAndDoubles();
+          break;
+        default:
+          UNREACHABLE();
+      }
+    }
+
+    ~PushSafepointRegistersScope() {
+      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+      ASSERT((kind & Safepoint::kWithRegisters) != 0);
+      switch (kind) {
+        case Safepoint::kWithRegisters:
+          codegen_->masm_->PopSafepointRegisters();
+          break;
+        case Safepoint::kWithRegistersAndDoubles:
+          codegen_->masm_->PopSafepointRegistersAndDoubles();
+          break;
+        default:
+          UNREACHABLE();
+      }
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
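+
+  // Typical use of the scope is RAII-style, as in the deferred paths of the
+  // .cc file:
+  //
+  //   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  //   ...  // safepoint registers stay pushed until the scope is destroyed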
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode: public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() { }
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  int instruction_index_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/mips/lithium-gap-resolver-mips.cc b/src/mips/lithium-gap-resolver-mips.cc
new file mode 100644
index 0000000..41b060d
--- /dev/null
+++ b/src/mips/lithium-gap-resolver-mips.cc
@@ -0,0 +1,318 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "mips/lithium-gap-resolver-mips.h"
+#include "mips/lithium-codegen-mips.h"
+
+namespace v8 {
+namespace internal {
+
+static const Register kSavedValueRegister = kLithiumScratchReg;
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner),
+      moves_(32),
+      root_index_(0),
+      in_cycle_(false),
+      saved_destination_(NULL) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  ASSERT(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+      PerformMove(i);
+      if (in_cycle_) {
+        RestoreValue();
+      }
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      ASSERT(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move);
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+
+  // We can only find a cycle, when doing a depth-first traversal of moves,
+  // by encountering the starting move again. So by spilling the source of
+  // the starting move, we break the cycle.  All moves are then unblocked,
+  // and the starting move is completed by writing the spilled value to
+  // its destination.  All other moves from the spilled source have been
+  // completed prior to breaking the cycle.
+  // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill the saved value to
+  // the stack, to free up the register.
+  ASSERT(!moves_[index].IsPending());
+  ASSERT(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack allocated local.  Multiple moves can
+  // be pending because this function is recursive.
+  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move it must be moves_[root_index_]
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    ASSERT(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+  // We save in a register the value that should end up in the source of
+  // moves_[root_index].  After performing all moves in the tree rooted
+  // in that move, we save the value to that source.
+  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  ASSERT(!in_cycle_);
+  in_cycle_ = true;
+  LOperand* source = moves_[index].source();
+  saved_destination_ = moves_[index].destination();
+  if (source->IsRegister()) {
+    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+  } else if (source->IsStackSlot()) {
+    __ lw(kSavedValueRegister, cgen_->ToMemOperand(source));
+  } else if (source->IsDoubleRegister()) {
+    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
+  } else if (source->IsDoubleStackSlot()) {
+    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
+  } else {
+    UNREACHABLE();
+  }
+  // This move will be done by restoring the saved value to the destination.
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+  ASSERT(in_cycle_);
+  ASSERT(saved_destination_ != NULL);
+
+  // Spilled value is in kSavedValueRegister or kLithiumScratchDouble.
+  if (saved_destination_->IsRegister()) {
+    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+  } else if (saved_destination_->IsStackSlot()) {
+    __ sw(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+  } else if (saved_destination_->IsDoubleRegister()) {
+    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
+             kLithiumScratchDouble);
+  } else if (saved_destination_->IsDoubleStackSlot()) {
+    __ sdc1(kLithiumScratchDouble,
+            cgen_->ToMemOperand(saved_destination_));
+  } else {
+    UNREACHABLE();
+  }
+
+  in_cycle_ = false;
+  saved_destination_ = NULL;
+}
+
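+// BreakCycle and RestoreValue implement the classic swap-via-scratch idiom.
+// A toy model over plain integers, for the two-move cycle {a -> b, b -> a}:
+//
+//   void SwapViaScratch(int* a, int* b) {
+//     int scratch = *a;  // BreakCycle: save the blocking move's source.
+//     *a = *b;           // The previously blocked move can now run.
+//     *b = scratch;      // RestoreValue: write the saved value out.
+//   }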
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+
+  if (source->IsRegister()) {
+    Register source_register = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(cgen_->ToRegister(destination), source_register);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      __ sw(source_register, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ lw(cgen_->ToRegister(destination), source_operand);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        if (!destination_operand.OffsetIsInt16Encodable()) {
+          // 'at' is overwritten while saving the value to the destination.
+          // Therefore we can't use 'at'.  It is OK if the read from the source
+          // destroys 'at', since that happens before the value is read.
+          // This uses only a single reg of the double reg-pair.
+          __ lwc1(kLithiumScratchDouble, source_operand);
+          __ swc1(kLithiumScratchDouble, destination_operand);
+        } else {
+          __ lw(at, source_operand);
+          __ sw(at, destination_operand);
+        }
+      } else {
+        __ lw(kSavedValueRegister, source_operand);
+        __ sw(kSavedValueRegister, destination_operand);
+      }
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      if (cgen_->IsInteger32(constant_source)) {
+        __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
+      } else {
+        __ LoadObject(dst, cgen_->ToHandle(constant_source));
+      }
+    } else {
+      ASSERT(destination->IsStackSlot());
+      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      if (cgen_->IsInteger32(constant_source)) {
+        __ li(kSavedValueRegister,
+              Operand(cgen_->ToInteger32(constant_source)));
+      } else {
+        __ LoadObject(kSavedValueRegister,
+                      cgen_->ToHandle(constant_source));
+      }
+      __ sw(kSavedValueRegister, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      __ sdc1(source_register, destination_operand);
+    }
+
+  } else if (source->IsDoubleStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        // kLithiumScratchDouble was used to break the cycle,
+        // but kSavedValueRegister is free.
+        MemOperand source_high_operand =
+            cgen_->ToHighMemOperand(source);
+        MemOperand destination_high_operand =
+            cgen_->ToHighMemOperand(destination);
+        __ lw(kSavedValueRegister, source_operand);
+        __ sw(kSavedValueRegister, destination_operand);
+        __ lw(kSavedValueRegister, source_high_operand);
+        __ sw(kSavedValueRegister, destination_high_operand);
+      } else {
+        __ ldc1(kLithiumScratchDouble, source_operand);
+        __ sdc1(kLithiumScratchDouble, destination_operand);
+      }
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/mips/lithium-gap-resolver-mips.h b/src/mips/lithium-gap-resolver-mips.h
new file mode 100644
index 0000000..2506e38
--- /dev/null
+++ b/src/mips/lithium-gap-resolver-mips.h
@@ -0,0 +1,83 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // If a cycle is found in the series of moves, save the blocking value to
+  // a scratch register.  The cycle must be found by hitting the root of the
+  // depth-first search.
+  void BreakCycle(int index);
+
+  // After a cycle has been resolved, restore the value from the scratch
+  // register to its proper destination.
+  void RestoreValue();
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  int root_index_;
+  bool in_cycle_;
+  LOperand* saved_destination_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
new file mode 100644
index 0000000..29cb3c6
--- /dev/null
+++ b/src/mips/lithium-mips.cc
@@ -0,0 +1,2333 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "lithium-allocator-inl.h"
+#include "mips/lithium-mips.h"
+#include "mips/lithium-codegen-mips.h"
+
+namespace v8 {
+namespace internal {
+
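+// For each concrete Lithium instruction, define a CompileToNative method
+// that forwards to the matching LCodeGen::Do<type> handler.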
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+LOsrEntry::LOsrEntry() {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+    register_spills_[i] = NULL;
+  }
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+    double_register_spills_[i] = NULL;
+  }
+}
+
+
+void LOsrEntry::MarkSpilledRegister(int allocation_index,
+                                    LOperand* spill_operand) {
+  ASSERT(spill_operand->IsStackSlot());
+  ASSERT(register_spills_[allocation_index] == NULL);
+  register_spills_[allocation_index] = spill_operand;
+}
+
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or a
+  // non-register policy.
+  ASSERT(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
+                                          LOperand* spill_operand) {
+  ASSERT(spill_operand->IsDoubleStackSlot());
+  ASSERT(double_register_spills_[allocation_index] == NULL);
+  double_register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < inputs_.length(); i++) {
+    if (i > 0) stream->Add(" ");
+    inputs_[i]->PrintTo(stream);
+  }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+  for (int i = 0; i < results_.length(); i++) {
+    if (i > 0) stream->Add(" ");
+    results_[i]->PrintTo(stream);
+  }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::SHL: return "sll-t";
+    case Token::SAR: return "sra-t";
+    case Token::SHR: return "srl-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  InputAt(0)->PrintTo(stream);
+}
+
+
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  InputAt(1)->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_object(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  InputAt(0)->PrintTo(stream);
+  InputAt(1)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              *hydrogen()->type_literal()->ToCString(),
+              true_block_id(), false_block_id());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
+  stream->Add("/%s ", hydrogen()->OpName());
+  InputAt(0)->PrintTo(stream);
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  InputAt(1)->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) {
+  stream->Add("[a2] #%d / ", arity());
+}
+
+
+void LCallNamed::PrintDataTo(StringStream* stream) {
+  SmartArrayPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallGlobal::PrintDataTo(StringStream* stream) {
+  SmartArrayPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+LChunk::LChunk(CompilationInfo* info, HGraph* graph)
+    : spill_slot_count_(0),
+      info_(info),
+      graph_(graph),
+      instructions_(32),
+      pointer_maps_(8),
+      inlined_closures_(1) {
+}
+
+
+int LChunk::GetNextSpillIndex(bool is_double) {
+  // A double-width value occupies two slots, so skip one extra slot.
+  if (is_double) spill_slot_count_++;
+  return spill_slot_count_++;
+}
+
+
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+  int index = GetNextSpillIndex(is_double);
+  if (is_double) {
+    return LDoubleStackSlot::Create(index);
+  } else {
+    return LStackSlot::Create(index);
+  }
+}
+
+
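+// A block is empty if it holds only a label, redundant gap moves, and an
+// unconditional goto; redirect such labels to the goto target so later
+// passes can skip the block.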
+void LChunk::MarkEmptyBlocks() {
+  HPhase phase("L_Mark empty blocks", this);
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    int first = block->first_instruction_index();
+    int last = block->last_instruction_index();
+    LInstruction* first_instr = instructions()->at(first);
+    LInstruction* last_instr = instructions()->at(last);
+
+    LLabel* label = LLabel::cast(first_instr);
+    if (last_instr->IsGoto()) {
+      LGoto* goto_instr = LGoto::cast(last_instr);
+      if (label->IsRedundant() &&
+          !label->is_loop_header()) {
+        bool can_eliminate = true;
+        for (int i = first + 1; i < last && can_eliminate; ++i) {
+          LInstruction* cur = instructions()->at(i);
+          if (cur->IsGap()) {
+            LGap* gap = LGap::cast(cur);
+            if (!gap->IsRedundant()) {
+              can_eliminate = false;
+            }
+          } else {
+            can_eliminate = false;
+          }
+        }
+
+        if (can_eliminate) {
+          label->set_replacement(GetLabel(goto_instr->block_id()));
+        }
+      }
+    }
+  }
+}
+
+
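+// Append an instruction together with its gap; control instructions get
+// their gap before them, all other instructions after.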
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
+  int index = -1;
+  if (instr->IsControl()) {
+    instructions_.Add(gap);
+    index = instructions_.length();
+    instructions_.Add(instr);
+  } else {
+    index = instructions_.length();
+    instructions_.Add(instr);
+    instructions_.Add(gap);
+  }
+  if (instr->HasPointerMap()) {
+    pointer_maps_.Add(instr->pointer_map());
+    instr->pointer_map()->set_lithium_position(index);
+  }
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+  return LConstantOperand::Create(constant->id());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+  // The receiver is at index 0, the first parameter at index 1, so we
+  // shift all parameter indexes down by the number of parameters, and
+  // make sure they end up negative so they are distinguishable from
+  // spill slots.
+  int result = index - info()->scope()->num_parameters() - 1;
+  ASSERT(result < 0);
+  return result;
+}
+
+// A parameter relative to the frame pointer in the arguments stub.
+int LChunk::ParameterAt(int index) {
+  ASSERT(-1 <= index);  // -1 is the receiver.
+  return (1 + info()->scope()->num_parameters() - index) *
+      kPointerSize;
+}
+
+
+LGap* LChunk::GetGapAt(int index) const {
+  return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+  return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+  while (!IsGapAt(index)) index--;
+  return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+  GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+}
+
+
+Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
+  return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+    LConstantOperand* operand) const {
+  return graph_->LookupValue(operand->index())->representation();
+}
+
+
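+// Lower the Hydrogen graph to a Lithium chunk one basic block at a time.
+// Returns NULL if building was aborted.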
+LChunk* LChunkBuilder::Build() {
+  ASSERT(is_unused());
+  chunk_ = new(zone()) LChunk(info(), graph());
+  HPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+void LChunkBuilder::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
+    PrintF("Aborting LChunk building in @\"%s\": ", *name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+                                  Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                                  DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+                                             LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateInstruction<1, I, T>* instr, int index) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(
+    LTemplateInstruction<1, I, T>* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator));
+  return instr;
+}
+
+
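+// Defer attaching the lazy deoptimization environment: remember the
+// instruction and ast id until the matching HSimulate is processed.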
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+    LInstruction* instr, int ast_id) {
+  ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+  instruction_pending_deoptimization_environment_ = instr;
+  pending_deoptimization_ast_id_ = ast_id;
+  return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+  instruction_pending_deoptimization_environment_ = NULL;
+  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  if (hinstr->HasObservableSideEffects()) {
+    ASSERT(hinstr->next()->IsSimulate());
+    HSimulate* sim = HSimulate::cast(hinstr->next());
+    instr = SetInstructionPendingDeoptimizationEnvironment(
+        instr, sim->ast_id());
+  }
+
+  // If the instruction has no observable side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if the
+  // call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
+  instr->MarkAsSaveDoubles();
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  ASSERT(!instr->HasPointerMap());
+  instr->set_pointer_map(new(zone()) LPointerMap(position_));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  operand->set_virtual_register(allocator_->GetVirtualRegister());
+  if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  ASSERT(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  ASSERT(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+  return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
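+// Common lowering for shifts: tagged operands go through the generic
+// binary-op stub, while integer operands use LShiftI with the shift count
+// either as a masked constant or in a register.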
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsTagged()) {
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), a1);
+    LOperand* right = UseFixed(instr->right(), a0);
+    LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
+    return MarkAsCall(DefineFixed(result, v0), instr);
+  }
+
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().IsInteger32());
+  ASSERT(instr->right()->representation().IsInteger32());
+  LOperand* left = UseRegisterAtStart(instr->left());
+
+  HValue* right_value = instr->right();
+  LOperand* right = NULL;
+  int constant_value = 0;
+  if (right_value->IsConstant()) {
+    HConstant* constant = HConstant::cast(right_value);
+    right = chunk_->DefineConstantOperand(constant);
+    constant_value = constant->Integer32Value() & 0x1f;
+  } else {
+    right = UseRegisterAtStart(right_value);
+  }
+
+  // Shift operations can only deoptimize if we do a logical shift
+  // by 0 and the result cannot be truncated to int32.
+  bool may_deopt = (op == Token::SHR && constant_value == 0);
+  bool does_deopt = false;
+  if (may_deopt) {
+    for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+      if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+        does_deopt = true;
+        break;
+      }
+    }
+  }
+
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+  return does_deopt ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->left()->representation().IsDouble());
+  ASSERT(instr->right()->representation().IsDouble());
+  ASSERT(op != Token::MOD);
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(op == Token::ADD ||
+         op == Token::DIV ||
+         op == Token::MOD ||
+         op == Token::MUL ||
+         op == Token::SUB);
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  ASSERT(left->representation().IsTagged());
+  ASSERT(right->representation().IsTagged());
+  LOperand* left_operand = UseFixed(left, a1);
+  LOperand* right_operand = UseFixed(right, a0);
+  LArithmeticT* result =
+      new(zone()) LArithmeticT(op, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
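+// Lower one basic block: derive its starting environment from the
+// predecessors (processing phis at join points), then compile each
+// instruction in order.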
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  ASSERT(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    ASSERT(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      ASSERT(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    ASSERT(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment, it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      last_environment->SetValueAt(phi->merged_index(), phi);
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                   graph_->GetConstantUndefined());
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
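+// Lower a single Hydrogen instruction, optionally forcing pointer maps and
+// environments when the corresponding stress flags are set.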
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+  if (current->has_position()) position_ = current->position();
+  LInstruction* instr = current->CompileToLithium(this);
+
+  if (instr != NULL) {
+    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+      instr = AssignPointerMap(instr);
+    }
+    if (FLAG_stress_environments && !instr->HasEnvironment()) {
+      instr = AssignEnvironment(instr);
+    }
+    instr->set_hydrogen_value(current);
+    chunk_->AddInstruction(instr, current_block_);
+  }
+  current_instruction_ = old_current;
+}
+
+
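+// Recursively translate a Hydrogen environment chain into the Lithium
+// environments used for deoptimization.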
+LEnvironment* LChunkBuilder::CreateEnvironment(
+    HEnvironment* hydrogen_env,
+    int* argument_index_accumulator) {
+  if (hydrogen_env == NULL) return NULL;
+
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+  int ast_id = hydrogen_env->ast_id();
+  ASSERT(ast_id != AstNode::kNoNumber ||
+         hydrogen_env->frame_type() != JS_FUNCTION);
+  int value_count = hydrogen_env->length();
+  LEnvironment* result = new(zone()) LEnvironment(
+      hydrogen_env->closure(),
+      hydrogen_env->frame_type(),
+      ast_id,
+      hydrogen_env->parameter_count(),
+      argument_count_,
+      value_count,
+      outer);
+  int argument_index = *argument_index_accumulator;
+  for (int i = 0; i < value_count; ++i) {
+    if (hydrogen_env->is_special_index(i)) continue;
+
+    HValue* value = hydrogen_env->values()->at(i);
+    LOperand* op = NULL;
+    if (value->IsArgumentsObject()) {
+      op = NULL;
+    } else if (value->IsPushArgument()) {
+      op = new(zone()) LArgument(argument_index++);
+    } else {
+      op = UseAny(value);
+    }
+    result->AddValue(op, value->representation());
+  }
+
+  if (hydrogen_env->frame_type() == JS_FUNCTION) {
+    *argument_index_accumulator = argument_index;
+  }
+
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* value = instr->value();
+  if (value->EmitAtUses()) {
+    HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
+        ? instr->FirstSuccessor()
+        : instr->SecondSuccessor();
+    return new(zone()) LGoto(successor->block_id());
+  }
+
+  LBranch* result = new(zone()) LBranch(UseRegister(value));
+  // Tagged values that are not known smis or booleans require a
+  // deoptimization environment.
+  Representation rep = value->representation();
+  HType type = value->type();
+  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+    return AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  return DefineAsRegister(
+      new(zone()) LArgumentsLength(UseRegister(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  return DefineAsRegister(new(zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LInstanceOf* result =
+      new(zone()) LInstanceOf(UseFixed(instr->left(), a0),
+                              UseFixed(instr->right(), a1));
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+    HInstanceOfKnownGlobal* instr) {
+  LInstanceOfKnownGlobal* result =
+      new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), a0),
+                                         FixedTemp(t0));
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegisterAtStart(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+  LOperand* receiver = UseFixed(instr->receiver(), a0);
+  LOperand* length = UseFixed(instr->length(), a2);
+  LOperand* elements = UseFixed(instr->elements(), a3);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements);
+  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+  ++argument_count_;
+  LOperand* argument = Use(instr->argument());
+  return new(zone()) LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LOuterContext(context));
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  return MarkAsCall(new(zone()) LDeclareGlobals, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LGlobalObject(context));
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
+  LOperand* global_object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+}
+
+
+LInstruction* LChunkBuilder::DoCallConstantFunction(
+    HCallConstantFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+  return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
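+// Math.log/sin/cos/tan are lowered to calls on fixed double registers,
+// pow-half needs a result register distinct from its input, and the
+// remaining operations are emitted inline.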
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  BuiltinFunctionId op = instr->op();
+  if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
+    LOperand* input = UseFixedDouble(instr->value(), f4);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
+    return MarkAsCall(DefineFixedDouble(result, f4), instr);
+  } else if (op == kMathPowHalf) {
+    // Input cannot be the same as the result.
+    // See lithium-codegen-mips.cc::DoMathPowHalf.
+    LOperand* input = UseFixedDouble(instr->value(), f8);
+    LOperand* temp = FixedTemp(f6);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
+    return DefineFixedDouble(result, f4);
+  } else {
+    LOperand* input = UseRegisterAtStart(instr->value());
+    LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
+    switch (op) {
+      case kMathAbs:
+        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+      case kMathFloor:
+        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+      case kMathSqrt:
+        return DefineAsRegister(result);
+      case kMathRound:
+        return AssignEnvironment(DefineAsRegister(result));
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
+  ASSERT(instr->key()->representation().IsTagged());
+  argument_count_ -= instr->argument_count();
+  LOperand* key = UseFixed(instr->key(), a2);
+  return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new(zone()) LCallNamed, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  LOperand* constructor = UseFixed(instr->constructor(), a1);
+  argument_count_ -= instr->argument_count();
+  LCallNew* result = new(zone()) LCallNew(constructor);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), v0),
+                    instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineAsRegister(new(zone()) LBitI(left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), a1);
+    LOperand* right = UseFixed(instr->right(), a0);
+    LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
+    return MarkAsCall(DefineFixed(result, v0), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+  ASSERT(instr->value()->representation().IsInteger32());
+  ASSERT(instr->representation().IsInteger32());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LBitNotI(value));
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else if (instr->representation().IsInteger32()) {
+    // TODO(1042): The fixed register allocation is needed because we call
+    // TypeRecordingBinaryOpStub from the generated code, which requires
+    // registers a0 and a1 to be used. We should remove that when we provide
+    // a native implementation.
+    LOperand* dividend = UseFixed(instr->left(), a0);
+    LOperand* divisor = UseFixed(instr->right(), a1);
+    return AssignEnvironment(AssignPointerMap(
+             DefineFixed(new(zone()) LDivI(dividend, divisor), v0)));
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LModI* mod;
+    if (instr->HasPowerOf2Divisor()) {
+      ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+      LOperand* value = UseRegisterAtStart(instr->left());
+      mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
+    } else {
+      LOperand* dividend = UseRegister(instr->left());
+      LOperand* divisor = UseRegister(instr->right());
+      mod = new(zone()) LModI(dividend,
+                              divisor,
+                              TempRegister(),
+                              FixedTemp(f20),
+                              FixedTemp(f22));
+    }
+
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+        instr->CheckFlag(HValue::kCanBeDivByZero)) {
+      return AssignEnvironment(DefineAsRegister(mod));
+    } else {
+      return DefineAsRegister(mod);
+    }
+  } else if (instr->representation().IsTagged()) {
+    return DoArithmeticT(Token::MOD, instr);
+  } else {
+    ASSERT(instr->representation().IsDouble());
+    // We call a C function for double modulo. It can't trigger a GC.
+    // We need to use a fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    LOperand* left = UseFixedDouble(instr->left(), f2);
+    LOperand* right = UseFixedDouble(instr->right(), f4);
+    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
+    return MarkAsCall(DefineFixedDouble(result, f2), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left;
+    LOperand* right = UseOrConstant(instr->MostConstantOperand());
+    LOperand* temp = NULL;
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+        (instr->CheckFlag(HValue::kCanOverflow) ||
+         !right->IsConstantOperand())) {
+      left = UseRegister(instr->LeastConstantOperand());
+      temp = TempRegister();
+    } else {
+      left = UseRegisterAtStart(instr->LeastConstantOperand());
+    }
+    LMulI* mul = new(zone()) LMulI(left, right, temp);
+    if (instr->CheckFlag(HValue::kCanOverflow) ||
+        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      AssignEnvironment(mul);
+    }
+    return DefineAsRegister(mul);
+
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LSubI* sub = new(zone()) LSubI(left, right);
+    LInstruction* result = DefineAsRegister(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LAddI* add = new(zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  ASSERT(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  ASSERT(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), f2);
+  LOperand* right = exponent_type.IsDouble() ?
+      UseFixedDouble(instr->right(), f4) :
+      UseFixed(instr->right(), a2);
+  LPower* result = new(zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, f0),
+                    instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->global_object()->representation().IsTagged());
+  LOperand* global_object = UseFixed(instr->global_object(), a0);
+  LRandom* result = new(zone()) LRandom(global_object);
+  return MarkAsCall(DefineFixedDouble(result, f0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  Representation r = instr->GetInputRepresentation();
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  LCmpT* result = new(zone()) LCmpT(left, right);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+    HCompareIDAndBranch* instr) {
+  Representation r = instr->GetInputRepresentation();
+  if (r.IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new(zone()) LCmpIDAndBranch(left, right);
+  } else {
+    ASSERT(r.IsDouble());
+    ASSERT(instr->left()->representation().IsDouble());
+    ASSERT(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new(zone()) LCmpIDAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+    HCompareConstantEqAndBranch* instr) {
+  return new(zone()) LCmpConstantEqAndBranch(
+      UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()),
+                                        temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()),
+                                        temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new(zone()) LIsUndetectableAndBranch(
+      UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(left, right);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LHasInstanceTypeAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new(zone()) LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
+                                           TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
+  LOperand* array = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LJSArrayLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
+    HFixedArrayBaseLength* instr) {
+  LOperand* array = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+  LOperand* object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LElementsKind(object));
+}
+
+
+LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
+  LOperand* object = UseRegister(instr->value());
+  LValueOf* result = new(zone()) LValueOf(object, TempRegister());
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+  LOperand* object = UseFixed(instr->value(), a0);
+  LDateField* result =
+      new(zone()) LDateField(object, FixedTemp(a1), instr->index());
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  LOperand* value = UseRegisterAtStart(instr->index());
+  LOperand* length = UseRegister(instr->length());
+  return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception).  There is nothing specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+  LOperand* value = UseFixed(instr->value(), a0);
+  return MarkAsCall(new(zone()) LThrow(value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
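+// Lower a representation change between tagged, double, and int32 values
+// to the appropriate tagging, untagging, or numeric conversion.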
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(instr->value());
+      LNumberUntagD* res = new(zone()) LNumberUntagD(value);
+      return AssignEnvironment(DefineAsRegister(res));
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegisterAtStart(instr->value());
+      bool needs_check = !instr->value()->type().IsSmi();
+      LInstruction* res = NULL;
+      if (!needs_check) {
+        res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check));
+      } else {
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
+                                                      : NULL;
+        LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22)
+                                                      : NULL;
+        res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
+                                                       temp1,
+                                                       temp2,
+                                                       temp3));
+        res = AssignEnvironment(res);
+      }
+      return res;
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(instr->value());
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+
+      // Make sure that the temp and result_temp registers are
+      // different.
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+      Define(result, result_temp);
+      return AssignPointerMap(result);
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
+      LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
+      return AssignEnvironment(DefineAsRegister(res));
+    }
+  } else if (from.IsInteger32()) {
+    if (to.IsTagged()) {
+      HValue* val = instr->value();
+      LOperand* value = UseRegisterAtStart(val);
+      if (val->HasRange() && val->range()->IsInSmiRange()) {
+        return DefineAsRegister(new(zone()) LSmiTag(value));
+      } else {
+        LNumberTagI* result = new(zone()) LNumberTagI(value);
+        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+      }
+    } else {
+      ASSERT(to.IsDouble());
+      LOperand* value = Use(instr->value());
+      return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
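+
+
+// DoChange above covers all six conversions between the tagged, double
+// and int32 representations.  For example, a tagged->int32 change whose
+// input is statically known to be a smi degenerates to a bare LSmiUntag
+// with no deoptimization environment:
+//
+//   LOperand* value = UseRegisterAtStart(instr->value());
+//   return DefineAsRegister(new(zone()) LSmiUntag(value, false));
+//
+// whereas the general case goes through LTaggedToI with an environment
+// and, when truncation is allowed, the fixed double temp f22.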
+
+
+LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckInstanceType(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckFunction(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckMap(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    // Revisit the decision to reserve f22 as a fixed double temp, here
+    // and in the tagged case below.
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(f22)));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+  } else {
+    ASSERT(input_rep.IsTagged());
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve f22 explicitly.
+    LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(f22));
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
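+
+
+// Of the three clamp variants above, only the tagged case is given a
+// deoptimization environment: double and integer inputs can always be
+// clamped, while a tagged input may not be a number at all.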
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  return new(zone()) LReturn(UseFixed(instr->value(), v0));
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new(zone()) LConstantD);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new(zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
+  return instr->RequiresHoleCheck()
+      ? AssignEnvironment(DefineAsRegister(result))
+      : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), a0);
+  LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+  LOperand* value = UseRegister(instr->value());
+  // Use a temp to check the value in the cell when we perform a hole
+  // check.
+  return instr->RequiresHoleCheck()
+      ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
+      : new(zone()) LStoreGlobalCell(value, NULL);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), a1);
+  LOperand* value = UseFixed(instr->value(), a0);
+  LStoreGlobalGeneric* result =
+      new(zone()) LStoreGlobalGeneric(global_object, value);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    context = UseTempRegister(instr->context());
+    value = UseTempRegister(instr->value());
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+  }
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value);
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  return DefineAsRegister(
+      new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
+    HLoadNamedFieldPolymorphic* instr) {
+  ASSERT(instr->representation().IsTagged());
+  if (instr->need_generic()) {
+    LOperand* obj = UseFixed(instr->object(), a0);
+    LLoadNamedFieldPolymorphic* result =
+        new(zone()) LLoadNamedFieldPolymorphic(obj);
+    return MarkAsCall(DefineFixed(result, v0), instr);
+  } else {
+    LOperand* obj = UseRegisterAtStart(instr->object());
+    LLoadNamedFieldPolymorphic* result =
+        new(zone()) LLoadNamedFieldPolymorphic(obj);
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), a0);
+  LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), v0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
+    HLoadExternalArrayPointer* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+    HLoadKeyedFastElement* instr) {
+  ASSERT(instr->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
+  if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+    HLoadKeyedFastDoubleElement* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* elements = UseTempRegister(instr->elements());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LLoadKeyedFastDoubleElement* result =
+      new(zone()) LLoadKeyedFastDoubleElement(elements, key);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+    HLoadKeyedSpecializedArrayElement* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
+  Representation representation(instr->representation());
+  ASSERT(
+      (representation.IsInteger32() &&
+       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+      (representation.IsDouble() &&
+       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* external_pointer = UseRegister(instr->external_pointer());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LLoadKeyedSpecializedArrayElement* result =
+      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
+  LInstruction* load_instr = DefineAsRegister(result);
+  // An unsigned int array load might overflow and cause a deopt; make
+  // sure it has an environment.
+  return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
+      AssignEnvironment(load_instr) : load_instr;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), a1);
+  LOperand* key = UseFixed(instr->key(), a0);
+
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), v0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+    HStoreKeyedFastElement* instr) {
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  ASSERT(instr->value()->representation().IsTagged());
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegisterAtStart(instr->value());
+  LOperand* key = needs_write_barrier
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+  return new(zone()) LStoreKeyedFastElement(obj, key, val);
+}
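+
+
+// The operand constraints above are dictated by the write barrier: the
+// object always lives in a temp register, and the key and value are
+// forced into temps as well when a barrier is needed, because the
+// barrier sequence may clobber the registers handed to it.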
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+    HStoreKeyedFastDoubleElement* instr) {
+  ASSERT(instr->value()->representation().IsDouble());
+  ASSERT(instr->elements()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LOperand* val = UseTempRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+  return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+    HStoreKeyedSpecializedArrayElement* instr) {
+  Representation representation(instr->value()->representation());
+  ElementsKind elements_kind = instr->elements_kind();
+  ASSERT(
+      (representation.IsInteger32() &&
+       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+      (representation.IsDouble() &&
+       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+  ASSERT(instr->external_pointer()->representation().IsExternal());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* external_pointer = UseRegister(instr->external_pointer());
+  bool val_is_temp_register =
+      elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
+      elements_kind == EXTERNAL_FLOAT_ELEMENTS;
+  LOperand* val = val_is_temp_register
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+
+  return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+                                                        key,
+                                                        val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), a2);
+  LOperand* key = UseFixed(instr->key(), a1);
+  LOperand* val = UseFixed(instr->value(), a0);
+
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsTagged());
+  ASSERT(instr->value()->representation().IsTagged());
+
+  return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
+    return DefineSameAsFirst(result);
+  } else {
+    LOperand* object = UseFixed(instr->object(), a0);
+    LOperand* fixed_object_reg = FixedTemp(a2);
+    LOperand* new_map_reg = FixedTemp(a3);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object,
+                                            new_map_reg,
+                                            fixed_object_reg);
+    return MarkAsCall(DefineFixed(result, v0), instr);
+  }
+}
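+
+
+// Only the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS transition is simple
+// enough to handle inline: it amounts to installing a new map, so a
+// single temp register suffices and the result aliases the input.  Any
+// other transition may have to allocate and convert the backing store,
+// so it is lowered as a call with fixed registers (a0, a2, a3).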
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+  LOperand* obj = needs_write_barrier
+      ? UseTempRegister(instr->object())
+      : UseRegisterAtStart(instr->object());
+
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+
+  return new(zone()) LStoreNamedField(obj, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), a1);
+  LOperand* val = UseFixed(instr->value(), a0);
+
+  LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), v0),
+                    instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
+  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+  LOperand* string = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LStringLength(string));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+  LAllocateObject* result = new(zone()) LAllocateObject(
+      TempRegister(), TempRegister());
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
+  return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+  return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+  return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+  return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+  return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
+  LOperand* object = UseFixed(instr->object(), a0);
+  LOperand* key = UseFixed(instr->key(), a1);
+  LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  int spill_index = chunk()->GetParameterStackSlot(instr->index());
+  return DefineAsSpilled(new(zone()) LParameter, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  int spill_index = chunk()->GetNextSpillIndex(false);  // Not double-width.
+  if (spill_index > LUnallocated::kMaxFixedIndex) {
+    Abort("Too many spill slots needed for OSR");
+    spill_index = 0;
+  }
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new(zone()) LCallStub, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real arguments object use causes a bailout.
+  // So this value is never used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  LOperand* arguments = UseRegister(instr->arguments());
+  LOperand* length = UseTempRegister(instr->length());
+  LOperand* index = UseRegister(instr->index());
+  LAccessArgumentsAt* result =
+      new(zone()) LAccessArgumentsAt(arguments, length, index);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), a0);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), a0));
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+    HIsConstructCallAndBranch* instr) {
+  return new(zone()) LIsConstructCallAndBranch(TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  HEnvironment* env = current_block_->last_environment();
+  ASSERT(env != NULL);
+
+  env->set_ast_id(instr->ast_id());
+
+  env->Drop(instr->pop_count());
+  for (int i = 0; i < instr->values()->length(); ++i) {
+    HValue* value = instr->values()->at(i);
+    if (instr->HasAssignedIndexAt(i)) {
+      env->Bind(instr->GetAssignedIndexAt(i), value);
+    } else {
+      env->Push(value);
+    }
+  }
+
+  // If there is an instruction pending a deoptimization environment,
+  // create a lazy bailout instruction to capture the environment.
+  if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+    LInstruction* result = new(zone()) LLazyBailout;
+    result = AssignEnvironment(result);
+    instruction_pending_deoptimization_environment_->
+        set_deoptimization_environment(result->environment());
+    ClearInstructionPendingDeoptimizationEnvironment();
+    return result;
+  }
+
+  return NULL;
+}
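+
+
+// The pairing works roughly as follows: when the builder emits a call
+// that may deoptimize lazily, it records that instruction together with
+// the AST id to bail out to; the first HSimulate carrying that id then
+// materializes an LLazyBailout and hands its environment back to the
+// recorded call via set_deoptimization_environment.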
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  if (instr->is_function_entry()) {
+    return MarkAsCall(new(zone()) LStackCheck, instr);
+  } else {
+    ASSERT(instr->is_backwards_branch());
+    return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
+                                               instr->function(),
+                                               undefined,
+                                               instr->call_kind(),
+                                               instr->is_construct());
+  if (instr->arguments() != NULL) {
+    inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+  }
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment()->
+      DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* object = UseRegisterAtStart(instr->object());
+  LIn* result = new(zone()) LIn(key, object);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* object = UseFixed(instr->enumerable(), a0);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
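+
+
+// Passing CAN_DEOPTIMIZE_EAGERLY tells MarkAsCall that this call may
+// deoptimize at the call site itself rather than through the lazy
+// bailout path above, so the builder attaches a deoptimization
+// environment to the call up front.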
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseRegister(instr->index());
+  return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index ebc1e43..5a7bf4d 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,131 +32,311 @@
 #include "lithium-allocator.h"
 #include "lithium.h"
 #include "safepoint-table.h"
-
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
 class LCodeGen;
-class LEnvironment;
-class Translation;
+
+#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
+  V(ControlInstruction)                         \
+  V(Call)                                       \
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
+  V(AccessArgumentsAt)                          \
+  V(AddI)                                       \
+  V(AllocateObject)                             \
+  V(ApplyArguments)                             \
+  V(ArgumentsElements)                          \
+  V(ArgumentsLength)                            \
+  V(ArithmeticD)                                \
+  V(ArithmeticT)                                \
+  V(ArrayLiteral)                               \
+  V(BitI)                                       \
+  V(BitNotI)                                    \
+  V(BoundsCheck)                                \
+  V(Branch)                                     \
+  V(CallConstantFunction)                       \
+  V(CallFunction)                               \
+  V(CallGlobal)                                 \
+  V(CallKeyed)                                  \
+  V(CallKnownGlobal)                            \
+  V(CallNamed)                                  \
+  V(CallNew)                                    \
+  V(CallRuntime)                                \
+  V(CallStub)                                   \
+  V(CheckFunction)                              \
+  V(CheckInstanceType)                          \
+  V(CheckMap)                                   \
+  V(CheckNonSmi)                                \
+  V(CheckPrototypeMaps)                         \
+  V(CheckSmi)                                   \
+  V(ClampDToUint8)                              \
+  V(ClampIToUint8)                              \
+  V(ClampTToUint8)                              \
+  V(ClassOfTestAndBranch)                       \
+  V(CmpConstantEqAndBranch)                     \
+  V(CmpIDAndBranch)                             \
+  V(CmpObjectEqAndBranch)                       \
+  V(CmpMapAndBranch)                            \
+  V(CmpT)                                       \
+  V(ConstantD)                                  \
+  V(ConstantI)                                  \
+  V(ConstantT)                                  \
+  V(Context)                                    \
+  V(DeclareGlobals)                             \
+  V(DeleteProperty)                             \
+  V(Deoptimize)                                 \
+  V(DivI)                                       \
+  V(DoubleToI)                                  \
+  V(ElementsKind)                               \
+  V(FastLiteral)                                \
+  V(FixedArrayBaseLength)                       \
+  V(FunctionLiteral)                            \
+  V(GetCachedArrayIndex)                        \
+  V(GlobalObject)                               \
+  V(GlobalReceiver)                             \
+  V(Goto)                                       \
+  V(HasCachedArrayIndexAndBranch)               \
+  V(HasInstanceTypeAndBranch)                   \
+  V(In)                                         \
+  V(InstanceOf)                                 \
+  V(InstanceOfKnownGlobal)                      \
+  V(InstructionGap)                             \
+  V(Integer32ToDouble)                          \
+  V(InvokeFunction)                             \
+  V(IsConstructCallAndBranch)                   \
+  V(IsNilAndBranch)                             \
+  V(IsObjectAndBranch)                          \
+  V(IsStringAndBranch)                          \
+  V(IsSmiAndBranch)                             \
+  V(IsUndetectableAndBranch)                    \
+  V(StringCompareAndBranch)                     \
+  V(JSArrayLength)                              \
+  V(Label)                                      \
+  V(LazyBailout)                                \
+  V(LoadContextSlot)                            \
+  V(LoadElements)                               \
+  V(LoadExternalArrayPointer)                   \
+  V(LoadFunctionPrototype)                      \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
+  V(LoadKeyedFastDoubleElement)                 \
+  V(LoadKeyedFastElement)                       \
+  V(LoadKeyedGeneric)                           \
+  V(LoadKeyedSpecializedArrayElement)           \
+  V(LoadNamedField)                             \
+  V(LoadNamedFieldPolymorphic)                  \
+  V(LoadNamedGeneric)                           \
+  V(ModI)                                       \
+  V(MulI)                                       \
+  V(NumberTagD)                                 \
+  V(NumberTagI)                                 \
+  V(NumberUntagD)                               \
+  V(ObjectLiteral)                              \
+  V(OsrEntry)                                   \
+  V(OuterContext)                               \
+  V(Parameter)                                  \
+  V(Power)                                      \
+  V(PushArgument)                               \
+  V(Random)                                     \
+  V(RegExpLiteral)                              \
+  V(Return)                                     \
+  V(ShiftI)                                     \
+  V(SmiTag)                                     \
+  V(SmiUntag)                                   \
+  V(StackCheck)                                 \
+  V(StoreContextSlot)                           \
+  V(StoreGlobalCell)                            \
+  V(StoreGlobalGeneric)                         \
+  V(StoreKeyedFastDoubleElement)                \
+  V(StoreKeyedFastElement)                      \
+  V(StoreKeyedGeneric)                          \
+  V(StoreKeyedSpecializedArrayElement)          \
+  V(StoreNamedField)                            \
+  V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
+  V(StringCharCodeAt)                           \
+  V(StringCharFromCode)                         \
+  V(StringLength)                               \
+  V(SubI)                                       \
+  V(TaggedToI)                                  \
+  V(ThisFunction)                               \
+  V(Throw)                                      \
+  V(ToFastProperties)                           \
+  V(TransitionElementsKind)                     \
+  V(Typeof)                                     \
+  V(TypeofIsAndBranch)                          \
+  V(UnaryMathOperation)                         \
+  V(UnknownOSRValue)                            \
+  V(ValueOf)                                    \
+  V(ForInPrepareMap)                            \
+  V(ForInCacheArray)                            \
+  V(CheckMapValue)                              \
+  V(LoadFieldByIndex)                           \
+  V(DateField)                                  \
+  V(WrapReceiver)
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+  virtual Opcode opcode() const { return LInstruction::k##type; } \
+  virtual void CompileToNative(LCodeGen* generator);              \
+  virtual const char* Mnemonic() const { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                     \
+    ASSERT(instr->Is##type());                                    \
+    return reinterpret_cast<L##type*>(instr);                     \
+  }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
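+
+// As an illustration, for LAddI (which pairs
+// DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") with
+// DECLARE_HYDROGEN_ACCESSOR(Add)) the two macros expand to:
+//
+//   virtual Opcode opcode() const { return LInstruction::kAddI; }
+//   virtual void CompileToNative(LCodeGen* generator);
+//   virtual const char* Mnemonic() const { return "add-i"; }
+//   static LAddI* cast(LInstruction* instr) {
+//     ASSERT(instr->IsAddI());
+//     return reinterpret_cast<LAddI*>(instr);
+//   }
+//
+//   HAdd* hydrogen() const {
+//     return HAdd::cast(hydrogen_value());
+//   }
+//
+// so every concrete instruction gets a unique opcode, a checked
+// downcast and typed access to the hydrogen instruction it lowers.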
+
 
 class LInstruction: public ZoneObject {
  public:
-  LInstruction() { }
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        is_call_(false),
+        is_save_doubles_(false) { }
   virtual ~LInstruction() { }
 
-  // Predicates should be generated by macro as in lithium-ia32.h.
-  virtual bool IsLabel() const {
-    UNIMPLEMENTED();
-    return false;
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream) = 0;
+  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
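+
+  // Each tester defined above is a plain opcode comparison; e.g. the
+  // expansion for AddI is:
+  //
+  //   bool IsAddI() const { return opcode() == kAddI; }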
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void set_deoptimization_environment(LEnvironment* env) {
+    deoptimization_environment_.set(env);
   }
-  virtual bool IsOsrEntry() const {
-    UNIMPLEMENTED();
-    return false;
+  LEnvironment* deoptimization_environment() const {
+    return deoptimization_environment_.get();
+  }
+  bool HasDeoptimizationEnvironment() const {
+    return deoptimization_environment_.is_set();
   }
 
-  LPointerMap* pointer_map() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  bool HasPointerMap() const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
-
-  LEnvironment* environment() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  bool HasEnvironment() const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
-
-  virtual bool IsControl() const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  void MarkAsCall() { UNIMPLEMENTED(); }
-  void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
+  void MarkAsCall() { is_call_ = true; }
+  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
 
   // Interface to the register allocator and iterators.
-  bool IsMarkedAsCall() const {
-    UNIMPLEMENTED();
-    return false;
-  }
+  bool IsMarkedAsCall() const { return is_call_; }
+  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
 
-  bool IsMarkedAsSaveDoubles() const {
-    UNIMPLEMENTED();
-    return false;
-  }
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() = 0;
 
-  virtual bool HasResult() const {
-    UNIMPLEMENTED();
-    return false;
-  }
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
 
-  virtual LOperand* result() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  virtual int InputCount() {
-    UNIMPLEMENTED();
-    return 0;
-  }
-
-  virtual LOperand* InputAt(int i) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  virtual int TempCount() {
-    UNIMPLEMENTED();
-    return 0;
-  }
-
-  virtual LOperand* TempAt(int i) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  LOperand* FirstInput() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  LOperand* Output() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
 
 #ifdef DEBUG
-  void VerifyCall() { UNIMPLEMENTED(); }
+  void VerifyCall();
 #endif
+
+ private:
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  SetOncePointer<LEnvironment> deoptimization_environment_;
+  bool is_call_;
+  bool is_save_doubles_;
 };
 
 
-class LGap: public LInstruction {
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction: public LInstruction {
  public:
-  explicit LGap(HBasicBlock* block) { }
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  virtual bool HasResult() const { return R != 0; }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() { return results_[0]; }
 
-  HBasicBlock* block() const {
-    UNIMPLEMENTED();
-    return NULL;
+  int InputCount() { return I; }
+  LOperand* InputAt(int i) { return inputs_[i]; }
+
+  int TempCount() { return T; }
+  LOperand* TempAt(int i) { return temps_[i]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+};
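+
+
+// As an example, a two-input arithmetic instruction with one result and
+// no temps derives from LTemplateInstruction<1, 2, 0>, as LSubI does
+// further down:
+//
+//   class LSubI: public LTemplateInstruction<1, 2, 0> {
+//    public:
+//     LSubI(LOperand* left, LOperand* right) {
+//       inputs_[0] = left;
+//       inputs_[1] = right;
+//     }
+//     ...
+//   };
+//
+// The operands live in the embedded fixed-size arrays, so constructing
+// an instruction never allocates a separate operand list.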
+
+
+class LGap: public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
   }
 
+  // Can't use the DECLARE_CONCRETE_INSTRUCTION macro here because of
+  // sub-classes.
+  virtual bool IsGap() const { return true; }
+  virtual void PrintDataTo(StringStream* stream);
+  static LGap* cast(LInstruction* instr) {
+    ASSERT(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
   enum InnerPosition {
     BEFORE,
     START,
@@ -166,141 +346,2047 @@
     LAST_INNER_POSITION = AFTER
   };
 
-  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
-    UNIMPLEMENTED();
-    return NULL;
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+    return parallel_moves_[pos];
   }
 
   LParallelMove* GetParallelMove(InnerPosition pos)  {
-    UNIMPLEMENTED();
-    return NULL;
+    return parallel_moves_[pos];
   }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
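+
+
+// Parallel moves are created lazily: a gap starts with four NULL slots,
+// and an LParallelMove for a given inner position is only allocated the
+// first time GetOrCreateParallelMove is called for it.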
+
+
+class LInstructionGap: public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto: public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(int block_id) : block_id_(block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int block_id() const { return block_id_; }
+
+ private:
+  int block_id_;
+};
+
+
+class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
 };
 
 
 class LLabel: public LGap {
  public:
-  explicit LLabel(HBasicBlock* block) : LGap(block) { }
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
 };
 
 
-class LOsrEntry: public LInstruction {
+class LParameter: public LTemplateInstruction<1, 0, 0> {
  public:
-  // Function could be generated by a macro as in lithium-ia32.h.
-  static LOsrEntry* cast(LInstruction* instr) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
 
-  LOperand** SpilledRegisterArray() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-  LOperand** SpilledDoubleRegisterArray() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
 
-  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
-    UNIMPLEMENTED();
-  }
-  void MarkSpilledDoubleRegister(int allocation_index,
-                                 LOperand* spill_operand) {
-    UNIMPLEMENTED();
+class LCallStub: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+
+  TranscendentalCache::Type transcendental_type() {
+    return hydrogen()->transcendental_type();
   }
 };
 
 
+class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction: public LTemplateInstruction<0, I, T> {
+ public:
+  virtual bool IsControl() const { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+  int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+  int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+};
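+
+
+// A conditional branch such as LBranch below is an
+// LControlInstruction<1, 0>: zero results, one input, no temps, with the
+// true and false successor block ids read off the underlying hydrogen
+// control instruction.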
+
+
+class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) {
+    inputs_[0] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+ public:
+  LArgumentsElements() { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+};
+
+
+class LModI: public LTemplateInstruction<1, 2, 3> {
+ public:
+  // Used when the right-hand side is a constant power of 2.
+  LModI(LOperand* left,
+        LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = NULL;
+    temps_[1] = NULL;
+    temps_[2] = NULL;
+  }
+
+  // Used for the standard case.
+  LModI(LOperand* left,
+        LOperand* right,
+        LOperand* temp1,
+        LOperand* temp2,
+        LOperand* temp3) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LDivI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LMulI: public LTemplateInstruction<1, 2, 1> {
+ public:
+  LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LCmpIDAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->GetInputRepresentation().IsDouble();
+  }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LUnaryMathOperation(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+  virtual void PrintDataTo(StringStream* stream);
+  BuiltinFunctionId op() const { return hydrogen()->op(); }
+};
+
+
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+                               "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpConstantEqAndBranch(LOperand* left) {
+    inputs_[0] = left;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+                               "cmp-constant-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
+};
+
+
+class LIsNilAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LIsNilAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+
+  EqualityKind kind() const { return hydrogen()->kind(); }
+  NilValue nil() const { return hydrogen()->nil(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LHasInstanceTypeAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCmpT(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInstanceOf(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+                               "instance-of-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+  Handle<JSFunction> function() const { return hydrogen()->function(); }
+};
+
+
+class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+};
+
+
+class LBitI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantD: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantT: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value() const { return hydrogen()->handle(); }
+};
+
+
+class LBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
+ public:
+  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  virtual bool IsControl() const { return true; }
+
+  Handle<Map> map() const { return hydrogen()->map(); }
+  int true_block_id() const {
+    return hydrogen()->FirstSuccessor()->block_id();
+  }
+  int false_block_id() const {
+    return hydrogen()->SecondSuccessor()->block_id();
+  }
+};
+
+
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LJSArrayLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
+  DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
+};
+
+
+class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LFixedArrayBaseLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
+                               "fixed-array-base-length")
+  DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
+};
+
+
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LElementsKind(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
+class LValueOf: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LValueOf(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
+  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+};
+
+
+class LDateField: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
+    inputs_[0] = date;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+  DECLARE_HYDROGEN_ACCESSOR(DateField)
+  Smi* index() const { return index_; }
+
+ private:
+  Smi* index_;
+};
+
+
+class LThrow: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LThrow(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LBitNotI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
+class LAddI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LPower: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LRandom: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LRandom(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Random, "random")
+  DECLARE_HYDROGEN_ACCESSOR(Random)
+};
+
+
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+
+  virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+  Token::Value op() const { return op_; }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LReturn(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedFieldPolymorphic(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
+                               "load-named-field-polymorphic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
+
+  LOperand* object() { return inputs_[0]; }
+};
+
+
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedGeneric(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  LOperand* object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadFunctionPrototype(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+
+  LOperand* function() { return inputs_[0]; }
+};
+
+
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadElements(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadExternalArrayPointer(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
+                               "load-external-array-pointer")
+};
+
+
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+                               "load-keyed-fast-double-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+                                    LOperand* key) {
+    inputs_[0] = external_pointer;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+                               "load-keyed-specialized-array-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+
+  LOperand* external_pointer() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+};
+
+
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadGlobalGeneric(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* global_object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+ public:
+  LStoreGlobalCell(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+
+  LOperand* value() { return inputs_[0]; }
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreGlobalGeneric(LOperand* global_object,
+                      LOperand* value) {
+    inputs_[0] = global_object;
+    inputs_[1] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+  LOperand* global_object() { return InputAt(0); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LOperand* value() { return InputAt(1); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  LOperand* context() { return InputAt(0); }
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  LOperand* context() { return InputAt(0); }
+  LOperand* value() { return InputAt(1); }
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+};
+
+
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LOuterContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+  LOperand* context() { return InputAt(0); }
+};
+
+
+class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGlobalObject(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+  LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGlobalReceiver(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+  LOperand* global() { return InputAt(0); }
+};
+
+
+class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<JSFunction> function() { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInvokeFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* function() { return inputs_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallKeyed(LOperand* key) {
+    inputs_[0] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
+  DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  LOperand* function() { return inputs_[0]; }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<JSFunction> target() const { return hydrogen()->target(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallNew(LOperand* constructor) {
+    inputs_[0] = constructor;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+  DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberTagI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
+ public:
+  LTaggedToI(LOperand* value,
+             LOperand* temp1,
+             LOperand* temp2,
+             LOperand* temp3) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberUntagD(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check)
+      : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+  bool needs_check() const { return needs_check_; }
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreNamedField(LOperand* obj, LOperand* val) {
+    inputs_[0] = obj;
+    inputs_[1] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool is_in_object() { return hydrogen()->is_in_object(); }
+  int offset() { return hydrogen()->offset(); }
+  Handle<Map> transition() const { return hydrogen()->transition(); }
+};
+
+
+class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreNamedGeneric(LOperand* obj, LOperand* val) {
+    inputs_[0] = obj;
+    inputs_[1] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+                               "store-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedFastDoubleElement(LOperand* elements,
+                               LOperand* key,
+                               LOperand* val) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+                               "store-keyed-fast-double-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+                                     LOperand* key,
+                                     LOperand* val) {
+    inputs_[0] = external_pointer;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+                               "store-keyed-specialized-array-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+
+  LOperand* external_pointer() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+};
+
+
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* new_map_temp,
+                          LOperand* temp_reg) {
+    inputs_[0] = object;
+    temps_[0] = new_map_temp;
+    temps_[1] = temp_reg;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_reg() { return temps_[0]; }
+  LOperand* temp_reg() { return temps_[1]; }
+  Handle<Map> original_map() { return hydrogen()->original_map(); }
+  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+};
+
+
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringAdd(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+};
+
+
+class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharCodeAt(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+
+  LOperand* string() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+};
+
+
+class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LStringCharFromCode(LOperand* char_code) {
+    inputs_[0] = char_code;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+
+  LOperand* char_code() { return inputs_[0]; }
+};
+
+
+class LStringLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LStringLength(LOperand* string) {
+    inputs_[0] = string;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
+  DECLARE_HYDROGEN_ACCESSOR(StringLength)
+
+  LOperand* string() { return inputs_[0]; }
+};
+
+
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckFunction(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return InputAt(0); }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
+  DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+};
+
+
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckInstanceType(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckMap(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+};
+
+
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
+ public:
+  LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+  Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
+  Handle<JSObject> holder() const { return hydrogen()->holder(); }
+};
+
+
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+};
+
+
+class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampDToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
+ public:
+  LAllocateObject(LOperand* temp1, LOperand* temp2) {
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
+};
+
+
+class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
+};
+
+
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+};
+
+
+class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+
+  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
+};
+
+
+class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LTypeof(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+ public:
+  explicit LIsConstructCallAndBranch(LOperand* temp) {
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+                               "is-construct-call-and-branch")
+};
+
+
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LDeleteProperty(LOperand* obj, LOperand* key) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
+class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry();
+
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+
+  LOperand** SpilledRegisterArray() { return register_spills_; }
+  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
+
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
+  void MarkSpilledDoubleRegister(int allocation_index,
+                                 LOperand* spill_operand);
+
+ private:
+  // Arrays of spill slot operands for registers with an assigned spill
+  // slot, i.e., that must also be restored to the spill slot on OSR entry.
+  // NULL if the register has no assigned spill slot.  Indexed by allocation
+  // index.
+  LOperand* register_spills_[Register::kNumAllocatableRegisters];
+  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+};
+
+
+class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LIn: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LIn(LOperand* key, LOperand* object) {
+    inputs_[0] = key;
+    inputs_[1] = object;
+  }
+
+  LOperand* key() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(In, "in")
+};
+
+
+class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInPrepareMap(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
-  explicit LChunk(HGraph* graph) { }
+  explicit LChunk(CompilationInfo* info, HGraph* graph);
 
-  HGraph* graph() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
+  void AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  LConstantOperand* DefineConstantOperand(HConstant* constant);
+  Handle<Object> LookupLiteral(LConstantOperand* operand) const;
+  Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
 
-  const ZoneList<LPointerMap*>* pointer_maps() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
+  int GetNextSpillIndex(bool is_double);
+  LOperand* GetNextSpillSlot(bool is_double);
 
-  LOperand* GetNextSpillSlot(bool double_slot) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  LConstantOperand* DefineConstantOperand(HConstant* constant) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
+  int ParameterAt(int index);
+  int GetParameterStackSlot(int index) const;
+  int spill_slot_count() const { return spill_slot_count_; }
+  CompilationInfo* info() const { return info_; }
+  HGraph* graph() const { return graph_; }
+  const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+  void AddGapMove(int index, LOperand* from, LOperand* to);
+  LGap* GetGapAt(int index) const;
+  bool IsGapAt(int index) const;
+  int NearestGapPos(int index) const;
+  void MarkEmptyBlocks();
+  const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
   LLabel* GetLabel(int block_id) const {
-    UNIMPLEMENTED();
-    return NULL;
+    HBasicBlock* block = graph_->blocks()->at(block_id);
+    int first_instruction = block->first_instruction_index();
+    return LLabel::cast(instructions_[first_instruction]);
+  }
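+  // MarkEmptyBlocks redirects the labels of eliminated blocks; follow the
+  // replacement chain to find the surviving destination block.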
+  int LookupDestination(int block_id) const {
+    LLabel* cur = GetLabel(block_id);
+    while (cur->replacement() != NULL) {
+      cur = cur->replacement();
+    }
+    return cur->block_id();
+  }
+  Label* GetAssemblyLabel(int block_id) const {
+    LLabel* label = GetLabel(block_id);
+    ASSERT(!label->HasReplacement());
+    return label->label();
   }
 
-  const ZoneList<LInstruction*>* instructions() const {
-    UNIMPLEMENTED();
-    return NULL;
+  const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+    return &inlined_closures_;
   }
 
-  int GetParameterStackSlot(int index) const {
-    UNIMPLEMENTED();
-    return 0;
+  void AddInlinedClosure(Handle<JSFunction> closure) {
+    inlined_closures_.Add(closure);
   }
 
-  void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
-
-  LGap* GetGapAt(int index) const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  bool IsGapAt(int index) const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  int NearestGapPos(int index) const {
-    UNIMPLEMENTED();
-    return 0;
-  }
-
-  void MarkEmptyBlocks() { UNIMPLEMENTED(); }
-
-  CompilationInfo* info() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-#ifdef DEBUG
-  void Verify() { UNIMPLEMENTED(); }
-#endif
+ private:
+  int spill_slot_count_;
+  CompilationInfo* info_;
+  HGraph* const graph_;
+  ZoneList<LInstruction*> instructions_;
+  ZoneList<LPointerMap*> pointer_maps_;
+  ZoneList<Handle<JSFunction> > inlined_closures_;
 };
 
 
 class LChunkBuilder BASE_EMBEDDED {
  public:
-  LChunkBuilder(CompilationInfo*&, HGraph* graph, LAllocator* allocator) { }
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : chunk_(NULL),
+        info_(info),
+        graph_(graph),
+        zone_(graph->isolate()->zone()),
+        status_(UNUSED),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        argument_count_(0),
+        allocator_(allocator),
+        position_(RelocInfo::kNoPosition),
+        instruction_pending_deoptimization_environment_(NULL),
+        pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
 
   // Build the sequence for the graph.
-  LChunk* Build() {
-    UNIMPLEMENTED();
-    return NULL;
-  };
+  LChunk* Build();
 
   // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
-    UNIMPLEMENTED(); \
-    return NULL; \
-  }
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
   HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
 #undef DECLARE_DO
 
+ private:
+  enum Status {
+    UNUSED,
+    BUILDING,
+    DONE,
+    ABORTED
+  };
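+
+  // A builder starts out UNUSED, switches to BUILDING inside Build(), and
+  // ends up either DONE or, if Abort() was called, ABORTED.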
+
+  LChunk* chunk() const { return chunk_; }
+  CompilationInfo* info() const { return info_; }
+  HGraph* graph() const { return graph_; }
+  Zone* zone() const { return zone_; }
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_building() const { return status_ == BUILDING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  void Abort(const char* format, ...);
+
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the
+  // end of the instruction, so the register allocator will not reuse its
+  // register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at the instruction start; the register allocator is free to assign the
+  // same register to some other operand used inside the instruction
+  // (i.e. a temporary or the output).
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
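+  //
+  // A typical lowering uses these as in the following sketch:
+  //   LOperand* left = UseRegisterAtStart(instr->left());
+  //   LOperand* right = UseRegisterAtStart(instr->right());
+  //   return DefineAsRegister(new LAddI(left, right));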
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand in register, stack slot or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  template<int I, int T>
+      LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
+                           LUnallocated* result);
+  template<int I, int T>
+      LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
+                                    int index);
+  template<int I, int T>
+      LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
+                                Register reg);
+  template<int I, int T>
+      LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
+                                      DoubleRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly, so we do not attach an environment to such
+  // instructions.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+  LInstruction* MarkAsSaveDoubles(LInstruction* instr);
+
+  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+      LInstruction* instr, int ast_id);
+  void ClearInstructionPendingDeoptimizationEnvironment();
+
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator);
+
+  void VisitInstruction(HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+
+  LChunk* chunk_;
+  CompilationInfo* info_;
+  HGraph* const graph_;
+  Zone* zone_;
+  Status status_;
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  int argument_count_;
+  LAllocator* allocator_;
+  int position_;
+  LInstruction* instruction_pending_deoptimization_environment_;
+  int pending_deoptimization_ast_id_;
+
   DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
 };
 
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
 
 } }  // namespace v8::internal
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 1c0af5d..e93a417 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,7 +42,8 @@
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true) {
+      allow_stub_calls_(true),
+      has_frame_(false) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -80,36 +81,16 @@
 }
 
 
-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register address,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, ne, &not_in_new_space);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
-  }
-
-  // Calculate page address: Clear bits from 0 to kPageSizeBits.
-  if (mips32r2) {
-    Ins(object, zero_reg, 0, kPageSizeBits);
+void MacroAssembler::LoadHeapObject(Register result,
+                                    Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    li(result, Operand(cell));
+    lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
   } else {
-    // The Ins macro is slow on r1, so use shifts instead.
-    srl(object, object, kPageSizeBits);
-    sll(object, object, kPageSizeBits);
+    li(result, Operand(object));
   }
-
-  // Calculate region number.
-  Ext(address, address, Page::kRegionSizeLog2,
-      kPageSizeBits - Page::kRegionSizeLog2);
-
-  // Mark region dirty.
-  lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-  li(at, Operand(1));
-  sllv(at, at, address);
-  or_(scratch, scratch, at);
-  sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
 }
 
 
@@ -119,7 +100,9 @@
   // stack, so adjust the stack for unsaved registers.
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
   ASSERT(num_unsaved >= 0);
-  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+  if (num_unsaved > 0) {
+    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+  }
   MultiPush(kSafepointSavedRegisters);
 }
 
@@ -127,7 +110,9 @@
 void MacroAssembler::PopSafepointRegisters() {
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
   MultiPop(kSafepointSavedRegisters);
-  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+  if (num_unsaved > 0) {
+    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+  }
 }
 
 
@@ -180,6 +165,7 @@
 
 
 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+  UNIMPLEMENTED_MIPS();
   // General purpose registers are pushed last on the stack.
   int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
@@ -187,8 +173,6 @@
 }
 
 
-
-
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
                                 Condition cc,
@@ -200,38 +184,53 @@
 }
 
 
-// Will clobber 4 registers: object, scratch0, scratch1, at. The
-// register 'object' contains a heap object pointer.  The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
-                                 Operand offset,
-                                 Register scratch0,
-                                 Register scratch1) {
-  // The compiled code assumes that record write doesn't change the
-  // context register, so we check that none of the clobbered
-  // registers are cp.
-  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
-
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    RAStatus ra_status,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
+  ASSERT(!AreAliased(value, dst, t8, object));
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
   Label done;
 
-  // First, test that the object is not in the new space.  We cannot set
-  // region marks for new space pages.
-  InNewSpace(object, scratch0, eq, &done);
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
 
-  // Add offset into the object.
-  Addu(scratch0, object, offset);
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
 
-  // Record the actual write.
-  RecordWriteHelper(object, scratch0, scratch1);
+  Addu(dst, object, Operand(offset - kHeapObjectTag));
+  if (emit_debug_code()) {
+    Label ok;
+    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
+    Branch(&ok, eq, t8, Operand(zero_reg));
+    stop("Unaligned cell in write barrier");
+    bind(&ok);
+  }
+
+  RecordWrite(object,
+              dst,
+              value,
+              ra_status,
+              save_fp,
+              remembered_set_action,
+              OMIT_SMI_CHECK);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(object, Operand(BitCast<int32_t>(kZapValue)));
-    li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
-    li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
+    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
   }
 }
 
@@ -241,29 +240,102 @@
 // tag is shifted away.
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register scratch) {
+                                 Register value,
+                                 RAStatus ra_status,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
+  ASSERT(!AreAliased(object, address, value, t8));
+  ASSERT(!AreAliased(object, address, value, t9));
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are cp.
-  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+  ASSERT(!address.is(cp) && !value.is(cp));
+
+  if (emit_debug_code()) {
+    lw(at, MemOperand(address));
+    Assert(
+        eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
+  }
 
   Label done;
 
-  // First, test that the object is not in the new space.  We cannot set
-  // region marks for new space pages.
-  InNewSpace(object, scratch, eq, &done);
+  if (smi_check == INLINE_SMI_CHECK) {
+    ASSERT_EQ(0, kSmiTag);
+    JumpIfSmi(value, &done);
+  }
+
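+  // The barrier can be skipped if the page the value points into is not
+  // interesting as a pointer target, or if the page holding the object is
+  // not interesting as a pointer source.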
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                eq,
+                &done);
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                eq,
+                &done);
 
   // Record the actual write.
-  RecordWriteHelper(object, address, scratch);
+  if (ra_status == kRAHasNotBeenSaved) {
+    push(ra);
+  }
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);
+  if (ra_status == kRAHasNotBeenSaved) {
+    pop(ra);
+  }
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(object, Operand(BitCast<int32_t>(kZapValue)));
-    li(address, Operand(BitCast<int32_t>(kZapValue)));
-    li(scratch, Operand(BitCast<int32_t>(kZapValue)));
+    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
+    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+  }
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
+                                         Register address,
+                                         Register scratch,
+                                         SaveFPRegsMode fp_mode,
+                                         RememberedSetFinalAction and_then) {
+  Label done;
+  if (emit_debug_code()) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok);
+    stop("Remembered set pointer is in new space");
+    bind(&ok);
+  }
+  // Load store buffer top.
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  li(t8, Operand(store_buffer));
+  lw(scratch, MemOperand(t8));
+  // Store pointer to buffer and increment buffer top.
+  sw(address, MemOperand(scratch));
+  Addu(scratch, scratch, kPointerSize);
+  // Write back new top of buffer.
+  sw(scratch, MemOperand(t8));
+  // Check for the end of the buffer and call the overflow stub if we hit it.
+  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kFallThroughAtEnd) {
+    Branch(&done, eq, t8, Operand(zero_reg));
+  } else {
+    ASSERT(and_then == kReturnAtEnd);
+    Ret(eq, t8, Operand(zero_reg));
+  }
+  push(ra);
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(fp_mode);
+  CallStub(&store_buffer_overflow);
+  pop(ra);
+  bind(&done);
+  if (and_then == kReturnAtEnd) {
+    Ret();
   }
 }
 
@@ -372,8 +444,10 @@
   xor_(reg0, reg0, at);
 
   // hash = hash * 2057;
-  li(scratch, Operand(2057));
-  mul(reg0, reg0, scratch);
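+  // 2057 = 2^11 + 2^3 + 1, so hash * 2057 can be computed as
+  // (hash << 11) + (hash << 3) + hash without a multiply.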
+  sll(scratch, reg0, 11);
+  sll(at, reg0, 3);
+  addu(reg0, reg0, at);
+  addu(reg0, reg0, scratch);
 
   // hash = hash ^ (hash >> 16);
   srl(at, reg0, 16);
@@ -500,12 +574,22 @@
 
 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
-    mul(rd, rs, rt.rm());
+    if (kArchVariant == kLoongson) {
+      mult(rs, rt.rm());
+      mflo(rd);
+    } else {
+      mul(rd, rs, rt.rm());
+    }
   } else {
     // li handles the relocation.
     ASSERT(!rs.is(at));
     li(at, rt);
-    mul(rd, rs, at);
+    if (kArchVariant == kLoongson) {
+      mult(rs, at);
+      mflo(rd);
+    } else {
+      mul(rd, rs, at);
+    }
   }
 }
 
@@ -660,7 +744,7 @@
 
 
 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
-  if (mips32r2) {
+  if (kArchVariant == kMips32r2) {
     if (rt.is_reg()) {
       rotrv(rd, rs, rt.rm());
     } else {
@@ -684,31 +768,30 @@
   }
 }
 
-
 //------------Pseudo-instructions-------------
 
-void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
+void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
   ASSERT(!j.is_reg());
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  if (!MustUseReg(j.rmode_) && !gen2instr) {
+  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
     // Normal load of an immediate value which does not need Relocation Info.
     if (is_int16(j.imm32_)) {
       addiu(rd, zero_reg, j.imm32_);
     } else if (!(j.imm32_ & kHiMask)) {
       ori(rd, zero_reg, j.imm32_);
     } else if (!(j.imm32_ & kImm16Mask)) {
-      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
     } else {
-      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
       ori(rd, rd, (j.imm32_ & kImm16Mask));
     }
-  } else if (MustUseReg(j.rmode_) || gen2instr) {
+  } else {
     if (MustUseReg(j.rmode_)) {
       RecordRelocInfo(j.rmode_, j.imm32_);
     }
-    // We need always the same number of instructions as we may need to patch
+    // We always need the same number of instructions as we may need to patch
     // this code to load another value, which may need two instructions.
-    lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+    lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
     ori(rd, rd, (j.imm32_ & kImm16Mask));
   }
 }
@@ -719,7 +802,7 @@
   int16_t stack_offset = num_to_push * kPointerSize;
 
   Subu(sp, sp, Operand(stack_offset));
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       stack_offset -= kPointerSize;
       sw(ToRegister(i), MemOperand(sp, stack_offset));
@@ -758,7 +841,7 @@
 void MacroAssembler::MultiPopReversed(RegList regs) {
   int16_t stack_offset = 0;
 
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       lw(ToRegister(i), MemOperand(sp, stack_offset));
       stack_offset += kPointerSize;
@@ -774,7 +857,7 @@
   int16_t stack_offset = num_to_push * kDoubleSize;
 
   Subu(sp, sp, Operand(stack_offset));
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       stack_offset -= kDoubleSize;
       sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
@@ -816,7 +899,7 @@
   CpuFeatures::Scope scope(FPU);
   int16_t stack_offset = 0;
 
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
       stack_offset += kDoubleSize;
@@ -826,6 +909,21 @@
 }
 
 
+void MacroAssembler::FlushICache(Register address, unsigned instructions) {
+  RegList saved_regs = kJSCallerSaved | ra.bit();
+  MultiPush(saved_regs);
+  AllowExternalCallThatCantCauseGC scope(this);
+
+  // Save to a0 in case address == t0.
+  Move(a0, address);
+  PrepareCallCFunction(2, t0);
+
+  li(a1, instructions * kInstrSize);
+  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
+  MultiPop(saved_regs);
+}
+
+
 void MacroAssembler::Ext(Register rt,
                          Register rs,
                          uint16_t pos,
@@ -833,7 +931,7 @@
   ASSERT(pos < 32);
   ASSERT(pos + size < 33);
 
-  if (mips32r2) {
+  if (kArchVariant == kMips32r2) {
     ext_(rt, rs, pos, size);
   } else {
     // Move rs to rt and shift it left then right to get the
@@ -854,34 +952,21 @@
                          uint16_t pos,
                          uint16_t size) {
   ASSERT(pos < 32);
-  ASSERT(pos + size < 32);
+  ASSERT(pos + size <= 32);
+  ASSERT(size != 0);
 
-  if (mips32r2) {
+  if (kArchVariant == kMips32r2) {
     ins_(rt, rs, pos, size);
   } else {
     ASSERT(!rt.is(t8) && !rs.is(t8));
-
-    srl(t8, rt, pos + size);
-    // The left chunk from rt that needs to
-    // be saved is on the right side of t8.
-    sll(at, t8, pos + size);
-    // The 'at' register now contains the left chunk on
-    // the left (proper position) and zeroes.
-    sll(t8, rt, 32 - pos);
-    // t8 now contains the right chunk on the left and zeroes.
-    srl(t8, t8, 32 - pos);
-    // t8 now contains the right chunk on
-    // the right (proper position) and zeroes.
-    or_(rt, at, t8);
-    // rt now contains the left and right chunks from the original rt
-    // in their proper position and zeroes in the middle.
-    sll(t8, rs, 32 - size);
-    // t8 now contains the chunk from rs on the left and zeroes.
-    srl(t8, t8, 32 - size - pos);
-    // t8 now contains the original chunk from rs in
-    // the middle (proper position).
-    or_(rt, rt, t8);
-    // rt now contains the result of the ins instruction in R2 mode.
+    Subu(at, zero_reg, Operand(1));  // at = 0xffffffff.
+    srl(at, at, 32 - size);          // at = mask of 'size' low bits.
+    and_(t8, rs, at);                // t8 = the field bits taken from rs.
+    sll(t8, t8, pos);                // Shift the field into position.
+    sll(at, at, pos);                // Shift the mask into position.
+    nor(at, at, zero_reg);           // Invert the mask.
+    and_(at, rt, at);                // Clear the field's bits in rt.
+    or_(rt, t8, at);                 // Insert the new field into rt.
   }
 }
 
@@ -940,6 +1025,48 @@
   mtc1(t8, fd);
 }
 
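+// With fd == fs on Loongson, these conversions appear to clobber the upper
+// word of the source register pair, so the helpers below save it in t8 and
+// restore it around the conversion.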
+void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
+  if (kArchVariant == kLoongson && fd.is(fs)) {
+    mfc1(t8, FPURegister::from_code(fs.code() + 1));
+    trunc_w_d(fd, fs);
+    mtc1(t8, FPURegister::from_code(fs.code() + 1));
+  } else {
+    trunc_w_d(fd, fs);
+  }
+}
+
+
+void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
+  if (kArchVariant == kLoongson && fd.is(fs)) {
+    mfc1(t8, FPURegister::from_code(fs.code() + 1));
+    round_w_d(fd, fs);
+    mtc1(t8, FPURegister::from_code(fs.code() + 1));
+  } else {
+    round_w_d(fd, fs);
+  }
+}
+
+
+void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
+  if (kArchVariant == kLoongson && fd.is(fs)) {
+    mfc1(t8, FPURegister::from_code(fs.code() + 1));
+    floor_w_d(fd, fs);
+    mtc1(t8, FPURegister::from_code(fs.code() + 1));
+  } else {
+    floor_w_d(fd, fs);
+  }
+}
+
+
+void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
+  if (kArchVariant == kLoongson && fd.is(fs)) {
+    mfc1(t8, FPURegister::from_code(fs.code() + 1));
+    ceil_w_d(fd, fs);
+    mtc1(t8, FPURegister::from_code(fs.code() + 1));
+  } else {
+    ceil_w_d(fd, fs);
+  }
+}
+
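
The four wrappers above share one pattern: when fd aliases fs on Loongson,
the rounding instruction apparently clobbers the odd half of the 64-bit
register pair, so that half is parked in t8 and restored afterwards. A
little-endian host model of the dance (a sketch inferred from the diff, not
a documented Loongson erratum):

#include <cstdint>
#include <cstring>

struct FpuPair { uint32_t even; uint32_t odd; };  // f(code), f(code + 1)

void TruncWDInPlace(FpuPair* reg) {
  double input;
  std::memcpy(&input, reg, sizeof input);  // the pair holds one double
  uint32_t saved_odd = reg->odd;           // mfc1(t8, f(code + 1))
  // trunc_w_d: truncate toward zero (in-range inputs assumed).
  reg->even = static_cast<uint32_t>(static_cast<int32_t>(input));
  reg->odd = saved_odd;                    // mtc1(t8, f(code + 1))
}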
 
 void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                 Register rs,
@@ -952,11 +1079,9 @@
   mtc1(at, FPURegister::from_code(scratch.code() + 1));
   mtc1(zero_reg, scratch);
   // Test if scratch > fd.
-  c(OLT, D, fd, scratch);
-
-  Label simple_convert;
   // If fd < 2^31 we can convert it normally.
-  bc1t(&simple_convert);
+  Label simple_convert;
+  BranchF(&simple_convert, NULL, lt, fd, scratch);
 
   // First we subtract 2^31 from fd, then trunc it to rs
   // and add 2^31 to rs.
@@ -976,6 +1101,200 @@
 }
 
 
+void MacroAssembler::BranchF(Label* target,
+                             Label* nan,
+                             Condition cc,
+                             FPURegister cmp1,
+                             FPURegister cmp2,
+                             BranchDelaySlot bd) {
+  if (cc == al) {
+    Branch(bd, target);
+    return;
+  }
+
+  ASSERT(nan || target);
+  // Check for unordered (NaN) cases.
+  if (nan) {
+    c(UN, D, cmp1, cmp2);
+    bc1t(nan);
+  }
+
+  if (target) {
+    // At this point NaN cases have either been handled above (via the 'nan'
+    // label) or are assumed to have been handled by the caller.
+    // Unsigned conditions are treated as their signed counterpart.
+    switch (cc) {
+      case Uless:
+      case less:
+        c(OLT, D, cmp1, cmp2);
+        bc1t(target);
+        break;
+      case Ugreater:
+      case greater:
+        c(ULE, D, cmp1, cmp2);
+        bc1f(target);
+        break;
+      case Ugreater_equal:
+      case greater_equal:
+        c(ULT, D, cmp1, cmp2);
+        bc1f(target);
+        break;
+      case Uless_equal:
+      case less_equal:
+        c(OLE, D, cmp1, cmp2);
+        bc1t(target);
+        break;
+      case eq:
+        c(EQ, D, cmp1, cmp2);
+        bc1t(target);
+        break;
+      case ne:
+        c(EQ, D, cmp1, cmp2);
+        bc1f(target);
+        break;
+      default:
+        CHECK(0);
+    }
+  }
+
+  if (bd == PROTECT) {
+    nop();
+  }
+}
+
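
BranchF lowers each condition to one FPU compare plus bc1t/bc1f; the
'greater' family inverts an unordered compare so that NaN operands fall
through instead of branching when no 'nan' label was supplied. A host-side
truth table of the taken-branch predicate (sketch):

#include <cmath>
#include <cstring>

// 'un' is the unordered (NaN) case of each compare/branch pair.
bool BranchFTaken(const char* cond, double a, double b) {
  bool un = std::isnan(a) || std::isnan(b);
  if (!std::strcmp(cond, "less"))          return !un && a < b;     // OLT, bc1t
  if (!std::strcmp(cond, "greater"))       return !(un || a <= b);  // ULE, bc1f
  if (!std::strcmp(cond, "greater_equal")) return !(un || a < b);   // ULT, bc1f
  if (!std::strcmp(cond, "less_equal"))    return !un && a <= b;    // OLE, bc1t
  if (!std::strcmp(cond, "eq"))            return !un && a == b;    // EQ, bc1t
  return un || a != b;                                              // ne: EQ, bc1f
}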
+
+void MacroAssembler::Move(FPURegister dst, double imm) {
+  ASSERT(CpuFeatures::IsEnabled(FPU));
+  static const DoubleRepresentation minus_zero(-0.0);
+  static const DoubleRepresentation zero(0.0);
+  DoubleRepresentation value(imm);
+  // Handle special values first.
+  bool force_load = dst.is(kDoubleRegZero);
+  if (value.bits == zero.bits && !force_load) {
+    mov_d(dst, kDoubleRegZero);
+  } else if (value.bits == minus_zero.bits && !force_load) {
+    neg_d(dst, kDoubleRegZero);
+  } else {
+    uint32_t lo, hi;
+    DoubleAsTwoUInt32(imm, &lo, &hi);
+    // Move the low part of the double into the lower of the corresponding FPU
+    // register of FPU register pair.
+    if (lo != 0) {
+      li(at, Operand(lo));
+      mtc1(at, dst);
+    } else {
+      mtc1(zero_reg, dst);
+    }
+    // Move the high part of the double into the higher of the corresponding FPU
+    // register of FPU register pair.
+    if (hi != 0) {
+      li(at, Operand(hi));
+      mtc1(at, dst.high());
+    } else {
+      mtc1(zero_reg, dst.high());
+    }
+  }
+}
+
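
Loading a double immediate splits it into two 32-bit halves, one per half of
the FPU register pair, with mtc1 of zero_reg replacing the li when a half is
zero. What DoubleAsTwoUInt32 boils down to, as a host-side sketch:

#include <cstdint>
#include <cstring>

void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);      // type-pun without aliasing UB
  *lo = static_cast<uint32_t>(bits);        // moved to dst via mtc1
  *hi = static_cast<uint32_t>(bits >> 32);  // moved to dst.high()
}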
+
+void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
+  if (kArchVariant == kLoongson) {
+    Label done;
+    Branch(&done, ne, rt, Operand(zero_reg));
+    mov(rd, rs);
+    bind(&done);
+  } else {
+    movz(rd, rs, rt);
+  }
+}
+
+
+void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
+  if (kArchVariant == kLoongson) {
+    Label done;
+    Branch(&done, eq, rt, Operand(zero_reg));
+    mov(rd, rs);
+    bind(&done);
+  } else {
+    movn(rd, rs, rt);
+  }
+}
+
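
On non-Loongson variants these are single conditional-move instructions; the
Loongson fallback gets the same effect with a short branch over a mov. Their
semantics in plain C++ (sketch):

#include <cstdint>

uint32_t Movz(uint32_t rd, uint32_t rs, uint32_t rt) { return rt == 0 ? rs : rd; }
uint32_t Movn(uint32_t rd, uint32_t rs, uint32_t rt) { return rt != 0 ? rs : rd; }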
+
+void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
+  if (kArchVariant == kLoongson) {
+    // Tests an FP condition code and then conditionally moves rs to rd.
+    // We do not currently use any FPU cc bit other than bit 0.
+    ASSERT(cc == 0);
+    ASSERT(!(rs.is(t8) || rd.is(t8)));
+    Label done;
+    Register scratch = t8;
+    // To perform the test we need to fetch the contents of the FCSR register
+    // and then test its cc (floating point condition code) bit (for cc = 0,
+    // this is bit 23 of the FCSR).
+    cfc1(scratch, FCSR);
+    // For the MIPS I, II and III architectures, the contents of scratch are
+    // UNPREDICTABLE for the instruction immediately following CFC1.
+    nop();
+    srl(scratch, scratch, 16);
+    andi(scratch, scratch, 0x0080);
+    Branch(&done, eq, scratch, Operand(zero_reg));
+    mov(rd, rs);
+    bind(&done);
+  } else {
+    movt(rd, rs, cc);
+  }
+}
+
+
+void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
+  if (kArchVariant == kLoongson) {
+    // Tests an FP condition code and then conditionally moves rs to rd.
+    // We do not currently use any FPU cc bit other than bit 0.
+    ASSERT(cc == 0);
+    ASSERT(!(rs.is(t8) || rd.is(t8)));
+    Label done;
+    Register scratch = t8;
+    // To perform the test we need to fetch the contents of the FCSR register
+    // and then test its cc (floating point condition code) bit (for cc = 0,
+    // this is bit 23 of the FCSR).
+    cfc1(scratch, FCSR);
+    // For the MIPS I, II and III architectures, the contents of scratch are
+    // UNPREDICTABLE for the instruction immediately following CFC1.
+    nop();
+    srl(scratch, scratch, 16);
+    andi(scratch, scratch, 0x0080);
+    Branch(&done, ne, scratch, Operand(zero_reg));
+    mov(rd, rs);
+    bind(&done);
+  } else {
+    movf(rd, rs, cc);
+  }
+}
+
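
Movt and Movf test FCSR condition-code bit 0 identically: shift right by 16,
then mask with 0x0080, which selects bit 23 of the FCSR. As a host-side
check (sketch):

#include <cstdint>

// True when FCSR condition code 0 (bit 23) is set.
bool FcsrCc0(uint32_t fcsr) {
  return ((fcsr >> 16) & 0x0080u) != 0;
}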
+
+void MacroAssembler::Clz(Register rd, Register rs) {
+  if (kArchVariant == kLoongson) {
+    ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
+    Register mask = t8;
+    Register scratch = t9;
+    Label loop, end;
+    mov(at, rs);
+    mov(rd, zero_reg);
+    lui(mask, 0x8000);
+    bind(&loop);
+    and_(scratch, at, mask);
+    Branch(&end, ne, scratch, Operand(zero_reg));
+    addiu(rd, rd, 1);
+    Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
+    srl(mask, mask, 1);
+    bind(&end);
+  } else {
+    clz(rd, rs);
+  }
+}
+
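
The Loongson count-leading-zeros loop walks a one-bit mask down from the MSB,
counting until it overlaps a set bit. Its effect for non-zero inputs in C++
(sketch; the caller below only tests the result against zero, so the x == 0
case is immaterial):

#include <cstdint>

int ClzEmulated(uint32_t x) {  // assumes x != 0
  int count = 0;
  for (uint32_t mask = 0x80000000u; (x & mask) == 0; mask >>= 1) {
    ++count;  // addiu(rd, rd, 1)
  }
  return count;
}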
+
 // Tries to get a signed int32 out of a double precision floating point heap
 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
 // 32bits signed integer range.
@@ -1008,7 +1327,7 @@
   Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
 
   // We know the exponent is smaller than 30 (biased).  If it is less than
-  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
   // it rounds to zero.
   const uint32_t zero_exponent =
       (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
@@ -1066,14 +1385,61 @@
     subu(scratch2, zero_reg, scratch);
     // Trick to check sign bit (msb) held in dest, count leading zero.
     // 0 indicates negative, save negative version with conditional move.
-    clz(dest, dest);
-    movz(scratch, scratch2, dest);
+    Clz(dest, dest);
+    Movz(scratch, scratch2, dest);
     mov(dest, scratch);
   }
   bind(&done);
 }
 
 
+void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
+                                     FPURegister result,
+                                     DoubleRegister double_input,
+                                     Register scratch1,
+                                     Register except_flag,
+                                     CheckForInexactConversion check_inexact) {
+  ASSERT(CpuFeatures::IsSupported(FPU));
+  CpuFeatures::Scope scope(FPU);
+
+  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
+
+  if (check_inexact == kDontCheckForInexactConversion) {
+    // Ignore inexact exceptions.
+    except_mask &= ~kFCSRInexactFlagMask;
+  }
+
+  // Save FCSR.
+  cfc1(scratch1, FCSR);
+  // Disable FPU exceptions.
+  ctc1(zero_reg, FCSR);
+
+  // Do operation based on rounding mode.
+  switch (rounding_mode) {
+    case kRoundToNearest:
+      Round_w_d(result, double_input);
+      break;
+    case kRoundToZero:
+      Trunc_w_d(result, double_input);
+      break;
+    case kRoundToPlusInf:
+      Ceil_w_d(result, double_input);
+      break;
+    case kRoundToMinusInf:
+      Floor_w_d(result, double_input);
+      break;
+  }  // End of switch-statement.
+
+  // Retrieve FCSR.
+  cfc1(except_flag, FCSR);
+  // Restore FCSR.
+  ctc1(scratch1, FCSR);
+
+  // Check for fpu exceptions.
+  And(except_flag, except_flag, Operand(except_mask));
+}
+
+
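
EmitFPUTruncate saves the FCSR, rounds with FPU exceptions disabled, then
reads back the accrued flag bits, masking out inexact when the caller opted
out. The standard <cfenv> machinery expresses the same protocol on a host
(sketch; FE_TONEAREST/FE_TOWARDZERO/FE_UPWARD/FE_DOWNWARD stand in for the
four kRoundTo* modes, and the input is assumed to fit in an int):

#include <cfenv>
#include <cmath>

int TruncateWithFlags(double d, int rounding_mode, bool check_inexact,
                      int* except) {
  int saved_mode = std::fegetround();       // cfc1(scratch1, FCSR)
  std::fesetround(rounding_mode);
  std::feclearexcept(FE_ALL_EXCEPT);        // ctc1(zero_reg, FCSR)
  int result = static_cast<int>(std::rint(d));  // Round/Trunc/Ceil/Floor_w_d
  int mask = FE_ALL_EXCEPT;
  if (!check_inexact) mask &= ~FE_INEXACT;  // ignore inexact exceptions
  *except = std::fetestexcept(mask);        // cfc1 + And(except_flag, mask)
  std::fesetround(saved_mode);              // ctc1(scratch1, FCSR)
  return result;
}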
 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                  Register input_high,
                                                  Register input_low,
@@ -1087,7 +1453,7 @@
 
   // Check for Infinity and NaNs, which should return 0.
   Subu(scratch, result, HeapNumber::kExponentMask);
-  movz(result, zero_reg, scratch);
+  Movz(result, zero_reg, scratch);
   Branch(&done, eq, scratch, Operand(zero_reg));
 
   // Express exponent as delta to (number of mantissa bits + 31).
@@ -1151,7 +1517,7 @@
   result = sign;
   sign = no_reg;
   Subu(result, zero_reg, input_high);
-  movz(result, input_high, scratch);
+  Movz(result, input_high, scratch);
   bind(&done);
 }
 
@@ -1160,22 +1526,21 @@
                                       FPURegister double_input,
                                       FPURegister single_scratch,
                                       Register scratch,
-                                      Register input_high,
-                                      Register input_low) {
+                                      Register scratch2,
+                                      Register scratch3) {
   CpuFeatures::Scope scope(FPU);
-  ASSERT(!input_high.is(result));
-  ASSERT(!input_low.is(result));
-  ASSERT(!input_low.is(input_high));
+  ASSERT(!scratch2.is(result));
+  ASSERT(!scratch3.is(result));
+  ASSERT(!scratch3.is(scratch2));
   ASSERT(!scratch.is(result) &&
-         !scratch.is(input_high) &&
-         !scratch.is(input_low));
+         !scratch.is(scratch2) &&
+         !scratch.is(scratch3));
   ASSERT(!single_scratch.is(double_input));
 
   Label done;
   Label manual;
 
   // Clear cumulative exception flags and save the FCSR.
-  Register scratch2 = input_high;
   cfc1(scratch2, FCSR);
   ctc1(zero_reg, FCSR);
   // Try a conversion to a signed integer.
@@ -1192,6 +1557,8 @@
   Branch(&done, eq, scratch, Operand(zero_reg));
 
   // Load the double value and perform a manual truncation.
+  Register input_high = scratch2;
+  Register input_low = scratch3;
   Move(input_low, input_high, double_input);
   EmitOutOfInt32RangeTruncate(result,
                               input_high,
@@ -1223,15 +1590,6 @@
     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
 
 
-bool MacroAssembler::UseAbsoluteCodePointers() {
-  if (is_trampoline_emitted()) {
-    return true;
-  } else {
-    return false;
-  }
-}
-
-
 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
   BranchShort(offset, bdslot);
 }
@@ -1245,11 +1603,18 @@
 
 
 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Jr(L, bdslot);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchShort(L, bdslot);
+    } else {
+      Jr(L, bdslot);
+    }
   } else {
-    BranchShort(L, bdslot);
+    if (is_trampoline_emitted()) {
+      Jr(L, bdslot);
+    } else {
+      BranchShort(L, bdslot);
+    }
   }
 }
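
The rewrite above splits the long-versus-short decision: a bound label's
distance is known exactly, so is_near() decides; an unbound label is only
assumed to be far once a trampoline pool has been emitted. The decision in
miniature (sketch):

// Long jump (Jr) versus short branch (BranchShort), as chosen above.
bool UseLongBranch(bool label_bound, bool label_near, bool trampoline_emitted) {
  return label_bound ? !label_near : trampoline_emitted;
}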
 
@@ -1257,19 +1622,40 @@
 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                             const Operand& rt,
                             BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Label skip;
-    Condition neg_cond = NegateCondition(cond);
-    BranchShort(&skip, neg_cond, rs, rt);
-    Jr(L, bdslot);
-    bind(&skip);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchShort(L, cond, rs, rt, bdslot);
+    } else {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jr(L, bdslot);
+      bind(&skip);
+    }
   } else {
-    BranchShort(L, cond, rs, rt, bdslot);
+    if (is_trampoline_emitted()) {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jr(L, bdslot);
+      bind(&skip);
+    } else {
+      BranchShort(L, cond, rs, rt, bdslot);
+    }
   }
 }
 
 
+void MacroAssembler::Branch(Label* L,
+                            Condition cond,
+                            Register rs,
+                            Heap::RootListIndex index,
+                            BranchDelaySlot bdslot) {
+  LoadRoot(at, index);
+  Branch(L, cond, rs, Operand(at), bdslot);
+}
+
+
 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
   b(offset);
 
@@ -1288,8 +1674,8 @@
   Register scratch = at;
 
   if (rt.is_reg()) {
-    // We don't want any other register but scratch clobbered.
-    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
+    // NOTE: 'at' can be clobbered by Branch, but it is still legal to use
+    // it as the rs or rt operand here.
     r2 = rt.rm_;
     switch (cond) {
       case cc_always:
@@ -1791,11 +2177,18 @@
 
 
 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Jalr(L, bdslot);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchAndLinkShort(L, bdslot);
+    } else {
+      Jalr(L, bdslot);
+    }
   } else {
-    BranchAndLinkShort(L, bdslot);
+    if (is_trampoline_emitted()) {
+      Jalr(L, bdslot);
+    } else {
+      BranchAndLinkShort(L, bdslot);
+    }
   }
 }
 
@@ -1803,15 +2196,26 @@
 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                    const Operand& rt,
                                    BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Label skip;
-    Condition neg_cond = NegateCondition(cond);
-    BranchShort(&skip, neg_cond, rs, rt);
-    Jalr(L, bdslot);
-    bind(&skip);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    } else {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jalr(L, bdslot);
+      bind(&skip);
+    }
   } else {
-    BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    if (is_trampoline_emitted()) {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jalr(L, bdslot);
+      bind(&skip);
+    } else {
+      BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    }
   }
 }
 
@@ -2043,8 +2447,15 @@
                           Register rs,
                           const Operand& rt,
                           BranchDelaySlot bd) {
+  Label skip;
+  if (cond != cc_always) {
+    Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
+  }
+  // The first instruction of 'li' may be placed in the branch delay slot.
+  // This is not an issue; t9 is expected to be clobbered anyway.
   li(t9, Operand(target, rmode));
-  Jump(t9, cond, rs, rt, bd);
+  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
+  bind(&skip);
 }
 
 
@@ -2139,7 +2550,7 @@
   // Must record previous source positions before the
   // li() generates a new code target.
   positions_recorder()->WriteRecordedPositions();
-  li(t9, Operand(target_int, rmode), true);
+  li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
   Call(t9, cond, rs, rt, bd);
   ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
             SizeOfCodeGeneratedSince(&start));
@@ -2174,7 +2585,7 @@
     rmode = RelocInfo::CODE_TARGET_WITH_ID;
   }
   Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
-  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
+  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
             SizeOfCodeGeneratedSince(&start));
 }
 
@@ -2244,14 +2655,16 @@
     nop();
 }
 
+void MacroAssembler::DropAndRet(int drop) {
+  Ret(USE_DELAY_SLOT);
+  addiu(sp, sp, drop * kPointerSize);
+}
 
 void MacroAssembler::DropAndRet(int drop,
                                 Condition cond,
                                 Register r1,
                                 const Operand& r2) {
-  // This is a workaround to make sure only one branch instruction is
-  // generated. It relies on Drop and Ret not creating branches if
-  // cond == cc_always.
+  // Both Drop and Ret need to be conditional.
   Label skip;
   if (cond != cc_always) {
     Branch(&skip, NegateCondition(cond), r1, r2);
@@ -2318,10 +2731,10 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 void MacroAssembler::DebugBreak() {
-  ASSERT(allow_stub_calls());
-  mov(a0, zero_reg);
-  li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+  PrepareCEntryArgs(0);
+  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
   CEntryStub ces(1);
+  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 
@@ -2331,61 +2744,43 @@
 // ---------------------------------------------------------------------------
 // Exception handling.
 
-void MacroAssembler::PushTryHandler(CodeLocation try_location,
-                                    HandlerType type) {
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+                                    int handler_index) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // The return address is passed in register ra.
-  if (try_location == IN_JAVASCRIPT) {
-    if (type == TRY_CATCH_HANDLER) {
-      li(t0, Operand(StackHandler::TRY_CATCH));
-    } else {
-      li(t0, Operand(StackHandler::TRY_FINALLY));
-    }
-    // Save the current handler as the next handler.
-    li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
-    lw(t1, MemOperand(t2));
+  // For the JSEntry handler, we must preserve a0-a3 and s0.
+  // t1-t3 are available. We will build up the handler from the bottom by
+  // pushing on the stack.
+  // Set up the code object (t1) and the state (t2) for pushing.
+  unsigned state =
+      StackHandler::IndexField::encode(handler_index) |
+      StackHandler::KindField::encode(kind);
+  li(t1, Operand(CodeObject()), CONSTANT_SIZE);
+  li(t2, Operand(state));
 
-    addiu(sp, sp, -StackHandlerConstants::kSize);
-    sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
-    sw(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
-    sw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
-    sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
-    sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
-    // Link this handler as the new current one.
-    sw(sp, MemOperand(t2));
-
+  // Push the frame pointer, context, state, and code object.
+  if (kind == StackHandler::JS_ENTRY) {
+    ASSERT_EQ(Smi::FromInt(0), 0);
+    // The second zero_reg indicates no context.
+    // The first zero_reg is the NULL frame pointer.
+    // The operands are reversed to match the order of MultiPush/Pop.
+    Push(zero_reg, zero_reg, t2, t1);
   } else {
-    // Must preserve a0-a3, and s0 (argv).
-    ASSERT(try_location == IN_JS_ENTRY);
-    // The frame pointer does not point to a JS frame so we save NULL
-    // for fp. We expect the code throwing an exception to check fp
-    // before dereferencing it to restore the context.
-    li(t0, Operand(StackHandler::ENTRY));
-
-    // Save the current handler as the next handler.
-    li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
-    lw(t1, MemOperand(t2));
-
-    ASSERT(Smi::FromInt(0) == 0);  // Used for no context.
-
-    addiu(sp, sp, -StackHandlerConstants::kSize);
-    sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
-    sw(zero_reg, MemOperand(sp, StackHandlerConstants::kFPOffset));
-    sw(zero_reg, MemOperand(sp, StackHandlerConstants::kContextOffset));
-    sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
-    sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
-    // Link this handler as the new current one.
-    sw(sp, MemOperand(t2));
+    MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
   }
+
+  // Link the current handler as the next handler.
+  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  lw(t1, MemOperand(t2));
+  push(t1);
+  // Set this new handler as the current one.
+  sw(sp, MemOperand(t2));
 }
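
The handler word packs the handler-table index above the kind bits, and the
handler itself is built bottom-up: fp and cp (or zeros for JS_ENTRY), state,
code object, then the old next-handler pointer. A sketch of the encoding
(the field width here is an illustrative assumption; V8 defines the real
layout via StackHandler::IndexField and KindField):

#include <cstdint>

const unsigned kKindWidth = 2;  // assumed width of the kind field
unsigned EncodeHandlerState(unsigned index, unsigned kind) {
  return (index << kKindWidth) | kind;
}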
 
 
@@ -2398,19 +2793,36 @@
 }
 
 
-void MacroAssembler::Throw(Register value) {
-  // v0 is expected to hold the exception.
-  Move(v0, value);
+void MacroAssembler::JumpToHandlerEntry() {
+  // Compute the handler entry address and jump to it.  The handler table is
+  // a fixed array of (smi-tagged) code offsets.
+  // v0 = exception, a1 = code object, a2 = state.
+  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));  // Handler table.
+  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  srl(a2, a2, StackHandler::kKindWidth);  // Handler index.
+  sll(a2, a2, kPointerSizeLog2);
+  Addu(a2, a3, a2);
+  lw(a2, MemOperand(a2));  // Smi-tagged offset.
+  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
+  sra(t9, a2, kSmiTagSize);
+  Addu(t9, t9, a1);
+  Jump(t9);  // Jump.
+}
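
At this point a2 still holds the packed state, so shifting off the kind bits
leaves the handler-table index, and the table entry is a smi-tagged offset
into the code object. The same arithmetic on a host (sketch; kSmiTagSize is
1 on 32-bit targets):

#include <cstdint>

uintptr_t HandlerEntry(uintptr_t code_start, const int32_t* handler_table,
                       unsigned state, unsigned kind_width) {
  unsigned index = state >> kind_width;       // srl(a2, a2, kKindWidth)
  int32_t smi_offset = handler_table[index];  // lw of the smi-tagged offset
  return code_start + (smi_offset >> 1);      // sra(t9, a2, kSmiTagSize); Addu
}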
 
+
+void MacroAssembler::Throw(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // Drop the sp to the top of the handler.
+  // The exception is expected in v0.
+  Move(v0, value);
+
+  // Drop the stack pointer to the top of the top handler.
   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
                                    isolate())));
   lw(sp, MemOperand(a3));
@@ -2419,132 +2831,60 @@
   pop(a2);
   sw(a2, MemOperand(a3));
 
-  // Restore context and frame pointer, discard state (a3).
-  MultiPop(a3.bit() | cp.bit() | fp.bit());
+  // Get the code object (a1) and state (a2).  Restore the context and frame
+  // pointer.
+  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
 
   // If the handler is a JS frame, restore the context to the frame.
-  // (a3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
-  // of them.
+  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+  // or cp.
   Label done;
-  Branch(&done, eq, fp, Operand(zero_reg));
+  Branch(&done, eq, cp, Operand(zero_reg));
   sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   bind(&done);
 
-#ifdef DEBUG
-  // When emitting debug_code, set ra as return address for the jump.
-  // 5 instructions: add: 1, pop: 2, jump: 2.
-  const int kOffsetRaInstructions = 5;
-  Label find_ra;
-
-  if (emit_debug_code()) {
-    // Compute ra for the Jump(t9).
-    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
-
-    // This branch-and-link sequence is needed to get the current PC on mips,
-    // saved to the ra register. Then adjusted for instruction count.
-    bal(&find_ra);  // bal exposes branch-delay.
-    nop();  // Branch delay slot nop.
-    bind(&find_ra);
-    addiu(ra, ra, kOffsetRaBytes);
-  }
-#endif
-
-  pop(t9);  // 2 instructions: lw, add sp.
-  Jump(t9);  // 2 instructions: jr, nop (in delay slot).
-
-  if (emit_debug_code()) {
-    // Make sure that the expected number of instructions were generated.
-    ASSERT_EQ(kOffsetRaInstructions,
-              InstructionsGeneratedSince(&find_ra));
-  }
+  JumpToHandlerEntry();
 }
 
 
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
-                                      Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // v0 is expected to hold the exception.
-  Move(v0, value);
-
-  // Drop sp to the top stack handler.
+  // The exception is expected in v0.
+  if (!value.is(v0)) {
+    mov(v0, value);
+  }
+  // Drop the stack pointer to the top of the top stack handler.
   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   lw(sp, MemOperand(a3));
 
   // Unwind the handlers until the ENTRY handler is found.
-  Label loop, done;
-  bind(&loop);
-  // Load the type of the current stack handler.
-  const int kStateOffset = StackHandlerConstants::kStateOffset;
-  lw(a2, MemOperand(sp, kStateOffset));
-  Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
-  // Fetch the next handler in the list.
-  const int kNextOffset = StackHandlerConstants::kNextOffset;
-  lw(sp, MemOperand(sp, kNextOffset));
-  jmp(&loop);
-  bind(&done);
+  Label fetch_next, check_kind;
+  jmp(&check_kind);
+  bind(&fetch_next);
+  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
 
-  // Set the top handler address to next handler past the current ENTRY handler.
+  bind(&check_kind);
+  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
+  And(a2, a2, Operand(StackHandler::KindField::kMask));
+  Branch(&fetch_next, ne, a2, Operand(zero_reg));
+
+  // Set the top handler address to the next handler past the top ENTRY
+  // handler.
   pop(a2);
   sw(a2, MemOperand(a3));
 
-  if (type == OUT_OF_MEMORY) {
-    // Set external caught exception to false.
-    ExternalReference external_caught(
-           Isolate::kExternalCaughtExceptionAddress, isolate());
-    li(a0, Operand(false, RelocInfo::NONE));
-    li(a2, Operand(external_caught));
-    sw(a0, MemOperand(a2));
+  // Get the code object (a1) and state (a2).  Clear the context and frame
+  // pointer (0 was saved in the handler).
+  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
 
-    // Set pending exception and v0 to out of memory exception.
-    Failure* out_of_memory = Failure::OutOfMemoryException();
-    li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
-    li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                        isolate())));
-    sw(v0, MemOperand(a2));
-  }
-
-  // Stack layout at this point. See also StackHandlerConstants.
-  // sp ->   state (ENTRY)
-  //         cp
-  //         fp
-  //         ra
-
-  // Restore context and frame pointer, discard state (r2).
-  MultiPop(a2.bit() | cp.bit() | fp.bit());
-
-#ifdef DEBUG
-  // When emitting debug_code, set ra as return address for the jump.
-  // 5 instructions: add: 1, pop: 2, jump: 2.
-  const int kOffsetRaInstructions = 5;
-  Label find_ra;
-
-  if (emit_debug_code()) {
-    // Compute ra for the Jump(t9).
-    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
-
-    // This branch-and-link sequence is needed to get the current PC on mips,
-    // saved to the ra register. Then adjusted for instruction count.
-    bal(&find_ra);  // bal exposes branch-delay slot.
-    nop();  // Branch delay slot nop.
-    bind(&find_ra);
-    addiu(ra, ra, kOffsetRaBytes);
-  }
-#endif
-  pop(t9);  // 2 instructions: lw, add sp.
-  Jump(t9);  // 2 instructions: jr, nop (in delay slot).
-
-  if (emit_debug_code()) {
-    // Make sure that the expected number of instructions were generated.
-    ASSERT_EQ(kOffsetRaInstructions,
-              InstructionsGeneratedSince(&find_ra));
-  }
+  JumpToHandlerEntry();
 }
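
The rewritten unwinder no longer compares against a kind value; it masks the
kind bits and loops until they are zero, relying on JS_ENTRY being encoded
as 0. In list form (sketch):

// Walk the chained handlers until one whose kind is JS_ENTRY (== 0).
struct StackHandlerModel { StackHandlerModel* next; unsigned state; };

StackHandlerModel* UnwindToJsEntry(StackHandlerModel* top, unsigned kind_mask) {
  while ((top->state & kind_mask) != 0) {  // And + Branch(ne) above
    top = top->next;                       // lw(sp, ...kNextOffset)
  }
  return top;
}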
 
 
@@ -2647,6 +2987,7 @@
   ASSERT(!result.is(scratch1));
   ASSERT(!result.is(scratch2));
   ASSERT(!scratch1.is(scratch2));
+  ASSERT(!object_size.is(t9));
   ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
 
   // Check relative positions of allocation top and limit addresses.
@@ -2984,26 +3325,185 @@
 }
 
 
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
+  Branch(&entry);
+  bind(&loop);
+  sw(filler, MemOperand(start_offset));
+  Addu(start_offset, start_offset, kPointerSize);
+  bind(&entry);
+  Branch(&loop, lt, start_offset, Operand(end_offset));
+}
+
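
The loop branches to its entry test first, so an empty range stores nothing.
Host-side equivalent (sketch):

#include <cstdint>

// Store 'filler' at each word in [start, end); zero iterations when
// start >= end, matching the branch-to-entry structure above.
void FillWords(uint32_t* start, uint32_t* end, uint32_t filler) {
  for (; start < end; ++start) *start = filler;
}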
+
 void MacroAssembler::CheckFastElements(Register map,
                                        Register scratch,
                                        Label* fail) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
 }
 
 
+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Register scratch,
+                                             Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  Branch(fail, ls, scratch,
+         Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  Branch(fail, hi, scratch,
+         Operand(Map::kMaximumBitField2FastElementValue));
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Register scratch,
+                                              Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  Branch(fail, hi, scratch,
+         Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+                                                 Register key_reg,
+                                                 Register receiver_reg,
+                                                 Register elements_reg,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Register scratch3,
+                                                 Register scratch4,
+                                                 Label* fail) {
+  Label smi_value, maybe_nan, have_double_value, is_nan, done;
+  Register mantissa_reg = scratch2;
+  Register exponent_reg = scratch3;
+
+  // Handle smi values specially.
+  JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+  CheckMap(value_reg,
+           scratch1,
+           Heap::kHeapNumberMapRootIndex,
+           fail,
+           DONT_DO_SMI_CHECK);
+
+  // Check for NaN: NaN and Infinity both have an upper word that compares
+  // greater than or equal (signed) to 0x7ff00000; they are told apart below.
+  li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
+
+  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  bind(&have_double_value);
+  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  Addu(scratch1, scratch1, elements_reg);
+  sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  sw(exponent_reg, FieldMemOperand(scratch1, offset));
+  jmp(&done);
+
+  bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
+  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
+  bind(&is_nan);
+  // Load canonical NaN for storing into the double array.
+  uint64_t nan_int64 = BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+  li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+  li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+  jmp(&have_double_value);
+
+  bind(&smi_value);
+  Addu(scratch1, elements_reg,
+      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  Addu(scratch1, scratch1, scratch2);
+  // scratch1 is now the effective address of the double element.
+
+  FloatingPointHelper::Destination destination;
+  if (CpuFeatures::IsSupported(FPU)) {
+    destination = FloatingPointHelper::kFPURegisters;
+  } else {
+    destination = FloatingPointHelper::kCoreRegisters;
+  }
+
+  Register untagged_value = receiver_reg;
+  SmiUntag(untagged_value, value_reg);
+  FloatingPointHelper::ConvertIntToDouble(this,
+                                          untagged_value,
+                                          destination,
+                                          f0,
+                                          mantissa_reg,
+                                          exponent_reg,
+                                          scratch4,
+                                          f2);
+  if (destination == FloatingPointHelper::kFPURegisters) {
+    CpuFeatures::Scope scope(FPU);
+    sdc1(f0, MemOperand(scratch1, 0));
+  } else {
+    sw(mantissa_reg, MemOperand(scratch1, 0));
+    sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
+  }
+  bind(&done);
+}
+
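
The indexing above scales a smi key straight into a byte offset: the key is
already value << 1 and each element is 8 bytes, so a single shift by
kDoubleSizeLog2 - kSmiTagSize == 2 both untags and scales. Sketch:

#include <cstdint>

// key_smi == key << 1; each element is sizeof(double) == 8 bytes.
uint32_t DoubleElementByteOffset(uint32_t key_smi) {
  return key_smi << 2;  // == (key_smi >> 1) * 8
}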
+
+void MacroAssembler::CompareMapAndBranch(Register obj,
+                                         Register scratch,
+                                         Handle<Map> map,
+                                         Label* early_success,
+                                         Condition cond,
+                                         Label* branch_to,
+                                         CompareMapMode mode) {
+  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  Operand right = Operand(map);
+  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
+    Map* transitioned_fast_element_map(
+        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+    ASSERT(transitioned_fast_element_map == NULL ||
+           map->elements_kind() != FAST_ELEMENTS);
+    if (transitioned_fast_element_map != NULL) {
+      Branch(early_success, eq, scratch, right);
+      right = Operand(Handle<Map>(transitioned_fast_element_map));
+    }
+
+    Map* transitioned_double_map(
+        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+    ASSERT(transitioned_double_map == NULL ||
+           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+    if (transitioned_double_map != NULL) {
+      Branch(early_success, eq, scratch, right);
+      right = Operand(Handle<Map>(transitioned_double_map));
+    }
+  }
+
+  Branch(branch_to, cond, scratch, right);
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Handle<Map> map,
                               Label* fail,
-                              SmiCheckType smi_check_type) {
+                              SmiCheckType smi_check_type,
+                              CompareMapMode mode) {
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
-  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
-  li(at, Operand(map));
-  Branch(fail, ne, scratch, Operand(at));
+  Label success;
+  CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);
+  bind(&success);
 }
 
 
@@ -3110,10 +3610,12 @@
                                     Handle<Code> code_constant,
                                     Register code_reg,
                                     Label* done,
+                                    bool* definitely_mismatches,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
   bool definitely_matches = false;
+  *definitely_mismatches = false;
   Label regular_invoke;
 
   // Check whether the expected and actual arguments count match. If not,
@@ -3144,6 +3646,7 @@
         // arguments.
         definitely_matches = true;
       } else {
+        *definitely_mismatches = true;
         li(a2, Operand(expected.immediate()));
       }
     }
@@ -3167,7 +3670,9 @@
       SetCallKind(t1, call_kind);
       Call(adaptor);
       call_wrapper.AfterCall();
-      jmp(done);
+      if (!*definitely_mismatches) {
+        Branch(done);
+      }
     } else {
       SetCallKind(t1, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -3183,21 +3688,30 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
 
-  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+  bool definitely_mismatches = false;
+  InvokePrologue(expected, actual, Handle<Code>::null(), code,
+                 &done, &definitely_mismatches, flag,
                  call_wrapper, call_kind);
-  if (flag == CALL_FUNCTION) {
-    SetCallKind(t1, call_kind);
-    Call(code);
-  } else {
-    ASSERT(flag == JUMP_FUNCTION);
-    SetCallKind(t1, call_kind);
-    Jump(code);
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      SetCallKind(t1, call_kind);
+      Call(code);
+      call_wrapper.AfterCall();
+    } else {
+      ASSERT(flag == JUMP_FUNCTION);
+      SetCallKind(t1, call_kind);
+      Jump(code);
+    }
+    // Continue here if InvokePrologue handled the invocation itself (through
+    // the arguments adaptor) because of mismatched parameter counts.
+    bind(&done);
   }
-  // Continue here if InvokePrologue does handle the invocation due to
-  // mismatched parameter counts.
-  bind(&done);
 }
 
 
@@ -3207,20 +3721,27 @@
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
 
-  InvokePrologue(expected, actual, code, no_reg, &done, flag,
+  bool definitely_mismatches = false;
+  InvokePrologue(expected, actual, code, no_reg,
+                 &done, &definitely_mismatches, flag,
                  NullCallWrapper(), call_kind);
-  if (flag == CALL_FUNCTION) {
-    SetCallKind(t1, call_kind);
-    Call(code, rmode);
-  } else {
-    SetCallKind(t1, call_kind);
-    Jump(code, rmode);
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      SetCallKind(t1, call_kind);
+      Call(code, rmode);
+    } else {
+      SetCallKind(t1, call_kind);
+      Jump(code, rmode);
+    }
+    // Continue here if InvokePrologue handled the invocation itself (through
+    // the arguments adaptor) because of mismatched parameter counts.
+    bind(&done);
   }
-  // Continue here if InvokePrologue does handle the invocation due to
-  // mismatched parameter counts.
-  bind(&done);
 }
 
 
@@ -3229,6 +3750,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   // Contract with called JS functions requires that function is passed in a1.
   ASSERT(function.is(a1));
   Register expected_reg = a2;
@@ -3247,24 +3771,24 @@
 }
 
 
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
+                                    const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  ASSERT(function->is_compiled());
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  li(a1, Operand(Handle<JSFunction>(function)));
+  LoadHeapObject(a1, function);
   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
-  // Invoke the cached code.
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  if (V8::UseCrankshaft()) {
-    UNIMPLEMENTED_MIPS();
-  } else {
-    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
 }
 
 
@@ -3305,7 +3829,8 @@
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
-                                             Label* miss) {
+                                             Label* miss,
+                                             bool miss_on_bound_function) {
   // Check that the receiver isn't a smi.
   JumpIfSmi(function, miss);
 
@@ -3313,6 +3838,16 @@
   GetObjectType(function, result, scratch);
   Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
 
+  if (miss_on_bound_function) {
+    lw(scratch,
+       FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    lw(scratch,
+       FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+    And(scratch, scratch,
+        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
+    Branch(miss, ne, scratch, Operand(zero_reg));
+  }
+
   // Make sure that the function has an instance prototype.
   Label non_instance;
   lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
@@ -3359,53 +3894,29 @@
 // -----------------------------------------------------------------------------
 // Runtime calls.
 
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
-                              Register r1, const Operand& r2) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
-  Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
-}
-
-
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
-                                         Register r1, const Operand& r2) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
-      kNoASTId, cond, r1, r2);
-  return result;
+void MacroAssembler::CallStub(CodeStub* stub,
+                              Condition cond,
+                              Register r1,
+                              const Operand& r2,
+                              BranchDelaySlot bd) {
+  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2, bd);
 }
 
 
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
-                                             Condition cond,
-                                             Register r1,
-                                             const Operand& r2) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
-  return result;
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
 
 
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
-    ExternalReference function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+                                              int stack_space) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -3472,15 +3983,13 @@
   lw(t1, MemOperand(at));
   Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
   li(s0, Operand(stack_space));
-  LeaveExitFrame(false, s0);
-  Ret();
+  LeaveExitFrame(false, s0, true);
 
   bind(&promote_scheduled_exception);
-  MaybeObject* result = TryTailCallExternalReference(
-      ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
-  if (result->IsFailure()) {
-    return result;
-  }
+  TailCallExternalReference(
+      ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+      0,
+      1);
 
   // HandleScope limit has changed. Delete allocated extensions.
   bind(&delete_allocated_handles);
@@ -3493,8 +4002,12 @@
       1);
   mov(v0, s0);
   jmp(&leave_exit_frame);
+}
 
-  return result;
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
 }
 
 
@@ -3578,7 +4091,16 @@
   ASSERT(!overflow_dst.is(scratch));
   ASSERT(!overflow_dst.is(left));
   ASSERT(!overflow_dst.is(right));
-  ASSERT(!left.is(right));
+
+  if (left.is(right) && dst.is(left)) {
+    ASSERT(!dst.is(t9));
+    ASSERT(!scratch.is(t9));
+    ASSERT(!left.is(t9));
+    ASSERT(!right.is(t9));
+    ASSERT(!overflow_dst.is(t9));
+    mov(t9, right);
+    right = t9;
+  }
 
   if (dst.is(left)) {
     mov(scratch, left);  // Preserve left.
@@ -3611,10 +4133,17 @@
   ASSERT(!overflow_dst.is(scratch));
   ASSERT(!overflow_dst.is(left));
   ASSERT(!overflow_dst.is(right));
-  ASSERT(!left.is(right));
   ASSERT(!scratch.is(left));
   ASSERT(!scratch.is(right));
 
+  // This case can arise in Crankshaft-generated code. Since Subu works fine
+  // when left == right, there is no need to forbid that case here.
+  if (left.is(right)) {
+    mov(dst, zero_reg);
+    mov(overflow_dst, zero_reg);
+    return;
+  }
+
   if (dst.is(left)) {
     mov(scratch, left);  // Preserve left.
     subu(dst, left, right);  // Left is overwritten.
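
The early-out above is sound because x - x is zero and can never overflow;
the general case uses the usual two's-complement rule (the emitted xor/and
sequence sits just past this hunk). Host-side form (sketch):

#include <cstdint>

// Overflow iff the operands' signs differ and the result's sign differs
// from the left operand's; left == right therefore never overflows.
bool SubOverflows(int32_t left, int32_t right, int32_t* out) {
  int32_t result = static_cast<int32_t>(
      static_cast<uint32_t>(left) - static_cast<uint32_t>(right));
  *out = result;
  return ((left ^ right) & (left ^ result)) < 0;
}
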
@@ -3652,8 +4181,8 @@
   // arguments passed in because it is constant. At some point we
   // should remove this need and make the runtime routine entry code
   // smarter.
-  li(a0, num_arguments);
-  li(a1, Operand(ExternalReference(f, isolate())));
+  PrepareCEntryArgs(num_arguments);
+  PrepareCEntryFunction(ExternalReference(f, isolate()));
   CEntryStub stub(1);
   CallStub(&stub);
 }
@@ -3661,10 +4190,9 @@
 
 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
   const Runtime::Function* function = Runtime::FunctionForId(id);
-  li(a0, Operand(function->nargs));
-  li(a1, Operand(ExternalReference(function, isolate())));
-  CEntryStub stub(1);
-  stub.SaveDoubles();
+  PrepareCEntryArgs(function->nargs);
+  PrepareCEntryFunction(ExternalReference(function, isolate()));
+  CEntryStub stub(1, kSaveFPRegs);
   CallStub(&stub);
 }
 
@@ -3675,12 +4203,13 @@
 
 
 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
-                                           int num_arguments) {
-  li(a0, Operand(num_arguments));
-  li(a1, Operand(ext));
+                                           int num_arguments,
+                                           BranchDelaySlot bd) {
+  PrepareCEntryArgs(num_arguments);
+  PrepareCEntryFunction(ext);
 
   CEntryStub stub(1);
-  CallStub(&stub);
+  CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
 }
 
 
@@ -3691,22 +4220,11 @@
   // arguments passed in because it is constant. At some point we
   // should remove this need and make the runtime routine entry code
   // smarter.
-  li(a0, Operand(num_arguments));
+  PrepareCEntryArgs(num_arguments);
   JumpToExternalReference(ext);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
-    const ExternalReference& ext, int num_arguments, int result_size) {
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  li(a0, num_arguments);
-  return TryJumpToExternalReference(ext);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -3716,24 +4234,25 @@
 }
 
 
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
-  li(a1, Operand(builtin));
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+                                             BranchDelaySlot bd) {
+  PrepareCEntryFunction(builtin);
   CEntryStub stub(1);
-  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
-    const ExternalReference& builtin) {
-  li(a1, Operand(builtin));
-  CEntryStub stub(1);
-  return TryTailCallStub(&stub);
+  Jump(stub.GetCode(),
+       RelocInfo::CODE_TARGET,
+       al,
+       zero_reg,
+       Operand(zero_reg),
+       bd);
 }
 
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   GetBuiltinEntry(t9, id);
   if (flag == CALL_FUNCTION) {
     call_wrapper.BeforeCall(CallSize(t9));
@@ -3866,14 +4385,20 @@
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
 
   li(a0, Operand(p0));
   push(a0);
   li(a0, Operand(Smi::FromInt(p1 - p0)));
   push(a0);
-  CallRuntime(Runtime::kAbort, 2);
+  // Abort must be callable even where stub calls are otherwise restricted.
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
   // Will not return here.
   if (is_trampoline_pool_blocked()) {
     // If the calling code cares about the exact number of
@@ -3907,6 +4432,46 @@
 }
 
 
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+    ElementsKind expected_kind,
+    ElementsKind transitioned_kind,
+    Register map_in_out,
+    Register scratch,
+    Label* no_map_match) {
+  // Load the global or builtins object from the current context.
+  lw(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+  // Check that the function's map is the same as the expected cached map.
+  int expected_index =
+      Context::GetContextMapIndexFromElementsKind(expected_kind);
+  lw(at, MemOperand(scratch, Context::SlotOffset(expected_index)));
+  Branch(no_map_match, ne, map_in_out, Operand(at));
+
+  // Use the transitioned cached map.
+  int trans_index =
+      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+  lw(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
+}
+
+
+void MacroAssembler::LoadInitialArrayMap(
+    Register function_in, Register scratch, Register map_out) {
+  ASSERT(!function_in.is(map_out));
+  Label done;
+  lw(map_out, FieldMemOperand(function_in,
+                              JSFunction::kPrototypeOrInitialMapOffset));
+  if (!FLAG_smi_only_arrays) {
+    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                        FAST_ELEMENTS,
+                                        map_out,
+                                        scratch,
+                                        &done);
+  }
+  bind(&done);
+}
+
+
 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
   // Load the global or builtins object from the current context.
   lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -3937,7 +4502,7 @@
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   addiu(sp, sp, -5 * kPointerSize);
   li(t8, Operand(Smi::FromInt(type)));
-  li(t9, Operand(CodeObject()));
+  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
   sw(ra, MemOperand(sp, 4 * kPointerSize));
   sw(fp, MemOperand(sp, 3 * kPointerSize));
   sw(cp, MemOperand(sp, 2 * kPointerSize));
@@ -3957,7 +4522,7 @@
 
 void MacroAssembler::EnterExitFrame(bool save_doubles,
                                     int stack_space) {
-  // Setup the frame structure on the stack.
+  // Set up the frame structure on the stack.
   STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
   STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
   STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
@@ -3975,13 +4540,14 @@
   addiu(sp, sp, -4 * kPointerSize);
   sw(ra, MemOperand(sp, 3 * kPointerSize));
   sw(fp, MemOperand(sp, 2 * kPointerSize));
-  addiu(fp, sp, 2 * kPointerSize);  // Setup new frame pointer.
+  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
 
   if (emit_debug_code()) {
     sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
   }
 
-  li(t8, Operand(CodeObject()));  // Accessed from ExitFrame::code_slot.
+  // Accessed from ExitFrame::code_slot.
+  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
   sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
 
   // Save the frame pointer and the context in top.
@@ -4025,7 +4591,8 @@
 
 
 void MacroAssembler::LeaveExitFrame(bool save_doubles,
-                                    Register argument_count) {
+                                    Register argument_count,
+                                    bool do_return) {
   // Optionally restore all double registers.
   if (save_doubles) {
     // Remember: we only need to restore every 2nd double FPU value.
@@ -4051,11 +4618,17 @@
   mov(sp, fp);  // Respect ABI stack constraint.
   lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
   lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
-  addiu(sp, sp, 8);
+
   if (argument_count.is_valid()) {
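+    // Pop the JS arguments (argument_count words) off the stack.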
     sll(t8, argument_count, kPointerSizeLog2);
     addu(sp, sp, t8);
   }
+
+  if (do_return) {
+    Ret(USE_DELAY_SLOT);
+    // If returning, the instruction in the delay slot will be the addiu below.
+  }
+  addiu(sp, sp, 8);
 }
 
 
@@ -4120,14 +4693,71 @@
 }
 
 
+void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
+  ASSERT(!reg.is(overflow));
+  mov(overflow, reg);  // Save original value.
+  SmiTag(reg);
+  xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
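+  // e.g. 0x40000000 tags to 0x80000000; 0x40000000 ^ 0x80000000 = 0xC0000000,
+  // which is negative, flagging the overflow.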
+}
+
+
+void MacroAssembler::SmiTagCheckOverflow(Register dst,
+                                         Register src,
+                                         Register overflow) {
+  if (dst.is(src)) {
+    // Fall back to slower case.
+    SmiTagCheckOverflow(dst, overflow);
+  } else {
+    ASSERT(!dst.is(src));
+    ASSERT(!dst.is(overflow));
+    ASSERT(!src.is(overflow));
+    SmiTag(dst, src);
+    xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
+  }
+}
+
+
+void MacroAssembler::UntagAndJumpIfSmi(Register dst,
+                                       Register src,
+                                       Label* smi_case) {
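+  // SmiUntag below fills the branch delay slot, so dst is already untagged
+  // when control reaches smi_case.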
+  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
+  SmiUntag(dst, src);
+}
+
+
+void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
+                                          Register src,
+                                          Label* non_smi_case) {
+  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
+  SmiUntag(dst, src);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register value,
+                               Label* smi_label,
+                               Register scratch,
+                               BranchDelaySlot bd) {
+  ASSERT_EQ(0, kSmiTag);
+  andi(scratch, value, kSmiTagMask);
+  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register value,
+                                  Label* not_smi_label,
+                                  Register scratch,
+                                  BranchDelaySlot bd) {
+  ASSERT_EQ(0, kSmiTag);
+  andi(scratch, value, kSmiTagMask);
+  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
+}
+
+
 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                       Register reg2,
                                       Label* on_not_both_smi) {
   STATIC_ASSERT(kSmiTag == 0);
   ASSERT_EQ(1, kSmiTagMask);
   or_(at, reg1, reg2);
-  andi(at, at, kSmiTagMask);
-  Branch(on_not_both_smi, ne, at, Operand(zero_reg));
+  JumpIfNotSmi(at, on_not_both_smi);
 }
 
 
@@ -4138,8 +4768,7 @@
   ASSERT_EQ(1, kSmiTagMask);
   // Both Smi tags must be 1 (not Smi).
   and_(at, reg1, reg2);
-  andi(at, at, kSmiTagMask);
-  Branch(on_either_smi, eq, at, Operand(zero_reg));
+  JumpIfSmi(at, on_either_smi);
 }
 
 
@@ -4217,8 +4846,7 @@
   // Check that neither is a smi.
   STATIC_ASSERT(kSmiTag == 0);
   And(scratch1, first, Operand(second));
-  And(scratch1, scratch1, Operand(kSmiTagMask));
-  Branch(failure, eq, scratch1, Operand(zero_reg));
+  JumpIfSmi(scratch1, failure);
   JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                              second,
                                              scratch1,
@@ -4257,7 +4885,23 @@
 
 static const int kRegisterPassedArguments = 4;
 
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+                                              int num_double_arguments) {
+  int stack_passed_words = 0;
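+  // Under the MIPS O32 ABI, each double argument occupies two argument words.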
+  num_reg_arguments += 2 * num_double_arguments;
+
+  // Up to four simple arguments are passed in registers a0..a3.
+  if (num_reg_arguments > kRegisterPassedArguments) {
+    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+  }
+  stack_passed_words += kCArgSlotCount;
+  return stack_passed_words;
+}
+
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          int num_double_arguments,
+                                          Register scratch) {
   int frame_alignment = ActivationFrameAlignment();
 
   // Up to four simple arguments are passed in registers a0..a3.
@@ -4265,9 +4909,8 @@
   // mips, even though those argument slots are not normally used.
   // Remaining arguments are pushed on the stack, above (higher address than)
   // the argument slots.
-  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
-                                 0 : num_arguments - kRegisterPassedArguments) +
-                                kCArgSlotCount;
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
   if (frame_alignment > kPointerSize) {
     // Make stack end at alignment and make room for num_arguments - 4 words
     // and the original value of sp.
@@ -4282,26 +4925,43 @@
 }
 
 
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          Register scratch) {
+  PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
 void MacroAssembler::CallCFunction(ExternalReference function,
-                                   int num_arguments) {
-  CallCFunctionHelper(no_reg, function, t8, num_arguments);
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  li(t8, Operand(function));
+  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
-                                   Register scratch,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
-  CallCFunctionHelper(function,
-                      ExternalReference::the_hole_value_location(isolate()),
-                      scratch,
-                      num_arguments);
+  CallCFunction(function, num_arguments, 0);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+                                   int num_arguments) {
+  CallCFunction(function, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunctionHelper(Register function,
-                                         ExternalReference function_reference,
-                                         Register scratch,
-                                         int num_arguments) {
+                                         int num_reg_arguments,
+                                         int num_double_arguments) {
+  ASSERT(has_frame());
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -4329,19 +4989,15 @@
   // allow preemption, so the return address in the link register
   // stays correct.
 
-  if (function.is(no_reg)) {
-    function = t9;
-    li(function, Operand(function_reference));
-  } else if (!function.is(t9)) {
+  if (!function.is(t9)) {
     mov(t9, function);
     function = t9;
   }
 
   Call(function);
 
-  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
-                                0 : num_arguments - kRegisterPassedArguments) +
-                               kCArgSlotCount;
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
 
   if (OS::ActivationFrameAlignment() > kPointerSize) {
     lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
@@ -4354,17 +5010,370 @@
 #undef BRANCH_ARGS_CHECK
 
 
+void MacroAssembler::PatchRelocatedValue(Register li_location,
+                                         Register scratch,
+                                         Register new_value) {
+  lw(scratch, MemOperand(li_location));
+  // At this point scratch is a lui(at, ...) instruction.
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, "The instruction to patch should be a lui.",
+        scratch, Operand(LUI));
+    lw(scratch, MemOperand(li_location));
+  }
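+  // The high half of new_value patches the lui immediate, the low half the
+  // following ori immediate.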
+  srl(t9, new_value, kImm16Bits);
+  Ins(scratch, t9, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location));
+
+  lw(scratch, MemOperand(li_location, kInstrSize));
+  // scratch is now ori(at, ...).
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, "The instruction to patch should be an ori.",
+        scratch, Operand(ORI));
+    lw(scratch, MemOperand(li_location, kInstrSize));
+  }
+  Ins(scratch, new_value, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location, kInstrSize));
+
+  // Update the I-cache so the new lui and ori can be executed.
+  FlushICache(li_location, 2);
+}
+
+
+void MacroAssembler::GetRelocatedValue(Register li_location,
+                                       Register value,
+                                       Register scratch) {
+  lw(value, MemOperand(li_location));
+  if (emit_debug_code()) {
+    And(value, value, kOpcodeMask);
+    Check(eq, "The instruction should be a lui.",
+        value, Operand(LUI));
+    lw(value, MemOperand(li_location));
+  }
+
+  // value now holds a lui instruction. Extract the immediate.
+  sll(value, value, kImm16Bits);
+
+  lw(scratch, MemOperand(li_location, kInstrSize));
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, "The instruction should be an ori.",
+        scratch, Operand(ORI));
+    lw(scratch, MemOperand(li_location, kInstrSize));
+  }
+  // "scratch" now holds an ori instruction. Extract the immediate.
+  andi(scratch, scratch, kImm16Mask);
+
+  // Merge the results.
+  or_(value, value, scratch);
+}
+
+
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met) {
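+  // Clear the low bits to get the start of the page (MemoryChunk), whose
+  // header holds the flags word.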
+  And(scratch, object, Operand(~Page::kPageAlignmentMask));
+  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+  And(scratch, scratch, Operand(mask));
+  Branch(condition_met, cc, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register scratch0,
+                                 Register scratch1,
+                                 Label* on_black) {
+  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+                              Register bitmap_scratch,
+                              Register mask_scratch,
+                              Label* has_color,
+                              int first_bit,
+                              int second_bit) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
+
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
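+  // The color is a pair of adjacent mark bits.  Doubling the mask may wrap
+  // into the next bitmap word; the word_boundary path below handles that.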
+
+  Label other_color, word_boundary;
+  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  And(t8, t9, Operand(mask_scratch));
+  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
+  // Shift left 1 by adding.
+  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
+  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
+  And(t8, t9, Operand(mask_scratch));
+  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
+  jmp(&other_color);
+
+  bind(&word_boundary);
+  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
+  And(t9, t9, Operand(1));
+  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
+  bind(&other_color);
+}
+
+
+// Detect some, but not all, common pointer-free objects.  This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+                                      Register scratch,
+                                      Label* not_data_object) {
+  ASSERT(!AreAliased(value, scratch, t8, no_reg));
+  Label is_data_object;
+  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+  Branch(&is_data_object, eq, t8, Operand(scratch));
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
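+  // Either the not-a-string bit or the indirect (cons/sliced) string bit
+  // being set means the object may hold GC pointers.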
+  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  Branch(not_data_object, ne, t8, Operand(zero_reg));
+  bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
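+  // The marking bitmap lives in the page header, one mark bit per pointer-
+  // sized word.  bitmap_reg ends up addressing the bitmap cell for addr_reg
+  // (callers add the kHeaderSize displacement) and mask_reg becomes a
+  // one-hot mask for its bit within that cell.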
+  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
+  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+  sll(t8, t8, kPointerSizeLog2);
+  Addu(bitmap_reg, bitmap_reg, t8);
+  li(t8, Operand(1));
+  sllv(mask_reg, t8, mask_reg);
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Register load_scratch,
+    Label* value_is_white_and_not_data) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there we only need to check one bit.
+  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  And(t8, mask_scratch, load_scratch);
+  Branch(&done, ne, t8, Operand(zero_reg));
+
+  if (emit_debug_code()) {
+    // Check for impossible bit pattern.
+    Label ok;
+    // sll may overflow, making the check conservative.
+    sll(t8, mask_scratch, 1);
+    And(t8, load_scratch, t8);
+    Branch(&ok, eq, t8, Operand(zero_reg));
+    stop("Impossible marking bit pattern");
+    bind(&ok);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = load_scratch;  // Holds map while checking type.
+  Register length = load_scratch;  // Holds length of object after testing type.
+  Label is_data_object;
+
+  // Check for heap-number
+  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
+  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+  {
+    Label skip;
+    Branch(&skip, ne, t8, Operand(map));
+    li(length, HeapNumber::kSize);
+    Branch(&is_data_object);
+    bind(&skip);
+  }
+
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = load_scratch;
+  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  And(t8, instance_type, Operand(kExternalStringTag));
+  {
+    Label skip;
+    Branch(&skip, eq, t8, Operand(zero_reg));
+    li(length, ExternalString::kSize);
+    Branch(&is_data_object);
+    bind(&skip);
+  }
+
+  // Sequential string, either ASCII or UC16.
+  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
+  // getting the length multiplied by 2.
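+  // e.g. for 2 characters: ASCII untags the smi length 4 to 2 bytes, while
+  // UC16 keeps 4, which is already its byte length.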
+  ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  lw(t9, FieldMemOperand(value, String::kLengthOffset));
+  And(t8, instance_type, Operand(kStringEncodingMask));
+  {
+    Label skip;
+    Branch(&skip, eq, t8, Operand(zero_reg));
+    srl(t9, t9, 1);
+    bind(&skip);
+  }
+  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+  And(length, length, Operand(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  Or(t8, t8, Operand(mask_scratch));
+  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
+  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+  Addu(t8, t8, Operand(length));
+  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+  bind(&done);
+}
+
+
 void MacroAssembler::LoadInstanceDescriptors(Register map,
                                              Register descriptors) {
   lw(descriptors,
      FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
   Label not_smi;
   JumpIfNotSmi(descriptors, &not_smi);
-  li(descriptors, Operand(FACTORY->empty_descriptor_array()));
+  LoadRoot(descriptors, Heap::kEmptyDescriptorArrayRootIndex);
   bind(&not_smi);
 }
 
 
+void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+  Label next;
+  // Preload a couple of values used in the loop.
+  Register empty_fixed_array_value = t2;
+  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+  Register empty_descriptor_array_value = t3;
+  LoadRoot(empty_descriptor_array_value,
+           Heap::kEmptyDescriptorArrayRootIndex);
+  mov(a1, a0);
+  bind(&next);
+
+  // Check that there are no elements.  Register a1 contains the
+  // current JS object we've reached through the prototype chain.
+  lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+  Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
+
+  // Check that instance descriptors are not empty so that we can
+  // check for an enum cache.  Leave the map in a2 for the subsequent
+  // prototype load.
+  lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+  lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
+  JumpIfSmi(a3, call_runtime);
+
+  // Check that there is an enum cache in the non-empty instance
+  // descriptors (a3).  This is the case if the next enumeration
+  // index field does not contain a smi.
+  lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
+  JumpIfSmi(a3, call_runtime);
+
+  // For all objects but the receiver, check that the cache is empty.
+  Label check_prototype;
+  Branch(&check_prototype, eq, a1, Operand(a0));
+  lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+  Branch(call_runtime, ne, a3, Operand(empty_fixed_array_value));
+
+  // Load the prototype from the map and loop if non-null.
+  bind(&check_prototype);
+  lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
+  Branch(&next, ne, a1, Operand(null_value));
+}
+
+
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+  ASSERT(!output_reg.is(input_reg));
+  Label done;
+  li(output_reg, Operand(255));
+  // Normal branch: nop in delay slot.
+  Branch(&done, gt, input_reg, Operand(output_reg));
+  // Use delay slot in this branch.
+  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
+  mov(output_reg, zero_reg);  // In delay slot.
+  mov(output_reg, input_reg);  // Value is in range 0..255.
+  bind(&done);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+                                        DoubleRegister input_reg,
+                                        DoubleRegister temp_double_reg) {
+  Label above_zero;
+  Label done;
+  Label in_bounds;
+
+  Move(temp_double_reg, 0.0);
+  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
+
+  // Double value is less than or equal to zero, or NaN: return 0.
+  mov(result_reg, zero_reg);
+  Branch(&done);
+
+  // Double value is greater than 255, return 255.
+  bind(&above_zero);
+  Move(temp_double_reg, 255.0);
+  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
+  li(result_reg, Operand(255));
+  Branch(&done);
+
+  // In 0-255 range, round and truncate.
+  bind(&in_bounds);
+  round_w_d(temp_double_reg, input_reg);
+  mfc1(result_reg, temp_double_reg);
+  bind(&done);
+}
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}
+
+
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index c968ffc..f57418f 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,15 +50,6 @@
 // trying to update gp register for position-independent-code. Whenever
 // MIPS generated code calls C code, it must be via t9 register.
 
-// Registers aliases
-// cp is assumed to be a callee saved register.
-const Register roots = s6;  // Roots array pointer.
-const Register cp = s7;     // JavaScript context pointer.
-const Register fp = s8_fp;  // Alias for fp.
-// Registers used for condition evaluation.
-const Register condReg1 = s4;
-const Register condReg2 = s5;
-
 
 // Flags used for the AllocateInNewSpace functions.
 enum AllocationFlags {
@@ -90,6 +81,53 @@
   PROTECT
 };
 
+// Flags used for the li macro-assembler function.
+enum LiFlags {
+  // If the constant value can be represented in just 16 bits, then
+  // optimize the li to use a single instruction, rather than lui/ori pair.
+  OPTIMIZE_SIZE = 0,
+  // Always use 2 instructions (lui/ori pair), even if the constant could
+  // be loaded with just one, so that this value is patchable later.
+  CONSTANT_SIZE = 1
+};
+
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+inline MemOperand ContextOperand(Register context, int index) {
+  return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+inline MemOperand GlobalObjectOperand() {
+  return ContextOperand(cp, Context::GLOBAL_INDEX);
+}
+
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+inline MemOperand CFunctionArgumentOperand(int index) {
+  ASSERT(index > kCArgSlotCount);
+  // Argument 5 takes the slot just past the four Arg-slots.
+  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
+  return MemOperand(sp, offset);
+}
+
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -138,23 +176,30 @@
   void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
-  int CallSize(Register target, COND_ARGS);
+  static int CallSize(Register target, COND_ARGS);
   void Call(Register target, COND_ARGS);
-  int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
-  int CallSize(Handle<Code> code,
-               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
-               unsigned ast_id = kNoASTId,
-               COND_ARGS);
+  static int CallSize(Handle<Code> code,
+                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+                      unsigned ast_id = kNoASTId,
+                      COND_ARGS);
   void Call(Handle<Code> code,
             RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
             unsigned ast_id = kNoASTId,
             COND_ARGS);
   void Ret(COND_ARGS);
-  inline void Ret(BranchDelaySlot bd) {
-    Ret(al, zero_reg, Operand(zero_reg), bd);
+  inline void Ret(BranchDelaySlot bd, Condition cond = al,
+    Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
+    Ret(cond, rs, rt, bd);
   }
 
+  void Branch(Label* L,
+              Condition cond,
+              Register rs,
+              Heap::RootListIndex index,
+              BranchDelaySlot bdslot = PROTECT);
+
 #undef COND_ARGS
 
   // Emit code to discard a non-negative number of pointer-sized elements
@@ -164,10 +209,14 @@
             Register reg = no_reg,
             const Operand& op = Operand(no_reg));
 
-  void DropAndRet(int drop = 0,
-                  Condition cond = cc_always,
-                  Register reg = no_reg,
-                  const Operand& op = Operand(no_reg));
+  // Trivial case of DropAndRet that utilizes the delay slot and only emits
+  // 2 instructions.
+  void DropAndRet(int drop);
+
+  void DropAndRet(int drop,
+                  Condition cond,
+                  Register reg,
+                  const Operand& op);
 
   // Swap two registers.  If the scratch register is omitted then a slightly
   // less efficient form using xor instead of mov is emitted.
@@ -197,6 +246,15 @@
     mtc1(src_high, FPURegister::from_code(dst.code() + 1));
   }
 
+  void Move(FPURegister dst, double imm);
+
+  // Conditional moves.
+  void Movz(Register rd, Register rs, Register rt);
+  void Movn(Register rd, Register rs, Register rt);
+  void Movt(Register rd, Register rs, uint16_t cc = 0);
+  void Movf(Register rd, Register rs, uint16_t cc = 0);
+
+  void Clz(Register rd, Register rs);
+
   // Jump unconditionally to given label.
   // We NEED a nop in the branch delay slot, as it is used by v8, for example
   // in CodeGenerator::ProcessDeferred().
@@ -220,40 +278,137 @@
                  Heap::RootListIndex index,
                  Condition cond, Register src1, const Operand& src2);
 
+  void LoadHeapObject(Register dst, Handle<HeapObject> object);
 
-  // Check if object is in new space.
-  // scratch can be object itself, but it will be clobbered.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cc,  // eq for new space, ne otherwise.
-                  Label* branch);
+  void LoadObject(Register result, Handle<Object> object) {
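+    // Smis are plain immediates; heap objects may move under GC, so they are
+    // loaded with a GC-safe reference via LoadHeapObject.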
+    if (object->IsHeapObject()) {
+      LoadHeapObject(result, Handle<HeapObject>::cast(object));
+    } else {
+      li(result, object);
+    }
+  }
+
+  // ---------------------------------------------------------------------------
+  // GC Support
+
+  void IncrementalMarkingRecordWriteHelper(Register object,
+                                           Register value,
+                                           Register address);
+
+  enum RememberedSetFinalAction {
+    kReturnAtEnd,
+    kFallThroughAtEnd
+  };
 
 
-  // For the page containing |object| mark the region covering [address]
-  // dirty. The object address must be in the first 8K of an allocated page.
-  void RecordWriteHelper(Register object,
-                         Register address,
-                         Register scratch);
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr,
+                           Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
 
-  // For the page containing |object| mark the region covering
-  // [object+offset] dirty. The object address must be in the first 8K
-  // of an allocated page.  The 'scratch' registers are used in the
-  // implementation and all 3 registers are clobbered by the
-  // operation, as well as the 'at' register. RecordWrite updates the
-  // write barrier even when storing smis.
-  void RecordWrite(Register object,
-                   Operand offset,
+  void CheckPageFlag(Register object,
+                     Register scratch,
+                     int mask,
+                     Condition cc,
+                     Label* condition_met);
+
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfNotInNewSpace(Register object,
+                           Register scratch,
+                           Label* branch) {
+    InNewSpace(object, scratch, ne, branch);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfInNewSpace(Register object,
+                        Register scratch,
+                        Label* branch) {
+    InNewSpace(object, scratch, eq, branch);
+  }
+
+  // Check if an object has a given incremental marking color.
+  void HasColor(Register object,
+                Register scratch0,
+                Register scratch1,
+                Label* has_color,
+                int first_bit,
+                int second_bit);
+
+  void JumpIfBlack(Register object,
                    Register scratch0,
-                   Register scratch1);
+                   Register scratch1,
+                   Label* on_black);
 
-  // For the page containing |object| mark the region covering
-  // [address] dirty. The object address must be in the first 8K of an
-  // allocated page.  All 3 registers are clobbered by the operation,
-  // as well as the ip register. RecordWrite updates the write barrier
-  // even when storing smis.
-  void RecordWrite(Register object,
-                   Register address,
-                   Register scratch);
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object,
+                      Register scratch1,
+                      Register scratch2,
+                      Register scratch3,
+                      Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+  void JumpIfDataObject(Register value,
+                        Register scratch,
+                        Label* not_data_object);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
+  void RecordWriteField(
+      Register object,
+      int offset,
+      Register value,
+      Register scratch,
+      RAStatus ra_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // MemOperand(reg, off).
+  inline void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      RAStatus ra_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK) {
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     ra_status,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check);
+  }
+
+  // For a given |object| notify the garbage collector that the slot |address|
+  // has been written.  |value| is the object being stored. The value and
+  // address registers are clobbered by the operation.
+  void RecordWrite(
+      Register object,
+      Register address,
+      Register value,
+      RAStatus ra_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
 
   // ---------------------------------------------------------------------------
@@ -282,7 +437,7 @@
   }
 
   // Check if the given instruction is a 'type' marker.
-  // ie. check if it is a sll zero_reg, zero_reg, <type> (referenced as
+  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
   // nop(type)). These instructions are generated to mark special location in
   // the code, like some special IC code.
   static inline bool IsMarkedCode(Instr instr, int type) {
@@ -439,12 +594,13 @@
   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
 
   // Load int32 in the rd register.
-  void li(Register rd, Operand j, bool gen2instr = false);
-  inline void li(Register rd, int32_t j, bool gen2instr = false) {
-    li(rd, Operand(j), gen2instr);
+  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
+    li(rd, Operand(j), mode);
   }
-  inline void li(Register dst, Handle<Object> value, bool gen2instr = false) {
-    li(dst, Operand(value), gen2instr);
+  inline void li(Register dst, Handle<Object> value,
+                 LiFlags mode = OPTIMIZE_SIZE) {
+    li(dst, Operand(value), mode);
   }
 
   // Push multiple registers on the stack.
@@ -518,6 +674,14 @@
     Addu(sp, sp, 2 * kPointerSize);
   }
 
+  // Pop three registers. Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Register src3) {
+    lw(src3, MemOperand(sp, 0 * kPointerSize));
+    lw(src2, MemOperand(sp, 1 * kPointerSize));
+    lw(src1, MemOperand(sp, 2 * kPointerSize));
+    Addu(sp, sp, 3 * kPointerSize);
+  }
+
   void Pop(uint32_t count = 1) {
     Addu(sp, sp, Operand(count * kPointerSize));
   }
@@ -536,10 +700,17 @@
   // into register dst.
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
+  // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
+  // Does not handle errors.
+  void FlushICache(Register address, unsigned instructions);
+
   // MIPS32 R2 instruction macro.
   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
 
+  // ---------------------------------------------------------------------------
+  // FPU macros. These do not handle special cases like NaN or +- inf.
+
   // Convert unsigned word to double.
   void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
   void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
@@ -548,6 +719,28 @@
   void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
   void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
 
+  void Trunc_w_d(FPURegister fd, FPURegister fs);
+  void Round_w_d(FPURegister fd, FPURegister fs);
+  void Floor_w_d(FPURegister fd, FPURegister fs);
+  void Ceil_w_d(FPURegister fd, FPURegister fs);
+  // Wrapper function for the different cmp/branch types.
+  void BranchF(Label* target,
+               Label* nan,
+               Condition cc,
+               FPURegister cmp1,
+               FPURegister cmp2,
+               BranchDelaySlot bd = PROTECT);
+
+  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
+  inline void BranchF(BranchDelaySlot bd,
+                      Label* target,
+                      Label* nan,
+                      Condition cc,
+                      FPURegister cmp1,
+                      FPURegister cmp2) {
+    BranchF(target, nan, cc, cmp1, cmp2, bd);
+  }
+
   // Convert the HeapNumber pointed to by source to a 32bits signed integer
   // dest. If the HeapNumber does not fit into a 32bits signed integer branch
   // to not_int32 label. If FPU is available double_scratch is used but not
@@ -559,6 +752,18 @@
                       FPURegister double_scratch,
                       Label *not_int32);
 
+  // Truncates a double using a specific rounding mode.
+  // The except_flag will contain any exceptions caused by the instruction.
+  // If check_inexact is kDontCheckForInexactConversion, then the inexact
+  // exception is masked.
+  void EmitFPUTruncate(FPURoundingMode rounding_mode,
+                       FPURegister result,
+                       DoubleRegister double_input,
+                       Register scratch1,
+                       Register except_flag,
+                       CheckForInexactConversion check_inexact
+                           = kDontCheckForInexactConversion);
+
   // Helper for EmitECMATruncate.
   // This will truncate a floating-point value outside of the signed 32bit
   // integer range to a 32bit signed integer.
@@ -580,15 +785,6 @@
                         Register scratch2,
                         Register scratch3);
 
-  // -------------------------------------------------------------------------
-  // Activation frames.
-
-  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
-  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
-  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
-  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
   // Enter exit frame.
   // argc - argument count to be dropped by LeaveExitFrame.
   // save_doubles - saves FPU registers on stack, currently disabled.
@@ -597,7 +793,9 @@
                       int stack_space = 0);
 
   // Leave the current exit frame.
-  void LeaveExitFrame(bool save_doubles, Register arg_count);
+  void LeaveExitFrame(bool save_doubles,
+                      Register arg_count,
+                      bool do_return = false);
 
   // Get the actual activation frame alignment for target environment.
   static int ActivationFrameAlignment();
@@ -607,6 +805,22 @@
 
   void LoadContext(Register dst, int context_chain_length);
 
+  // Conditionally load the cached Array transitioned map of type
+  // transitioned_kind from the global context if the map in register
+  // map_in_out is the cached Array map in the global context of
+  // expected_kind.
+  void LoadTransitionedArrayMapConditional(
+      ElementsKind expected_kind,
+      ElementsKind transitioned_kind,
+      Register map_in_out,
+      Register scratch,
+      Label* no_map_match);
+
+  // Load the initial map for new Arrays from a JSFunction.
+  void LoadInitialArrayMap(Register function_in,
+                           Register scratch,
+                           Register map_out);
+
   void LoadGlobalFunction(int index, Register function);
 
   // Load the initial map from the global function. The registers
@@ -615,10 +829,16 @@
                                     Register map,
                                     Register scratch);
 
+  void InitializeRootRegister() {
+    ExternalReference roots_array_start =
+        ExternalReference::roots_array_start(isolate());
+    li(kRootRegister, Operand(roots_array_start));
+  }
+
   // -------------------------------------------------------------------------
   // JavaScript invokes.
 
-  // Setup call kind marking in t1. The method takes t1 as an
+  // Set up call kind marking in t1. The method takes t1 as an
   // explicit first parameter to make the code more readable at the
   // call sites.
   void SetCallKind(Register dst, CallKind kind);
@@ -646,9 +866,10 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(JSFunction* function,
+  void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
+                      const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
 
@@ -672,30 +893,23 @@
   void DebugBreak();
 #endif
 
-  void InitializeRootRegister() {
-    ExternalReference roots_address =
-        ExternalReference::roots_address(isolate());
-    li(kRootRegister, Operand(roots_address));
-  }
 
   // -------------------------------------------------------------------------
   // Exception handling.
 
   // Push a new try handler and link into try handler chain.
-  // The return address must be passed in register ra.
-  // Clobber t0, t1, t2.
-  void PushTryHandler(CodeLocation try_location, HandlerType type);
+  void PushTryHandler(StackHandler::Kind kind, int handler_index);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   // Must preserve the result register.
   void PopTryHandler();
 
-  // Passes thrown value (in v0) to the handler of top of the try handler chain.
+  // Passes thrown value to the handler of top of the try handler chain.
   void Throw(Register value);
 
   // Propagates an uncatchable exception to the top of the current JS stack's
   // handler chain.
-  void ThrowUncatchable(UncatchableExceptionType type, Register value);
+  void ThrowUncatchable(Register value);
 
   // Copies a fixed number of fields of heap objects from src to dst.
   void CopyFields(Register dst, Register src, RegList temps, int field_count);
@@ -708,6 +922,13 @@
                  Register length,
                  Register scratch);
 
+  // Initialize fields with filler values.  Fields starting at |start_offset|
+  // not including |end_offset| are overwritten with the value in |filler|.  At
+  // the end of the loop, |start_offset| takes the value of |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset,
+                                  Register end_offset,
+                                  Register filler);
+
   // -------------------------------------------------------------------------
   // Support functions.
 
@@ -719,7 +940,8 @@
   void TryGetFunctionPrototype(Register function,
                                Register result,
                                Register scratch,
-                               Label* miss);
+                               Label* miss,
+                               bool miss_on_bound_function = false);
 
   void GetObjectType(Register function,
                      Register map,
@@ -731,15 +953,55 @@
                          Register scratch,
                          Label* fail);
 
-  // Check if the map of an object is equal to a specified map (either
-  // given directly or as an index into the root list) and branch to
-  // label if not. Skip the smi check if not required (object is known
-  // to be a heap object).
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map,
+                               Register scratch,
+                               Label* fail);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiOnlyElements(Register map,
+                                Register scratch,
+                                Label* fail);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by key in
+  // the FastDoubleElements array elements. Otherwise jump to fail, in which
+  // case scratch2, scratch3 and scratch4 are unmodified.
+  void StoreNumberToDoubleElements(Register value_reg,
+                                   Register key_reg,
+                                   Register receiver_reg,
+                                   Register elements_reg,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Register scratch3,
+                                   Register scratch4,
+                                   Label* fail);
+
+  // Compare an object's map with the specified map and its transitioned
+  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
+  // "branch_to" if the result of the comparison is "cond". If multiple map
+  // compares are required, the compare sequence branches to early_success.
+  void CompareMapAndBranch(Register obj,
+                           Register scratch,
+                           Handle<Map> map,
+                           Label* early_success,
+                           Condition cond,
+                           Label* branch_to,
+                           CompareMapMode mode = REQUIRE_EXACT_MAP);
+
+  // Check if the map of an object is equal to a specified map and branch to
+  // label if not. Skip the smi check if not required (object is known to be a
+  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+  // against maps that are ElementsKind transition maps of the specified map.
   void CheckMap(Register obj,
                 Register scratch,
                 Handle<Map> map,
                 Label* fail,
-                SmiCheckType smi_check_type);
+                SmiCheckType smi_check_type,
+                CompareMapMode mode = REQUIRE_EXACT_MAP);
+
 
   void CheckMap(Register obj,
                 Register scratch,
@@ -760,6 +1022,21 @@
   // occurred.
   void IllegalOperation(int num_arguments);
 
+
+  // Load and check the instance type of an object for being a string.
+  // Loads the type into the second argument register.
+  // Returns a condition that will be enabled if the object was a string.
+  Condition IsObjectStringType(Register obj,
+                               Register type,
+                               Register result) {
+    lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+    And(type, type, Operand(kIsNotStringMask));
+    ASSERT_EQ(0, kStringTag);
+    return eq;
+  }
+
+
   // Picks out an array index from the hash field.
   // Register use:
   //   hash - holds the index's hash. Clobbered.
@@ -829,31 +1106,26 @@
   // -------------------------------------------------------------------------
   // Runtime calls.
 
-  // Call a code stub.
-  void CallStub(CodeStub* stub, Condition cond = cc_always,
-                Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  // See comments at the beginning of CEntryStub::Generate.
+  inline void PrepareCEntryArgs(int num_args) {
+    li(s0, num_args);
+    li(s1, (num_args - 1) * kPointerSize);
+  }
 
-  // Call a code stub and return the code object called.  Try to generate
-  // the code if necessary.  Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub,
-                                           Condition cond = cc_always,
-                                           Register r1 = zero_reg,
-                                           const Operand& r2 =
-                                               Operand(zero_reg));
+  inline void PrepareCEntryFunction(const ExternalReference& ref) {
+    li(s2, Operand(ref));
+  }
+
+  // Call a code stub.
+  void CallStub(CodeStub* stub,
+                Condition cond = cc_always,
+                Register r1 = zero_reg,
+                const Operand& r2 = Operand(zero_reg),
+                BranchDelaySlot bd = PROTECT);
 
   // Tail call a code stub (jump).
   void TailCallStub(CodeStub* stub);
 
-  // Tail call a code stub (jump) and return the code object called.  Try to
-  // generate the code if necessary.  Do not perform a GC but instead return
-  // a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
-                                               Condition cond = cc_always,
-                                               Register r1 = zero_reg,
-                                               const Operand& r2 =
-                                                   Operand(zero_reg));
-
   void CallJSExitStub(CodeStub* stub);
 
   // Call a runtime routine.
@@ -865,7 +1137,8 @@
 
   // Convenience function: call an external reference.
   void CallExternalReference(const ExternalReference& ext,
-                             int num_arguments);
+                             int num_arguments,
+                             BranchDelaySlot bd = PROTECT);
 
   // Tail call of a runtime routine (jump).
   // Like JumpToExternalReference, but also takes care of passing the number
@@ -874,17 +1147,14 @@
                                  int num_arguments,
                                  int result_size);
 
-  // Tail call of a runtime routine (jump). Try to generate the code if
-  // necessary. Do not perform a GC but instead return a retry after GC
-  // failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
-      const ExternalReference& ext, int num_arguments, int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+
   // Before calling a C-function from generated code, align arguments on stack
   // and add space for the four mips argument slots.
   // After aligning the frame, non-register arguments must be stored on the
@@ -894,7 +1164,11 @@
   // C++ code.
   // Needs a scratch register to do some arithmetic. This register will be
   // trashed.
-  void PrepareCallCFunction(int num_arguments, Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments,
+                            int num_double_arguments,
+                            Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments,
+                            Register scratch);
 
   // Arguments 1-4 are placed in registers a0 thru a3 respectively.
   // Arguments 5..n are stored to stack using following:
@@ -906,7 +1180,13 @@
   // return address (unless this is somehow accounted for by the called
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
-  void CallCFunction(Register function, Register scratch, int num_arguments);
+  void CallCFunction(Register function, int num_arguments);
+  void CallCFunction(ExternalReference function,
+                     int num_reg_arguments,
+                     int num_double_arguments);
+  void CallCFunction(Register function,
+                     int num_reg_arguments,
+                     int num_double_arguments);
   void GetCFunctionDoubleResult(const DoubleRegister dst);
 
   // There are two ways of passing double arguments on MIPS, depending on
@@ -917,15 +1197,15 @@
   void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
   void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
 
-  // Calls an API function. Allocates HandleScope, extracts returned value
-  // from handle and propagates exceptions. Restores context.
-  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
-                                           int stack_space);
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Restores context.  stack_space
+  // - space to be unwound on exit (includes the call JS arguments space and
+  // the additional space allocated for the fast call).
+  void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
 
   // Jump to the builtin routine.
-  void JumpToExternalReference(const ExternalReference& builtin);
-
-  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
+  void JumpToExternalReference(const ExternalReference& builtin,
+                               BranchDelaySlot bd = PROTECT);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
@@ -982,6 +1262,9 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
 
   // ---------------------------------------------------------------------------
   // Number utilities.
@@ -997,18 +1280,14 @@
   // -------------------------------------------------------------------------
   // Smi utilities.
 
-  // Try to convert int32 to smi. If the value is to large, preserve
-  // the original value and jump to not_a_smi. Destroys scratch and
-  // sets flags.
-  // This is only used by crankshaft atm so it is unimplemented on MIPS.
-  void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
-    UNIMPLEMENTED_MIPS();
-  }
-
   void SmiTag(Register reg) {
     Addu(reg, reg, reg);
   }
 
+  // Overflow is indicated by a negative result; test it with
+  // BranchOnOverflow() or BranchOnNoOverflow().
+  void SmiTagCheckOverflow(Register reg, Register overflow);
+  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
+
   void SmiTag(Register dst, Register src) {
     Addu(dst, src, src);
   }
@@ -1021,21 +1300,25 @@
     sra(dst, src, kSmiTagSize);
   }
 
+  // Untag the source value into destination and jump if source is a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+  // Untag the source value into destination and jump if source is not a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+
   // Jump if the register contains a smi.
-  inline void JumpIfSmi(Register value, Label* smi_label,
-                        Register scratch = at) {
-    ASSERT_EQ(0, kSmiTag);
-    andi(scratch, value, kSmiTagMask);
-    Branch(smi_label, eq, scratch, Operand(zero_reg));
-  }
+  void JumpIfSmi(Register value,
+                 Label* smi_label,
+                 Register scratch = at,
+                 BranchDelaySlot bd = PROTECT);
 
   // Jump if the register contains a non-smi.
-  inline void JumpIfNotSmi(Register value, Label* not_smi_label,
-                           Register scratch = at) {
-    ASSERT_EQ(0, kSmiTag);
-    andi(scratch, value, kSmiTagMask);
-    Branch(not_smi_label, ne, scratch, Operand(zero_reg));
-  }
+  void JumpIfNotSmi(Register value,
+                    Label* not_smi_label,
+                    Register scratch = at,
+                    BranchDelaySlot bd = PROTECT);
 
   // Jump if either of the registers contain a non-smi.
   void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
@@ -1096,13 +1379,37 @@
                                            Register scratch2,
                                            Label* failure);
 
+  void ClampUint8(Register output_reg, Register input_reg);
+
+  void ClampDoubleToUint8(Register result_reg,
+                          DoubleRegister input_reg,
+                          DoubleRegister temp_double_reg);
+
+
   void LoadInstanceDescriptors(Register map, Register descriptors);
 
+
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
+  // Patch the relocated value (lui/ori pair).
+  void PatchRelocatedValue(Register li_location,
+                           Register scratch,
+                           Register new_value);
+  // Get the relocated value (loaded data) from the lui/ori pair.
+  void GetRelocatedValue(Register li_location,
+                         Register value,
+                         Register scratch);
+
+  // Expects object in a0 and returns map with validated enum cache
+  // in a0.  Assumes that any other register can be used as a scratch.
+  void CheckEnumCache(Register null_value, Label* call_runtime);
+
  private:
   void CallCFunctionHelper(Register function,
-                           ExternalReference function_reference,
-                           Register scratch,
-                           int num_arguments);
+                           int num_reg_arguments,
+                           int num_double_arguments);
 
   void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
   void BranchShort(int16_t offset, Condition cond, Register rs,
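
For reference, li(..., CONSTANT_SIZE) pins a load to a fixed lui/ori pair, which is what lets PatchRelocatedValue and GetRelocatedValue treat the two immediates as the halves of one 32-bit literal. A small C++ sketch of that split (illustration only):

    #include <cassert>
    #include <cstdint>

    // A constant loaded with a lui/ori pair carries its high half in the
    // lui immediate and its low half in the ori immediate.
    struct LuiOri { uint16_t hi; uint16_t lo; };

    LuiOri Split(uint32_t value) {               // what li emits
      LuiOri p = { (uint16_t)(value >> 16), (uint16_t)(value & 0xFFFF) };
      return p;
    }

    uint32_t Join(LuiOri p) {                    // what GetRelocatedValue reads
      return ((uint32_t)p.hi << 16) | p.lo;
    }

    int main() {
      assert(Join(Split(0xDEADBEEFu)) == 0xDEADBEEFu);
      return 0;
    }
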
@@ -1130,6 +1437,7 @@
                       Handle<Code> code_constant,
                       Register code_reg,
                       Label* done,
+                      bool* definitely_mismatches,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
@@ -1138,25 +1446,37 @@
   // the function in the 'resolved' flag.
   Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
   void InitializeNewString(Register string,
                            Register length,
                            Heap::RootListIndex map_index,
                            Register scratch1,
                            Register scratch2);
 
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cond,  // eq for new space, ne otherwise.
+                  Label* branch);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds the first bit's position.  Leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg,
+                          Register bitmap_reg,
+                          Register mask_reg);
+
+  // Helper for throwing exceptions.  Compute a handler address and jump to
+  // it.  See the implementation for register usage.
+  void JumpToHandlerEntry();
+
   // Compute memory operands for safepoint stack slots.
   static int SafepointRegisterStackIndex(int reg_code);
   MemOperand SafepointRegisterSlot(Register reg);
   MemOperand SafepointRegistersAndDoublesSlot(Register reg);
 
-  bool UseAbsoluteCodePointers();
-
   bool generating_stub_;
   bool allow_stub_calls_;
+  bool has_frame_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
@@ -1197,34 +1517,6 @@
 };
 
 
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-static MemOperand ContextOperand(Register context, int index) {
-  return MemOperand(context, Context::SlotOffset(index));
-}
-
-
-static inline MemOperand GlobalObjectOperand()  {
-  return ContextOperand(cp, Context::GLOBAL_INDEX);
-}
-
-
-// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
-  return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate a MemOperand for storing arguments 5..N on the stack
-// when calling CallCFunction().
-static inline MemOperand CFunctionArgumentOperand(int index) {
-  ASSERT(index > kCArgSlotCount);
-  // Argument 5 takes the slot just past the four Arg-slots.
-  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
-  return MemOperand(sp, offset);
-}
-
 
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
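
The removed CFunctionArgumentOperand helper encoded the MIPS O32 convention: the first four C arguments travel in a0-a3, but the caller still reserves four stack slots for them, so argument 5 sits just past that slot area. A small sketch of the offset math (assumes 4-byte pointers and four reserved slots, as on O32):

    #include <cassert>

    const int kPointerSize = 4;      // O32: 32-bit pointers
    const int kCArgSlotCount = 4;    // slots reserved for a0-a3
    const int kCArgsSlotsSize = kCArgSlotCount * kPointerSize;

    // Stack offset (from sp) of C argument |index|, 1-based, index > 4.
    int CFunctionArgumentOffset(int index) {
      assert(index > kCArgSlotCount);
      return (index - 5) * kPointerSize + kCArgsSlotsSize;
    }

    int main() {
      assert(CFunctionArgumentOffset(5) == 16);  // just past the 4 slots
      assert(CFunctionArgumentOffset(6) == 20);
      return 0;
    }
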
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index 63e836f..ae4da93 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -377,13 +377,16 @@
     // Isolate.
     __ li(a3, Operand(ExternalReference::isolate_address()));
 
-    ExternalReference function =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
-    __ CallCFunction(function, argument_count);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
+      ExternalReference function =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+      __ CallCFunction(function, argument_count);
+    }
 
     // Restore regexp engine registers.
     __ MultiPop(regexp_registers_to_retain);
-    __ li(code_pointer(), Operand(masm_->CodeObject()));
+    __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
     __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
 
     // Check if function returned non-zero for success or zero for failure.
@@ -607,6 +610,12 @@
 
     // Entry code:
     __ bind(&entry_label_);
+
+    // Tell the system that we have a stack frame.  Because the type is MANUAL,
+    // no code is generated.
+    FrameScope scope(masm_, StackFrame::MANUAL);
+
+    // Actually emit code to start a new stack frame.
     // Push arguments
     // Save callee-save registers.
     // Start new stack frame.
@@ -669,7 +678,7 @@
     // string, and store that value in a local variable.
     __ mov(t5, a1);
     __ li(a1, Operand(1));
-    __ movn(a1, zero_reg, t5);
+    __ Movn(a1, zero_reg, t5);
     __ sw(a1, MemOperand(frame_pointer(), kAtStart));
 
     if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
@@ -689,7 +698,7 @@
     // Initialize backtrack stack pointer.
     __ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
     // Initialize code pointer register
-    __ li(code_pointer(), Operand(masm_->CodeObject()));
+    __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
     // Load previous char as initial value of current character register.
     Label at_start;
     __ lw(a0, MemOperand(frame_pointer(), kAtStart));
@@ -774,7 +783,7 @@
 
       // String might have moved: Reload end of string from frame.
       __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
-      __ li(code_pointer(), Operand(masm_->CodeObject()));
+      __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
       SafeReturn();
     }
 
@@ -804,7 +813,7 @@
       // Otherwise use return value as new stack pointer.
       __ mov(backtrack_stackpointer(), v0);
       // Restore saved registers and continue.
-      __ li(code_pointer(), Operand(masm_->CodeObject()));
+      __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
       __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
       SafeReturn();
     }
@@ -1001,7 +1010,7 @@
   __ PrepareCallCFunction(num_arguments, scratch);
   __ mov(a2, frame_pointer());
   // Code* of self.
-  __ li(a1, Operand(masm_->CodeObject()));
+  __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
   // a0 becomes return address pointer.
   ExternalReference stack_guard_check =
       ExternalReference::re_check_stack_guard_state(masm_->isolate());
@@ -1047,7 +1056,7 @@
   ASSERT(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
-  MaybeObject* result = Execution::HandleStackGuardInterrupt();
+  MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
 
   if (*code_handle != re_code) {  // Return address no longer valid.
     int delta = code_handle->address() - re_code->address();
@@ -1103,6 +1112,11 @@
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+    // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC. That will not change start_address but
+    // will change the pointer inside the subject handle.
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
   }
 
   return 0;
@@ -1215,7 +1229,7 @@
   if (OS::ActivationFrameAlignment() != 0) {
     __ lw(sp, MemOperand(sp, 16));
   }
-  __ li(code_pointer(), Operand(masm_->CodeObject()));
+  __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
 }
 
 
@@ -1244,13 +1258,14 @@
   if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
   // Stack is already aligned for call, so decrement by alignment
   // to make room for storing the return address.
-  __ Subu(sp, sp, Operand(stack_alignment));
-  __ sw(ra, MemOperand(sp, 0));
-  __ mov(a0, sp);
+  __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
+  const int return_address_offset = kCArgsSlotsSize;
+  __ Addu(a0, sp, return_address_offset);
+  __ sw(ra, MemOperand(a0, 0));
   __ mov(t9, t1);
   __ Call(t9);
-  __ lw(ra, MemOperand(sp, 0));
-  __ Addu(sp, sp, Operand(stack_alignment));
+  __ lw(ra, MemOperand(sp, return_address_offset));
+  __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
   __ Jump(ra);
 }
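
The adjustment above reserves the four O32 argument slots below the saved return address, so the callee's slot area can no longer clobber ra. A rough standalone sketch of the layout arithmetic (constants illustrative):

    #include <cassert>

    // Frame set up by the stack-overflow handling call, low address first:
    //   sp .. sp+15 : four O32 argument slots (callee may write these)
    //   sp+16       : saved ra
    //   above       : alignment padding
    int main() {
      const int kPointerSize = 4;
      const int kCArgsSlotsSize = 4 * kPointerSize;
      int stack_alignment = 8;                      // >= kPointerSize
      int reserved = stack_alignment + kCArgsSlotsSize;
      int return_address_offset = kCArgsSlotsSize;  // ra above the slots
      assert(reserved == 24 && return_address_offset == 16);
      return 0;
    }
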
 
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 17c1897..f31ce7e 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -72,7 +72,7 @@
 // code.
 class MipsDebugger {
  public:
-  explicit MipsDebugger(Simulator* sim);
+  explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
   ~MipsDebugger();
 
   void Stop(Instruction* instr);
@@ -105,10 +105,6 @@
   void RedoBreakpoints();
 };
 
-MipsDebugger::MipsDebugger(Simulator* sim) {
-  sim_ = sim;
-}
-
 
 MipsDebugger::~MipsDebugger() {
 }
@@ -391,6 +387,13 @@
     if (line == NULL) {
       break;
     } else {
+      char* last_input = sim_->last_debugger_input();
+      if (strcmp(line, "\n") == 0 && last_input != NULL) {
+        line = last_input;
+      } else {
+        // Ownership is transferred to sim_.
+        sim_->set_last_debugger_input(line);
+      }
       // Use sscanf to parse the individual parts of the command line. At the
       // moment no command expects more than two parameters.
       int argc = SScanF(line,
@@ -757,7 +760,6 @@
         PrintF("Unknown command: %s\n", cmd);
       }
     }
-    DeleteArray(line);
   }
 
   // Add all the breakpoints back to stop execution and enter the debugger
@@ -791,6 +793,12 @@
 }
 
 
+void Simulator::set_last_debugger_input(char* input) {
+  DeleteArray(last_debugger_input_);
+  last_debugger_input_ = input;
+}
+
+
 void Simulator::FlushICache(v8::internal::HashMap* i_cache,
                             void* start_addr,
                             size_t size) {
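
The debugger change makes a bare Enter replay the previous command, with the simulator owning the line buffer. A minimal sketch of that ownership pattern (ReadLine/DeleteArray replaced by hypothetical strdup/free stand-ins):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // The simulator owns the last line read; a bare newline replays it.
    static char* last_input = NULL;

    static void SetLastDebuggerInput(char* line) {  // takes ownership
      free(last_input);
      last_input = line;
    }

    static char* NextCommand(char* line) {  // |line| is heap-allocated
      if (strcmp(line, "\n") == 0 && last_input != NULL) {
        free(line);                // drop the empty line
        return last_input;         // replay the previous command
      }
      SetLastDebuggerInput(line);  // ownership moves to the "simulator"
      return line;
    }

    int main() {
      printf("%s", NextCommand(strdup("step\n")));
      printf("%s", NextCommand(strdup("\n")));  // bare Enter repeats "step"
      return 0;
    }
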
@@ -880,7 +888,7 @@
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
-  // Setup simulator support first. Some of this information is needed to
+  // Set up simulator support first. Some of this information is needed to
   // set up the architecture state.
   stack_ = reinterpret_cast<char*>(malloc(stack_size_));
   pc_modified_ = false;
@@ -889,7 +897,7 @@
   break_pc_ = NULL;
   break_instr_ = 0;
 
-  // Setup architecture state.
+  // Set up architecture state.
   // All registers are initialized to zero to start with.
   for (int i = 0; i < kNumSimuRegisters; i++) {
     registers_[i] = 0;
@@ -911,6 +919,8 @@
   for (int i = 0; i < kNumExceptions; i++) {
     exceptions[i] = 0;
   }
+
+  last_debugger_input_ = NULL;
 }
 
 
@@ -1359,9 +1369,9 @@
 
 // Returns the limit of the stack area to enable checking for stack overflows.
 uintptr_t Simulator::StackLimit() const {
-  // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+  // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
   // pushing values.
-  return reinterpret_cast<uintptr_t>(stack_) + 256;
+  return reinterpret_cast<uintptr_t>(stack_) + 1024;
 }
 
 
@@ -1934,7 +1944,7 @@
   // Next pc
   int32_t next_pc = 0;
 
-  // Setup the variables if needed before executing the instruction.
+  // Set up the variables if needed before executing the instruction.
   ConfigureTypeRegister(instr,
                         alu_out,
                         i64hilo,
@@ -2281,7 +2291,7 @@
 }
 
 
-// Type 2: instructions using a 16 bytes immediate. (eg: addi, beq).
+// Type 2: instructions using a 16-bit immediate. (e.g. addi, beq).
 void Simulator::DecodeTypeImmediate(Instruction* instr) {
   // Instruction fields.
   Opcode   op     = instr->OpcodeFieldRaw();
@@ -2604,7 +2614,7 @@
 }
 
 
-// Type 3: instructions using a 26 bytes immediate. (eg: j, jal).
+// Type 3: instructions using a 26-bit immediate. (e.g. j, jal).
 void Simulator::DecodeTypeJump(Instruction* instr) {
   // Get current pc.
   int32_t current_pc = get_pc();
@@ -2701,7 +2711,7 @@
 int32_t Simulator::Call(byte* entry, int argument_count, ...) {
   va_list parameters;
   va_start(parameters, argument_count);
-  // Setup arguments.
+  // Set up arguments.
 
   // First four arguments passed in registers.
   ASSERT(argument_count >= 4);
@@ -2748,7 +2758,7 @@
   int32_t sp_val = get_register(sp);
   int32_t fp_val = get_register(fp);
 
-  // Setup the callee-saved registers with a known value. To be able to check
+  // Set up the callee-saved registers with a known value. To be able to check
   // that they are preserved properly across JS execution.
   int32_t callee_saved_value = icount_;
   set_register(s0, callee_saved_value);
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 69dddfa..1e72939 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -221,6 +221,10 @@
   // Pop an address from the JS stack.
   uintptr_t PopAddress();
 
+  // Debugger input.
+  void set_last_debugger_input(char* input);
+  char* last_debugger_input() { return last_debugger_input_; }
+
   // ICache checking.
   static void FlushICache(v8::internal::HashMap* i_cache, void* start,
                           size_t size);
@@ -305,6 +309,14 @@
   void InstructionDecode(Instruction* instr);
   // Execute one instruction placed in a branch delay slot.
   void BranchDelayInstructionDecode(Instruction* instr) {
+    if (instr->InstructionBits() == nopInstr) {
+      // Short-cut generic nop instructions. They are always valid and they
+      // never change the simulator state.
+      set_register(pc, reinterpret_cast<int32_t>(instr) +
+                       Instruction::kInstrSize);
+      return;
+    }
+
     if (instr->IsForbiddenInBranchDelay()) {
       V8_Fatal(__FILE__, __LINE__,
                "Eror:Unexpected %i opcode in a branch delay slot.",
@@ -358,6 +370,9 @@
   int icount_;
   int break_count_;
 
+  // Debugger input.
+  char* last_debugger_input_;
+
   // Icache simulation.
   v8::internal::HashMap* i_cache_;
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 5b94973..294bc0a 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,51 +43,74 @@
                        MacroAssembler* masm,
                        Code::Flags flags,
                        StubCache::Table table,
+                       Register receiver,
                        Register name,
+                       // Number of the cache entry, not scaled.
                        Register offset,
                        Register scratch,
-                       Register scratch2) {
+                       Register scratch2,
+                       Register offset_scratch) {
   ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
   ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
 
   uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
   uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
 
   // Check the relative positions of the address fields.
   ASSERT(value_off_addr > key_off_addr);
   ASSERT((value_off_addr - key_off_addr) % 4 == 0);
   ASSERT((value_off_addr - key_off_addr) < (256 * 4));
+  ASSERT(map_off_addr > key_off_addr);
+  ASSERT((map_off_addr - key_off_addr) % 4 == 0);
+  ASSERT((map_off_addr - key_off_addr) < (256 * 4));
 
   Label miss;
-  Register offsets_base_addr = scratch;
+  Register base_addr = scratch;
+  scratch = no_reg;
+
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ sll(offset_scratch, offset, 1);
+  __ Addu(offset_scratch, offset_scratch, offset);
+
+  // Calculate the base address of the entry.
+  __ li(base_addr, Operand(key_offset));
+  __ sll(at, offset_scratch, kPointerSizeLog2);
+  __ Addu(base_addr, base_addr, at);
 
   // Check that the key in the entry matches the name.
-  __ li(offsets_base_addr, Operand(key_offset));
-  __ sll(scratch2, offset, 1);
-  __ addu(scratch2, offsets_base_addr, scratch2);
-  __ lw(scratch2, MemOperand(scratch2));
-  __ Branch(&miss, ne, name, Operand(scratch2));
+  __ lw(at, MemOperand(base_addr, 0));
+  __ Branch(&miss, ne, name, Operand(at));
+
+  // Check the map matches.
+  __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
+  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, at, Operand(scratch2));
 
   // Get the code entry from the cache.
-  __ Addu(offsets_base_addr, offsets_base_addr,
-         Operand(value_off_addr - key_off_addr));
-  __ sll(scratch2, offset, 1);
-  __ addu(scratch2, offsets_base_addr, scratch2);
-  __ lw(scratch2, MemOperand(scratch2));
+  Register code = scratch2;
+  scratch2 = no_reg;
+  __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
 
   // Check that the flags match what we're looking for.
-  __ lw(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
-  __ And(scratch2, scratch2, Operand(~Code::kFlagsNotUsedInLookup));
-  __ Branch(&miss, ne, scratch2, Operand(flags));
+  Register flags_reg = base_addr;
+  base_addr = no_reg;
+  __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+  __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
+  __ Branch(&miss, ne, flags_reg, Operand(flags));
 
-  // Re-load code entry from cache.
-  __ sll(offset, offset, 1);
-  __ addu(offset, offset, offsets_base_addr);
-  __ lw(offset, MemOperand(offset));
+#ifdef DEBUG
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    __ jmp(&miss);
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    __ jmp(&miss);
+  }
+#endif
 
   // Jump to the first instruction in the code stub.
-  __ Addu(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(offset);
+  __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
 
   // Miss: fall through.
   __ bind(&miss);
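
With a third (map) field per entry, the probe scales the entry number by 3 before the pointer-size shift; the sll/Addu pair computes offset * 3 as (offset << 1) + offset. A small sketch of the resulting address arithmetic (constants illustrative):

    #include <cassert>
    #include <cstdint>

    const int kPointerSizeLog2 = 2;  // 4-byte pointers on MIPS32

    // Entry base = table base + (entry * 3 fields) * kPointerSize,
    // computed as in the generated code: (entry << 1) + entry, then shift.
    uint32_t EntryAddress(uint32_t table_base, uint32_t entry) {
      uint32_t times3 = (entry << 1) + entry;
      return table_base + (times3 << kPointerSizeLog2);
    }

    int main() {
      assert(EntryAddress(0x1000, 0) == 0x1000);
      assert(EntryAddress(0x1000, 1) == 0x100C);  // 3 words per entry
      assert(EntryAddress(0x1000, 2) == 0x1018);
      return 0;
    }
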
@@ -99,13 +122,12 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss_label,
-    Register receiver,
-    String* name,
-    Register scratch0,
-    Register scratch1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                             Label* miss_label,
+                                             Register receiver,
+                                             Handle<String> name,
+                                             Register scratch0,
+                                             Register scratch1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
@@ -120,9 +142,8 @@
   Register map = scratch1;
   __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ And(at, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
-  __ Branch(miss_label, ne, at, Operand(zero_reg));
-
+  __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+  __ Branch(miss_label, ne, scratch0, Operand(zero_reg));
 
   // Check that receiver is a JSObject.
   __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -140,20 +161,16 @@
   // Restore the temporarily used register.
   __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
 
-  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
-      masm,
-      miss_label,
-      &done,
-      receiver,
-      properties,
-      name,
-      scratch1);
-  if (result->IsFailure()) return result;
 
+  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     receiver,
+                                                     properties,
+                                                     name,
+                                                     scratch1);
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
-  return result;
 }
 
 
@@ -163,13 +180,14 @@
                               Register name,
                               Register scratch,
                               Register extra,
-                              Register extra2) {
+                              Register extra2,
+                              Register extra3) {
   Isolate* isolate = masm->isolate();
   Label miss;
 
-  // Make sure that code is valid. The shifting code relies on the
-  // entry size being 8.
-  ASSERT(sizeof(Entry) == 8);
+  // Make sure that code is valid. The multiplying code relies on the
+  // entry size being 12.
+  ASSERT(sizeof(Entry) == 12);
 
   // Make sure the flags do not name a specific type.
   ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -185,39 +203,66 @@
   ASSERT(!extra2.is(scratch));
   ASSERT(!extra2.is(extra));
 
-  // Check scratch, extra and extra2 registers are valid.
+  // Check register validity.
   ASSERT(!scratch.is(no_reg));
   ASSERT(!extra.is(no_reg));
   ASSERT(!extra2.is(no_reg));
+  ASSERT(!extra3.is(no_reg));
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
+                      extra2, extra3);
 
   // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss, t0);
+  __ JumpIfSmi(receiver, &miss);
 
   // Get the map of the receiver and compute the hash.
   __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
-  __ lw(t8, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Addu(scratch, scratch, Operand(t8));
-  __ Xor(scratch, scratch, Operand(flags));
-  __ And(scratch,
-         scratch,
-         Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+  __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Addu(scratch, scratch, at);
+  uint32_t mask = kPrimaryTableSize - 1;
+  // We shift out the last two bits because they are not part of the hash and
+  // they are always 01 for maps.
+  __ srl(scratch, scratch, kHeapObjectTagSize);
+  __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
+  __ And(scratch, scratch, Operand(mask));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
+  ProbeTable(isolate,
+             masm,
+             flags,
+             kPrimary,
+             receiver,
+             name,
+             scratch,
+             extra,
+             extra2,
+             extra3);
 
   // Primary miss: Compute hash for secondary probe.
-  __ Subu(scratch, scratch, Operand(name));
-  __ Addu(scratch, scratch, Operand(flags));
-  __ And(scratch,
-         scratch,
-         Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+  __ srl(at, name, kHeapObjectTagSize);
+  __ Subu(scratch, scratch, at);
+  uint32_t mask2 = kSecondaryTableSize - 1;
+  __ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
+  __ And(scratch, scratch, Operand(mask2));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
+  ProbeTable(isolate,
+             masm,
+             flags,
+             kSecondary,
+             receiver,
+             name,
+             scratch,
+             extra,
+             extra2,
+             extra3);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
   __ bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
+                      extra2, extra3);
 }
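
The reworked hash works in untagged word units: name hash plus map pointer, shifted right past the two tag bits (map pointers always end in 01), then xor'ed with the similarly shifted flags. A hedged C++ sketch of the primary and secondary index computation (table sizes illustrative):

    #include <cassert>
    #include <cstdint>

    const int kHeapObjectTagSize = 2;
    const uint32_t kPrimaryTableSize = 2048;
    const uint32_t kSecondaryTableSize = 512;

    uint32_t PrimaryIndex(uint32_t name_hash, uint32_t map, uint32_t flags) {
      uint32_t mask = kPrimaryTableSize - 1;
      uint32_t scratch = (name_hash + map) >> kHeapObjectTagSize;
      return (scratch ^ ((flags >> kHeapObjectTagSize) & mask)) & mask;
    }

    uint32_t SecondaryIndex(uint32_t primary, uint32_t name, uint32_t flags) {
      uint32_t mask2 = kSecondaryTableSize - 1;
      uint32_t scratch = primary - (name >> kHeapObjectTagSize);
      return (scratch + ((flags >> kHeapObjectTagSize) & mask2)) & mask2;
    }

    int main() {
      uint32_t p = PrimaryIndex(0x12345678, 0xCAFE0001, 0xF00D0000);
      assert(p < kPrimaryTableSize);
      assert(SecondaryIndex(p, 0xBEEF0001, 0xF00D0000) < kSecondaryTableSize);
      return 0;
    }
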
 
 
@@ -240,7 +285,10 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
   Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -248,8 +296,8 @@
   __ li(at, isolate->global());
   __ Branch(miss, ne, prototype, Operand(at));
   // Get the global function with the given index.
-  JSFunction* function =
-      JSFunction::cast(isolate->global_context()->get(index));
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->global_context()->get(index)));
   // Load its initial map. The global functions all have initial maps.
   __ li(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
@@ -261,8 +309,10 @@
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst, Register src,
-                                            JSObject* holder, int index) {
+                                            Register dst,
+                                            Register src,
+                                            Handle<JSObject> holder,
+                                            int index) {
   // Adjust for the number of properties stored in the holder.
   index -= holder->map()->inobject_properties();
   if (index < 0) {
@@ -283,8 +333,7 @@
                                            Register scratch,
                                            Label* miss_label) {
   // Check that the receiver isn't a smi.
-  __ And(scratch, receiver, Operand(kSmiTagMask));
-  __ Branch(miss_label, eq, scratch, Operand(zero_reg));
+  __ JumpIfSmi(receiver, miss_label);
 
   // Check that the object is a JS array.
   __ GetObjectType(receiver, scratch, scratch);
@@ -370,22 +419,18 @@
 // After executing generated code, the receiver_reg and name_reg
 // may be clobbered.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      JSObject* object,
+                                      Handle<JSObject> object,
                                       int index,
-                                      Map* transition,
+                                      Handle<Map> transition,
                                       Register receiver_reg,
                                       Register name_reg,
                                       Register scratch,
                                       Label* miss_label) {
   // a0 : value.
   Label exit;
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver_reg, miss_label, scratch);
-
-  // Check that the map of the receiver hasn't changed.
-  __ lw(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-  __ Branch(miss_label, ne, scratch, Operand(Handle<Map>(object->map())));
+  // Check that the map of the object hasn't changed.
+  __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
+              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -397,11 +442,11 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   // Perform map transition for the receiver if necessary.
-  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ push(receiver_reg);
-    __ li(a2, Operand(Handle<Map>(transition)));
+    __ li(a2, Operand(transition));
     __ Push(a2, a0);
     __ TailCallExternalReference(
            ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -410,10 +455,10 @@
     return;
   }
 
-  if (transition != NULL) {
+  if (!transition.is_null()) {
     // Update the map of the object; no write barrier updating is
     // needed because the map is never in new space.
-    __ li(t0, Operand(Handle<Map>(transition)));
+    __ li(t0, Operand(transition));
     __ sw(t0, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
   }
 
@@ -432,7 +477,13 @@
 
     // Update the write barrier for the array address.
     // Pass the now unused name_reg as a scratch register.
-    __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+    __ mov(name_reg, a0);
+    __ RecordWriteField(receiver_reg,
+                        offset,
+                        name_reg,
+                        scratch,
+                        kRAHasNotBeenSaved,
+                        kDontSaveFPRegs);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -445,7 +496,13 @@
 
     // Update the write barrier for the array address.
     // Ok to clobber receiver_reg and name_reg, since we return.
-    __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+    __ mov(name_reg, a0);
+    __ RecordWriteField(scratch,
+                        offset,
+                        name_reg,
+                        receiver_reg,
+                        kRAHasNotBeenSaved,
+                        kDontSaveFPRegs);
   }
 
   // Return the value (register v0).
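
RecordWrite becomes RecordWriteField here because the incremental-marking barrier needs the exact field slot rather than a computed address. A greatly simplified, hedged sketch of the post-store barrier shape (stand-in data structures, not V8's):

    #include <cstdint>
    #include <unordered_set>

    // Greatly simplified: after storing |value| into |slot| of |object|,
    // remember the slot if it may point from old space into new space,
    // and mark through it if incremental marking is active.
    struct Heap {
      std::unordered_set<void**> store_buffer;    // remembered slots
      bool incremental_marking = false;
      bool InNewSpace(void* p) { return false; }  // stand-in predicate
      void MarkThrough(void* p) {}                // stand-in marker

      void RecordWriteField(void* object, void** slot, void* value) {
        *slot = value;                            // the store itself
        if (((uintptr_t)value & 1) == 0) return;  // smis need no barrier
        if (InNewSpace(value) && !InNewSpace(object)) {
          store_buffer.insert(slot);              // old -> new pointer
        }
        if (incremental_marking) MarkThrough(value);
      }
    };

    int main() {
      Heap heap;
      void* obj[2] = {nullptr, nullptr};
      heap.RecordWriteField(obj, &obj[1], (void*)0x1001);  // tagged pointer
      return 0;
    }
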
@@ -457,20 +514,15 @@
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Code* code = NULL;
-  if (kind == Code::LOAD_IC) {
-    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
-  } else {
-    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
-  }
-
-  Handle<Code> ic(code);
-  __ Jump(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code = (kind == Code::LOAD_IC)
+      ? masm->isolate()->builtins()->LoadIC_Miss()
+      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
 static void GenerateCallFunction(MacroAssembler* masm,
-                                 Object* object,
+                                 Handle<Object> object,
                                  const ParameterCount& arguments,
                                  Label* miss,
                                  Code::ExtraICState extra_ic_state) {
@@ -502,30 +554,31 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     JSObject* holder_obj) {
+                                     Handle<JSObject> holder_obj) {
   __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
   Register scratch = name;
-  __ li(scratch, Operand(Handle<Object>(interceptor)));
+  __ li(scratch, Operand(interceptor));
   __ Push(scratch, receiver, holder);
   __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
   __ push(scratch);
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
-                                                   Register receiver,
-                                                   Register holder,
-                                                   Register name,
-                                                   JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
   ExternalReference ref =
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
           masm->isolate());
-  __ li(a0, Operand(5));
-  __ li(a1, Operand(ref));
+  __ PrepareCEntryArgs(5);
+  __ PrepareCEntryFunction(ref);
 
   CEntryStub stub(1);
   __ CallStub(&stub);
@@ -554,34 +607,34 @@
 }
 
 
-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+static void GenerateFastApiDirectCall(MacroAssembler* masm,
                                       const CallOptimization& optimization,
                                       int argc) {
   // ----------- S t a t e -------------
   //  -- sp[0]              : holder (set by CheckPrototypes)
-  //  -- sp[4]              : callee js function
+  //  -- sp[4]              : callee JS function
   //  -- sp[8]              : call data
-  //  -- sp[12]             : last js argument
+  //  -- sp[12]             : last JS argument
   //  -- ...
-  //  -- sp[(argc + 3) * 4] : first js argument
+  //  -- sp[(argc + 3) * 4] : first JS argument
   //  -- sp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and set up the context.
-  JSFunction* function = optimization.constant_function();
-  __ li(t1, Operand(Handle<JSFunction>(function)));
+  Handle<JSFunction> function = optimization.constant_function();
+  __ LoadHeapObject(t1, function);
   __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
 
   // Pass the additional arguments FastHandleApiCall expects.
-  Object* call_data = optimization.api_call_info()->data();
-  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
-  if (masm->isolate()->heap()->InNewSpace(call_data)) {
-    __ li(a0, api_call_info_handle);
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data(api_call_info->data());
+  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+    __ li(a0, api_call_info);
     __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
   } else {
-    __ li(t2, Operand(Handle<Object>(call_data)));
+    __ li(t2, call_data);
   }
 
-  // Store js function and call data.
+  // Store JS function and call data.
   __ sw(t1, MemOperand(sp, 1 * kPointerSize));
   __ sw(t2, MemOperand(sp, 2 * kPointerSize));
 
@@ -589,12 +642,9 @@
   // (refer to layout above).
   __ Addu(a2, sp, Operand(2 * kPointerSize));
 
-  Object* callback = optimization.api_call_info()->callback();
-  Address api_function_address = v8::ToCData<Address>(callback);
-  ApiFunction fun(api_function_address);
-
   const int kApiStackSpace = 4;
 
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
   // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
@@ -617,16 +667,15 @@
   // v8::Arguments::is_construct_call = 0
   __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated). Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
   const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
   ExternalReference ref =
       ExternalReference(&fun,
                         ExternalReference::DIRECT_API_CALL,
                         masm->isolate());
-  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
 class CallInterceptorCompiler BASE_EMBEDDED {
@@ -640,86 +689,63 @@
         name_(name),
         extra_ic_state_(extra_ic_state) {}
 
-  MaybeObject* Compile(MacroAssembler* masm,
-                       JSObject* object,
-                       JSObject* holder,
-                       String* name,
-                       LookupResult* lookup,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       Label* miss) {
+  void Compile(MacroAssembler* masm,
+               Handle<JSObject> object,
+               Handle<JSObject> holder,
+               Handle<String> name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
     // Check that the receiver isn't a smi.
     __ JumpIfSmi(receiver, miss);
-
     CallOptimization optimization(lookup);
-
     if (optimization.is_constant_call()) {
-      return CompileCacheable(masm,
-                              object,
-                              receiver,
-                              scratch1,
-                              scratch2,
-                              scratch3,
-                              holder,
-                              lookup,
-                              name,
-                              optimization,
-                              miss);
+      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+                       holder, lookup, name, optimization, miss);
     } else {
-      CompileRegular(masm,
-                     object,
-                     receiver,
-                     scratch1,
-                     scratch2,
-                     scratch3,
-                     name,
-                     holder,
-                     miss);
-      return masm->isolate()->heap()->undefined_value();
+      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+                     name, holder, miss);
     }
   }
 
  private:
-  MaybeObject* CompileCacheable(MacroAssembler* masm,
-                                JSObject* object,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                JSObject* interceptor_holder,
-                                LookupResult* lookup,
-                                String* name,
-                                const CallOptimization& optimization,
-                                Label* miss_label) {
+  void CompileCacheable(MacroAssembler* masm,
+                        Handle<JSObject> object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        Register scratch3,
+                        Handle<JSObject> interceptor_holder,
+                        LookupResult* lookup,
+                        Handle<String> name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
-
     Counters* counters = masm->isolate()->counters();
-
     int depth1 = kInvalidProtoDepth;
     int depth2 = kInvalidProtoDepth;
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
-        !lookup->holder()->IsGlobalObject()) {
-      depth1 =
-          optimization.GetPrototypeDepthOfExpectedType(object,
-                                                      interceptor_holder);
+          !lookup->holder()->IsGlobalObject()) {
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(
+          object, interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 =
-            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
-                                                        lookup->holder());
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            interceptor_holder, Handle<JSObject>(lookup->holder()));
       }
-      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
-                             (depth2 != kInvalidProtoDepth);
+      can_do_fast_api_call =
+          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
     }
 
     __ IncrementCounter(counters->call_const_interceptor(), 1,
-                      scratch1, scratch2);
+                        scratch1, scratch2);
 
     if (can_do_fast_api_call) {
       __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -732,9 +758,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-      stub_compiler_->CheckPrototypes(object, receiver,
-                                      interceptor_holder, scratch1,
-                                      scratch2, scratch3, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3,
+                                        name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -747,10 +773,11 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      lookup->holder(), scratch1,
-                                      scratch2, scratch3, name, depth2, miss);
+                                      Handle<JSObject>(lookup->holder()),
+                                      scratch1, scratch2, scratch3,
+                                      name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -761,16 +788,13 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      MaybeObject* result = GenerateFastApiDirectCall(masm,
-                                                      optimization,
-                                                      arguments_.immediate());
-      if (result->IsFailure()) return result;
+      GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
           : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
-                        JUMP_FUNCTION, call_kind);
+                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
     }
 
     // Deferred code for fast API call case---clean preallocated space.
@@ -785,66 +809,57 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
     }
-
-    return masm->isolate()->heap()->undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      JSObject* object,
+                      Handle<JSObject> object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      String* name,
-                      JSObject* interceptor_holder,
+                      Handle<String> name,
+                      Handle<JSObject> interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3, name,
-                                        miss_label);
+                                        scratch1, scratch2, scratch3,
+                                        name, miss_label);
 
     // Call a runtime function to load the interceptor property.
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             interceptor_holder);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
 
     __ CallExternalReference(
           ExternalReference(
               IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
               masm->isolate()),
           5);
-
     // Restore the name_ register.
     __ pop(name_);
-    __ LeaveInternalFrame();
+    // Leave the internal frame.
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           JSObject* holder_obj,
+                           Handle<JSObject> holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
 
-    __ Push(holder, name_);
-
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
-
-    __ pop(name_);  // Restore the name.
-    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
-
+      __ Push(holder, name_);
+      CompileCallLoadPropertyWithInterceptor(masm,
+                                             receiver,
+                                             holder,
+                                             name_,
+                                             holder_obj);
+      __ pop(name_);  // Restore the name.
+      __ pop(receiver);  // Restore the holder.
+    }
     // If interceptor returns no-result sentinel, call the constant function.
     __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
     __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
@@ -861,52 +876,41 @@
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
-    MacroAssembler* masm,
-    GlobalObject* global,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  Object* probe;
-  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+                                      Handle<GlobalObject> global,
+                                      Handle<String> name,
+                                      Register scratch,
+                                      Label* miss) {
+  Handle<JSGlobalPropertyCell> cell =
+      GlobalObject::EnsurePropertyCell(global, name);
   ASSERT(cell->value()->IsTheHole());
-  __ li(scratch, Operand(Handle<Object>(cell)));
+  __ li(scratch, Operand(cell));
   __ lw(scratch,
         FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   __ Branch(miss, ne, scratch, Operand(at));
-  return cell;
 }
 
 
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
-    MacroAssembler* masm,
-    JSObject* object,
-    JSObject* holder,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  JSObject* current = object;
-  while (current != holder) {
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+                                       Handle<JSObject> object,
+                                       Handle<JSObject> holder,
+                                       Handle<String> name,
+                                       Register scratch,
+                                       Label* miss) {
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
     if (current->IsGlobalObject()) {
-      // Returns a cell or a failure.
-      MaybeObject* result = GenerateCheckPropertyCell(
-          masm,
-          GlobalObject::cast(current),
-          name,
-          scratch,
-          miss);
-      if (result->IsFailure()) return result;
+      GenerateCheckPropertyCell(masm,
+                                Handle<GlobalObject>::cast(current),
+                                name,
+                                scratch,
+                                miss);
     }
-    ASSERT(current->IsJSObject());
-    current = JSObject::cast(current->GetPrototype());
+    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
   }
-  return NULL;
 }
 
 
@@ -939,7 +943,7 @@
     __ And(fval, ival, Operand(kBinary32SignMask));
     // Negate value if it is negative.
     __ subu(scratch1, zero_reg, ival);
-    __ movn(ival, scratch1, fval);
+    __ Movn(ival, scratch1, fval);
 
     // We have -1, 0 or 1, which we treat specially. Register ival contains
     // absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -953,14 +957,14 @@
     __ Xor(scratch1, ival, Operand(1));
     __ li(scratch2, exponent_word_for_1);
     __ or_(scratch2, fval, scratch2);
-    __ movz(fval, scratch2, scratch1);  // Only if ival is equal to 1.
+    __ Movz(fval, scratch2, scratch1);  // Only if ival is equal to 1.
     __ Branch(&done);
 
     __ bind(&not_special);
     // Count leading zeros.
     // Gets the wrong answer for 0, but we already checked for that case above.
     Register zeros = scratch2;
-    __ clz(zeros, ival);
+    __ Clz(zeros, ival);
 
     // Compute exponent and or it into the exponent register.
     __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
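
The Clz path derives the binary32 exponent straight from the leading-zero count: a value whose top set bit is at position 31 - z gets biased exponent (31 - z) + 127, which is why the stub loads (kBitsPerInt - 1) + kBinary32ExponentBias and subtracts the zero count. A standalone C++ check of that identity (sketch of the not-special path only; uses the GCC/Clang __builtin_clz as a stand-in for the Clz instruction):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Build a binary32 for an int > 1 that converts exactly:
    // exponent = (31 - clz) + bias, mantissa = bits below the top set bit.
    float IntToFloatViaClz(uint32_t ival) {
      int z = __builtin_clz(ival);                 // the Clz instruction
      uint32_t exponent = (31 - z) + 127;
      uint32_t mantissa = (ival << (z + 1)) >> 9;  // drop implicit leading 1
      uint32_t bits = (exponent << 23) | mantissa;
      float f;
      memcpy(&f, &bits, sizeof(f));
      return f;
    }

    int main() {
      assert(IntToFloatViaClz(2) == 2.0f);
      assert(IntToFloatViaClz(100) == 100.0f);
      assert(IntToFloatViaClz(1 << 20) == 1048576.0f);
      return 0;
    }
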
@@ -1030,13 +1034,13 @@
 #define __ ACCESS_MASM(masm())
 
 
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                        Register object_reg,
-                                       JSObject* holder,
+                                       Handle<JSObject> holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
-                                       String* name,
+                                       Handle<String> name,
                                        int save_at_depth,
                                        Label* miss) {
   // Make sure there's no overlap between holder and object registers.
@@ -1054,81 +1058,50 @@
 
   // Check the maps in the prototype chain.
   // Traverse the prototype chain from the object and do map checks.
-  JSObject* current = object;
-  while (current != holder) {
-    depth++;
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
+    ++depth;
 
     // Only global objects and objects that do not require access
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
-    ASSERT(current->GetPrototype()->IsJSObject());
-    JSObject* prototype = JSObject::cast(current->GetPrototype());
+    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
-        Object* lookup_result = NULL;  // Initialization to please compiler.
-        if (!maybe_lookup_result->ToObject(&lookup_result)) {
-          set_failure(Failure::cast(maybe_lookup_result));
-          return reg;
-        }
-        name = String::cast(lookup_result);
+        name = factory()->LookupSymbol(name);
       }
-      ASSERT(current->property_dictionary()->FindEntry(name) ==
+      ASSERT(current->property_dictionary()->FindEntry(*name) ==
              StringDictionary::kNotFound);
 
-      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
-                                                                      miss,
-                                                                      reg,
-                                                                      name,
-                                                                      scratch1,
-                                                                      scratch2);
-      if (negative_lookup->IsFailure()) {
-        set_failure(Failure::cast(negative_lookup));
-        return reg;
-      }
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+                                       scratch1, scratch2);
 
       __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now the object is in holder_reg.
-      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else if (heap()->InNewSpace(prototype)) {
-      // Get the map of the current object.
-      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-
-      // Branch on the result of the map check.
-      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
-
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-        // Restore scratch register to be the map of the object.  In the
-        // new space case below, we load the prototype from the map in
-        // the scratch register.
-        __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-
-      reg = holder_reg;  // From now the object is in holder_reg.
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
+      reg = holder_reg;  // From now on the object will be in holder_reg.
       __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      // Check the map of the current object.
-      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      // Branch on the result of the map check.
-      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
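+      // Elements-kind transitions do not change the prototype chain, so
+      // maps that differ only in elements kind are accepted here.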
+      Handle<Map> current_map(current->map());
+      __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
+                  ALLOW_ELEMENT_TRANSITION_MAPS);
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
       if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
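+        // Use scratch2 here: scratch1 still holds the map, which may be
+        // needed below to load the prototype.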
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
       }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // From now the object is in holder_reg.
-      __ li(reg, Operand(Handle<JSObject>(prototype)));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (heap()->InNewSpace(*prototype)) {
+        // The prototype is in new space; we cannot store a reference to it
+        // in the code.  Load it from the map.
+        __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+      } else {
+        // The prototype is in old space; load it directly.
+        __ li(reg, Operand(prototype));
+      }
     }
 
     if (save_at_depth == depth) {
@@ -1139,65 +1112,57 @@
     current = prototype;
   }
 
-  // Check the holder map.
-  __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-  __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
-
   // Log the check depth.
   LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+
+  // Check the holder map.
+  __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
+              DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+
   // Perform security check for access to the global object.
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
   if (holder->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  };
+  }
 
-  // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.  We also need to check that the
-  // property cell for the property is still empty.
-
-  MaybeObject* result = GenerateCheckPropertyCells(masm(),
-                                                   object,
-                                                   holder,
-                                                   name,
-                                                   scratch1,
-                                                   miss);
-  if (result->IsFailure()) set_failure(Failure::cast(result));
+  // If we've skipped any global objects, it's not enough to verify that
+  // their maps haven't changed.  We also need to check that the property
+  // cell for the property is still empty.
+  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
 
   // Return the register containing the holder.
   return reg;
 }
 
 
-void StubCompiler::GenerateLoadField(JSObject* object,
-                                     JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+                                     Handle<JSObject> holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      int index,
-                                     String* name,
+                                     Handle<String> name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
-  __ And(scratch1, receiver, Operand(kSmiTagMask));
-  __ Branch(miss, eq, scratch1, Operand(zero_reg));
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
-                      name, miss);
+  Register reg = CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
   GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
   __ Ret();
 }
 
 
-void StubCompiler::GenerateLoadConstant(JSObject* object,
-                                        JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Object* value,
-                                        String* name,
+                                        Handle<JSFunction> value,
+                                        Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss, scratch1);
@@ -1208,83 +1173,77 @@
                       scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ li(v0, Operand(Handle<Object>(value)));
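+  // LoadHeapObject also handles the case where the constant lives in new
+  // space and therefore cannot be embedded in the code directly.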
+  __ LoadHeapObject(v0, value);
   __ Ret();
 }
 
 
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
-                                                JSObject* holder,
-                                                Register receiver,
-                                                Register name_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                AccessorInfo* callback,
-                                                String* name,
-                                                Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Handle<AccessorInfo> callback,
+                                        Handle<String> name,
+                                        Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss, scratch1);
 
   // Check that the maps haven't changed.
-  Register reg =
-    CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
-                    name, miss);
+  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+                                 scratch2, scratch3, name, miss);
 
   // Build AccessorInfo::args_ list on the stack and push property name below
   // the exit frame to make GC aware of them and store pointers to them.
   __ push(receiver);
   __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
-  Handle<AccessorInfo> callback_handle(callback);
-  if (heap()->InNewSpace(callback_handle->data())) {
-    __ li(scratch3, callback_handle);
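+  // Callback data in new space cannot be embedded in the code object, so
+  // load it from the AccessorInfo in that case.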
+  if (heap()->InNewSpace(callback->data())) {
+    __ li(scratch3, callback);
     __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
   } else {
-    __ li(scratch3, Handle<Object>(callback_handle->data()));
+    __ li(scratch3, Handle<Object>(callback->data()));
   }
   __ Push(reg, scratch3, name_reg);
   __ mov(a2, scratch2);  // Saved in case scratch2 == a1.
   __ mov(a1, sp);  // a1 (first argument - see note below) = Handle<String>
 
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-
   // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
   // struct from the function (which is currently the case). This means we pass
   // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
   // will handle setting up a0.
 
   const int kApiStackSpace = 1;
-
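+  // MANUAL frame: the exit frame is entered explicitly below and left by
+  // CallApiFunctionAndReturn.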
+  FrameScope frame_scope(masm(), StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
+
   // Create AccessorInfo instance on the stack above the exit frame with
-  // scratch2 (internal::Object **args_) as the data.
+  // scratch2 (internal::Object** args_) as the data.
   __ sw(a2, MemOperand(sp, kPointerSize));
   // a2 (second argument - see note above) = AccessorInfo&
   __ Addu(a2, sp, kPointerSize);
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
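+  // Four words (receiver, holder, data, name) were pushed above; they are
+  // freed later by LeaveExitFrame.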
+  const int kStackUnwindSpace = 4;
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
   ExternalReference ref =
       ExternalReference(&fun,
                         ExternalReference::DIRECT_GETTER_CALL,
                         masm()->isolate());
-  // 4 args - will be freed later by LeaveExitFrame.
-  return masm()->TryCallApiFunctionAndReturn(ref, 4);
+  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+                                           Handle<JSObject> interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           String* name,
+                                           Handle<String> name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1296,13 +1255,13 @@
   // and CALLBACKS, so inline only them, other cases may be added
   // later.
   bool compile_followup_inline = false;
-  if (lookup->IsProperty() && lookup->IsCacheable()) {
+  if (lookup->IsFound() && lookup->IsCacheable()) {
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo() &&
-        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
-      compile_followup_inline = true;
+        lookup->GetCallbackObject()->IsAccessorInfo()) {
+      compile_followup_inline =
+          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
     }
   }
 
@@ -1317,47 +1276,44 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    __ EnterInternalFrame();
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        // CALLBACKS case needs a receiver to be passed into C++ callback.
+        __ Push(receiver, holder_reg, name_reg);
+      } else {
+        __ Push(holder_reg, name_reg);
+      }
+      // Invoke an interceptor.  Note: map checks from the receiver to the
+      // interceptor's holder have been compiled before (see the caller of
+      // this method).
+      CompileCallLoadPropertyWithInterceptor(masm(),
+                                             receiver,
+                                             holder_reg,
+                                             name_reg,
+                                             interceptor_holder);
+      // Check if interceptor provided a value for property.  If it's
+      // the case, return immediately.
+      Label interceptor_failed;
+      __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+      __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+      frame_scope.GenerateLeaveFrame();
+      __ Ret();
 
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      // CALLBACKS case needs a receiver to be passed into C++ callback.
-      __ Push(receiver, holder_reg, name_reg);
-    } else {
-      __ Push(holder_reg, name_reg);
+      __ bind(&interceptor_failed);
+      __ pop(name_reg);
+      __ pop(holder_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        __ pop(receiver);
+      }
+      // Leave the internal frame.
     }
-
-    // Invoke an interceptor.  Note: map checks from receiver to
-    // interceptor's holder has been compiled before (see a caller
-    // of this method).
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           interceptor_holder);
-
-    // Check if interceptor provided a value for property.  If it's
-    // the case, return immediately.
-    Label interceptor_failed;
-    __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
-    __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
-    __ LeaveInternalFrame();
-    __ Ret();
-
-    __ bind(&interceptor_failed);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      __ pop(receiver);
-    }
-
-    __ LeaveInternalFrame();
-
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   lookup->holder(),
+                                   Handle<JSObject>(lookup->holder()),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1369,21 +1325,21 @@
       // We found FIELD property in prototype chain of interceptor's holder.
       // Retrieve a field from field's holder.
       GenerateFastPropertyLoad(masm(), v0, holder_reg,
-                               lookup->holder(), lookup->GetFieldIndex());
+                               Handle<JSObject>(lookup->holder()),
+                               lookup->GetFieldIndex());
       __ Ret();
     } else {
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
-      ASSERT(callback != NULL);
+      Handle<AccessorInfo> callback(
+          AccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
       // Important invariant in CALLBACKS case: the code above must be
       // structured to never clobber |receiver| register.
-      __ li(scratch2, Handle<AccessorInfo>(callback));
+      __ li(scratch2, callback);
       // holder_reg is either receiver or scratch1.
       if (!receiver.is(holder_reg)) {
         ASSERT(scratch1.is(holder_reg));
@@ -1419,16 +1375,16 @@
 }
 
 
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ Branch(miss, ne, a2, Operand(Handle<String>(name)));
+    __ Branch(miss, ne, a2, Operand(name));
   }
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
-                                                   JSObject* holder,
-                                                   String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<String> name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1438,27 +1394,22 @@
   // Get the receiver from the stack.
   __ lw(a0, MemOperand(sp, argc * kPointerSize));
 
-  // If the object is the holder then we know that it's a global
-  // object which can only happen for contextual calls. In this case,
-  // the receiver cannot be a smi.
-  if (object != holder) {
-    __ JumpIfSmi(a0, miss);
-  }
-
   // Check that the maps haven't changed.
+  __ JumpIfSmi(a0, miss);
   CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Label* miss) {
   // Get the value from the cell.
-  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ li(a3, Operand(cell));
   __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
 
   // Check that the cell contains the same function.
-  if (heap()->InNewSpace(function)) {
+  if (heap()->InNewSpace(*function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1473,27 +1424,24 @@
     __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
     __ Branch(miss, ne, t0, Operand(a3));
   } else {
-    __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
+    __ Branch(miss, ne, a1, Operand(function));
   }
 }
 
 
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+  Handle<Code> code =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                kind_,
-                                               extra_ic_state_);
-  Object* obj;
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
-  return obj;
+                                               extra_state_);
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -1513,23 +1461,23 @@
   Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
   GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
 
-  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(FIELD, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -1539,7 +1487,7 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
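+  // A null code handle signals the caller to fall back to the regular
+  // call stub.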
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss;
 
@@ -1555,8 +1503,8 @@
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(JSObject::cast(object), receiver,
-                  holder, a3, v0, t0, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, v0, t0,
+                  name, &miss);
 
   if (argc == 0) {
     // Nothing to do, just return the length.
@@ -1565,22 +1513,20 @@
     __ Ret();
   } else {
     Label call_builtin;
-
-    Register elements = a3;
-    Register end_elements = t1;
-
-    // Get the elements array of the object.
-    __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
-    // Check that the elements are in fast mode and writable.
-    __ CheckMap(elements,
-                v0,
-                Heap::kFixedArrayMapRootIndex,
-                &call_builtin,
-                DONT_DO_SMI_CHECK);
-
     if (argc == 1) {  // Otherwise fall through to call the builtin.
-      Label exit, with_write_barrier, attempt_to_grow_elements;
+      Label attempt_to_grow_elements;
+
+      Register elements = t2;
+      Register end_elements = t1;
+      // Get the elements array of the object.
+      __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+      // Check that the elements are in fast mode and writable.
+      __ CheckMap(elements,
+                  v0,
+                  Heap::kFixedArrayMapRootIndex,
+                  &call_builtin,
+                  DONT_DO_SMI_CHECK);
 
       // Get the array's length into v0 and calculate new length.
       __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1588,35 +1534,77 @@
       STATIC_ASSERT(kSmiTag == 0);
       __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
 
-      // Get the element's length.
+      // Get the elements' length.
       __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
       __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
 
+      // Check if value is a smi.
+      Label with_write_barrier;
+      __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+      __ JumpIfNotSmi(t0, &with_write_barrier);
+
       // Save new length.
       __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
 
-      // Push the element.
-      __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+      // Store the value.
       // The address of the new element may be needed in a register below,
       // so compute it into end_elements.
       __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
       __ Addu(end_elements, elements, end_elements);
       const int kEndElementsOffset =
           FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
-      __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
-      __ Addu(end_elements, end_elements, kPointerSize);
+      __ Addu(end_elements, end_elements, kEndElementsOffset);
+      __ sw(t0, MemOperand(end_elements));
 
       // The stored value is a smi, so no write barrier is needed.
-      __ JumpIfNotSmi(t0, &with_write_barrier);
-      __ bind(&exit);
       __ Drop(argc + 1);
       __ Ret();
 
       __ bind(&with_write_barrier);
-      __ InNewSpace(elements, t0, eq, &exit);
-      __ RecordWriteHelper(elements, end_elements, t0);
+
+      __ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+      if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+        Label fast_object, not_fast_object;
+        __ CheckFastObjectElements(a3, t3, &not_fast_object);
+        __ jmp(&fast_object);
+        // In case of fast smi-only, convert to fast object, otherwise bail out.
+        __ bind(&not_fast_object);
+        __ CheckFastSmiOnlyElements(a3, t3, &call_builtin);
+        // receiver: the array receiver
+        // a3: map of the receiver
+        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                               FAST_ELEMENTS,
+                                               a3,
+                                               t3,
+                                               &call_builtin);
+        __ mov(a2, receiver);
+        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+        __ bind(&fast_object);
+      } else {
+        __ CheckFastObjectElements(a3, a3, &call_builtin);
+      }
+
+      // Save new length.
+      __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+      // Store the value.
+      // The address of the new element is needed again below (for the
+      // write barrier), so compute it into end_elements.
+      __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+      __ Addu(end_elements, elements, end_elements);
+      __ Addu(end_elements, end_elements, kEndElementsOffset);
+      __ sw(t0, MemOperand(end_elements));
+
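+      // The value is known to be a heap object at this point, so the smi
+      // check in RecordWrite can be omitted.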
+      __ RecordWrite(elements,
+                     end_elements,
+                     t0,
+                     kRAHasNotBeenSaved,
+                     kDontSaveFPRegs,
+                     EMIT_REMEMBERED_SET,
+                     OMIT_SMI_CHECK);
       __ Drop(argc + 1);
       __ Ret();
 
@@ -1628,6 +1616,15 @@
         __ Branch(&call_builtin);
       }
 
+      __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
+      // Growing elements that are SMI-only requires special handling in case
+      // the new element is non-Smi. For now, delegate to the builtin.
+      Label no_fast_elements_check;
+      __ JumpIfSmi(a2, &no_fast_elements_check);
+      __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(t3, t3, &call_builtin);
+      __ bind(&no_fast_elements_check);
+
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(
               masm()->isolate());
@@ -1641,24 +1638,23 @@
       __ Addu(end_elements, elements, end_elements);
       __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
       __ li(t3, Operand(new_space_allocation_top));
-      __ lw(t2, MemOperand(t3));
-      __ Branch(&call_builtin, ne, end_elements, Operand(t2));
+      __ lw(a3, MemOperand(t3));
+      __ Branch(&call_builtin, ne, end_elements, Operand(a3));
 
       __ li(t5, Operand(new_space_allocation_limit));
       __ lw(t5, MemOperand(t5));
-      __ Addu(t2, t2, Operand(kAllocationDelta * kPointerSize));
-      __ Branch(&call_builtin, hi, t2, Operand(t5));
+      __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
+      __ Branch(&call_builtin, hi, a3, Operand(t5));
 
       // We fit and could grow elements.
       // Update new_space_allocation_top.
-      __ sw(t2, MemOperand(t3));
+      __ sw(a3, MemOperand(t3));
       // Push the argument.
-      __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize));
-      __ sw(t2, MemOperand(end_elements));
+      __ sw(a2, MemOperand(end_elements));
       // Fill the rest with holes.
-      __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+      __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
-        __ sw(t2, MemOperand(end_elements, i * kPointerSize));
+        __ sw(a3, MemOperand(end_elements, i * kPointerSize));
       }
 
       // Update elements' and array's sizes.
@@ -1679,19 +1675,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                                   JSObject* holder,
-                                                   JSGlobalPropertyCell* cell,
-                                                   JSFunction* function,
-                                                   String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -1701,25 +1697,22 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss, return_undefined, call_builtin;
-
   Register receiver = a1;
   Register elements = a3;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
-
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(JSObject::cast(object),
-                  receiver, holder, elements, t0, v0, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
+                  t0, v0, name, &miss);
 
   // Get the elements array of the object.
   __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1768,20 +1761,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -1791,10 +1783,9 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
@@ -1802,7 +1793,7 @@
   Label* index_out_of_range_label = &index_out_of_range;
 
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+      (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
@@ -1814,13 +1805,12 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             v0,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
-                  a1, a3, t0, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  v0, holder, a1, a3, t0, name, &miss);
 
   Register receiver = a1;
   Register index = t1;
-  Register scratch = a3;
   Register result = v0;
   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
   if (argc > 0) {
@@ -1829,20 +1819,19 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharCodeAtGenerator char_code_at_generator(receiver,
-                                                   index,
-                                                   scratch,
-                                                   result,
-                                                   &miss,  // When not a string.
-                                                   &miss,  // When not a number.
-                                                   index_out_of_range_label,
-                                                   STRING_INDEX_IS_NUMBER);
-  char_code_at_generator.GenerateFast(masm());
+  StringCharCodeAtGenerator generator(receiver,
+                                      index,
+                                      result,
+                                      &miss,  // When not a string.
+                                      &miss,  // When not a number.
+                                      index_out_of_range_label,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_code_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1853,22 +1842,21 @@
 
   __ bind(&miss);
   // Restore function name in a2.
-  __ li(a2, Handle<String>(name));
+  __ li(a2, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -1878,21 +1866,18 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
-
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+      (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
   GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
@@ -1900,14 +1885,13 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             v0,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
-                  a1, a3, t0, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  v0, holder, a1, a3, t0, name, &miss);
 
   Register receiver = v0;
   Register index = t1;
-  Register scratch1 = a1;
-  Register scratch2 = a3;
+  Register scratch = a3;
   Register result = v0;
   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
   if (argc > 0) {
@@ -1916,21 +1900,20 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch1,
-                                          scratch2,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          index_out_of_range_label,
-                                          STRING_INDEX_IS_NUMBER);
-  char_at_generator.GenerateFast(masm());
+  StringCharAtGenerator generator(receiver,
+                                  index,
+                                  scratch,
+                                  result,
+                                  &miss,  // When not a string.
+                                  &miss,  // When not a number.
+                                  index_out_of_range_label,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1941,22 +1924,21 @@
 
   __ bind(&miss);
   // Restore function name in a2.
-  __ li(a2, Handle<String>(name));
+  __ li(a2, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -1969,22 +1951,23 @@
 
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(a1, &miss);
 
-    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2000,34 +1983,35 @@
   // Convert the smi code to uint16.
   __ And(code, code, Operand(Smi::FromInt(0xffff)));
 
-  StringCharFromCodeGenerator char_from_code_generator(code, v0);
-  char_from_code_generator.GenerateFast(masm());
+  StringCharFromCodeGenerator generator(code, v0);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_from_code_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // a2: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2036,30 +2020,29 @@
   //  -- sp[argc * 4]           : receiver
   // -----------------------------------
 
-  if (!CpuFeatures::IsSupported(FPU))
-    return heap()->undefined_value();
+  if (!CpuFeatures::IsSupported(FPU)) {
+    return Handle<Code>::null();
+  }
+
   CpuFeatures::Scope scope_fpu(FPU);
-
   const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss, slow;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(a1, &miss);
-
-    CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2145,23 +2128,24 @@
   __ bind(&slow);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // a2: function name.
-  MaybeObject* obj = GenerateMissBranch();
-  if (obj->IsFailure()) return obj;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
-                                                  JSObject* holder,
-                                                  JSGlobalPropertyCell* cell,
-                                                  JSFunction* function,
-                                                  String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2171,25 +2155,23 @@
   // -----------------------------------
 
   const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
+
   GenerateNameCheck(name, &miss);
-
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(a1, &miss);
-
-    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2247,37 +2229,37 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // a2: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
 
   Counters* counters = isolate()->counters();
 
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to global receiver.
-  if (object->IsGlobalObject()) return heap()->undefined_value();
-  if (cell != NULL) return heap()->undefined_value();
-  if (!object->IsJSObject()) return heap()->undefined_value();
+  if (object->IsGlobalObject()) return Handle<Code>::null();
+  if (!cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSObject()) return Handle<Code>::null();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-            JSObject::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+      Handle<JSObject>::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
 
   Label miss, miss_before_stack_reserved;
 
@@ -2296,40 +2278,37 @@
   ReserveSpaceForFastApiCall(masm(), a0);
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+  CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
                   depth, &miss);
 
-  MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
-  if (result->IsFailure()) return result;
+  GenerateFastApiDirectCall(masm(), optimization, argc);
 
   __ bind(&miss);
   FreeSpaceForFastApiCall(masm());
 
   __ bind(&miss_before_stack_reserved);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> function,
+                                                   Handle<String> name,
                                                    CheckType check) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, NULL, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // Undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
@@ -2342,23 +2321,20 @@
 
   // Check that the receiver isn't a smi.
   if (check != NUMBER_CHECK) {
-    __ And(t1, a1, Operand(kSmiTagMask));
-    __ Branch(&miss, eq, t1, Operand(zero_reg));
+    __ JumpIfSmi(a1, &miss);
   }
 
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
-  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(masm()->isolate()->counters()->call_const(),
           1, a0, a3);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
-                      &miss);
+      CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
+                      name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2369,50 +2345,46 @@
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      } else {
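+      // Builtins and non-classic-mode functions accept a value receiver
+      // as-is; all other callees require the receiver to be boxed.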
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         // Check that the object is a two-byte string or a symbol.
         __ GetObjectType(a1, a3, a3);
         __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
-                        a1, t0, name, &miss);
-      }
-      break;
-
-    case NUMBER_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            a0, holder, a3, a1, t0, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case NUMBER_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a smi or a heap number.
-        __ And(t1, a1, Operand(kSmiTagMask));
-        __ Branch(&fast, eq, t1, Operand(zero_reg));
+        __ JumpIfSmi(a1, &fast);
         __ GetObjectType(a1, a0, a0);
         __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
-                        a1, t0, name, &miss);
-      }
-      break;
-    }
-
-    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            a0, holder, a3, a1, t0, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case BOOLEAN_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a boolean.
         __ LoadRoot(t0, Heap::kTrueValueRootIndex);
@@ -2423,35 +2395,36 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
-                        a1, t0, name, &miss);
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            a0, holder, a3, a1, t0, name, &miss);
+      } else {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
       }
       break;
     }
 
-    default:
-      UNREACHABLE();
-  }
-
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
 
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -2463,71 +2436,54 @@
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ lw(a1, MemOperand(sp, argc * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), a2, extra_ic_state_);
-  MaybeObject* result = compiler.Compile(masm(),
-                                         object,
-                                         holder,
-                                         name,
-                                         &lookup,
-                                         a1,
-                                         a3,
-                                         t0,
-                                         a0,
-                                         &miss);
-  if (result->IsFailure()) {
-    return result;
-  }
+  CallInterceptorCompiler compiler(this, arguments(), a2, extra_state_);
+  compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
+                   &miss);
 
   // Move returned value, the function to call, to a1.
   __ mov(a1, v0);
   // Restore receiver.
   __ lw(a0, MemOperand(sp, argc * kPointerSize));
 
-  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, cell, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // Undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
@@ -2538,40 +2494,37 @@
     __ sw(a3, MemOperand(sp, argc * kPointerSize));
   }
 
-  // Setup the context (function already in r1).
+  // Set up the context (function already in a1).
   __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
-  ASSERT(function->is_compiled());
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  if (V8::UseCrankshaft()) {
-    UNIMPLEMENTED_MIPS();
-  } else {
-    __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
-                  JUMP_FUNCTION, call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
+                NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(NORMAL, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                   int index,
-                                                  Map* transition,
-                                                  String* name) {
+                                                  Handle<Map> transition,
+                                                  Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : receiver
@@ -2581,25 +2534,21 @@
   Label miss;
 
   // Name register might be clobbered.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     a1, a2, a3,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, a1, a2, a3, &miss);
   __ bind(&miss);
   __ li(a2, Operand(Handle<String>(name)));  // Restore name.
   Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
-                                                     AccessorInfo* callback,
-                                                     String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+    Handle<JSObject> object,
+    Handle<AccessorInfo> callback,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : receiver
@@ -2608,12 +2557,9 @@
   // -----------------------------------
   Label miss;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(a1, &miss);
-
   // Check that the map of the object hasn't changed.
-  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
-  __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+  __ CheckMap(a1, a3, Handle<Map>(object->map()), &miss,
+              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
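+  // DO_SMI_CHECK folds the smi check into CheckMap;
+  // ALLOW_ELEMENT_TRANSITION_MAPS also accepts maps that differ only in
+  // elements kind.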
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -2625,7 +2571,7 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   __ push(a1);  // Receiver.
-  __ li(a3, Operand(Handle<AccessorInfo>(callback)));  // Callback info.
+  __ li(a3, Operand(callback));  // Callback info.
   __ Push(a3, a2, a0);
 
   // Do tail-call to the runtime system.
@@ -2644,8 +2590,9 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
-                                                        String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+    Handle<JSObject> receiver,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : receiver
@@ -2654,12 +2601,9 @@
   // -----------------------------------
   Label miss;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(a1, &miss);
-
   // Check that the map of the object hasn't changed.
-  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
-  __ Branch(&miss, ne, a3, Operand(Handle<Map>(receiver->map())));
+  __ CheckMap(a1, a3, Handle<Map>(receiver->map()), &miss,
+              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform global security token check if needed.
   if (receiver->IsJSGlobalProxy()) {
@@ -2691,9 +2635,10 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
-                                                   JSGlobalPropertyCell* cell,
-                                                   String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+    Handle<GlobalObject> object,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : receiver
@@ -2710,7 +2655,7 @@
   // cell could have been deleted and reintroducing the global needs
   // to update the property details in the property dictionary of the
   // global object. We bail out to the runtime system to do that.
-  __ li(t0, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ li(t0, Operand(cell));
   __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
   __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
   __ Branch(&miss, eq, t1, Operand(t2));
@@ -2718,6 +2663,8 @@
   // Store the value in the cell.
   __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
   __ mov(v0, a0);  // Stored value must be returned in v0.
+  // Cells are always rescanned, so no write barrier here.
+
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
   __ Ret();
@@ -2733,9 +2680,9 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
-                                                      JSObject* object,
-                                                      JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+                                                      Handle<JSObject> object,
+                                                      Handle<JSObject> last) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- ra    : return address
@@ -2751,15 +2698,8 @@
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
-    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
-                                                  GlobalObject::cast(last),
-                                                  name,
-                                                  a1,
-                                                  &miss);
-    if (cell->IsFailure()) {
-      miss.Unuse();
-      return cell;
-    }
+    GenerateCheckPropertyCell(
+        masm(), Handle<GlobalObject>::cast(last), name, a1, &miss);
   }
 
   // Return undefined if maps of the full prototype chain is still the same.
@@ -2770,14 +2710,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NONEXISTENT, heap()->empty_string());
+  return GetCode(NONEXISTENT, factory()->empty_string());
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -2796,24 +2736,19 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
   Label miss;
-
-  MaybeObject* result = GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0,
-                                             callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, callback, name,
+                       &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2822,10 +2757,10 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* value,
-                                                   String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> value,
+                                                   Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -2842,9 +2777,9 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -2853,17 +2788,9 @@
   // -----------------------------------
   Label miss;
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(object,
-                          holder,
-                          &lookup,
-                          a0,
-                          a2,
-                          a3,
-                          a1,
-                          t0,
-                          name,
+  GenerateLoadInterceptor(object, holder, &lookup, a0, a2, a3, a1, t0, name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2873,11 +2800,12 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 String* name,
-                                                 bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name,
+    bool is_dont_delete) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -2885,19 +2813,12 @@
   // -----------------------------------
   Label miss;
 
-  // If the object is the holder then we know that it's a global
-  // object which can only happen for contextual calls. In this case,
-  // the receiver cannot be a smi.
-  if (object != holder) {
-    __ And(t0, a0, Operand(kSmiTagMask));
-    __ Branch(&miss, eq, t0, Operand(zero_reg));
-  }
-
   // Check that the map of the global has not changed.
+  __ JumpIfSmi(a0, &miss);
   CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
 
   // Get the value from the cell.
-  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ li(a3, Operand(cell));
   __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -2920,9 +2841,9 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
-                                                     JSObject* receiver,
-                                                     JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+                                                     Handle<JSObject> receiver,
+                                                     Handle<JSObject> holder,
                                                      int index) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
@@ -2932,7 +2853,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
   GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
   __ bind(&miss);
@@ -2942,11 +2863,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
-    String* name,
-    JSObject* receiver,
-    JSObject* holder,
-    AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -2955,15 +2876,10 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
-  MaybeObject* result = GenerateLoadCallback(receiver, holder, a1, a0, a2, a3,
-                                             t0, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, callback, name,
+                       &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -2971,10 +2887,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
-                                                        JSObject* receiver,
-                                                        JSObject* holder,
-                                                        Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<JSFunction> value) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -2983,7 +2900,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
   GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
   __ bind(&miss);
@@ -2994,9 +2911,10 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                           JSObject* holder,
-                                                           String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3005,19 +2923,11 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          a1,
-                          a0,
-                          a2,
-                          a3,
-                          t0,
-                          name,
+  GenerateLoadInterceptor(receiver, holder, &lookup, a1, a0, a2, a3, t0, name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3026,7 +2936,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3035,7 +2946,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
   GenerateLoadArrayLength(masm(), a1, a2, &miss);
   __ bind(&miss);
@@ -3045,7 +2956,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3057,7 +2969,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
   GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
   __ bind(&miss);
@@ -3069,7 +2981,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3081,7 +2994,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
 
   // Check the name hasn't changed.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
   GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
   __ bind(&miss);
@@ -3092,33 +3005,29 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
   //  -- a1    : receiver
   // -----------------------------------
-  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
-  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(a1,
-                 a2,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+  __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3130,9 +3039,8 @@
   int receiver_count = receiver_maps->length();
   __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
   for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map(receiver_maps->at(current));
-    Handle<Code> code(handler_ics->at(current));
-    __ Jump(code, RelocInfo::CODE_TARGET, eq, a2, Operand(map));
+    __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET,
+        eq, a2, Operand(receiver_maps->at(current)));
   }
 
   __ bind(&miss);
@@ -3140,14 +3048,14 @@
   __ Jump(miss_ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                        int index,
-                                                       Map* transition,
-                                                       String* name) {
+                                                       Handle<Map> transition,
+                                                       Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -3161,16 +3069,11 @@
   __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
 
   // Check that the name has not changed.
-  __ Branch(&miss, ne, a1, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a1, Operand(name));
 
   // a3 is used as scratch register. a1 and a2 keep their values if a jump to
   // the miss label is generated.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     a2, a1, a3,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, a2, a1, a3, &miss);
   __ bind(&miss);
 
   __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
@@ -3178,11 +3081,12 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -3190,29 +3094,25 @@
   //  -- ra    : return address
   //  -- a3    : scratch
   // -----------------------------------
-  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  MaybeObject* maybe_stub =
-      KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(a2,
-                 a3,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub =
+      KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
+
+  __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -3225,10 +3125,17 @@
 
   int receiver_count = receiver_maps->length();
   __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map(receiver_maps->at(current));
-    Handle<Code> code(handler_ics->at(current));
-    __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
+  for (int i = 0; i < receiver_count; ++i) {
+    if (transitioned_maps->at(i).is_null()) {
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
+          a3, Operand(receiver_maps->at(i)));
+    } else {
+      Label next_map;
+      __ Branch(&next_map, ne, a3, Operand(receiver_maps->at(i)));
+      __ li(a3, Operand(transitioned_maps->at(i)));
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+      __ bind(&next_map);
+    }
   }
 
   __ bind(&miss);
@@ -3236,11 +3143,12 @@
   __ Jump(miss_ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
 
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+    Handle<JSFunction> function) {
   // a0    : argc
   // a1    : constructor
   // ra    : return address
@@ -3263,8 +3171,7 @@
   // a1: constructor function
   // t7: undefined
   __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-  __ And(t0, a2, Operand(kSmiTagMask));
-  __ Branch(&generic_stub_call, eq, t0, Operand(zero_reg));
+  __ JumpIfSmi(a2, &generic_stub_call);
   __ GetObjectType(a2, a3, t0);
   __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
 
@@ -3285,12 +3192,7 @@
   // a2: initial map
   // t7: undefined
   __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-  __ AllocateInNewSpace(a3,
-                        t4,
-                        t5,
-                        t6,
-                        &generic_stub_call,
-                        SIZE_IN_WORDS);
+  __ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
 
   // Allocated the JSObject, now initialize the fields. Map is set to initial
   // map and properties and elements are set to empty fixed array.
@@ -3325,7 +3227,7 @@
   // t7: undefined
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  SharedFunctionInfo* shared = function->shared();
+  Handle<SharedFunctionInfo> shared(function->shared());
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       Label not_passed, next;
@@ -3457,6 +3359,7 @@
 
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
@@ -3553,6 +3456,7 @@
       }
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3719,7 +3623,7 @@
 
       __ li(t0, 0x7ff);
       __ Xor(t1, t5, Operand(0xFF));
-      __ movz(t5, t0, t1);  // Set t5 to 0x7ff only if t5 is equal to 0xff.
+      __ Movz(t5, t0, t1);  // Set t5 to 0x7ff only if t5 is equal to 0xff.
       __ Branch(&exponent_rebiased, eq, t0, Operand(0xff));
 
       // Rebias exponent.
@@ -3795,9 +3699,9 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 
   __ bind(&miss_force_generic);
-  Code* stub = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
 }
 
 
@@ -3828,7 +3732,6 @@
   __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
 
   // Check that the index is in range.
-  __ SmiUntag(t0, key);
   __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
   __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
@@ -3836,7 +3739,6 @@
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
   // a3: external array.
-  // t0: key (integer).
 
   if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
     // Double to pixel conversion is only implemented in the runtime for now.
@@ -3848,7 +3750,6 @@
   __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
 
   // a3: base pointer of external storage.
-  // t0: key (integer).
   // t1: value (integer).
 
   switch (elements_kind) {
@@ -3865,33 +3766,36 @@
       __ mov(v0, t1);  // Value is in range 0..255.
       __ bind(&done);
       __ mov(t1, v0);
-      __ addu(t8, a3, t0);
+
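+      // The key is a tagged smi (index << 1); srl by 1 untags it into the
+      // byte offset for 1-byte elements.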
+      __ srl(t8, key, 1);
+      __ addu(t8, a3, t8);
       __ sb(t1, MemOperand(t8, 0));
       }
       break;
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      __ addu(t8, a3, t0);
+      __ srl(t8, key, 1);
+      __ addu(t8, a3, t8);
       __ sb(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_SHORT_ELEMENTS:
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ sll(t8, t0, 1);
-      __ addu(t8, a3, t8);
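+      // A smi key is index * 2, which is already the byte offset for
+      // 2-byte elements.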
+      __ addu(t8, a3, key);
       __ sh(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      __ sll(t8, t0, 2);
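+      // sll by 1 scales the smi (index * 2) up to index * 4, the byte
+      // offset for 4-byte elements.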
+      __ sll(t8, key, 1);
       __ addu(t8, a3, t8);
       __ sw(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_FLOAT_ELEMENTS:
       // Perform int-to-float conversion and store to memory.
+      __ SmiUntag(t0, key);
       StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
       break;
     case EXTERNAL_DOUBLE_ELEMENTS:
-      __ sll(t8, t0, 3);
+      __ sll(t8, key, 2);
       __ addu(a3, a3, t8);
       // a3: effective address of the double element
       FloatingPointHelper::Destination destination;
@@ -3913,6 +3817,7 @@
       }
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3921,12 +3826,11 @@
   }
 
   // Entry registers are intact, a0 holds the value which is the return value.
-  __ mov(v0, value);
+  __ mov(v0, a0);
   __ Ret();
 
   if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
     // a3: external array.
-    // t0: index (integer).
     __ bind(&check_heap_number);
     __ GetObjectType(value, t1, t2);
     __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
@@ -3934,7 +3838,6 @@
     __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
 
     // a3: base pointer of external storage.
-    // t0: key (integer).
 
     // The WebGL specification leaves the behavior of storing NaN and
     // +/-Infinity into integer arrays basically undefined. For more
@@ -3947,11 +3850,11 @@
 
       if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
         __ cvt_s_d(f0, f0);
-        __ sll(t8, t0, 2);
+        __ sll(t8, key, 1);
         __ addu(t8, a3, t8);
         __ swc1(f0, MemOperand(t8, 0));
       } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-        __ sll(t8, t0, 3);
+        __ sll(t8, key, 2);
         __ addu(t8, a3, t8);
         __ sdc1(f0, MemOperand(t8, 0));
       } else {
@@ -3960,18 +3863,18 @@
         switch (elements_kind) {
           case EXTERNAL_BYTE_ELEMENTS:
           case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-            __ addu(t8, a3, t0);
+            __ srl(t8, key, 1);
+            __ addu(t8, a3, t8);
             __ sb(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_SHORT_ELEMENTS:
           case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-            __ sll(t8, t0, 1);
-            __ addu(t8, a3, t8);
+            __ addu(t8, a3, key);
             __ sh(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_INT_ELEMENTS:
           case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-            __ sll(t8, t0, 2);
+            __ sll(t8, key, 1);
             __ addu(t8, a3, t8);
             __ sw(t3, MemOperand(t8, 0));
             break;
@@ -3979,6 +3882,7 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
+          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3989,7 +3893,7 @@
 
       // Entry registers are intact, a0 holds the value
       // which is the return value.
-      __ mov(v0, value);
+      __ mov(v0, a0);
       __ Ret();
     } else {
       // FPU is not available, do manual conversions.
@@ -4013,7 +3917,7 @@
 
         __ xor_(t1, t6, t5);
         __ li(t2, kBinary32ExponentMask);
-        __ movz(t6, t2, t1);  // Only if t6 is equal to t5.
+        __ Movz(t6, t2, t1);  // Only if t6 is equal to t5.
         __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(t5));
 
         // Rebias exponent.
@@ -4026,12 +3930,12 @@
         __ Slt(t1, t1, t6);
         __ And(t2, t3, Operand(HeapNumber::kSignMask));
         __ Or(t2, t2, Operand(kBinary32ExponentMask));
-        __ movn(t3, t2, t1);  // Only if t6 is gt kBinary32MaxExponent.
+        __ Movn(t3, t2, t1);  // Only if t6 is gt kBinary32MaxExponent.
         __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
 
         __ Slt(t1, t6, Operand(kBinary32MinExponent));
         __ And(t2, t3, Operand(HeapNumber::kSignMask));
-        __ movn(t3, t2, t1);  // Only if t6 is lt kBinary32MinExponent.
+        __ Movn(t3, t2, t1);  // Only if t6 is lt kBinary32MinExponent.
         __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
 
         __ And(t7, t3, Operand(HeapNumber::kSignMask));
@@ -4044,13 +3948,13 @@
         __ or_(t3, t7, t6);
 
         __ bind(&done);
-        __ sll(t9, a1, 2);
+        __ sll(t9, key, 1);
         __ addu(t9, a2, t9);
         __ sw(t3, MemOperand(t9, 0));
 
         // Entry registers are intact, a0 holds the value which is the return
         // value.
-        __ mov(v0, value);
+        __ mov(v0, a0);
         __ Ret();
 
         __ bind(&nan_or_infinity_or_zero);
@@ -4068,6 +3972,7 @@
         // t8: effective address of destination element.
         __ sw(t4, MemOperand(t8, 0));
         __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+        __ mov(v0, a0);
         __ Ret();
       } else {
         bool is_signed_type = IsElementTypeSigned(elements_kind);
@@ -4080,11 +3985,11 @@
         // and infinities. All these should be converted to 0.
         __ li(t5, HeapNumber::kExponentMask);
         __ and_(t6, t3, t5);
-        __ movz(t3, zero_reg, t6);  // Only if t6 is equal to zero.
+        __ Movz(t3, zero_reg, t6);  // Only if t6 is equal to zero.
         __ Branch(&done, eq, t6, Operand(zero_reg));
 
         __ xor_(t2, t6, t5);
-        __ movz(t3, zero_reg, t2);  // Only if t6 is equal to t5.
+        __ Movz(t3, zero_reg, t2);  // Only if t6 is equal to t5.
         __ Branch(&done, eq, t6, Operand(t5));
 
         // Unbias exponent.
@@ -4092,13 +3997,13 @@
         __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
         // If exponent is negative then result is 0.
         __ slt(t2, t6, zero_reg);
-        __ movn(t3, zero_reg, t2);  // Only if exponent is negative.
+        __ Movn(t3, zero_reg, t2);  // Only if exponent is negative.
         __ Branch(&done, lt, t6, Operand(zero_reg));
 
         // If exponent is too big then result is minimal value.
         __ slti(t1, t6, meaningfull_bits - 1);
         __ li(t2, min_value);
-        __ movz(t3, t2, t1);  // Only if t6 is ge meaningfull_bits - 1.
+        __ Movz(t3, t2, t1);  // Only if t6 is ge meaningfull_bits - 1.
         __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));
 
         __ And(t5, t3, Operand(HeapNumber::kSignMask));
@@ -4109,7 +4014,7 @@
         __ subu(t6, t9, t6);
         __ slt(t1, t6, zero_reg);
         __ srlv(t2, t3, t6);
-        __ movz(t3, t2, t1);  // Only if t6 is positive.
+        __ Movz(t3, t2, t1);  // Only if t6 is positive.
         __ Branch(&sign, ge, t6, Operand(zero_reg));
 
         __ subu(t6, zero_reg, t6);
@@ -4121,7 +4026,7 @@
 
         __ bind(&sign);
         __ subu(t2, t3, zero_reg);
-        __ movz(t3, t2, t5);  // Only if t5 is zero.
+        __ Movz(t3, t2, t5);  // Only if t5 is zero.
 
         __ bind(&done);
 
@@ -4130,18 +4035,18 @@
         switch (elements_kind) {
           case EXTERNAL_BYTE_ELEMENTS:
           case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-            __ addu(t8, a3, t0);
+            __ srl(t8, key, 1);
+            __ addu(t8, a3, t8);
             __ sb(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_SHORT_ELEMENTS:
           case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-            __ sll(t8, t0, 1);
-            __ addu(t8, a3, t8);
+            __ addu(t8, a3, key);
             __ sh(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_INT_ELEMENTS:
           case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-            __ sll(t8, t0, 2);
+            __ sll(t8, key, 1);
             __ addu(t8, a3, t8);
             __ sw(t3, MemOperand(t8, 0));
             break;
@@ -4149,6 +4054,7 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
+          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4201,7 +4107,8 @@
   // have been verified by the caller to not be a smi.
 
   // Check that the key is a smi.
-  __ JumpIfNotSmi(a0, &miss_force_generic);
+  __ JumpIfNotSmi(a0, &miss_force_generic, at, USE_DELAY_SLOT);
+  // The delay slot can be safely used here; a1 is an object pointer.
 
   // Get the elements array.
   __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
@@ -4209,7 +4116,7 @@
 
   // Check that the key is within bounds.
   __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
-  __ Branch(&miss_force_generic, hs, a0, Operand(a3));
+  __ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3));
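+  // The Addu below executes in the branch delay slot; it only writes a3,
+  // which is dead on the miss path.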
 
   // Load the result and make sure it's not the hole.
   __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4219,13 +4126,13 @@
   __ lw(t0, MemOperand(t0));
   __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
   __ Branch(&miss_force_generic, eq, t0, Operand(t1));
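+  // The mov below executes in the Ret delay slot.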
+  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, t0);
-  __ Ret();
 
   __ bind(&miss_force_generic);
-  Code* stub = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
 }
 
 
@@ -4298,8 +4205,11 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
-                                                      bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+    MacroAssembler* masm,
+    bool is_js_array,
+    ElementsKind elements_kind,
+    KeyedAccessGrowMode grow_mode) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -4308,15 +4218,17 @@
   //  -- a3    : scratch (elements)
   //  -- t0    : scratch
   // -----------------------------------
-  Label miss_force_generic;
+  Label miss_force_generic, transition_elements_kind, grow, slow;
+  Label finish_store, check_capacity;
 
   Register value_reg = a0;
   Register key_reg = a1;
   Register receiver_reg = a2;
-  Register scratch = a3;
-  Register elements_reg = t0;
-  Register scratch2 = t1;
-  Register scratch3 = t2;
+  Register scratch = t0;
+  Register elements_reg = a3;
+  Register length_reg = t1;
+  Register scratch2 = t2;
+  Register scratch3 = t3;
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
@@ -4324,32 +4236,59 @@
   // Check that the key is a smi.
   __ JumpIfNotSmi(key_reg, &miss_force_generic);
 
-  // Get the elements array and make sure it is a fast element array, not 'cow'.
-  __ lw(elements_reg,
-        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-  __ CheckMap(elements_reg,
-              scratch,
-              Heap::kFixedArrayMapRootIndex,
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
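+  // Only smis may be stored into a FAST_SMI_ONLY_ELEMENTS array; any other
+  // value forces a transition to FAST_ELEMENTS first.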
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+  }
 
   // Check that the key is within bounds.
+  __ lw(elements_reg,
+        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
   if (is_js_array) {
     __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
   } else {
     __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
   }
   // Compare smis.
-  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    __ Branch(&grow, hs, key_reg, Operand(scratch));
+  } else {
+    __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+  }
 
-  __ Addu(scratch,
-          elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(scratch3, scratch2, scratch);
-  __ sw(value_reg, MemOperand(scratch3));
-  __ RecordWrite(scratch, Operand(scratch2), receiver_reg , elements_reg);
+  // Make sure elements is a fast element array, not 'cow'.
+  __ CheckMap(elements_reg,
+              scratch,
+              Heap::kFixedArrayMapRootIndex,
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
 
+  __ bind(&finish_store);
+
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ Addu(scratch,
+            elements_reg,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+    __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(scratch, scratch, scratch2);
+    __ sw(value_reg, MemOperand(scratch));
+  } else {
+    ASSERT(elements_kind == FAST_ELEMENTS);
+    __ Addu(scratch,
+            elements_reg,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+    __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(scratch, scratch, scratch2);
+    __ sw(value_reg, MemOperand(scratch));
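+    // receiver_reg is no longer needed; reuse it to pass the value to
+    // RecordWrite.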
+    __ mov(receiver_reg, value_reg);
+    __ RecordWrite(elements_reg,  // Object.
+                   scratch,       // Address.
+                   receiver_reg,  // Value.
+                   kRAHasNotBeenSaved,
+                   kDontSaveFPRegs);
+  }
   // value_reg (a0) is preserved.
   // Done.
   __ Ret();
@@ -4358,12 +4297,83 @@
   Handle<Code> ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    // Grow the array by a single element if possible.
+    __ bind(&grow);
+
+    // Make sure the array is only growing by a single element; anything else
+    // must be handled by the runtime.
+    __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch));
+
+    // Check for the empty array, and preallocate a small backing store if
+    // possible.
+    __ lw(length_reg,
+          FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ lw(elements_reg,
+          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+    __ Branch(&check_capacity, ne, elements_reg, Operand(at));
+
+    int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+    __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
+                          TAG_OBJECT);
+
+    __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+    __ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+    __ li(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+    __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
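+    // Hole-initialize slots 1 onward; slot 0 receives the stored value below.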
+    for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+      __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
+    }
+
+    // Store the element at index zero.
+    __ sw(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
+
+    // Install the new backing store in the JSArray.
+    __ sw(elements_reg,
+          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+                        scratch, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+    // Increment the length of the array.
+    __ li(length_reg, Operand(Smi::FromInt(1)));
+    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ Ret();
+
+    __ bind(&check_capacity);
+    // Check for COW elements; in general they are not handled by this stub.
+    __ CheckMap(elements_reg,
+                scratch,
+                Heap::kFixedCOWArrayMapRootIndex,
+                &miss_force_generic,
+                DONT_DO_SMI_CHECK);
+
+    __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+    __ Branch(&slow, hs, length_reg, Operand(scratch));
+
+    // Grow the array and finish the store.
+    __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
+    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ jmp(&finish_store);
+
+    __ bind(&slow);
+    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+  }
 }
 
 
 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     MacroAssembler* masm,
-    bool is_js_array) {
+    bool is_js_array,
+    KeyedAccessGrowMode grow_mode) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -4375,16 +4385,18 @@
   //  -- t2    : scratch (exponent_reg)
   //  -- t3    : scratch4
   // -----------------------------------
-  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+  Label miss_force_generic, transition_elements_kind, grow, slow;
+  Label finish_store, check_capacity;
 
   Register value_reg = a0;
   Register key_reg = a1;
   Register receiver_reg = a2;
-  Register scratch = a3;
-  Register elements_reg = t0;
-  Register mantissa_reg = t1;
-  Register exponent_reg = t2;
+  Register elements_reg = a3;
+  Register scratch1 = t0;
+  Register scratch2 = t1;
+  Register scratch3 = t2;
   Register scratch4 = t3;
+  Register length_reg = t3;
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
@@ -4395,90 +4407,31 @@
 
   // Check that the key is within bounds.
   if (is_js_array) {
-    __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
   } else {
-    __ lw(scratch,
+    __ lw(scratch1,
           FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
   }
   // Compare smis, unsigned compare catches both negative and out-of-bound
   // indexes.
-  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
-
-  // Handle smi values specially.
-  __ JumpIfSmi(value_reg, &smi_value);
-
-  // Ensure that the object is a heap number
-  __ CheckMap(value_reg,
-              scratch,
-              masm->isolate()->factory()->heap_number_map(),
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
-
-  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
-  // in the exponent.
-  __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
-  __ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
-  __ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch));
-
-  __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
-  __ bind(&have_double_value);
-  __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  __ Addu(scratch, elements_reg, Operand(scratch4));
-  __ sw(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ sw(exponent_reg, FieldMemOperand(scratch, offset));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, value_reg);  // In delay slot.
-
-  __ bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
-  __ Branch(&is_nan, gt, exponent_reg, Operand(scratch));
-  __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-  __ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
-
-  __ bind(&is_nan);
-  // Load canonical NaN for storing into the double array.
-  uint64_t nan_int64 = BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
-  __ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
-  __ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
-  __ jmp(&have_double_value);
-
-  __ bind(&smi_value);
-  __ Addu(scratch, elements_reg,
-          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  __ Addu(scratch, scratch, scratch4);
-  // scratch is now effective address of the double element
-
-  FloatingPointHelper::Destination destination;
-  if (CpuFeatures::IsSupported(FPU)) {
-    destination = FloatingPointHelper::kFPURegisters;
+  if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+    __ Branch(&grow, hs, key_reg, Operand(scratch1));
   } else {
-    destination = FloatingPointHelper::kCoreRegisters;
+    __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
   }
 
-  Register untagged_value = receiver_reg;
-  __ SmiUntag(untagged_value, value_reg);
-  FloatingPointHelper::ConvertIntToDouble(
-      masm,
-      untagged_value,
-      destination,
-      f0,
-      mantissa_reg,
-      exponent_reg,
-      scratch4,
-      f2);
-  if (destination == FloatingPointHelper::kFPURegisters) {
-    CpuFeatures::Scope scope(FPU);
-    __ sdc1(f0, MemOperand(scratch, 0));
-  } else {
-    __ sw(mantissa_reg, MemOperand(scratch, 0));
-    __ sw(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
-  }
+  __ bind(&finish_store);
+
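+  // StoreNumberToDoubleElements handles both smis and heap numbers; any
+  // other value jumps to transition_elements_kind.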
+  __ StoreNumberToDoubleElements(value_reg,
+                                 key_reg,
+                                 receiver_reg,
+                                 elements_reg,
+                                 scratch1,
+                                 scratch2,
+                                 scratch3,
+                                 scratch4,
+                                 &transition_elements_kind);
+
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, value_reg);  // In delay slot.
 
@@ -4487,6 +4440,75 @@
   Handle<Code> ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    // Grow the array by a single element if possible.
+    __ bind(&grow);
+
+    // Make sure the array is only growing by a single element; anything else
+    // must be handled by the runtime.
+    __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch1));
+
+    // Transition on values that can't be stored in a FixedDoubleArray.
+    Label value_is_smi;
+    __ JumpIfSmi(value_reg, &value_is_smi);
+    __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    __ Branch(&transition_elements_kind, ne, scratch1, Operand(at));
+    __ bind(&value_is_smi);
+
+    // Check for the empty array, and preallocate a small backing store if
+    // possible.
+    __ lw(length_reg,
+          FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ lw(elements_reg,
+          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+    __ Branch(&check_capacity, ne, elements_reg, Operand(at));
+
+    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+    __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
+                          TAG_OBJECT);
+
+    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+    // efficiency; they are guaranteed to be initialized before use.
+    __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
+    __ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+    __ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+    __ sw(scratch1,
+          FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+
+    // Install the new backing store in the JSArray.
+    __ sw(elements_reg,
+          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+                        scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+    // Increment the length of the array.
+    __ li(length_reg, Operand(Smi::FromInt(1)));
+    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ jmp(&finish_store);
+
+    __ bind(&check_capacity);
+    // Make sure that the backing store can hold additional elements.
+    __ lw(scratch1,
+          FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+    __ Branch(&slow, hs, length_reg, Operand(scratch1));
+
+    // Grow the array and finish the store.
+    __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
+    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ jmp(&finish_store);
+
+    __ bind(&slow);
+    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+  }
 }
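
The grow path added in the hunk above is easier to follow outside of MIPS assembly. Below is a minimal, self-contained C++ sketch of the same control flow; all names and the preallocation size are illustrative, since the real stub works on tagged Smi values in raw registers:

    #include <cstddef>
    #include <vector>

    enum class StoreResult { kStored, kMissForceGeneric, kTransition, kSlow };

    struct DoubleArrayModel {
      std::vector<double> elements;  // models the FixedDoubleArray backing store
      size_t length = 0;             // models the JSArray length (<= capacity)
    };

    constexpr size_t kPreallocatedElements = 4;  // illustrative preallocation size

    StoreResult KeyedStoreWithGrowth(DoubleArrayModel& a, size_t key,
                                     double value, bool value_is_number) {
      if (key >= a.length) {
        // Only growing by exactly one element is handled here (&grow).
        if (key != a.length) return StoreResult::kMissForceGeneric;
        // Values that are neither smis nor heap numbers cannot be stored in a
        // double array (&transition_elements_kind).
        if (!value_is_number) return StoreResult::kTransition;
        if (a.elements.empty()) {
          // Empty array: preallocate a small backing store.
          a.elements.resize(kPreallocatedElements);
          a.length = 1;
        } else if (a.length >= a.elements.size()) {
          return StoreResult::kSlow;  // backing store full; runtime reallocates
        } else {
          ++a.length;                 // grow in place (&check_capacity path)
        }
      }
      a.elements[key] = value;        // (&finish_store)
      return StoreResult::kStored;
    }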
 
 
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index e3f3c48..c43dd22 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -144,32 +144,32 @@
 
 
 // Type names of the different mirrors.
-const UNDEFINED_TYPE = 'undefined';
-const NULL_TYPE = 'null';
-const BOOLEAN_TYPE = 'boolean';
-const NUMBER_TYPE = 'number';
-const STRING_TYPE = 'string';
-const OBJECT_TYPE = 'object';
-const FUNCTION_TYPE = 'function';
-const REGEXP_TYPE = 'regexp';
-const ERROR_TYPE = 'error';
-const PROPERTY_TYPE = 'property';
-const FRAME_TYPE = 'frame';
-const SCRIPT_TYPE = 'script';
-const CONTEXT_TYPE = 'context';
-const SCOPE_TYPE = 'scope';
+var UNDEFINED_TYPE = 'undefined';
+var NULL_TYPE = 'null';
+var BOOLEAN_TYPE = 'boolean';
+var NUMBER_TYPE = 'number';
+var STRING_TYPE = 'string';
+var OBJECT_TYPE = 'object';
+var FUNCTION_TYPE = 'function';
+var REGEXP_TYPE = 'regexp';
+var ERROR_TYPE = 'error';
+var PROPERTY_TYPE = 'property';
+var FRAME_TYPE = 'frame';
+var SCRIPT_TYPE = 'script';
+var CONTEXT_TYPE = 'context';
+var SCOPE_TYPE = 'scope';
 
 // Maximum length when sending strings through the JSON protocol.
-const kMaxProtocolStringLength = 80;
+var kMaxProtocolStringLength = 80;
 
 // Different kind of properties.
-PropertyKind = {};
+var PropertyKind = {};
 PropertyKind.Named   = 1;
 PropertyKind.Indexed = 2;
 
 
 // A copy of the PropertyType enum from global.h
-PropertyType = {};
+var PropertyType = {};
 PropertyType.Normal                  = 0;
 PropertyType.Field                   = 1;
 PropertyType.ConstantFunction        = 2;
@@ -183,7 +183,7 @@
 
 
 // Different attributes for a property.
-PropertyAttribute = {};
+var PropertyAttribute = {};
 PropertyAttribute.None       = NONE;
 PropertyAttribute.ReadOnly   = READ_ONLY;
 PropertyAttribute.DontEnum   = DONT_ENUM;
@@ -191,12 +191,12 @@
 
 
 // A copy of the scope types from runtime.cc.
-ScopeType = { Global: 0,
-              Local: 1,
-              With: 2,
-              Closure: 3,
-              Catch: 4,
-              Block: 5 };
+var ScopeType = { Global: 0,
+                  Local: 1,
+                  With: 2,
+                  Closure: 3,
+                  Catch: 4,
+                  Block: 5 };
 
 
 // Mirror hierarchy:
@@ -225,7 +225,7 @@
  */
 function Mirror(type) {
   this.type_ = type;
-};
+}
 
 
 Mirror.prototype.type = function() {
@@ -239,7 +239,7 @@
  */
 Mirror.prototype.isValue = function() {
   return this instanceof ValueMirror;
-}
+};
 
 
 /**
@@ -248,7 +248,7 @@
  */
 Mirror.prototype.isUndefined = function() {
   return this instanceof UndefinedMirror;
-}
+};
 
 
 /**
@@ -257,7 +257,7 @@
  */
 Mirror.prototype.isNull = function() {
   return this instanceof NullMirror;
-}
+};
 
 
 /**
@@ -266,7 +266,7 @@
  */
 Mirror.prototype.isBoolean = function() {
   return this instanceof BooleanMirror;
-}
+};
 
 
 /**
@@ -275,7 +275,7 @@
  */
 Mirror.prototype.isNumber = function() {
   return this instanceof NumberMirror;
-}
+};
 
 
 /**
@@ -284,7 +284,7 @@
  */
 Mirror.prototype.isString = function() {
   return this instanceof StringMirror;
-}
+};
 
 
 /**
@@ -293,7 +293,7 @@
  */
 Mirror.prototype.isObject = function() {
   return this instanceof ObjectMirror;
-}
+};
 
 
 /**
@@ -302,7 +302,7 @@
  */
 Mirror.prototype.isFunction = function() {
   return this instanceof FunctionMirror;
-}
+};
 
 
 /**
@@ -311,7 +311,7 @@
  */
 Mirror.prototype.isUnresolvedFunction = function() {
   return this instanceof UnresolvedFunctionMirror;
-}
+};
 
 
 /**
@@ -320,7 +320,7 @@
  */
 Mirror.prototype.isArray = function() {
   return this instanceof ArrayMirror;
-}
+};
 
 
 /**
@@ -329,7 +329,7 @@
  */
 Mirror.prototype.isDate = function() {
   return this instanceof DateMirror;
-}
+};
 
 
 /**
@@ -338,7 +338,7 @@
  */
 Mirror.prototype.isRegExp = function() {
   return this instanceof RegExpMirror;
-}
+};
 
 
 /**
@@ -347,7 +347,7 @@
  */
 Mirror.prototype.isError = function() {
   return this instanceof ErrorMirror;
-}
+};
 
 
 /**
@@ -356,7 +356,7 @@
  */
 Mirror.prototype.isProperty = function() {
   return this instanceof PropertyMirror;
-}
+};
 
 
 /**
@@ -365,7 +365,7 @@
  */
 Mirror.prototype.isFrame = function() {
   return this instanceof FrameMirror;
-}
+};
 
 
 /**
@@ -374,7 +374,7 @@
  */
 Mirror.prototype.isScript = function() {
   return this instanceof ScriptMirror;
-}
+};
 
 
 /**
@@ -383,7 +383,7 @@
  */
 Mirror.prototype.isContext = function() {
   return this instanceof ContextMirror;
-}
+};
 
 
 /**
@@ -392,7 +392,7 @@
  */
 Mirror.prototype.isScope = function() {
   return this instanceof ScopeMirror;
-}
+};
 
 
 /**
@@ -400,7 +400,7 @@
  */
 Mirror.prototype.allocateHandle_ = function() {
   this.handle_ = next_handle_++;
-}
+};
 
 
 /**
@@ -409,13 +409,13 @@
  */
 Mirror.prototype.allocateTransientHandle_ = function() {
   this.handle_ = next_transient_handle_--;
-}
+};
 
 
 Mirror.prototype.toText = function() {
   // Simple default toText, used when a subclass provides no specialization.
   return "#<" + this.constructor.name + ">";
-}
+};
 
 
 /**
@@ -480,7 +480,7 @@
 
 UndefinedMirror.prototype.toText = function() {
   return 'undefined';
-}
+};
 
 
 /**
@@ -496,7 +496,7 @@
 
 NullMirror.prototype.toText = function() {
   return 'null';
-}
+};
 
 
 /**
@@ -513,7 +513,7 @@
 
 BooleanMirror.prototype.toText = function() {
   return this.value_ ? 'true' : 'false';
-}
+};
 
 
 /**
@@ -530,7 +530,7 @@
 
 NumberMirror.prototype.toText = function() {
   return %NumberToString(this.value_);
-}
+};
 
 
 /**
@@ -555,11 +555,11 @@
            '... (length: ' + this.length() + ')';
   }
   return this.value_;
-}
+};
 
 StringMirror.prototype.toText = function() {
   return this.getTruncatedValue(kMaxProtocolStringLength);
-}
+};
 
 
 /**
@@ -898,7 +898,7 @@
 
 FunctionMirror.prototype.toText = function() {
   return this.source();
-}
+};
 
 
 /**
@@ -951,7 +951,7 @@
 
 UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
   return [];
-}
+};
 
 
 /**
@@ -971,7 +971,8 @@
 };
 
 
-ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index, opt_to_index) {
+ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index,
+                                                            opt_to_index) {
   var from_index = opt_from_index || 0;
   var to_index = opt_to_index || this.length() - 1;
   if (from_index > to_index) return new Array();
@@ -987,7 +988,7 @@
     values[i - from_index] = value;
   }
   return values;
-}
+};
 
 
 /**
@@ -1005,7 +1006,7 @@
 DateMirror.prototype.toText = function() {
   var s = JSON.stringify(this.value_);
   return s.substring(1, s.length - 1);  // cut quotes
-}
+};
 
 
 /**
@@ -1059,7 +1060,7 @@
 RegExpMirror.prototype.toText = function() {
   // Simple default toText, used when a subclass provides no specialization.
   return "/" + this.source() + "/";
-}
+};
 
 
 /**
@@ -1087,12 +1088,12 @@
   // Use the same text representation as in messages.js.
   var str;
   try {
-    str = %_CallFunction(this.value_, builtins.errorToString);
+    str = %_CallFunction(this.value_, builtins.ErrorToString);
   } catch (e) {
     str = '#<Error>';
   }
   return str;
-}
+};
 
 
 /**
@@ -1110,7 +1111,7 @@
   this.value_ = details[0];
   this.details_ = details[1];
   if (details.length > 2) {
-    this.exception_ = details[2]
+    this.exception_ = details[2];
     this.getter_ = details[3];
     this.setter_ = details[4];
   }
@@ -1120,22 +1121,22 @@
 
 PropertyMirror.prototype.isReadOnly = function() {
   return (this.attributes() & PropertyAttribute.ReadOnly) != 0;
-}
+};
 
 
 PropertyMirror.prototype.isEnum = function() {
   return (this.attributes() & PropertyAttribute.DontEnum) == 0;
-}
+};
 
 
 PropertyMirror.prototype.canDelete = function() {
   return (this.attributes() & PropertyAttribute.DontDelete) == 0;
-}
+};
 
 
 PropertyMirror.prototype.name = function() {
   return this.name_;
-}
+};
 
 
 PropertyMirror.prototype.isIndexed = function() {
@@ -1145,12 +1146,12 @@
     }
   }
   return true;
-}
+};
 
 
 PropertyMirror.prototype.value = function() {
   return MakeMirror(this.value_, false);
-}
+};
 
 
 /**
@@ -1159,22 +1160,22 @@
  */
 PropertyMirror.prototype.isException = function() {
   return this.exception_ ? true : false;
-}
+};
 
 
 PropertyMirror.prototype.attributes = function() {
   return %DebugPropertyAttributesFromDetails(this.details_);
-}
+};
 
 
 PropertyMirror.prototype.propertyType = function() {
   return %DebugPropertyTypeFromDetails(this.details_);
-}
+};
 
 
 PropertyMirror.prototype.insertionIndex = function() {
   return %DebugPropertyIndexFromDetails(this.details_);
-}
+};
 
 
 /**
@@ -1183,7 +1184,7 @@
  */
 PropertyMirror.prototype.hasGetter = function() {
   return this.getter_ ? true : false;
-}
+};
 
 
 /**
@@ -1192,7 +1193,7 @@
  */
 PropertyMirror.prototype.hasSetter = function() {
   return this.setter_ ? true : false;
-}
+};
 
 
 /**
@@ -1206,7 +1207,7 @@
   } else {
     return GetUndefinedMirror();
   }
-}
+};
 
 
 /**
@@ -1220,7 +1221,7 @@
   } else {
     return GetUndefinedMirror();
   }
-}
+};
 
 
 /**
@@ -1233,27 +1234,27 @@
   return (this.propertyType() == PropertyType.Interceptor) ||
          ((this.propertyType() == PropertyType.Callbacks) &&
           !this.hasGetter() && !this.hasSetter());
-}
+};
 
 
-const kFrameDetailsFrameIdIndex = 0;
-const kFrameDetailsReceiverIndex = 1;
-const kFrameDetailsFunctionIndex = 2;
-const kFrameDetailsArgumentCountIndex = 3;
-const kFrameDetailsLocalCountIndex = 4;
-const kFrameDetailsSourcePositionIndex = 5;
-const kFrameDetailsConstructCallIndex = 6;
-const kFrameDetailsAtReturnIndex = 7;
-const kFrameDetailsFlagsIndex = 8;
-const kFrameDetailsFirstDynamicIndex = 9;
+var kFrameDetailsFrameIdIndex = 0;
+var kFrameDetailsReceiverIndex = 1;
+var kFrameDetailsFunctionIndex = 2;
+var kFrameDetailsArgumentCountIndex = 3;
+var kFrameDetailsLocalCountIndex = 4;
+var kFrameDetailsSourcePositionIndex = 5;
+var kFrameDetailsConstructCallIndex = 6;
+var kFrameDetailsAtReturnIndex = 7;
+var kFrameDetailsFlagsIndex = 8;
+var kFrameDetailsFirstDynamicIndex = 9;
 
-const kFrameDetailsNameIndex = 0;
-const kFrameDetailsValueIndex = 1;
-const kFrameDetailsNameValueSize = 2;
+var kFrameDetailsNameIndex = 0;
+var kFrameDetailsValueIndex = 1;
+var kFrameDetailsNameValueSize = 2;
 
-const kFrameDetailsFlagDebuggerFrameMask = 1 << 0;
-const kFrameDetailsFlagOptimizedFrameMask = 1 << 1;
-const kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;
+var kFrameDetailsFlagDebuggerFrameMask = 1 << 0;
+var kFrameDetailsFlagOptimizedFrameMask = 1 << 1;
+var kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;
 
 /**
 * Wrapper for the frame details information retrieved from the VM. The frame
@@ -1284,63 +1285,63 @@
 FrameDetails.prototype.frameId = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsFrameIdIndex];
-}
+};
 
 
 FrameDetails.prototype.receiver = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsReceiverIndex];
-}
+};
 
 
 FrameDetails.prototype.func = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsFunctionIndex];
-}
+};
 
 
 FrameDetails.prototype.isConstructCall = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsConstructCallIndex];
-}
+};
 
 
 FrameDetails.prototype.isAtReturn = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsAtReturnIndex];
-}
+};
 
 
 FrameDetails.prototype.isDebuggerFrame = function() {
   %CheckExecutionState(this.break_id_);
   var f = kFrameDetailsFlagDebuggerFrameMask;
   return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
-}
+};
 
 
 FrameDetails.prototype.isOptimizedFrame = function() {
   %CheckExecutionState(this.break_id_);
   var f = kFrameDetailsFlagOptimizedFrameMask;
   return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
-}
+};
 
 
 FrameDetails.prototype.isInlinedFrame = function() {
   return this.inlinedFrameIndex() > 0;
-}
+};
 
 
 FrameDetails.prototype.inlinedFrameIndex = function() {
   %CheckExecutionState(this.break_id_);
   var f = kFrameDetailsFlagInlinedFrameIndexMask;
-  return (this.details_[kFrameDetailsFlagsIndex] & f) >> 2
-}
+  return (this.details_[kFrameDetailsFlagsIndex] & f) >> 2;
+};
 
 
 FrameDetails.prototype.argumentCount = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsArgumentCountIndex];
-}
+};
 
 
 FrameDetails.prototype.argumentName = function(index) {
@@ -1348,9 +1349,9 @@
   if (index >= 0 && index < this.argumentCount()) {
     return this.details_[kFrameDetailsFirstDynamicIndex +
                          index * kFrameDetailsNameValueSize +
-                         kFrameDetailsNameIndex]
+                         kFrameDetailsNameIndex];
   }
-}
+};
 
 
 FrameDetails.prototype.argumentValue = function(index) {
@@ -1358,45 +1359,45 @@
   if (index >= 0 && index < this.argumentCount()) {
     return this.details_[kFrameDetailsFirstDynamicIndex +
                          index * kFrameDetailsNameValueSize +
-                         kFrameDetailsValueIndex]
+                         kFrameDetailsValueIndex];
   }
-}
+};
 
 
 FrameDetails.prototype.localCount = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsLocalCountIndex];
-}
+};
 
 
 FrameDetails.prototype.sourcePosition = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsSourcePositionIndex];
-}
+};
 
 
 FrameDetails.prototype.localName = function(index) {
   %CheckExecutionState(this.break_id_);
   if (index >= 0 && index < this.localCount()) {
     var locals_offset = kFrameDetailsFirstDynamicIndex +
-                        this.argumentCount() * kFrameDetailsNameValueSize
+                        this.argumentCount() * kFrameDetailsNameValueSize;
     return this.details_[locals_offset +
                          index * kFrameDetailsNameValueSize +
-                         kFrameDetailsNameIndex]
+                         kFrameDetailsNameIndex];
   }
-}
+};
 
 
 FrameDetails.prototype.localValue = function(index) {
   %CheckExecutionState(this.break_id_);
   if (index >= 0 && index < this.localCount()) {
     var locals_offset = kFrameDetailsFirstDynamicIndex +
-                        this.argumentCount() * kFrameDetailsNameValueSize
+                        this.argumentCount() * kFrameDetailsNameValueSize;
     return this.details_[locals_offset +
                          index * kFrameDetailsNameValueSize +
-                         kFrameDetailsValueIndex]
+                         kFrameDetailsValueIndex];
   }
-}
+};
 
 
 FrameDetails.prototype.returnValue = function() {
@@ -1407,12 +1408,12 @@
   if (this.details_[kFrameDetailsAtReturnIndex]) {
     return this.details_[return_value_offset];
   }
-}
+};
 
 
 FrameDetails.prototype.scopeCount = function() {
   return %GetScopeCount(this.break_id_, this.frameId());
-}
+};
 
 
 /**
@@ -1575,7 +1576,8 @@
 };
 
 
-FrameMirror.prototype.evaluate = function(source, disable_break, opt_context_object) {
+FrameMirror.prototype.evaluate = function(source, disable_break,
+                                          opt_context_object) {
   var result = %DebugEvaluate(this.break_id_,
                               this.details_.frameId(),
                               this.details_.inlinedFrameIndex(),
@@ -1599,7 +1601,8 @@
     result += '[debugger]';
   } else {
     // If the receiver has a className which is 'global' don't display it.
-    var display_receiver = !receiver.className || receiver.className() != 'global';
+    var display_receiver =
+      !receiver.className || (receiver.className() != 'global');
     if (display_receiver) {
       result += receiver.toText();
     }
@@ -1661,7 +1664,7 @@
   }
 
   return result;
-}
+};
 
 
 FrameMirror.prototype.sourceAndPositionText = function() {
@@ -1693,13 +1696,13 @@
   }
 
   return result;
-}
+};
 
 
 FrameMirror.prototype.localsText = function() {
   // Format local variables.
   var result = '';
-  var locals_count = this.localCount()
+  var locals_count = this.localCount();
   if (locals_count > 0) {
     for (var i = 0; i < locals_count; ++i) {
       result += '      var ';
@@ -1711,7 +1714,7 @@
   }
 
   return result;
-}
+};
 
 
 FrameMirror.prototype.toText = function(opt_locals) {
@@ -1726,11 +1729,11 @@
     result += this.localsText();
   }
   return result;
-}
+};
 
 
-const kScopeDetailsTypeIndex = 0;
-const kScopeDetailsObjectIndex = 1;
+var kScopeDetailsTypeIndex = 0;
+var kScopeDetailsObjectIndex = 1;
 
 function ScopeDetails(frame, index) {
   this.break_id_ = frame.break_id_;
@@ -1744,13 +1747,13 @@
 ScopeDetails.prototype.type = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kScopeDetailsTypeIndex];
-}
+};
 
 
 ScopeDetails.prototype.object = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kScopeDetailsObjectIndex];
-}
+};
 
 
 /**
@@ -1829,6 +1832,11 @@
 };
 
 
+ScriptMirror.prototype.setSource = function(source) {
+  %DebugSetScriptSource(this.script_, source);
+};
+
+
 ScriptMirror.prototype.lineOffset = function() {
   return this.script_.line_offset;
 };
@@ -1862,12 +1870,12 @@
 ScriptMirror.prototype.locationFromPosition = function(
     position, include_resource_offset) {
   return this.script_.locationFromPosition(position, include_resource_offset);
-}
+};
 
 
 ScriptMirror.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
   return this.script_.sourceSlice(opt_from_line, opt_to_line);
-}
+};
 
 
 ScriptMirror.prototype.context = function() {
@@ -1907,7 +1915,7 @@
   }
   result += ')';
   return result;
-}
+};
 
 
 /**
@@ -1965,7 +1973,7 @@
  */
 JSONProtocolSerializer.prototype.serializeReference = function(mirror) {
   return this.serialize_(mirror, true, true);
-}
+};
 
 
 /**
@@ -1978,7 +1986,7 @@
 JSONProtocolSerializer.prototype.serializeValue = function(mirror) {
   var json = this.serialize_(mirror, false, true);
   return json;
-}
+};
 
 
 /**
@@ -2000,17 +2008,17 @@
   }
 
   return content;
-}
+};
 
 
 JSONProtocolSerializer.prototype.includeSource_ = function() {
   return this.options_ && this.options_.includeSource;
-}
+};
 
 
 JSONProtocolSerializer.prototype.inlineRefs_ = function() {
   return this.options_ && this.options_.inlineRefs;
-}
+};
 
 
 JSONProtocolSerializer.prototype.maxStringLength_ = function() {
@@ -2019,7 +2027,7 @@
     return kMaxProtocolStringLength;
   }
   return this.options_.maxStringLength;
-}
+};
 
 
 JSONProtocolSerializer.prototype.add_ = function(mirror) {
@@ -2032,7 +2040,7 @@
 
   // Add the mirror to the list of mirrors to be serialized.
   this.mirrors_.push(mirror);
-}
+};
 
 
 /**
@@ -2139,7 +2147,7 @@
       break;
 
     case PROPERTY_TYPE:
-      throw new Error('PropertyMirror cannot be serialized independeltly')
+      throw new Error('PropertyMirror cannot be serialized independently');
       break;
 
     case FRAME_TYPE:
@@ -2179,7 +2187,7 @@
           mirror.evalFromScript()) {
         content.evalFromScript =
             this.serializeReference(mirror.evalFromScript());
-        var evalFromLocation = mirror.evalFromLocation()
+        var evalFromLocation = mirror.evalFromLocation();
         if (evalFromLocation) {
           content.evalFromLocation = { line: evalFromLocation.line,
                                        column: evalFromLocation.column };
@@ -2203,7 +2211,7 @@
 
   // Create and return the JSON string.
   return content;
-}
+};
 
 
 /**
@@ -2278,7 +2286,7 @@
     }
   }
   content.properties = p;
-}
+};
 
 
 /**
@@ -2342,7 +2350,7 @@
     result.ref = propertyValue.handle();
   }
   return result;
-}
+};
 
 
 JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
@@ -2362,7 +2370,7 @@
   var x = new Array(mirror.argumentCount());
   for (var i = 0; i < mirror.argumentCount(); i++) {
     var arg = {};
-    var argument_name = mirror.argumentName(i)
+    var argument_name = mirror.argumentName(i);
     if (argument_name) {
       arg.name = argument_name;
     }
@@ -2392,7 +2400,7 @@
       index: i
     });
   }
-}
+};
 
 
 JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
@@ -2402,7 +2410,7 @@
   content.object = this.inlineRefs_() ?
                    this.serializeValue(mirror.scopeObject()) :
                    this.serializeReference(mirror.scopeObject());
-}
+};
 
 
 /**
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index a791dbb..d1620bf 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -109,7 +109,7 @@
       if (j != 0) {
         fprintf(fp, ",");
       }
-      fprintf(fp, "%d", at(j));
+      fprintf(fp, "%u", static_cast<unsigned char>(at(j)));
     }
   }
   char at(int i) { return data_[i]; }
@@ -312,7 +312,7 @@
   }
   // If we don't do this then we end up with a stray root pointing at the
   // context even after we have disposed of the context.
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot");
   i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
   context.Dispose();
   CppByteSink sink(argv[1]);
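
The %u change above avoids a portability bug: char may be signed, so raw snapshot bytes at or above 0x80 would be emitted as negative numbers in the generated array. A minimal demonstration of the pitfall:

    #include <cstdio>

    int main() {
      char byte = static_cast<char>(0xC3);
      std::printf("%d\n", byte);                              // -61 where char is signed
      std::printf("%u\n", static_cast<unsigned char>(byte));  // always 195
    }
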
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index e9ca6c0..8eefb23 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -94,6 +94,9 @@
     case BYTE_ARRAY_TYPE:
       ByteArray::cast(this)->ByteArrayVerify();
       break;
+    case FREE_SPACE_TYPE:
+      FreeSpace::cast(this)->FreeSpaceVerify();
+      break;
     case EXTERNAL_PIXEL_ARRAY_TYPE:
       ExternalPixelArray::cast(this)->ExternalPixelArrayVerify();
       break;
@@ -135,6 +138,9 @@
     case JS_VALUE_TYPE:
       JSValue::cast(this)->JSValueVerify();
       break;
+    case JS_DATE_TYPE:
+      JSDate::cast(this)->JSDateVerify();
+      break;
     case JS_FUNCTION_TYPE:
       JSFunction::cast(this)->JSFunctionVerify();
       break;
@@ -153,6 +159,12 @@
     case JS_ARRAY_TYPE:
       JSArray::cast(this)->JSArrayVerify();
       break;
+    case JS_SET_TYPE:
+      JSSet::cast(this)->JSSetVerify();
+      break;
+    case JS_MAP_TYPE:
+      JSMap::cast(this)->JSMapVerify();
+      break;
     case JS_WEAK_MAP_TYPE:
       JSWeakMap::cast(this)->JSWeakMapVerify();
       break;
@@ -207,6 +219,11 @@
 }
 
 
+void FreeSpace::FreeSpaceVerify() {
+  ASSERT(IsFreeSpace());
+}
+
+
 void ExternalPixelArray::ExternalPixelArrayVerify() {
   ASSERT(IsExternalPixelArray());
 }
@@ -255,12 +272,20 @@
 void JSObject::JSObjectVerify() {
   VerifyHeapPointer(properties());
   VerifyHeapPointer(elements());
+
+  if (GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
+    ASSERT(this->elements()->IsFixedArray());
+    ASSERT(this->elements()->length() >= 2);
+  }
+
   if (HasFastProperties()) {
     CHECK_EQ(map()->unused_property_fields(),
              (map()->inobject_properties() + properties()->length() -
               map()->NextFreePropertyIndex()));
   }
-  ASSERT_EQ(map()->has_fast_elements(),
+  ASSERT_EQ((map()->has_fast_elements() ||
+             map()->has_fast_smi_only_elements() ||
+             (elements() == GetHeap()->empty_fixed_array())),
             (elements()->map() == GetHeap()->fixed_array_map() ||
              elements()->map() == GetHeap()->fixed_cow_array_map()));
   ASSERT(map()->has_fast_elements() == HasFastElements());
@@ -304,6 +329,18 @@
 }
 
 
+void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
+  VerifyObjectField(kIcTotalCountOffset);
+  VerifyObjectField(kIcWithTypeinfoCountOffset);
+  VerifyHeapPointer(type_feedback_cells());
+}
+
+
+void AliasedArgumentsEntry::AliasedArgumentsEntryVerify() {
+  VerifySmiField(kAliasedContextSlot);
+}
+
+
 void FixedArray::FixedArrayVerify() {
   for (int i = 0; i < length(); i++) {
     Object* e = get(i);
@@ -322,7 +359,8 @@
       double value = get_scalar(i);
       ASSERT(!isnan(value) ||
              (BitCast<uint64_t>(value) ==
-              BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())));
+              BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
+             ((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
     }
   }
 }
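
The verifier above compares doubles by bit pattern rather than by value because NaN == NaN is false, so only the raw bits can distinguish the canonical NaN from an arbitrary one. A small sketch of the same idea, using memcpy as a portable stand-in for BitCast (the constant mirrors Double::kSignMask):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    static uint64_t DoubleBits(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // portable BitCast
      return bits;
    }

    // Mirrors the verifier's condition: any non-NaN is fine; a NaN must either
    // match the canonical pattern or have its sign bit set.
    bool DoubleElementIsPlausible(double value, double canonical_nan) {
      const uint64_t kSignMask = uint64_t(1) << 63;
      return !std::isnan(value) ||
             DoubleBits(value) == DoubleBits(canonical_nan) ||
             (DoubleBits(value) & kSignMask) != 0;
    }
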
@@ -336,6 +374,53 @@
 }
 
 
+void JSDate::JSDateVerify() {
+  if (value()->IsHeapObject()) {
+    VerifyHeapPointer(value());
+  }
+  CHECK(value()->IsUndefined() || value()->IsSmi() || value()->IsHeapNumber());
+  CHECK(year()->IsUndefined() || year()->IsSmi() || year()->IsNaN());
+  CHECK(month()->IsUndefined() || month()->IsSmi() || month()->IsNaN());
+  CHECK(day()->IsUndefined() || day()->IsSmi() || day()->IsNaN());
+  CHECK(weekday()->IsUndefined() || weekday()->IsSmi() || weekday()->IsNaN());
+  CHECK(hour()->IsUndefined() || hour()->IsSmi() || hour()->IsNaN());
+  CHECK(min()->IsUndefined() || min()->IsSmi() || min()->IsNaN());
+  CHECK(sec()->IsUndefined() || sec()->IsSmi() || sec()->IsNaN());
+  CHECK(cache_stamp()->IsUndefined() ||
+        cache_stamp()->IsSmi() ||
+        cache_stamp()->IsNaN());
+
+  if (month()->IsSmi()) {
+    int month = Smi::cast(this->month())->value();
+    CHECK(0 <= month && month <= 11);
+  }
+  if (day()->IsSmi()) {
+    int day = Smi::cast(this->day())->value();
+    CHECK(1 <= day && day <= 31);
+  }
+  if (hour()->IsSmi()) {
+    int hour = Smi::cast(this->hour())->value();
+    CHECK(0 <= hour && hour <= 23);
+  }
+  if (min()->IsSmi()) {
+    int min = Smi::cast(this->min())->value();
+    CHECK(0 <= min && min <= 59);
+  }
+  if (sec()->IsSmi()) {
+    int sec = Smi::cast(this->sec())->value();
+    CHECK(0 <= sec && sec <= 59);
+  }
+  if (weekday()->IsSmi()) {
+    int weekday = Smi::cast(this->weekday())->value();
+    CHECK(0 <= weekday && weekday <= 6);
+  }
+  if (cache_stamp()->IsSmi()) {
+    CHECK(Smi::cast(cache_stamp())->value() <=
+          Smi::cast(Isolate::Current()->date_cache()->stamp())->value());
+  }
+}
+
+
 void JSMessageObject::JSMessageObjectVerify() {
   CHECK(IsJSMessageObject());
   CHECK(type()->IsString());
@@ -367,7 +452,7 @@
   CHECK(this->first()->IsString());
   CHECK(this->second() == GetHeap()->empty_string() ||
         this->second()->IsString());
-  CHECK(this->length() >= String::kMinNonFlatLength);
+  CHECK(this->length() >= ConsString::kMinLength);
   if (this->IsFlat()) {
     // A flat cons can only be created by String::SlowTryFlatten.
     // Afterwards, the first part may be externalized.
@@ -387,6 +472,7 @@
   CHECK(IsJSFunction());
   VerifyObjectField(kPrototypeOrInitialMapOffset);
   VerifyObjectField(kNextFunctionLinkOffset);
+  CHECK(code()->IsCode());
   CHECK(next_function_link()->IsUndefined() ||
         next_function_link()->IsJSFunction());
 }
@@ -463,6 +549,7 @@
 void Code::CodeVerify() {
   CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
                   kCodeAlignment));
+  relocation_info()->Verify();
   Address last_gc_pc = NULL;
   for (RelocIterator it(this); !it.done(); it.next()) {
     it.rinfo()->Verify();
@@ -484,11 +571,27 @@
 }
 
 
+void JSSet::JSSetVerify() {
+  CHECK(IsJSSet());
+  JSObjectVerify();
+  VerifyHeapPointer(table());
+  ASSERT(table()->IsHashTable() || table()->IsUndefined());
+}
+
+
+void JSMap::JSMapVerify() {
+  CHECK(IsJSMap());
+  JSObjectVerify();
+  VerifyHeapPointer(table());
+  ASSERT(table()->IsHashTable() || table()->IsUndefined());
+}
+
+
 void JSWeakMap::JSWeakMapVerify() {
   CHECK(IsJSWeakMap());
   JSObjectVerify();
   VerifyHeapPointer(table());
-  ASSERT(table()->IsHashTable());
+  ASSERT(table()->IsHashTable() || table()->IsUndefined());
 }
 
 
@@ -535,13 +638,14 @@
 
 
 void JSProxy::JSProxyVerify() {
-  ASSERT(IsJSProxy());
+  CHECK(IsJSProxy());
   VerifyPointer(handler());
+  ASSERT(hash()->IsSmi() || hash()->IsUndefined());
 }
 
 
 void JSFunctionProxy::JSFunctionProxyVerify() {
-  ASSERT(IsJSFunctionProxy());
+  CHECK(IsJSFunctionProxy());
   JSProxyVerify();
   VerifyPointer(call_trap());
   VerifyPointer(construct_trap());
@@ -563,6 +667,13 @@
 }
 
 
+void AccessorPair::AccessorPairVerify() {
+  CHECK(IsAccessorPair());
+  VerifyPointer(getter());
+  VerifyPointer(setter());
+}
+
+
 void AccessCheckInfo::AccessCheckInfoVerify() {
   CHECK(IsAccessCheckInfo());
   VerifyPointer(named_callback());
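
Several verifiers above are upgraded from ASSERT to CHECK. The practical difference is that ASSERT compiles away outside debug builds, while CHECK aborts in every build, so type confusion in a proxy or function proxy is caught even in release mode. A simplified model of the two macro families (not V8's actual definitions):

    #include <cstdio>
    #include <cstdlib>

    #define MY_CHECK(cond)                                   \
      do {                                                   \
        if (!(cond)) {                                       \
          std::fprintf(stderr, "Check failed: %s\n", #cond); \
          std::abort();                                      \
        }                                                    \
      } while (false)

    #ifdef DEBUG
    #define MY_ASSERT(cond) MY_CHECK(cond)  // debug builds: same as CHECK
    #else
    #define MY_ASSERT(cond) ((void)0)       // release builds: compiled away
    #endif
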
diff --git a/src/objects-inl.h b/src/objects-inl.h
index e7b6a34..78578cc 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,7 +43,10 @@
 #include "isolate.h"
 #include "property.h"
 #include "spaces.h"
+#include "store-buffer.h"
 #include "v8memory.h"
+#include "factory.h"
+#include "incremental-marking.h"
 
 namespace v8 {
 namespace internal {
@@ -64,6 +67,13 @@
 }
 
 
+#define TYPE_CHECKER(type, instancetype)                                \
+  bool Object::Is##type() {                                             \
+  return Object::IsHeapObject() &&                                      \
+      HeapObject::cast(this)->map()->instance_type() == instancetype;   \
+  }
+
+
 #define CAST_ACCESSOR(type)                     \
   type* type::cast(Object* object) {            \
     ASSERT(object->Is##type());                 \
@@ -80,19 +90,19 @@
   type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
   void holder::set_##name(type* value, WriteBarrierMode mode) {         \
     WRITE_FIELD(this, offset, value);                                   \
-    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);           \
+    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);    \
   }
 
 
-// GC-safe accessors do not use HeapObject::GetHeap(), but access TLS instead.
-#define ACCESSORS_GCSAFE(holder, name, type, offset)                    \
-  type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
-  void holder::set_##name(type* value, WriteBarrierMode mode) {         \
+// Getter that returns a tagged Smi and setter that writes a tagged Smi.
+#define ACCESSORS_TO_SMI(holder, name, offset)                          \
+  Smi* holder::name() { return Smi::cast(READ_FIELD(this, offset)); }   \
+  void holder::set_##name(Smi* value, WriteBarrierMode mode) {          \
     WRITE_FIELD(this, offset, value);                                   \
-    CONDITIONAL_WRITE_BARRIER(HEAP, this, offset, mode);                \
   }
 
 
+// Getter that returns a Smi as an int and writes an int as a Smi.
 #define SMI_ACCESSORS(holder, name, offset)             \
   int holder::name() {                                  \
     Object* value = READ_FIELD(this, offset);           \
@@ -118,6 +128,23 @@
   }
 
 
+bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+                                         ElementsKind to_kind) {
+  if (to_kind == FAST_ELEMENTS) {
+    return from_kind == FAST_SMI_ONLY_ELEMENTS ||
+        from_kind == FAST_DOUBLE_ELEMENTS;
+  } else {
+    return to_kind == FAST_DOUBLE_ELEMENTS &&
+        from_kind == FAST_SMI_ONLY_ELEMENTS;
+  }
+}
+
+
+bool Object::IsFixedArrayBase() {
+  return IsFixedArray() || IsFixedDoubleArray();
+}
+
+
 bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
   // There is a constraint on the object; check.
   if (!this->IsJSObject()) return false;
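
IsMoreGeneralElementsKindTransition above encodes a one-way lattice: FAST_SMI_ONLY_ELEMENTS can generalize to FAST_DOUBLE_ELEMENTS or FAST_ELEMENTS, and FAST_DOUBLE_ELEMENTS only to FAST_ELEMENTS, never backwards. A standalone restatement with the same truth table (the enum values are local to the sketch):

    enum SketchElementsKind {
      SKETCH_FAST_SMI_ONLY_ELEMENTS,
      SKETCH_FAST_DOUBLE_ELEMENTS,
      SKETCH_FAST_ELEMENTS
    };

    // Same truth table as IsMoreGeneralElementsKindTransition above: exactly
    // smi-only -> double, smi-only -> object, and double -> object are allowed.
    bool IsMoreGeneralSketch(SketchElementsKind from, SketchElementsKind to) {
      if (to == SKETCH_FAST_ELEMENTS) {
        return from == SKETCH_FAST_SMI_ONLY_ELEMENTS ||
               from == SKETCH_FAST_DOUBLE_ELEMENTS;
      }
      return to == SKETCH_FAST_DOUBLE_ELEMENTS &&
             from == SKETCH_FAST_SMI_ONLY_ELEMENTS;
    }
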
@@ -147,12 +174,15 @@
 }
 
 
-bool Object::IsHeapNumber() {
-  return Object::IsHeapObject()
-    && HeapObject::cast(this)->map()->instance_type() == HEAP_NUMBER_TYPE;
+bool Object::NonFailureIsHeapObject() {
+  ASSERT(!this->IsFailure());
+  return (reinterpret_cast<intptr_t>(this) & kSmiTagMask) != 0;
 }
 
 
+TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
+
+
 bool Object::IsString() {
   return Object::IsHeapObject()
     && HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
@@ -165,6 +195,13 @@
 }
 
 
+bool Object::IsSpecFunction() {
+  if (!Object::IsHeapObject()) return false;
+  InstanceType type = HeapObject::cast(this)->map()->instance_type();
+  return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
+}
+
+
 bool Object::IsSymbol() {
   if (!this->IsHeapObject()) return false;
   uint32_t type = HeapObject::cast(this)->map()->instance_type();
@@ -396,17 +433,18 @@
 }
 
 
-bool Object::IsByteArray() {
-  return Object::IsHeapObject()
-    && HeapObject::cast(this)->map()->instance_type() == BYTE_ARRAY_TYPE;
+TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
+TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
+
+
+bool Object::IsFiller() {
+  if (!Object::IsHeapObject()) return false;
+  InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
+  return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
 }
 
 
-bool Object::IsExternalPixelArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-          EXTERNAL_PIXEL_ARRAY_TYPE;
-}
+TYPE_CHECKER(ExternalPixelArray, EXTERNAL_PIXEL_ARRAY_TYPE)
 
 
 bool Object::IsExternalArray() {
@@ -419,60 +457,14 @@
 }
 
 
-bool Object::IsExternalByteArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_BYTE_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalUnsignedByteArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalShortArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_SHORT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalUnsignedShortArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalIntArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_INT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalUnsignedIntArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_UNSIGNED_INT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalFloatArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_FLOAT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalDoubleArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_DOUBLE_ARRAY_TYPE;
-}
+TYPE_CHECKER(ExternalByteArray, EXTERNAL_BYTE_ARRAY_TYPE)
+TYPE_CHECKER(ExternalUnsignedByteArray, EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)
+TYPE_CHECKER(ExternalShortArray, EXTERNAL_SHORT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalUnsignedShortArray, EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalIntArray, EXTERNAL_INT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalUnsignedIntArray, EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalFloatArray, EXTERNAL_FLOAT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalDoubleArray, EXTERNAL_DOUBLE_ARRAY_TYPE)
 
 
 bool MaybeObject::IsFailure() {
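
Each TYPE_CHECKER(type, instancetype) line above replaces a hand-written predicate with the macro defined earlier in this file. For example, TYPE_CHECKER(Map, MAP_TYPE) expands, modulo whitespace, to:

    bool Object::IsMap() {
      return Object::IsHeapObject() &&
          HeapObject::cast(this)->map()->instance_type() == MAP_TYPE;
    }
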
@@ -509,59 +501,34 @@
 
 
 bool Object::IsJSReceiver() {
+  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
   return IsHeapObject() &&
       HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
 }
 
 
 bool Object::IsJSObject() {
-  return IsJSReceiver() && !IsJSProxy();
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+  return IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
 }
 
 
 bool Object::IsJSProxy() {
-  return Object::IsHeapObject() &&
-     (HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE ||
-      HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE);
+  if (!Object::IsHeapObject()) return false;
+  InstanceType type = HeapObject::cast(this)->map()->instance_type();
+  return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
 }
 
 
-bool Object::IsJSFunctionProxy() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE;
-}
-
-
-bool Object::IsJSWeakMap() {
-  return Object::IsJSObject() &&
-      HeapObject::cast(this)->map()->instance_type() == JS_WEAK_MAP_TYPE;
-}
-
-
-bool Object::IsJSContextExtensionObject() {
-  return IsHeapObject()
-      && (HeapObject::cast(this)->map()->instance_type() ==
-          JS_CONTEXT_EXTENSION_OBJECT_TYPE);
-}
-
-
-bool Object::IsMap() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == MAP_TYPE;
-}
-
-
-bool Object::IsFixedArray() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == FIXED_ARRAY_TYPE;
-}
-
-
-bool Object::IsFixedDoubleArray() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() ==
-          FIXED_DOUBLE_ARRAY_TYPE;
-}
+TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE)
+TYPE_CHECKER(JSSet, JS_SET_TYPE)
+TYPE_CHECKER(JSMap, JS_MAP_TYPE)
+TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
+TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
+TYPE_CHECKER(Map, MAP_TYPE)
+TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
 
 
 bool Object::IsDescriptorArray() {
@@ -596,6 +563,16 @@
 }
 
 
+bool Object::IsTypeFeedbackCells() {
+  if (!IsFixedArray()) return false;
+  // There's actually no way to see the difference between a fixed array and
+  // a cache cells array.  Since this is used for asserts we can check that
+  // the length is plausible though.
+  if (FixedArray::cast(this)->length() % 2 != 0) return false;
+  return true;
+}
+
+
 bool Object::IsContext() {
   if (Object::IsHeapObject()) {
     Map* map = HeapObject::cast(this)->map();
@@ -617,17 +594,14 @@
 }
 
 
-bool Object::IsSerializedScopeInfo() {
+bool Object::IsScopeInfo() {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map() ==
-      HeapObject::cast(this)->GetHeap()->serialized_scope_info_map();
+      HeapObject::cast(this)->GetHeap()->scope_info_map();
 }
 
 
-bool Object::IsJSFunction() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_TYPE;
-}
+TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
 
 
 template <> inline bool Is<JSFunction>(Object* obj) {
@@ -635,44 +609,13 @@
 }
 
 
-bool Object::IsCode() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == CODE_TYPE;
-}
-
-
-bool Object::IsOddball() {
-  ASSERT(HEAP->is_safe_to_read_maps());
-  return Object::IsHeapObject()
-    && HeapObject::cast(this)->map()->instance_type() == ODDBALL_TYPE;
-}
-
-
-bool Object::IsJSGlobalPropertyCell() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type()
-      == JS_GLOBAL_PROPERTY_CELL_TYPE;
-}
-
-
-bool Object::IsSharedFunctionInfo() {
-  return Object::IsHeapObject() &&
-      (HeapObject::cast(this)->map()->instance_type() ==
-       SHARED_FUNCTION_INFO_TYPE);
-}
-
-
-bool Object::IsJSValue() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == JS_VALUE_TYPE;
-}
-
-
-bool Object::IsJSMessageObject() {
-  return Object::IsHeapObject()
-      && (HeapObject::cast(this)->map()->instance_type() ==
-          JS_MESSAGE_OBJECT_TYPE);
-}
+TYPE_CHECKER(Code, CODE_TYPE)
+TYPE_CHECKER(Oddball, ODDBALL_TYPE)
+TYPE_CHECKER(JSGlobalPropertyCell, JS_GLOBAL_PROPERTY_CELL_TYPE)
+TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
+TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
+TYPE_CHECKER(JSDate, JS_DATE_TYPE)
+TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
 
 
 bool Object::IsStringWrapper() {
@@ -680,10 +623,7 @@
 }
 
 
-bool Object::IsForeign() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == FOREIGN_TYPE;
-}
+TYPE_CHECKER(Foreign, FOREIGN_TYPE)
 
 
 bool Object::IsBoolean() {
@@ -692,16 +632,8 @@
 }
 
 
-bool Object::IsJSArray() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == JS_ARRAY_TYPE;
-}
-
-
-bool Object::IsJSRegExp() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == JS_REGEXP_TYPE;
-}
+TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
+TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
 
 
 template <> inline bool Is<JSArray>(Object* obj) {
@@ -738,7 +670,10 @@
     return false;
   }
 #ifdef DEBUG
-  reinterpret_cast<JSFunctionResultCache*>(this)->JSFunctionResultCacheVerify();
+  if (FLAG_verify_heap) {
+    reinterpret_cast<JSFunctionResultCache*>(this)->
+        JSFunctionResultCacheVerify();
+  }
 #endif
   return true;
 }
@@ -750,7 +685,9 @@
     return false;
   }
 #ifdef DEBUG
-  reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
+  if (FLAG_verify_heap) {
+    reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
+  }
 #endif
   return true;
 }
@@ -799,18 +736,8 @@
 }
 
 
-bool Object::IsJSGlobalObject() {
-  return IsHeapObject() &&
-      (HeapObject::cast(this)->map()->instance_type() ==
-       JS_GLOBAL_OBJECT_TYPE);
-}
-
-
-bool Object::IsJSBuiltinsObject() {
-  return IsHeapObject() &&
-      (HeapObject::cast(this)->map()->instance_type() ==
-       JS_BUILTINS_OBJECT_TYPE);
-}
+TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
+TYPE_CHECKER(JSBuiltinsObject, JS_BUILTINS_OBJECT_TYPE)
 
 
 bool Object::IsUndetectableObject() {
@@ -883,6 +810,11 @@
 }
 
 
+bool Object::IsNaN() {
+  return this->IsHeapNumber() && isnan(HeapNumber::cast(this)->value());
+}
+
+
 MaybeObject* Object::ToSmi() {
   if (IsSmi()) return this;
   if (IsHeapNumber()) {
@@ -939,21 +871,20 @@
 #define WRITE_FIELD(p, offset, value) \
   (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
 
-// TODO(isolates): Pass heap in to these macros.
-#define WRITE_BARRIER(object, offset) \
-  object->GetHeap()->RecordWrite(object->address(), offset);
+#define WRITE_BARRIER(heap, object, offset, value)                      \
+  heap->incremental_marking()->RecordWrite(                             \
+      object, HeapObject::RawField(object, offset), value);             \
+  if (heap->InNewSpace(value)) {                                        \
+    heap->RecordWrite(object->address(), offset);                       \
+  }
 
-// CONDITIONAL_WRITE_BARRIER must be issued after the actual
-// write due to the assert validating the written value.
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, mode) \
-  if (mode == UPDATE_WRITE_BARRIER) { \
-    heap->RecordWrite(object->address(), offset); \
-  } else { \
-    ASSERT(mode == SKIP_WRITE_BARRIER); \
-    ASSERT(heap->InNewSpace(object) || \
-           !heap->InNewSpace(READ_FIELD(object, offset)) || \
-           Page::FromAddress(object->address())->           \
-               IsRegionDirty(object->address() + offset));  \
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode)    \
+  if (mode == UPDATE_WRITE_BARRIER) {                                   \
+    heap->incremental_marking()->RecordWrite(                           \
+      object, HeapObject::RawField(object, offset), value);             \
+    if (heap->InNewSpace(value)) {                                      \
+      heap->RecordWrite(object->address(), offset);                     \
+    }                                                                   \
   }
 
 #ifndef V8_TARGET_ARCH_MIPS
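
The rewritten barrier above has two distinct jobs: notify incremental marking that a new edge was written (so a black object cannot end up pointing at an unmarked white one), and record old-to-new pointers in the store buffer for the next scavenge. A declaration-only C++ model of that structure, with hypothetical method names:

    // Hypothetical interface; mirrors the shape of WRITE_BARRIER above.
    struct HeapModel {
      bool InNewSpace(void* value);
      void RecordWriteForIncrementalMarking(void* object, void** slot,
                                            void* value);
      void RecordWriteInStoreBuffer(void* object, int offset);
    };

    inline void WriteBarrierModel(HeapModel* heap, void* object, int offset,
                                  void** slot, void* value) {
      // 1. Incremental marking: re-grey the object or mark the new target.
      heap->RecordWriteForIncrementalMarking(object, slot, value);
      // 2. Generational GC: remember old->new pointers for the scavenger.
      if (heap->InNewSpace(value)) {
        heap->RecordWriteInStoreBuffer(object, offset);
      }
    }
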
@@ -974,7 +905,6 @@
   #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
 #endif  // V8_TARGET_ARCH_MIPS
 
-
 #ifndef V8_TARGET_ARCH_MIPS
   #define WRITE_DOUBLE_FIELD(p, offset, value) \
     (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
@@ -1014,6 +944,12 @@
 #define WRITE_UINT32_FIELD(p, offset, value) \
   (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
 
+#define READ_INT64_FIELD(p, offset) \
+  (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INT64_FIELD(p, offset, value) \
+  (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
+
 #define READ_SHORT_FIELD(p, offset) \
   (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))
 
@@ -1169,91 +1105,6 @@
 }
 
 
-bool MapWord::IsMarked() {
-  return (value_ & kMarkingMask) == 0;
-}
-
-
-void MapWord::SetMark() {
-  value_ &= ~kMarkingMask;
-}
-
-
-void MapWord::ClearMark() {
-  value_ |= kMarkingMask;
-}
-
-
-bool MapWord::IsOverflowed() {
-  return (value_ & kOverflowMask) != 0;
-}
-
-
-void MapWord::SetOverflow() {
-  value_ |= kOverflowMask;
-}
-
-
-void MapWord::ClearOverflow() {
-  value_ &= ~kOverflowMask;
-}
-
-
-MapWord MapWord::EncodeAddress(Address map_address, int offset) {
-  // Offset is the distance in live bytes from the first live object in the
-  // same page. The offset between two objects in the same page should not
-  // exceed the object area size of a page.
-  ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
-
-  uintptr_t compact_offset = offset >> kObjectAlignmentBits;
-  ASSERT(compact_offset < (1 << kForwardingOffsetBits));
-
-  Page* map_page = Page::FromAddress(map_address);
-  ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
-
-  uintptr_t map_page_offset =
-      map_page->Offset(map_address) >> kMapAlignmentBits;
-
-  uintptr_t encoding =
-      (compact_offset << kForwardingOffsetShift) |
-      (map_page_offset << kMapPageOffsetShift) |
-      (map_page->mc_page_index << kMapPageIndexShift);
-  return MapWord(encoding);
-}
-
-
-Address MapWord::DecodeMapAddress(MapSpace* map_space) {
-  int map_page_index =
-      static_cast<int>((value_ & kMapPageIndexMask) >> kMapPageIndexShift);
-  ASSERT_MAP_PAGE_INDEX(map_page_index);
-
-  int map_page_offset = static_cast<int>(
-      ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) <<
-      kMapAlignmentBits);
-
-  return (map_space->PageAddress(map_page_index) + map_page_offset);
-}
-
-
-int MapWord::DecodeOffset() {
-  // The offset field is represented in the kForwardingOffsetBits
-  // most-significant bits.
-  uintptr_t offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
-  ASSERT(offset < static_cast<uintptr_t>(Page::kObjectAreaSize));
-  return static_cast<int>(offset);
-}
-
-
-MapWord MapWord::FromEncodedAddress(Address address) {
-  return MapWord(reinterpret_cast<uintptr_t>(address));
-}
-
-
-Address MapWord::ToEncodedAddress() {
-  return reinterpret_cast<Address>(value_);
-}
-
-
 #ifdef DEBUG
 void HeapObject::VerifyObjectField(int offset) {
   VerifyPointer(READ_FIELD(this, offset));
@@ -1266,12 +1117,11 @@
 
 
 Heap* HeapObject::GetHeap() {
-  // During GC, the map pointer in HeapObject is used in various ways that
-  // prevent us from retrieving Heap from the map.
-  // Assert that we are not in GC, implement GC code in a way that it doesn't
-  // pull heap from the map.
-  ASSERT(HEAP->is_safe_to_read_maps());
-  return map()->heap();
+  Heap* heap =
+      MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
+  ASSERT(heap != NULL);
+  ASSERT(heap->isolate() == Isolate::Current());
+  return heap;
 }
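
GetHeap above no longer goes through the map; it masks the object's address down to the MemoryChunk that contains it, whose header stores the owning heap. Assuming power-of-two-aligned chunks (the size here is illustrative; the real constants live in spaces.h), the address arithmetic is:

    #include <cstdint>

    struct MemoryChunkModel {
      void* heap;  // first header field: the owning heap

      static const uintptr_t kAlignment = uintptr_t(1) << 20;  // e.g. 1 MB chunks

      static MemoryChunkModel* FromAddress(uintptr_t address) {
        // Clearing the low bits lands on the chunk header for any object
        // address inside the chunk.
        return reinterpret_cast<MemoryChunkModel*>(address & ~(kAlignment - 1));
      }
    };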
 
 
@@ -1287,6 +1137,17 @@
 
 void HeapObject::set_map(Map* value) {
   set_map_word(MapWord::FromMap(value));
+  if (value != NULL) {
+    // TODO(1600) We are passing NULL as a slot because maps can never be on
+    // an evacuation candidate.
+    value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
+  }
+}
+
+
+// Unsafe accessor omitting write barrier.
+void HeapObject::set_map_no_write_barrier(Map* value) {
+  set_map_word(MapWord::FromMap(value));
 }
 
 
@@ -1329,47 +1190,6 @@
 }
 
 
-bool HeapObject::IsMarked() {
-  return map_word().IsMarked();
-}
-
-
-void HeapObject::SetMark() {
-  ASSERT(!IsMarked());
-  MapWord first_word = map_word();
-  first_word.SetMark();
-  set_map_word(first_word);
-}
-
-
-void HeapObject::ClearMark() {
-  ASSERT(IsMarked());
-  MapWord first_word = map_word();
-  first_word.ClearMark();
-  set_map_word(first_word);
-}
-
-
-bool HeapObject::IsOverflowed() {
-  return map_word().IsOverflowed();
-}
-
-
-void HeapObject::SetOverflow() {
-  MapWord first_word = map_word();
-  first_word.SetOverflow();
-  set_map_word(first_word);
-}
-
-
-void HeapObject::ClearOverflow() {
-  ASSERT(IsOverflowed());
-  MapWord first_word = map_word();
-  first_word.ClearOverflow();
-  set_map_word(first_word);
-}
-
-
 double HeapNumber::value() {
   return READ_DOUBLE_FIELD(this, kValueOffset);
 }
@@ -1394,22 +1214,165 @@
 ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
 
 
+Object** FixedArray::GetFirstElementAddress() {
+  return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
+}
+
+
+bool FixedArray::ContainsOnlySmisOrHoles() {
+  Object* the_hole = GetHeap()->the_hole_value();
+  Object** current = GetFirstElementAddress();
+  for (int i = 0; i < length(); ++i) {
+    Object* candidate = *current++;
+    if (!candidate->IsSmi() && candidate != the_hole) return false;
+  }
+  return true;
+}
+
+
 FixedArrayBase* JSObject::elements() {
   Object* array = READ_FIELD(this, kElementsOffset);
-  ASSERT(array->HasValidElements());
   return static_cast<FixedArrayBase*>(array);
 }
 
+void JSObject::ValidateSmiOnlyElements() {
+#if DEBUG
+  if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
+    Heap* heap = GetHeap();
+    // Don't use elements, since integrity checks will fail if there
+    // are filler pointers in the array.
+    FixedArray* fixed_array =
+        reinterpret_cast<FixedArray*>(READ_FIELD(this, kElementsOffset));
+    Map* map = fixed_array->map();
+    // Arrays that have been shifted in place can't be verified.
+    if (map != heap->raw_unchecked_one_pointer_filler_map() &&
+        map != heap->raw_unchecked_two_pointer_filler_map() &&
+        map != heap->free_space_map()) {
+      for (int i = 0; i < fixed_array->length(); i++) {
+        Object* current = fixed_array->get(i);
+        ASSERT(current->IsSmi() || current->IsTheHole());
+      }
+    }
+  }
+#endif
+}
+
+
+MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
+#if DEBUG
+  ValidateSmiOnlyElements();
+#endif
+  if (map()->elements_kind() != FAST_ELEMENTS) {
+    return TransitionElementsKind(FAST_ELEMENTS);
+  }
+  return this;
+}
+
+
+MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
+                                                uint32_t count,
+                                                EnsureElementsMode mode) {
+  ElementsKind current_kind = map()->elements_kind();
+  ElementsKind target_kind = current_kind;
+  ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
+  if (current_kind == FAST_ELEMENTS) return this;
+
+  Heap* heap = GetHeap();
+  Object* the_hole = heap->the_hole_value();
+  Object* heap_number_map = heap->heap_number_map();
+  for (uint32_t i = 0; i < count; ++i) {
+    Object* current = *objects++;
+    if (!current->IsSmi() && current != the_hole) {
+      if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS &&
+          HeapObject::cast(current)->map() == heap_number_map) {
+        target_kind = FAST_DOUBLE_ELEMENTS;
+      } else {
+        target_kind = FAST_ELEMENTS;
+        break;
+      }
+    }
+  }
+
+  if (target_kind != current_kind) {
+    return TransitionElementsKind(target_kind);
+  }
+  return this;
+}
+
+
+MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
+                                                EnsureElementsMode mode) {
+  if (elements->map() != GetHeap()->fixed_double_array_map()) {
+    ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
+           elements->map() == GetHeap()->fixed_cow_array_map());
+    if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
+      mode = DONT_ALLOW_DOUBLE_ELEMENTS;
+    }
+    Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
+    return EnsureCanContainElements(objects, elements->length(), mode);
+  }
+
+  ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
+  if (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
+    return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
+  }
+
+  return this;
+}
+
+
+MaybeObject* JSObject::GetElementsTransitionMap(Isolate* isolate,
+                                                ElementsKind to_kind) {
+  Map* current_map = map();
+  ElementsKind from_kind = current_map->elements_kind();
+
+  if (from_kind == to_kind) return current_map;
+
+  Context* global_context = isolate->context()->global_context();
+  if (current_map == global_context->smi_js_array_map()) {
+    if (to_kind == FAST_ELEMENTS) {
+      return global_context->object_js_array_map();
+    } else {
+      if (to_kind == FAST_DOUBLE_ELEMENTS) {
+        return global_context->double_js_array_map();
+      } else {
+        ASSERT(to_kind == DICTIONARY_ELEMENTS);
+      }
+    }
+  }
+  return GetElementsTransitionMapSlow(to_kind);
+}
+
+
+void JSObject::set_map_and_elements(Map* new_map,
+                                    FixedArrayBase* value,
+                                    WriteBarrierMode mode) {
+  ASSERT(value->HasValidElements());
+#ifdef DEBUG
+  ValidateSmiOnlyElements();
+#endif
+  if (new_map != NULL) {
+    if (mode == UPDATE_WRITE_BARRIER) {
+      set_map(new_map);
+    } else {
+      ASSERT(mode == SKIP_WRITE_BARRIER);
+      set_map_no_write_barrier(new_map);
+    }
+  }
+  ASSERT((map()->has_fast_elements() ||
+          map()->has_fast_smi_only_elements() ||
+          (value == GetHeap()->empty_fixed_array())) ==
+         (value->map() == GetHeap()->fixed_array_map() ||
+          value->map() == GetHeap()->fixed_cow_array_map()));
+  ASSERT((value == GetHeap()->empty_fixed_array()) ||
+         (map()->has_fast_double_elements() == value->IsFixedDoubleArray()));
+  WRITE_FIELD(this, kElementsOffset, value);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
+}
+
 
 void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
-  ASSERT(map()->has_fast_elements() ==
-         (value->map() == GetHeap()->fixed_array_map() ||
-          value->map() == GetHeap()->fixed_cow_array_map()));
-  ASSERT(map()->has_fast_double_elements() ==
-         value->IsFixedDoubleArray());
-  ASSERT(value->HasValidElements());
-  WRITE_FIELD(this, kElementsOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode);
+  set_map_and_elements(NULL, value, mode);
 }
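
EnsureCanContainElements above decides in one pass over the incoming values which elements kind the receiver must transition to. A self-contained model of that scan (hole handling omitted; TaggedValue is an illustrative stand-in for tagged words):

    #include <cstddef>

    enum RequiredKind { kSmiOnly, kDouble, kObject };

    struct TaggedValue {
      bool is_smi;
      bool is_heap_number;
    };

    // One pass picks the most general kind needed. Heap numbers only force a
    // double array when conversion is allowed; anything else forces objects.
    RequiredKind ScanForRequiredKind(const TaggedValue* values, size_t count,
                                     bool allow_converted_doubles) {
      RequiredKind kind = kSmiOnly;
      for (size_t i = 0; i < count; ++i) {
        if (values[i].is_smi) continue;
        if (allow_converted_doubles && values[i].is_heap_number) {
          kind = kDouble;   // keep scanning: a later non-number still wins
        } else {
          return kObject;   // most general kind; stop early
        }
      }
      return kind;
    }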
 
 
@@ -1420,7 +1383,7 @@
 
 
 void JSObject::initialize_elements() {
-  ASSERT(map()->has_fast_elements());
+  ASSERT(map()->has_fast_elements() || map()->has_fast_smi_only_elements());
   ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
   WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
 }
@@ -1428,9 +1391,12 @@
 
 MaybeObject* JSObject::ResetElements() {
   Object* obj;
-  { MaybeObject* maybe_obj = map()->GetFastElementsMap();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
+  ElementsKind elements_kind = FLAG_smi_only_arrays
+      ? FAST_SMI_ONLY_ELEMENTS
+      : FAST_ELEMENTS;
+  MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
+                                                    elements_kind);
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   set_map(Map::cast(obj));
   initialize_elements();
   return this;
@@ -1442,12 +1408,12 @@
 
 
 byte Oddball::kind() {
-  return READ_BYTE_FIELD(this, kKindOffset);
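+  // The kind is boxed as a Smi so that the field always contains a valid
+  // tagged value.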
+  return Smi::cast(READ_FIELD(this, kKindOffset))->value();
 }
 
 
 void Oddball::set_kind(byte value) {
-  WRITE_BYTE_FIELD(this, kKindOffset, value);
+  WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
 }
 
 
@@ -1480,12 +1446,14 @@
       return JSFunction::kSize;
     case JS_VALUE_TYPE:
       return JSValue::kSize;
+    case JS_DATE_TYPE:
+      return JSDate::kSize;
     case JS_ARRAY_TYPE:
-      return JSValue::kSize;
+      return JSArray::kSize;
     case JS_WEAK_MAP_TYPE:
       return JSWeakMap::kSize;
     case JS_REGEXP_TYPE:
-      return JSValue::kSize;
+      return JSRegExp::kSize;
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
       return JSObject::kHeaderSize;
     case JS_MESSAGE_OBJECT_TYPE:
@@ -1528,7 +1496,17 @@
   // to adjust the index here.
   int offset = GetHeaderSize() + (kPointerSize * index);
   WRITE_FIELD(this, offset, value);
-  WRITE_BARRIER(this, offset);
+  WRITE_BARRIER(GetHeap(), this, offset, value);
+}
+
+
+void JSObject::SetInternalField(int index, Smi* value) {
+  ASSERT(index < GetInternalFieldCount() && index >= 0);
+  // Internal fields follow immediately after the header, whereas in-object
+  // properties are stored at the end of the object. Therefore there is no
+  // need to adjust the index here.
+  int offset = GetHeaderSize() + (kPointerSize * index);
+  WRITE_FIELD(this, offset, value);
 }
 
 
@@ -1554,7 +1532,7 @@
   if (index < 0) {
     int offset = map()->instance_size() + (index * kPointerSize);
     WRITE_FIELD(this, offset, value);
-    WRITE_BARRIER(this, offset);
+    WRITE_BARRIER(GetHeap(), this, offset, value);
   } else {
     ASSERT(index < properties()->length());
     properties()->set(index, value);
@@ -1588,16 +1566,32 @@
   ASSERT(index < 0);
   int offset = map()->instance_size() + (index * kPointerSize);
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
   return value;
 }
 
 
 
-void JSObject::InitializeBody(int object_size, Object* value) {
-  ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
-  for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
-    WRITE_FIELD(this, offset, value);
+void JSObject::InitializeBody(Map* map,
+                              Object* pre_allocated_value,
+                              Object* filler_value) {
+  ASSERT(!filler_value->IsHeapObject() ||
+         !GetHeap()->InNewSpace(filler_value));
+  ASSERT(!pre_allocated_value->IsHeapObject() ||
+         !GetHeap()->InNewSpace(pre_allocated_value));
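+  // Pre-allocated in-object property fields are initialized with
+  // pre_allocated_value; the rest of the body is filled with filler_value.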
+  int size = map->instance_size();
+  int offset = kHeaderSize;
+  if (filler_value != pre_allocated_value) {
+    int pre_allocated = map->pre_allocated_property_fields();
+    ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size);
+    for (int i = 0; i < pre_allocated; i++) {
+      WRITE_FIELD(this, offset, pre_allocated_value);
+      offset += kPointerSize;
+    }
+  }
+  while (offset < size) {
+    WRITE_FIELD(this, offset, filler_value);
+    offset += kPointerSize;
   }
 }
 
@@ -1683,7 +1677,7 @@
   ASSERT(index >= 0 && index < this->length());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
-  WRITE_BARRIER(this, offset);
+  WRITE_BARRIER(GetHeap(), this, offset, value);
 }
 
 
@@ -1713,6 +1707,12 @@
   return result;
 }
 
+int64_t FixedDoubleArray::get_representation(int index) {
+  ASSERT(map() != HEAP->fixed_cow_array_map() &&
+         map() != HEAP->fixed_array_map());
+  ASSERT(index >= 0 && index < this->length());
+  return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
+}
 
 MaybeObject* FixedDoubleArray::get(int index) {
   if (is_the_hole(index)) {
@@ -1746,67 +1746,10 @@
 }
 
 
-void FixedDoubleArray::Initialize(FixedDoubleArray* from) {
-  int old_length = from->length();
-  ASSERT(old_length < length());
-  if (old_length * kDoubleSize >= OS::kMinComplexMemCopy) {
-    OS::MemCopy(FIELD_ADDR(this, kHeaderSize),
-                FIELD_ADDR(from, kHeaderSize),
-                old_length * kDoubleSize);
-  } else {
-    for (int i = 0; i < old_length; ++i) {
-      if (from->is_the_hole(i)) {
-        set_the_hole(i);
-      } else {
-        set(i, from->get_scalar(i));
-      }
-    }
-  }
-  int offset = kHeaderSize + old_length * kDoubleSize;
-  for (int current = from->length(); current < length(); ++current) {
-    WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
-    offset += kDoubleSize;
-  }
-}
-
-
-void FixedDoubleArray::Initialize(FixedArray* from) {
-  int old_length = from->length();
-  ASSERT(old_length < length());
-  for (int i = 0; i < old_length; i++) {
-    Object* hole_or_object = from->get(i);
-    if (hole_or_object->IsTheHole()) {
-      set_the_hole(i);
-    } else {
-      set(i, hole_or_object->Number());
-    }
-  }
-  int offset = kHeaderSize + old_length * kDoubleSize;
-  for (int current = from->length(); current < length(); ++current) {
-    WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
-    offset += kDoubleSize;
-  }
-}
-
-
-void FixedDoubleArray::Initialize(SeededNumberDictionary* from) {
-  int offset = kHeaderSize;
-  for (int current = 0; current < length(); ++current) {
-    WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
-    offset += kDoubleSize;
-  }
-  for (int i = 0; i < from->Capacity(); i++) {
-    Object* key = from->KeyAt(i);
-    if (key->IsNumber()) {
-      uint32_t entry = static_cast<uint32_t>(key->Number());
-      set(entry, from->ValueAt(i)->Number());
-    }
-  }
-}
-
-
 WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
-  if (GetHeap()->InNewSpace(this)) return SKIP_WRITE_BARRIER;
+  Heap* heap = GetHeap();
+  if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
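+  // During incremental marking every write must be reported to the marker,
+  // so the new-space shortcut below only applies while marking is off.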
+  if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
   return UPDATE_WRITE_BARRIER;
 }
 
@@ -1818,11 +1761,27 @@
   ASSERT(index >= 0 && index < this->length());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
 }
 
 
-void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
+void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
+                                              int index,
+                                              Object* value) {
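+  // Skips the incremental-marking barrier but still records old-to-new
+  // pointers so the store buffer stays up to date.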
+  ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
+  ASSERT(index >= 0 && index < array->length());
+  int offset = kHeaderSize + index * kPointerSize;
+  WRITE_FIELD(array, offset, value);
+  Heap* heap = array->GetHeap();
+  if (heap->InNewSpace(value)) {
+    heap->RecordWrite(array->address(), offset);
+  }
+}
+
+
+void FixedArray::NoWriteBarrierSet(FixedArray* array,
+                                   int index,
+                                   Object* value) {
   ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
   ASSERT(index >= 0 && index < array->length());
   ASSERT(!HEAP->InNewSpace(value));
@@ -1879,7 +1838,7 @@
                                WriteBarrierMode mode) {
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(heap, this, offset, mode);
+  CONDITIONAL_WRITE_BARRIER(heap, this, offset, value, mode);
 }
 
 
@@ -1914,10 +1873,12 @@
 }
 
 
-void DescriptorArray::fast_swap(FixedArray* array, int first, int second) {
+void DescriptorArray::NoIncrementalWriteBarrierSwap(FixedArray* array,
+                                                    int first,
+                                                    int second) {
   Object* tmp = array->get(first);
-  fast_set(array, first, array->get(second));
-  fast_set(array, second, tmp);
+  NoIncrementalWriteBarrierSet(array, first, array->get(second));
+  NoIncrementalWriteBarrierSet(array, second, tmp);
 }
 
 
@@ -1992,19 +1953,38 @@
 AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
   ASSERT(GetType(descriptor_number) == CALLBACKS);
   Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
-  return reinterpret_cast<AccessorDescriptor*>(p->address());
+  return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
 }
 
 
 bool DescriptorArray::IsProperty(int descriptor_number) {
-  return GetType(descriptor_number) < FIRST_PHANTOM_PROPERTY_TYPE;
+  Entry entry(this, descriptor_number);
+  return IsPropertyDescriptor(&entry);
 }
 
 
-bool DescriptorArray::IsTransition(int descriptor_number) {
-  PropertyType t = GetType(descriptor_number);
-  return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
-      t == ELEMENTS_TRANSITION;
+bool DescriptorArray::IsTransitionOnly(int descriptor_number) {
+  switch (GetType(descriptor_number)) {
+    case MAP_TRANSITION:
+    case CONSTANT_TRANSITION:
+    case ELEMENTS_TRANSITION:
+      return true;
+    case CALLBACKS: {
+      Object* value = GetValue(descriptor_number);
+      if (!value->IsAccessorPair()) return false;
+      AccessorPair* accessors = AccessorPair::cast(value);
+      return accessors->getter()->IsMap() && accessors->setter()->IsMap();
+    }
+    case NORMAL:
+    case FIELD:
+    case CONSTANT_FUNCTION:
+    case HANDLER:
+    case INTERCEPTOR:
+    case NULL_DESCRIPTOR:
+      return false;
+  }
+  UNREACHABLE();  // Keep the compiler happy.
+  return false;
 }
 
 
@@ -2025,34 +2005,50 @@
 }
 
 
-void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
+void DescriptorArray::Set(int descriptor_number,
+                          Descriptor* desc,
+                          const WhitenessWitness&) {
   // Range check.
   ASSERT(descriptor_number < number_of_descriptors());
 
-  // Make sure none of the elements in desc are in new space.
-  ASSERT(!HEAP->InNewSpace(desc->GetKey()));
-  ASSERT(!HEAP->InNewSpace(desc->GetValue()));
-
-  fast_set(this, ToKeyIndex(descriptor_number), desc->GetKey());
+  NoIncrementalWriteBarrierSet(this,
+                               ToKeyIndex(descriptor_number),
+                               desc->GetKey());
   FixedArray* content_array = GetContentArray();
-  fast_set(content_array, ToValueIndex(descriptor_number), desc->GetValue());
-  fast_set(content_array, ToDetailsIndex(descriptor_number),
-           desc->GetDetails().AsSmi());
+  NoIncrementalWriteBarrierSet(content_array,
+                               ToValueIndex(descriptor_number),
+                               desc->GetValue());
+  NoIncrementalWriteBarrierSet(content_array,
+                               ToDetailsIndex(descriptor_number),
+                               desc->GetDetails().AsSmi());
 }
 
 
-void DescriptorArray::CopyFrom(int index, DescriptorArray* src, int src_index) {
-  Descriptor desc;
-  src->Get(src_index, &desc);
-  Set(index, &desc);
+void DescriptorArray::NoIncrementalWriteBarrierSwapDescriptors(
+    int first, int second) {
+  NoIncrementalWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
+  FixedArray* content_array = GetContentArray();
+  NoIncrementalWriteBarrierSwap(content_array,
+                                ToValueIndex(first),
+                                ToValueIndex(second));
+  NoIncrementalWriteBarrierSwap(content_array,
+                                ToDetailsIndex(first),
+                                ToDetailsIndex(second));
 }
 
 
-void DescriptorArray::Swap(int first, int second) {
-  fast_swap(this, ToKeyIndex(first), ToKeyIndex(second));
-  FixedArray* content_array = GetContentArray();
-  fast_swap(content_array, ToValueIndex(first), ToValueIndex(second));
-  fast_swap(content_array, ToDetailsIndex(first),  ToDetailsIndex(second));
+DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array)
+    : marking_(array->GetHeap()->incremental_marking()) {
+  marking_->EnterNoMarkingScope();
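+  // A freshly allocated descriptor array is still white, so writes to it
+  // need no incremental write barrier as long as no marking step runs in
+  // between; the no-marking scope guarantees that.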
+  if (array->number_of_descriptors() > 0) {
+    ASSERT(Marking::Color(array) == Marking::WHITE_OBJECT);
+    ASSERT(Marking::Color(array->GetContentArray()) == Marking::WHITE_OBJECT);
+  }
+}
+
+
+DescriptorArray::WhitenessWitness::~WhitenessWitness() {
+  marking_->LeaveNoMarkingScope();
 }
 
 
@@ -2084,7 +2080,7 @@
     Object* element = KeyAt(entry);
     // Empty entry.
     if (element == isolate->heap()->raw_unchecked_undefined_value()) break;
-    if (element != isolate->heap()->raw_unchecked_null_value() &&
+    if (element != isolate->heap()->raw_unchecked_the_hole_value() &&
         Shape::IsMatch(key, element)) return entry;
     entry = NextProbe(entry, count++, capacity);
   }
@@ -2121,9 +2117,11 @@
 CAST_ACCESSOR(DescriptorArray)
 CAST_ACCESSOR(DeoptimizationInputData)
 CAST_ACCESSOR(DeoptimizationOutputData)
+CAST_ACCESSOR(TypeFeedbackCells)
 CAST_ACCESSOR(SymbolTable)
 CAST_ACCESSOR(JSFunctionResultCache)
 CAST_ACCESSOR(NormalizedMapCache)
+CAST_ACCESSOR(ScopeInfo)
 CAST_ACCESSOR(CompilationCacheTable)
 CAST_ACCESSOR(CodeCacheHashTable)
 CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
@@ -2156,9 +2154,12 @@
 CAST_ACCESSOR(JSRegExp)
 CAST_ACCESSOR(JSProxy)
 CAST_ACCESSOR(JSFunctionProxy)
+CAST_ACCESSOR(JSSet)
+CAST_ACCESSOR(JSMap)
 CAST_ACCESSOR(JSWeakMap)
 CAST_ACCESSOR(Foreign)
 CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(FreeSpace)
 CAST_ACCESSOR(ExternalArray)
 CAST_ACCESSOR(ExternalByteArray)
 CAST_ACCESSOR(ExternalUnsignedByteArray)
@@ -2185,6 +2186,7 @@
 
 
 SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
 
 SMI_ACCESSORS(String, length, kLengthOffset)
 
@@ -2341,7 +2343,7 @@
 
 
 void SlicedString::set_parent(String* parent) {
-  ASSERT(parent->IsSeqString());
+  ASSERT(parent->IsSeqString() || parent->IsExternalString());
   WRITE_FIELD(this, kParentOffset, parent);
 }
 
@@ -2361,7 +2363,7 @@
 
 void ConsString::set_first(String* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kFirstOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode);
 }
 
 
@@ -2377,29 +2379,83 @@
 
 void ConsString::set_second(String* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kSecondOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
 }
 
 
-ExternalAsciiString::Resource* ExternalAsciiString::resource() {
+bool ExternalString::is_short() {
+  InstanceType type = map()->instance_type();
+  return (type & kShortExternalStringMask) == kShortExternalStringTag;
+}
+
+
+const ExternalAsciiString::Resource* ExternalAsciiString::resource() {
   return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
 }
 
 
+void ExternalAsciiString::update_data_cache() {
+  if (is_short()) return;
+  const char** data_field =
+      reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
+  *data_field = resource()->data();
+}
+
+
 void ExternalAsciiString::set_resource(
-    ExternalAsciiString::Resource* resource) {
-  *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
+    const ExternalAsciiString::Resource* resource) {
+  *reinterpret_cast<const Resource**>(
+      FIELD_ADDR(this, kResourceOffset)) = resource;
+  if (resource != NULL) update_data_cache();
 }
 
 
-ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
+const char* ExternalAsciiString::GetChars() {
+  return resource()->data();
+}
+
+
+uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
+  ASSERT(index >= 0 && index < length());
+  return GetChars()[index];
+}
+
+
+const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
   return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
 }
 
 
+void ExternalTwoByteString::update_data_cache() {
+  if (is_short()) return;
+  const uint16_t** data_field =
+      reinterpret_cast<const uint16_t**>(FIELD_ADDR(this, kResourceDataOffset));
+  *data_field = resource()->data();
+}
+
+
 void ExternalTwoByteString::set_resource(
-    ExternalTwoByteString::Resource* resource) {
-  *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
+    const ExternalTwoByteString::Resource* resource) {
+  *reinterpret_cast<const Resource**>(
+      FIELD_ADDR(this, kResourceOffset)) = resource;
+  if (resource != NULL) update_data_cache();
+}
+
+
+const uint16_t* ExternalTwoByteString::GetChars() {
+  return resource()->data();
+}
+
+
+uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
+  ASSERT(index >= 0 && index < length());
+  return GetChars()[index];
+}
+
+
+const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
+      unsigned start) {
+  return GetChars() + start;
 }
 
 
@@ -2699,6 +2755,9 @@
   if (instance_type == BYTE_ARRAY_TYPE) {
     return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
   }
+  if (instance_type == FREE_SPACE_TYPE) {
+    return reinterpret_cast<FreeSpace*>(this)->size();
+  }
   if (instance_type == STRING_TYPE) {
     return SeqTwoByteString::SizeFor(
         reinterpret_cast<SeqTwoByteString*>(this)->length());
@@ -2860,12 +2919,6 @@
 }
 
 
-FixedArray* Map::unchecked_prototype_transitions() {
-  return reinterpret_cast<FixedArray*>(
-      READ_FIELD(this, kPrototypeTransitionsOffset));
-}
-
-
 Code::Flags Code::flags() {
   return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
 }
@@ -2937,6 +2990,19 @@
 }
 
 
+bool Code::is_pregenerated() {
+  return kind() == STUB && IsPregeneratedField::decode(flags());
+}
+
+
+void Code::set_is_pregenerated(bool value) {
+  ASSERT(kind() == STUB);
+  Flags f = flags();
+  f = static_cast<Flags>(IsPregeneratedField::update(f, value));
+  set_flags(f);
+}
+
+
 bool Code::optimizable() {
   ASSERT(kind() == FUNCTION);
   return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
@@ -2979,6 +3045,36 @@
 }
 
 
+bool Code::is_compiled_optimizable() {
+  ASSERT(kind() == FUNCTION);
+  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+  return FullCodeFlagsIsCompiledOptimizable::decode(flags);
+}
+
+
+void Code::set_compiled_optimizable(bool value) {
+  ASSERT(kind() == FUNCTION);
+  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+  flags = FullCodeFlagsIsCompiledOptimizable::update(flags, value);
+  WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
+}
+
+
+bool Code::has_self_optimization_header() {
+  ASSERT(kind() == FUNCTION);
+  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+  return FullCodeFlagsHasSelfOptimizationHeader::decode(flags);
+}
+
+
+void Code::set_self_optimization_header(bool value) {
+  ASSERT(kind() == FUNCTION);
+  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+  flags = FullCodeFlagsHasSelfOptimizationHeader::update(flags, value);
+  WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
+}
+
+
 int Code::allow_osr_at_loop_nesting_level() {
   ASSERT(kind() == FUNCTION);
   return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
@@ -3102,6 +3198,19 @@
   WRITE_BYTE_FIELD(this, kToBooleanTypeOffset, value);
 }
 
+
+bool Code::has_function_cache() {
+  ASSERT(kind() == STUB);
+  return READ_BYTE_FIELD(this, kHasFunctionCacheOffset) != 0;
+}
+
+
+void Code::set_has_function_cache(bool flag) {
+  ASSERT(kind() == STUB);
+  WRITE_BYTE_FIELD(this, kHasFunctionCacheOffset, flag);
+}
+
+
 bool Code::is_inline_cache_stub() {
   Kind kind = this->kind();
   return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
@@ -3187,48 +3296,6 @@
 }
 
 
-Isolate* Map::isolate() {
-  return heap()->isolate();
-}
-
-
-Heap* Map::heap() {
-  // NOTE: address() helper is not used to save one instruction.
-  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
-  ASSERT(heap != NULL);
-  ASSERT(heap->isolate() == Isolate::Current());
-  return heap;
-}
-
-
-Heap* Code::heap() {
-  // NOTE: address() helper is not used to save one instruction.
-  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
-  ASSERT(heap != NULL);
-  ASSERT(heap->isolate() == Isolate::Current());
-  return heap;
-}
-
-
-Isolate* Code::isolate() {
-  return heap()->isolate();
-}
-
-
-Heap* JSGlobalPropertyCell::heap() {
-  // NOTE: address() helper is not used to save one instruction.
-  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
-  ASSERT(heap != NULL);
-  ASSERT(heap->isolate() == Isolate::Current());
-  return heap;
-}
-
-
-Isolate* JSGlobalPropertyCell::isolate() {
-  return heap()->isolate();
-}
-
-
 Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
   return HeapObject::
       FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
@@ -3243,53 +3310,14 @@
 void Map::set_prototype(Object* value, WriteBarrierMode mode) {
   ASSERT(value->IsNull() || value->IsJSReceiver());
   WRITE_FIELD(this, kPrototypeOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, mode);
-}
-
-
-MaybeObject* Map::GetFastElementsMap() {
-  if (has_fast_elements()) return this;
-  Object* obj;
-  { MaybeObject* maybe_obj = CopyDropTransitions();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  Map* new_map = Map::cast(obj);
-  new_map->set_elements_kind(FAST_ELEMENTS);
-  isolate()->counters()->map_to_fast_elements()->Increment();
-  return new_map;
-}
-
-
-MaybeObject* Map::GetFastDoubleElementsMap() {
-  if (has_fast_double_elements()) return this;
-  Object* obj;
-  { MaybeObject* maybe_obj = CopyDropTransitions();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  Map* new_map = Map::cast(obj);
-  new_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
-  isolate()->counters()->map_to_fast_double_elements()->Increment();
-  return new_map;
-}
-
-
-MaybeObject* Map::GetSlowElementsMap() {
-  if (!has_fast_elements() && !has_fast_double_elements()) return this;
-  Object* obj;
-  { MaybeObject* maybe_obj = CopyDropTransitions();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  Map* new_map = Map::cast(obj);
-  new_map->set_elements_kind(DICTIONARY_ELEMENTS);
-  isolate()->counters()->map_to_slow_elements()->Increment();
-  return new_map;
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
 }
 
 
 DescriptorArray* Map::instance_descriptors() {
   Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
   if (object->IsSmi()) {
-    return HEAP->empty_descriptor_array();
+    return GetHeap()->empty_descriptor_array();
   } else {
     return DescriptorArray::cast(object);
   }
@@ -3317,7 +3345,8 @@
                                    WriteBarrierMode mode) {
   Object* object = READ_FIELD(this,
                               kInstanceDescriptorsOrBitField3Offset);
-  if (value == isolate()->heap()->empty_descriptor_array()) {
+  Heap* heap = GetHeap();
+  if (value == heap->empty_descriptor_array()) {
     clear_instance_descriptors();
     return;
   } else {
@@ -3330,10 +3359,8 @@
   }
   ASSERT(!is_shared());
   WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(),
-                            this,
-                            kInstanceDescriptorsOrBitField3Offset,
-                            mode);
+  CONDITIONAL_WRITE_BARRIER(
+      heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode);
 }
 
 
@@ -3362,14 +3389,22 @@
 }
 
 
+FixedArray* Map::unchecked_prototype_transitions() {
+  return reinterpret_cast<FixedArray*>(
+      READ_FIELD(this, kPrototypeTransitionsOffset));
+}
+
+
 ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
 ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset)
 ACCESSORS(Map, constructor, Object, kConstructorOffset)
 
 ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
-ACCESSORS_GCSAFE(JSFunction, next_function_link, Object,
-                 kNextFunctionLinkOffset)
+ACCESSORS(JSFunction, literals_or_bindings, FixedArray, kLiteralsOffset)
+ACCESSORS(JSFunction,
+          next_function_link,
+          Object,
+          kNextFunctionLinkOffset)
 
 ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
 ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
@@ -3381,7 +3416,10 @@
 ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
 ACCESSORS(AccessorInfo, data, Object, kDataOffset)
 ACCESSORS(AccessorInfo, name, Object, kNameOffset)
-ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
+ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
+
+ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
+ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
 
 ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
 ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
@@ -3419,7 +3457,7 @@
           kInstanceCallHandlerOffset)
 ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
           kAccessCheckInfoOffset)
-ACCESSORS(FunctionTemplateInfo, flag, Smi, kFlagOffset)
+ACCESSORS_TO_SMI(FunctionTemplateInfo, flag, kFlagOffset)
 
 ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
 ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
@@ -3433,17 +3471,18 @@
 ACCESSORS(Script, source, Object, kSourceOffset)
 ACCESSORS(Script, name, Object, kNameOffset)
 ACCESSORS(Script, id, Object, kIdOffset)
-ACCESSORS(Script, line_offset, Smi, kLineOffsetOffset)
-ACCESSORS(Script, column_offset, Smi, kColumnOffsetOffset)
+ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
+ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
 ACCESSORS(Script, data, Object, kDataOffset)
 ACCESSORS(Script, context_data, Object, kContextOffset)
 ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
-ACCESSORS(Script, type, Smi, kTypeOffset)
-ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
+ACCESSORS_TO_SMI(Script, type, kTypeOffset)
+ACCESSORS_TO_SMI(Script, compilation_type, kCompilationTypeOffset)
+ACCESSORS_TO_SMI(Script, compilation_state, kCompilationStateOffset)
 ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
 ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
-ACCESSORS(Script, eval_from_instructions_offset, Smi,
-          kEvalFrominstructionsOffsetOffset)
+ACCESSORS_TO_SMI(Script, eval_from_instructions_offset,
+                 kEvalFrominstructionsOffsetOffset)
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
@@ -3451,15 +3490,15 @@
 ACCESSORS(DebugInfo, code, Code, kPatchedCodeIndex)
 ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
 
-ACCESSORS(BreakPointInfo, code_position, Smi, kCodePositionIndex)
-ACCESSORS(BreakPointInfo, source_position, Smi, kSourcePositionIndex)
-ACCESSORS(BreakPointInfo, statement_position, Smi, kStatementPositionIndex)
+ACCESSORS_TO_SMI(BreakPointInfo, code_position, kCodePositionIndex)
+ACCESSORS_TO_SMI(BreakPointInfo, source_position, kSourcePositionIndex)
+ACCESSORS_TO_SMI(BreakPointInfo, statement_position, kStatementPositionIndex)
 ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
 #endif
 
 ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS_GCSAFE(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS_GCSAFE(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
+ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
 ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
           kInstanceClassNameOffset)
 ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
@@ -3469,6 +3508,8 @@
 ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
           kThisPropertyAssignmentsOffset)
 
+SMI_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset)
+
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
                kHiddenPrototypeBit)
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
@@ -3515,6 +3556,8 @@
 SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
               kThisPropertyAssignmentsCountOffset)
 SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
+SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
+SMI_ACCESSORS(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
 #else
 
 #define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset)             \
@@ -3565,6 +3608,9 @@
                         this_property_assignments_count,
                         kThisPropertyAssignmentsCountOffset)
 PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, opt_count, kOptCountOffset)
+
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
 #endif
 
 
@@ -3586,7 +3632,7 @@
 
 
 bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
-  return initial_map() != HEAP->undefined_value();
+  return initial_map() != GetHeap()->undefined_value();
 }
 
 
@@ -3608,14 +3654,49 @@
 }
 
 
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, strict_mode,
-               kStrictModeFunction)
+LanguageMode SharedFunctionInfo::language_mode() {
+  int hints = compiler_hints();
+  if (BooleanBit::get(hints, kExtendedModeFunction)) {
+    ASSERT(BooleanBit::get(hints, kStrictModeFunction));
+    return EXTENDED_MODE;
+  }
+  return BooleanBit::get(hints, kStrictModeFunction)
+      ? STRICT_MODE : CLASSIC_MODE;
+}
+
+
+void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
+  // We only allow language mode transitions that set the same language mode
+  // again or go up in the chain:
+  //   CLASSIC_MODE -> STRICT_MODE -> EXTENDED_MODE.
+  ASSERT(this->language_mode() == CLASSIC_MODE ||
+         this->language_mode() == language_mode ||
+         language_mode == EXTENDED_MODE);
+  int hints = compiler_hints();
+  hints = BooleanBit::set(
+      hints, kStrictModeFunction, language_mode != CLASSIC_MODE);
+  hints = BooleanBit::set(
+      hints, kExtendedModeFunction, language_mode == EXTENDED_MODE);
+  set_compiler_hints(hints);
+}
+
+
+bool SharedFunctionInfo::is_classic_mode() {
+  return !BooleanBit::get(compiler_hints(), kStrictModeFunction);
+}
+
+BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
+            kExtendedModeFunction)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
                name_should_print_as_anonymous,
                kNameShouldPrintAsAnonymous)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize,
+               kDontOptimize)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline)
 
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@@ -3665,30 +3746,23 @@
 
 void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kCodeOffset, value);
-  ASSERT(!Isolate::Current()->heap()->InNewSpace(value));
+  CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
 }
 
 
-SerializedScopeInfo* SharedFunctionInfo::scope_info() {
-  return reinterpret_cast<SerializedScopeInfo*>(
-      READ_FIELD(this, kScopeInfoOffset));
+ScopeInfo* SharedFunctionInfo::scope_info() {
+  return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
 }
 
 
-void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
+void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
                                         WriteBarrierMode mode) {
   WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kScopeInfoOffset, mode);
-}
-
-
-Smi* SharedFunctionInfo::deopt_counter() {
-  return reinterpret_cast<Smi*>(READ_FIELD(this, kDeoptCounterOffset));
-}
-
-
-void SharedFunctionInfo::set_deopt_counter(Smi* value) {
-  WRITE_FIELD(this, kDeoptCounterOffset, value);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(),
+                            this,
+                            kScopeInfoOffset,
+                            reinterpret_cast<Object*>(value),
+                            mode);
 }
 
 
@@ -3726,8 +3800,8 @@
 
 
 void SharedFunctionInfo::set_code_age(int code_age) {
-  set_compiler_hints(compiler_hints() |
-                     ((code_age & kCodeAgeMask) << kCodeAgeShift));
+  int hints = compiler_hints() & ~(kCodeAgeMask << kCodeAgeShift);
+  set_compiler_hints(hints | ((code_age & kCodeAgeMask) << kCodeAgeShift));
 }
 
 
@@ -3775,10 +3849,13 @@
 
 
 void JSFunction::set_code(Code* value) {
-  // Skip the write barrier because code is never in new space.
   ASSERT(!HEAP->InNewSpace(value));
   Address entry = value->entry();
   WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
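+  // The code entry is a raw instruction address, so the incremental marker
+  // records it with a dedicated code-entry write barrier.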
+  GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
+      this,
+      HeapObject::RawField(this, kCodeEntryOffset),
+      value);
 }
 
 
@@ -3818,7 +3895,7 @@
 void JSFunction::set_context(Object* value) {
   ASSERT(value->IsUndefined() || value->IsContext());
   WRITE_FIELD(this, kContextOffset, value);
-  WRITE_BARRIER(this, kContextOffset);
+  WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
 }
 
 ACCESSORS(JSFunction, prototype_or_initial_map, Object,
@@ -3835,6 +3912,40 @@
 }
 
 
+MaybeObject* JSFunction::set_initial_map_and_cache_transitions(
+    Map* initial_map) {
+  Context* global_context = context()->global_context();
+  Object* array_function =
+      global_context->get(Context::ARRAY_FUNCTION_INDEX);
+  if (array_function->IsJSFunction() &&
+      this == JSFunction::cast(array_function)) {
+    ASSERT(initial_map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
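+    // Eagerly create the transition chain
+    // FAST_SMI_ONLY_ELEMENTS -> FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS
+    // for the Array function's initial map and cache the maps in the
+    // global context.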
+
+    MaybeObject* maybe_map = initial_map->CopyDropTransitions();
+    Map* new_double_map = NULL;
+    if (!maybe_map->To<Map>(&new_double_map)) return maybe_map;
+    new_double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
+    maybe_map = initial_map->AddElementsTransition(FAST_DOUBLE_ELEMENTS,
+                                                   new_double_map);
+    if (maybe_map->IsFailure()) return maybe_map;
+
+    maybe_map = new_double_map->CopyDropTransitions();
+    Map* new_object_map = NULL;
+    if (!maybe_map->To<Map>(&new_object_map)) return maybe_map;
+    new_object_map->set_elements_kind(FAST_ELEMENTS);
+    maybe_map = new_double_map->AddElementsTransition(FAST_ELEMENTS,
+                                                      new_object_map);
+    if (maybe_map->IsFailure()) return maybe_map;
+
+    global_context->set_smi_js_array_map(initial_map);
+    global_context->set_double_js_array_map(new_double_map);
+    global_context->set_object_js_array_map(new_object_map);
+  }
+  set_initial_map(initial_map);
+  return this;
+}
+
+
 bool JSFunction::has_initial_map() {
   return prototype_or_initial_map()->IsMap();
 }
@@ -3877,7 +3988,36 @@
 }
 
 
+FixedArray* JSFunction::literals() {
+  ASSERT(!shared()->bound());
+  return literals_or_bindings();
+}
+
+
+void JSFunction::set_literals(FixedArray* literals) {
+  ASSERT(!shared()->bound());
+  set_literals_or_bindings(literals);
+}
+
+
+FixedArray* JSFunction::function_bindings() {
+  ASSERT(shared()->bound());
+  return literals_or_bindings();
+}
+
+
+void JSFunction::set_function_bindings(FixedArray* bindings) {
+  ASSERT(shared()->bound());
+  // The bound function's literals field may be initialized to the empty
+  // fixed array before the actual bindings are set.
+  ASSERT(bindings == GetHeap()->empty_fixed_array() ||
+         bindings->map() == GetHeap()->fixed_cow_array_map());
+  set_literals_or_bindings(bindings);
+}
+
+
 int JSFunction::NumberOfLiterals() {
+  ASSERT(!shared()->bound());
   return literals()->length();
 }
 
@@ -3892,7 +4032,7 @@
                                               Object* value) {
   ASSERT(id < kJSBuiltinsCount);  // id is unsigned.
   WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
-  WRITE_BARRIER(this, OffsetOfFunctionWithId(id));
+  WRITE_BARRIER(GetHeap(), this, OffsetOfFunctionWithId(id), value);
 }
 
 
@@ -3911,6 +4051,7 @@
 
 
 ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
+ACCESSORS(JSProxy, hash, Object, kHashOffset)
 ACCESSORS(JSFunctionProxy, call_trap, Object, kCallTrapOffset)
 ACCESSORS(JSFunctionProxy, construct_trap, Object, kConstructTrapOffset)
 
@@ -3923,22 +4064,19 @@
 }
 
 
-ACCESSORS(JSWeakMap, table, ObjectHashTable, kTableOffset)
-ACCESSORS_GCSAFE(JSWeakMap, next, Object, kNextOffset)
+ACCESSORS(JSSet, table, Object, kTableOffset)
+ACCESSORS(JSMap, table, Object, kTableOffset)
+ACCESSORS(JSWeakMap, table, Object, kTableOffset)
+ACCESSORS(JSWeakMap, next, Object, kNextOffset)
 
 
-ObjectHashTable* JSWeakMap::unchecked_table() {
-  return reinterpret_cast<ObjectHashTable*>(READ_FIELD(this, kTableOffset));
+Address Foreign::foreign_address() {
+  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kForeignAddressOffset));
 }
 
 
-Address Foreign::address() {
-  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kAddressOffset));
-}
-
-
-void Foreign::set_address(Address value) {
-  WRITE_INTPTR_FIELD(this, kAddressOffset, OffsetFrom(value));
+void Foreign::set_foreign_address(Address value) {
+  WRITE_INTPTR_FIELD(this, kForeignAddressOffset, OffsetFrom(value));
 }
 
 
@@ -3952,6 +4090,24 @@
 }
 
 
+ACCESSORS(JSDate, value, Object, kValueOffset)
+ACCESSORS(JSDate, cache_stamp, Object, kCacheStampOffset)
+ACCESSORS(JSDate, year, Object, kYearOffset)
+ACCESSORS(JSDate, month, Object, kMonthOffset)
+ACCESSORS(JSDate, day, Object, kDayOffset)
+ACCESSORS(JSDate, weekday, Object, kWeekdayOffset)
+ACCESSORS(JSDate, hour, Object, kHourOffset)
+ACCESSORS(JSDate, min, Object, kMinOffset)
+ACCESSORS(JSDate, sec, Object, kSecOffset)
+
+
+JSDate* JSDate::cast(Object* obj) {
+  ASSERT(obj->IsJSDate());
+  ASSERT(HeapObject::cast(obj)->Size() == JSDate::kSize);
+  return reinterpret_cast<JSDate*>(obj);
+}
+
+
 ACCESSORS(JSMessageObject, type, String, kTypeOffset)
 ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
 ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
@@ -3970,10 +4126,11 @@
 
 INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
 ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
+ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
 ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-ACCESSORS(Code, next_code_flushing_candidate,
-          Object, kNextCodeFlushingCandidateOffset)
-
+ACCESSORS(Code, type_feedback_info, Object, kTypeFeedbackInfoOffset)
+ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
+INT_ACCESSORS(Code, ic_age, kICAgeOffset)
 
 byte* Code::instruction_start()  {
   return FIELD_ADDR(this, kHeaderSize);
@@ -4016,9 +4173,8 @@
 }
 
 
-bool Code::contains(byte* pc) {
-  return (instruction_start() <= pc) &&
-      (pc <= instruction_start() + instruction_size());
+bool Code::contains(byte* inner_pointer) {
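+  // Accepts any address inside the whole Code object, including the header
+  // and relocation info, not just the instruction stream.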
+  return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
 }
 
 
@@ -4097,6 +4253,7 @@
   if (value->IsSmi()) {
     fa->set_unchecked(index, Smi::cast(value));
   } else {
+    // We only do this during GC, so we don't need to notify the write barrier.
     fa->set_unchecked(heap, index, value, SKIP_WRITE_BARRIER);
   }
 }
@@ -4104,15 +4261,23 @@
 
 ElementsKind JSObject::GetElementsKind() {
   ElementsKind kind = map()->elements_kind();
-  ASSERT((kind == FAST_ELEMENTS &&
-          (elements()->map() == GetHeap()->fixed_array_map() ||
-           elements()->map() == GetHeap()->fixed_cow_array_map())) ||
-         (kind == FAST_DOUBLE_ELEMENTS &&
-          elements()->IsFixedDoubleArray()) ||
-         (kind == DICTIONARY_ELEMENTS &&
-          elements()->IsFixedArray() &&
-          elements()->IsDictionary()) ||
-         (kind > DICTIONARY_ELEMENTS));
+#if DEBUG
+  FixedArrayBase* fixed_array =
+      reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
+  Map* map = fixed_array->map();
+  ASSERT(((kind == FAST_ELEMENTS || kind == FAST_SMI_ONLY_ELEMENTS) &&
+          (map == GetHeap()->fixed_array_map() ||
+           map == GetHeap()->fixed_cow_array_map())) ||
+         (kind == FAST_DOUBLE_ELEMENTS &&
+          (fixed_array->IsFixedDoubleArray() ||
+           fixed_array == GetHeap()->empty_fixed_array())) ||
+         (kind == DICTIONARY_ELEMENTS &&
+          fixed_array->IsFixedArray() &&
+          fixed_array->IsDictionary()) ||
+         (kind > DICTIONARY_ELEMENTS));
+  ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
+         (elements()->IsFixedArray() && elements()->length() >= 2));
+#endif
   return kind;
 }
 
@@ -4127,6 +4292,18 @@
 }
 
 
+bool JSObject::HasFastSmiOnlyElements() {
+  return GetElementsKind() == FAST_SMI_ONLY_ELEMENTS;
+}
+
+
+bool JSObject::HasFastTypeElements() {
+  ElementsKind elements_kind = GetElementsKind();
+  return elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+      elements_kind == FAST_ELEMENTS;
+}
+
+
 bool JSObject::HasFastDoubleElements() {
   return GetElementsKind() == FAST_DOUBLE_ELEMENTS;
 }
@@ -4137,6 +4314,11 @@
 }
 
 
+bool JSObject::HasNonStrictArgumentsElements() {
+  return GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+}
+
+
 bool JSObject::HasExternalArrayElements() {
   HeapObject* array = elements();
   ASSERT(array != NULL);
@@ -4179,16 +4361,8 @@
 }
 
 
-bool JSObject::AllowsSetElementsLength() {
-  bool result = elements()->IsFixedArray() ||
-      elements()->IsFixedDoubleArray();
-  ASSERT(result == !HasExternalArrayElements());
-  return result;
-}
-
-
 MaybeObject* JSObject::EnsureWritableFastElements() {
-  ASSERT(HasFastElements());
+  ASSERT(HasFastTypeElements());
   FixedArray* elems = FixedArray::cast(elements());
   Isolate* isolate = GetIsolate();
   if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
@@ -4252,7 +4426,11 @@
 }
 
 
-void StringHasher::AddCharacter(uc32 c) {
+void StringHasher::AddCharacter(uint32_t c) {
+  if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
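+    // Code points beyond the BMP are hashed as a UTF-16 surrogate pair.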
+    AddSurrogatePair(c);  // Not inlined.
+    return;
+  }
   // Use the Jenkins one-at-a-time hash function to update the hash
   // for the given character.
   raw_running_hash_ += c;
@@ -4281,8 +4459,12 @@
 }
 
 
-void StringHasher::AddCharacterNoIndex(uc32 c) {
+void StringHasher::AddCharacterNoIndex(uint32_t c) {
   ASSERT(!is_array_index());
+  if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+    AddSurrogatePairNoIndex(c);  // Not inlined.
+    return;
+  }
   raw_running_hash_ += c;
   raw_running_hash_ += (raw_running_hash_ << 10);
   raw_running_hash_ ^= (raw_running_hash_ >> 6);
@@ -4366,44 +4548,18 @@
 }
 
 
-bool JSObject::HasHiddenPropertiesObject() {
-  ASSERT(!IsJSGlobalProxy());
-  return GetPropertyAttributePostInterceptor(this,
-                                             GetHeap()->hidden_symbol(),
-                                             false) != ABSENT;
+MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) {
+  return IsJSProxy()
+      ? JSProxy::cast(this)->GetIdentityHash(flag)
+      : JSObject::cast(this)->GetIdentityHash(flag);
 }
 
 
-Object* JSObject::GetHiddenPropertiesObject() {
-  ASSERT(!IsJSGlobalProxy());
-  PropertyAttributes attributes;
-  // You can't install a getter on a property indexed by the hidden symbol,
-  // so we can be sure that GetLocalPropertyPostInterceptor returns a real
-  // object.
-  Object* result =
-      GetLocalPropertyPostInterceptor(this,
-                                      GetHeap()->hidden_symbol(),
-                                      &attributes)->ToObjectUnchecked();
-  return result;
-}
-
-
-MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
-  ASSERT(!IsJSGlobalProxy());
-  return SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
-                                    hidden_obj,
-                                    DONT_ENUM,
-                                    kNonStrictMode);
-}
-
-
-bool JSObject::HasHiddenProperties() {
-  return !GetHiddenProperties(OMIT_CREATION)->ToObjectChecked()->IsUndefined();
-}
-
-
-bool JSObject::HasElement(uint32_t index) {
-  return HasElementWithReceiver(this, index);
+bool JSReceiver::HasElement(uint32_t index) {
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->HasElementWithHandler(index);
+  }
+  return JSObject::cast(this)->HasElementWithReceiver(this, index);
 }
 
 
@@ -4466,7 +4622,7 @@
   WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
   FixedArray::set(index, key, mode);
   FixedArray::set(index+1, value, mode);
-  FixedArray::fast_set(this, index+2, details.AsSmi());
+  FixedArray::set(index+2, details.AsSmi());
 }
 
 
@@ -4526,47 +4682,45 @@
 }
 
 
-bool ObjectHashTableShape::IsMatch(JSObject* key, Object* other) {
-  return key == JSObject::cast(other);
+template <int entrysize>
+bool ObjectHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
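+  // Keys may be arbitrary objects and are compared with SameValue semantics
+  // rather than by JSObject identity.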
+  return key->SameValue(other);
 }
 
 
-uint32_t ObjectHashTableShape::Hash(JSObject* key) {
-  MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
-  ASSERT(!maybe_hash->IsFailure());
-  return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+template <int entrysize>
+uint32_t ObjectHashTableShape<entrysize>::Hash(Object* key) {
+  MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
+  return Smi::cast(maybe_hash->ToObjectChecked())->value();
 }
 
 
-uint32_t ObjectHashTableShape::HashForObject(JSObject* key, Object* other) {
-  MaybeObject* maybe_hash = JSObject::cast(other)->GetIdentityHash(
-      JSObject::OMIT_CREATION);
-  ASSERT(!maybe_hash->IsFailure());
-  return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+template <int entrysize>
+uint32_t ObjectHashTableShape<entrysize>::HashForObject(Object* key,
+                                                        Object* other) {
+  MaybeObject* maybe_hash = other->GetHash(OMIT_CREATION);
+  return Smi::cast(maybe_hash->ToObjectChecked())->value();
 }
 
 
-MaybeObject* ObjectHashTableShape::AsObject(JSObject* key) {
+template <int entrysize>
+MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Object* key) {
   return key;
 }
 
 
-void ObjectHashTable::RemoveEntry(int entry) {
-  RemoveEntry(entry, GetHeap());
-}
-
-
 void Map::ClearCodeCache(Heap* heap) {
   // No write barrier is needed since empty_fixed_array is not in new space.
   // Please note this function is used during marking:
   //  - MarkCompactCollector::MarkUnmarkedObject
+  //  - IncrementalMarking::Step
   ASSERT(!heap->InNewSpace(heap->raw_unchecked_empty_fixed_array()));
   WRITE_FIELD(this, kCodeCacheOffset, heap->raw_unchecked_empty_fixed_array());
 }
 
 
 void JSArray::EnsureSize(int required_size) {
-  ASSERT(HasFastElements());
+  ASSERT(HasFastTypeElements());
   FixedArray* elts = FixedArray::cast(elements());
   const int kArraySizeThatFitsComfortablyInNewSpace = 128;
   if (elts->length() < required_size) {
@@ -4584,13 +4738,31 @@
 
 
 void JSArray::set_length(Smi* length) {
+  // Don't need a write barrier for a Smi.
   set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
 }
 
 
-void JSArray::SetContent(FixedArray* storage) {
-  set_length(Smi::FromInt(storage->length()));
+bool JSArray::AllowsSetElementsLength() {
+  bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray();
+  ASSERT(result == !HasExternalArrayElements());
+  return result;
+}
+
+
+MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
+  MaybeObject* maybe_result = EnsureCanContainElements(
+      storage, ALLOW_COPIED_DOUBLE_ELEMENTS);
+  if (maybe_result->IsFailure()) return maybe_result;
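+  // EnsureCanContainElements may have transitioned the elements kind to
+  // match the incoming storage; the assert below checks for consistency.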
+  ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
+          GetElementsKind() == FAST_DOUBLE_ELEMENTS) ||
+         ((storage->map() != GetHeap()->fixed_double_array_map()) &&
+          ((GetElementsKind() == FAST_ELEMENTS) ||
+           (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
+            FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
   set_elements(storage);
+  set_length(Smi::FromInt(storage->length()));
+  return this;
 }
 
 
@@ -4600,6 +4772,57 @@
 }
 
 
+MaybeObject* FixedDoubleArray::Copy() {
+  if (length() == 0) return this;
+  return GetHeap()->CopyFixedDoubleArray(this);
+}
+
+
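+// The cells and AST ids are interleaved: the property cell for entry i is
+// at slot 2 * i and its AST id at slot 2 * i + 1.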
+void TypeFeedbackCells::SetAstId(int index, Smi* id) {
+  set(1 + index * 2, id);
+}
+
+
+Smi* TypeFeedbackCells::AstId(int index) {
+  return Smi::cast(get(1 + index * 2));
+}
+
+
+void TypeFeedbackCells::SetCell(int index, JSGlobalPropertyCell* cell) {
+  set(index * 2, cell);
+}
+
+
+JSGlobalPropertyCell* TypeFeedbackCells::Cell(int index) {
+  return JSGlobalPropertyCell::cast(get(index * 2));
+}
+
+
+Handle<Object> TypeFeedbackCells::UninitializedSentinel(Isolate* isolate) {
+  return isolate->factory()->the_hole_value();
+}
+
+
+Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
+  return isolate->factory()->undefined_value();
+}
+
+
+Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
+  return heap->raw_unchecked_the_hole_value();
+}
+
+
+SMI_ACCESSORS(TypeFeedbackInfo, ic_total_count, kIcTotalCountOffset)
+SMI_ACCESSORS(TypeFeedbackInfo, ic_with_typeinfo_count,
+              kIcWithTypeinfoCountOffset)
+ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
+          kTypeFeedbackCellsOffset)
+
+
+SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
+
+
 Relocatable::Relocatable(Isolate* isolate) {
   ASSERT(isolate == Isolate::Current());
   isolate_ = isolate;
@@ -4622,14 +4845,14 @@
 
 void Foreign::ForeignIterateBody(ObjectVisitor* v) {
   v->VisitExternalReference(
-      reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
+      reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
 }
 
 
 template<typename StaticVisitor>
 void Foreign::ForeignIterateBody() {
   StaticVisitor::VisitExternalReference(
-      reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
+      reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
 }
 
 
@@ -4682,22 +4905,27 @@
 
 #undef SLOT_ADDR
 
-
+#undef TYPE_CHECKER
 #undef CAST_ACCESSOR
 #undef INT_ACCESSORS
-#undef SMI_ACCESSORS
 #undef ACCESSORS
+#undef ACCESSORS_TO_SMI
+#undef SMI_ACCESSORS
+#undef BOOL_GETTER
+#undef BOOL_ACCESSORS
 #undef FIELD_ADDR
 #undef READ_FIELD
 #undef WRITE_FIELD
 #undef WRITE_BARRIER
 #undef CONDITIONAL_WRITE_BARRIER
-#undef READ_MEMADDR_FIELD
-#undef WRITE_MEMADDR_FIELD
 #undef READ_DOUBLE_FIELD
 #undef WRITE_DOUBLE_FIELD
 #undef READ_INT_FIELD
 #undef WRITE_INT_FIELD
+#undef READ_INTPTR_FIELD
+#undef WRITE_INTPTR_FIELD
+#undef READ_UINT32_FIELD
+#undef WRITE_UINT32_FIELD
 #undef READ_SHORT_FIELD
 #undef WRITE_SHORT_FIELD
 #undef READ_BYTE_FIELD
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 0398572..38e6138 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -82,12 +82,18 @@
     case HEAP_NUMBER_TYPE:
       HeapNumber::cast(this)->HeapNumberPrint(out);
       break;
+    case FIXED_DOUBLE_ARRAY_TYPE:
+      FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
+      break;
     case FIXED_ARRAY_TYPE:
       FixedArray::cast(this)->FixedArrayPrint(out);
       break;
     case BYTE_ARRAY_TYPE:
       ByteArray::cast(this)->ByteArrayPrint(out);
       break;
+    case FREE_SPACE_TYPE:
+      FreeSpace::cast(this)->FreeSpacePrint(out);
+      break;
     case EXTERNAL_PIXEL_ARRAY_TYPE:
       ExternalPixelArray::cast(this)->ExternalPixelArrayPrint(out);
       break;
@@ -145,6 +151,9 @@
       PrintF(out, "Value wrapper around:");
       JSValue::cast(this)->value()->Print(out);
       break;
+    case JS_DATE_TYPE:
+      JSDate::cast(this)->value()->Print(out);
+      break;
     case CODE_TYPE:
       Code::cast(this)->CodePrint(out);
       break;
@@ -189,6 +198,11 @@
 }
 
 
+void FreeSpace::FreeSpacePrint(FILE* out) {
+  PrintF(out, "free space, size %d", Size());
+}
+
+
 void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) {
   PrintF(out, "external pixel array");
 }
@@ -256,16 +270,37 @@
           descs->GetCallbacksObject(i)->ShortPrint(out);
           PrintF(out, " (callback)\n");
           break;
+        case ELEMENTS_TRANSITION: {
+          PrintF(out, "(elements transition to ");
+          Object* descriptor_contents = descs->GetValue(i);
+          if (descriptor_contents->IsMap()) {
+            Map* map = Map::cast(descriptor_contents);
+            PrintElementsKind(out, map->elements_kind());
+          } else {
+            FixedArray* map_array = FixedArray::cast(descriptor_contents);
+            for (int j = 0; j < map_array->length(); ++j) {
+              Map* map = Map::cast(map_array->get(j));
+              if (j != 0) {
+                PrintF(out, ", ");
+              }
+              PrintElementsKind(out, map->elements_kind());
+            }
+          }
+          PrintF(out, ")\n");
+          break;
+        }
         case MAP_TRANSITION:
-          PrintF(out, " (map transition)\n");
+          PrintF(out, "(map transition)\n");
           break;
         case CONSTANT_TRANSITION:
-          PrintF(out, " (constant transition)\n");
+          PrintF(out, "(constant transition)\n");
           break;
         case NULL_DESCRIPTOR:
-          PrintF(out, " (null descriptor)\n");
+          PrintF(out, "(null descriptor)\n");
           break;
-        default:
+        case NORMAL:  // only in slow mode
+        case HANDLER:  // only in lookup results, not in descriptors
+        case INTERCEPTOR:  // only in lookup results, not in descriptors
           UNREACHABLE();
           break;
       }
@@ -277,7 +312,10 @@
 
 
 void JSObject::PrintElements(FILE* out) {
-  switch (GetElementsKind()) {
+  // Don't call GetElementsKind; its validation code can cause the printer to
+  // fail when debugging.
+  switch (map()->elements_kind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       // Print in array notation for non-sparse arrays.
       FixedArray* p = FixedArray::cast(elements());
@@ -385,8 +423,13 @@
 
 void JSObject::JSObjectPrint(FILE* out) {
   PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - prototype = %p\n", reinterpret_cast<void*>(GetPrototype()));
+  PrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
+  // Don't call GetElementsKind; its validation code can cause the printer to
+  // fail when debugging.
+  PrintElementsKind(out, this->map()->elements_kind());
+  PrintF(out,
+         "]\n - prototype = %p\n",
+         reinterpret_cast<void*>(GetPrototype()));
   PrintF(out, " {\n");
   PrintProperties(out);
   PrintElements(out);
@@ -406,6 +449,9 @@
     case EXTERNAL_ASCII_SYMBOL_TYPE:
     case EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
     case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
+    case SHORT_EXTERNAL_ASCII_SYMBOL_TYPE:
+    case SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
+    case SHORT_EXTERNAL_SYMBOL_TYPE: return "SHORT_EXTERNAL_SYMBOL";
     case ASCII_STRING_TYPE: return "ASCII_STRING";
     case STRING_TYPE: return "TWO_BYTE_STRING";
     case CONS_STRING_TYPE:
@@ -413,8 +459,12 @@
     case EXTERNAL_ASCII_STRING_TYPE:
     case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
     case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
+    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+    case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
+    case SHORT_EXTERNAL_STRING_TYPE: return "SHORT_EXTERNAL_STRING";
     case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
     case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
+    case FREE_SPACE_TYPE: return "FREE_SPACE";
     case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY";
     case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
     case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
@@ -458,7 +508,9 @@
   PrintF(out, " - type: %s\n", TypeToString(instance_type()));
   PrintF(out, " - instance size: %d\n", instance_size());
   PrintF(out, " - inobject properties: %d\n", inobject_properties());
-  PrintF(out, " - pre-allocated property fields: %d\n",
+  PrintF(out, " - elements kind: ");
+  PrintElementsKind(out, elements_kind());
+  PrintF(out, "\n - pre-allocated property fields: %d\n",
       pre_allocated_property_fields());
   PrintF(out, " - unused property fields: %d\n", unused_property_fields());
   if (is_hidden_prototype()) {
@@ -505,6 +557,21 @@
 }
 
 
+void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "TypeFeedbackInfo");
+  PrintF(out, "\n - ic_total_count: %d, ic_with_typeinfo_count: %d",
+         ic_total_count(), ic_with_typeinfo_count());
+  PrintF(out, "\n - type_feedback_cells: ");
+  type_feedback_cells()->FixedArrayPrint(out);
+}
+
+
+void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "AliasedArgumentsEntry");
+  PrintF(out, "\n - aliased_context_slot: %d", aliased_context_slot());
+}
+
+
 void FixedArray::FixedArrayPrint(FILE* out) {
   HeapObject::PrintHeader(out, "FixedArray");
   PrintF(out, " - length: %d", length());
@@ -516,6 +583,20 @@
 }
 
 
+void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "FixedDoubleArray");
+  PrintF(out, " - length: %d", length());
+  for (int i = 0; i < length(); i++) {
+    if (is_the_hole(i)) {
+      PrintF(out, "\n  [%d]: <the hole>", i);
+    } else {
+      PrintF(out, "\n  [%d]: %g", i, get_scalar(i));
+    }
+  }
+  PrintF(out, "\n");
+}
+
+
 void JSValue::JSValuePrint(FILE* out) {
   HeapObject::PrintHeader(out, "ValueObject");
   value()->Print(out);
@@ -568,7 +649,7 @@
 
 
 // This method is only meant to be called from gdb for debugging purposes.
-// Since the string can also be in two-byte encoding, non-ascii characters
+// Since the string can also be in two-byte encoding, non-ASCII characters
 // will be ignored in the output.
 char* String::ToAsciiArray() {
   // Static so that subsequent calls free previously allocated space.
@@ -582,11 +663,37 @@
 }
 
 
+static const char* const weekdays[] = {
+  "???", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
+};
+
+void JSDate::JSDatePrint(FILE* out) {
+  HeapObject::PrintHeader(out, "JSDate");
+  PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+  PrintF(out, " - value = ");
+  value()->Print(out);
+  if (!year()->IsSmi()) {
+    PrintF(out, " - time = NaN\n");
+  } else {
+    PrintF(out, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
+           weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0],
+           year()->IsSmi() ? Smi::cast(year())->value() : -1,
+           month()->IsSmi() ? Smi::cast(month())->value() : -1,
+           day()->IsSmi() ? Smi::cast(day())->value() : -1,
+           hour()->IsSmi() ? Smi::cast(hour())->value() : -1,
+           min()->IsSmi() ? Smi::cast(min())->value() : -1,
+           sec()->IsSmi() ? Smi::cast(sec())->value() : -1);
+  }
+}
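+
+// The weekday lookup above shifts a cached weekday Smi (0..6) up by one
+// because index 0 of the table is reserved for the unknown marker; any
+// non-Smi cache slot falls back to "???". A standalone sketch of the same
+// indexing, with a plain int standing in for the Smi check:
+//
+//   static const char* const kWeekdays[] = {
+//     "???", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
+//   };
+//
+//   // -1 plays the role of a non-Smi (invalidated) cache slot.
+//   static const char* WeekdayName(int cached_weekday) {
+//     bool valid = cached_weekday >= 0 && cached_weekday <= 6;
+//     return kWeekdays[valid ? cached_weekday + 1 : 0];
+//   }
+//
+//   WeekdayName(0)  -> "Sun"
+//   WeekdayName(-1) -> "???"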
+
+
 void JSProxy::JSProxyPrint(FILE* out) {
   HeapObject::PrintHeader(out, "JSProxy");
   PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
   PrintF(out, " - handler = ");
   handler()->Print(out);
+  PrintF(out, " - hash = ");
+  hash()->Print(out);
   PrintF(out, "\n");
 }
 
@@ -607,7 +714,6 @@
 void JSWeakMap::JSWeakMapPrint(FILE* out) {
   HeapObject::PrintHeader(out, "JSWeakMap");
   PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - number of elements = %d\n", table()->NumberOfElements());
   PrintF(out, " - table = ");
   table()->ShortPrint(out);
   PrintF(out, "\n");
@@ -707,7 +813,7 @@
 
 
 void Foreign::ForeignPrint(FILE* out) {
-  PrintF(out, "foreign address : %p", address());
+  PrintF(out, "foreign address : %p", foreign_address());
 }
 
 
@@ -726,6 +832,15 @@
 }
 
 
+void AccessorPair::AccessorPairPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "AccessorPair");
+  PrintF(out, "\n - getter: ");
+  getter()->ShortPrint(out);
+  PrintF(out, "\n - setter: ");
+  setter()->ShortPrint(out);
+}
+
+
 void AccessCheckInfo::AccessCheckInfoPrint(FILE* out) {
   HeapObject::PrintHeader(out, "AccessCheckInfo");
   PrintF(out, "\n - named_callback: ");
@@ -802,10 +917,15 @@
 
 void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
   HeapObject::PrintHeader(out, "ObjectTemplateInfo");
+  PrintF(out, " - tag: ");
+  tag()->ShortPrint(out);
+  PrintF(out, "\n - property_list: ");
+  property_list()->ShortPrint(out);
   PrintF(out, "\n - constructor: ");
   constructor()->ShortPrint(out);
   PrintF(out, "\n - internal_field_count: ");
   internal_field_count()->ShortPrint(out);
+  PrintF(out, "\n");
 }
 
 
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
new file mode 100644
index 0000000..627d1bc
--- /dev/null
+++ b/src/objects-visiting-inl.h
@@ -0,0 +1,155 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_OBJECTS_VISITING_INL_H_
+#define V8_OBJECTS_VISITING_INL_H_
+
+
+namespace v8 {
+namespace internal {
+
+template<typename StaticVisitor>
+void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
+  table_.Register(kVisitShortcutCandidate,
+                  &FixedBodyVisitor<StaticVisitor,
+                  ConsString::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitConsString,
+                  &FixedBodyVisitor<StaticVisitor,
+                  ConsString::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitSlicedString,
+                  &FixedBodyVisitor<StaticVisitor,
+                  SlicedString::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitFixedArray,
+                  &FlexibleBodyVisitor<StaticVisitor,
+                  FixedArray::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+
+  table_.Register(kVisitGlobalContext,
+                  &FixedBodyVisitor<StaticVisitor,
+                  Context::ScavengeBodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitByteArray, &VisitByteArray);
+
+  table_.Register(kVisitSharedFunctionInfo,
+                  &FixedBodyVisitor<StaticVisitor,
+                  SharedFunctionInfo::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
+
+  table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
+
+  table_.Register(kVisitJSFunction,
+                  &JSObjectVisitor::
+                      template VisitSpecialized<JSFunction::kSize>);
+
+  table_.Register(kVisitFreeSpace, &VisitFreeSpace);
+
+  table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
+
+  table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
+
+  table_.template RegisterSpecializations<DataObjectVisitor,
+                                          kVisitDataObject,
+                                          kVisitDataObjectGeneric>();
+
+  table_.template RegisterSpecializations<JSObjectVisitor,
+                                          kVisitJSObject,
+                                          kVisitJSObjectGeneric>();
+  table_.template RegisterSpecializations<StructVisitor,
+                                          kVisitStruct,
+                                          kVisitStructGeneric>();
+}
+
+
+void Code::CodeIterateBody(ObjectVisitor* v) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  // There are two places where we iterate code bodies: here and the
+  // templated CodeIterateBody (below).  They should be kept in sync.
+  IteratePointer(v, kRelocationInfoOffset);
+  IteratePointer(v, kHandlerTableOffset);
+  IteratePointer(v, kDeoptimizationDataOffset);
+  IteratePointer(v, kTypeFeedbackInfoOffset);
+
+  RelocIterator it(this, mode_mask);
+  for (; !it.done(); it.next()) {
+    it.rinfo()->Visit(v);
+  }
+}
+
+
+template<typename StaticVisitor>
+void Code::CodeIterateBody(Heap* heap) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  // There are two places where we iterate code bodies: here and the
+  // non-templated CodeIterateBody (above).  They should be kept in sync.
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
+
+  RelocIterator it(this, mode_mask);
+  for (; !it.done(); it.next()) {
+    it.rinfo()->template Visit<StaticVisitor>(heap);
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_OBJECTS_VISITING_INL_H_
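// The new header moves Initialize() out of line but keeps the same shape:
// a table of callbacks indexed by VisitorId, so visiting an object costs
// one table load plus an indirect call. A much-reduced sketch of that
// dispatch-table pattern (all names below are illustrative, not V8's):

struct Object;
typedef int (*VisitFn)(Object* obj);  // returns the visited object's size

enum VisitorId { kVisitString, kVisitArray, kVisitorIdCount };

class VisitorTable {
 public:
  void Register(VisitorId id, VisitFn fn) { callbacks_[id] = fn; }
  int Visit(VisitorId id, Object* obj) { return callbacks_[id](obj); }
 private:
  VisitFn callbacks_[kVisitorIdCount];
};

static int VisitString(Object*) { return 16; }  // stand-in body visitors
static int VisitArray(Object*) { return 32; }

static VisitorTable visitor_table;

void InitializeVisitors() {
  visitor_table.Register(kVisitString, &VisitString);
  visitor_table.Register(kVisitArray, &VisitArray);
}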
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index 0aa21dd..c7c8a87 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -64,7 +64,7 @@
       case kExternalStringTag:
         return GetVisitorIdForSize(kVisitDataObject,
                                    kVisitDataObjectGeneric,
-                                   ExternalString::kSize);
+                                   instance_size);
     }
     UNREACHABLE();
   }
@@ -73,6 +73,9 @@
     case BYTE_ARRAY_TYPE:
       return kVisitByteArray;
 
+    case FREE_SPACE_TYPE:
+      return kVisitFreeSpace;
+
     case FIXED_ARRAY_TYPE:
       return kVisitFixedArray;
 
@@ -91,6 +94,16 @@
     case JS_GLOBAL_PROPERTY_CELL_TYPE:
       return kVisitPropertyCell;
 
+    case JS_SET_TYPE:
+      return GetVisitorIdForSize(kVisitStruct,
+                                 kVisitStructGeneric,
+                                 JSSet::kSize);
+
+    case JS_MAP_TYPE:
+      return GetVisitorIdForSize(kVisitStruct,
+                                 kVisitStructGeneric,
+                                 JSMap::kSize);
+
     case JS_WEAK_MAP_TYPE:
       return kVisitJSWeakMap;
 
@@ -121,6 +134,7 @@
     case JS_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_VALUE_TYPE:
+    case JS_DATE_TYPE:
     case JS_ARRAY_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 4ce1bd0..26e79ae 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -30,22 +30,6 @@
 
 #include "allocation.h"
 
-#if V8_TARGET_ARCH_IA32
-#include "ia32/assembler-ia32.h"
-#include "ia32/assembler-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/assembler-x64.h"
-#include "x64/assembler-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/assembler-arm.h"
-#include "arm/assembler-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/assembler-mips.h"
-#include "mips/assembler-mips-inl.h"
-#else
-#error Unsupported target architecture.
-#endif
-
 // This file provides base classes and auxiliary methods for defining
 // static object visitors used during GC.
 // Visiting HeapObject body with a normal ObjectVisitor requires performing
@@ -67,6 +51,7 @@
     kVisitSeqTwoByteString,
     kVisitShortcutCandidate,
     kVisitByteArray,
+    kVisitFreeSpace,
     kVisitFixedArray,
     kVisitFixedDoubleArray,
     kVisitGlobalContext,
@@ -150,7 +135,7 @@
            (base == kVisitJSObject));
     ASSERT(IsAligned(object_size, kPointerSize));
     ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
-    ASSERT(object_size < Page::kMaxHeapObjectSize);
+    ASSERT(object_size < Page::kMaxNonCodeHeapObjectSize);
 
     const VisitorId specialization = static_cast<VisitorId>(
         base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
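// The index arithmetic above hands out one specialized visitor slot per
// word of object size, starting at `base`. A worked sketch under assumed
// constants (32-bit pointers, so kPointerSizeLog2 == 2, and
// kMinObjectSizeInWords == 2; neither value is shown in this hunk):

const int kAssumedPointerSizeLog2 = 2;       // assumption: 32-bit pointers
const int kAssumedMinObjectSizeInWords = 2;  // assumption

inline int SpecializationFor(int base, int object_size) {
  return base + (object_size >> kAssumedPointerSizeLog2)
              - kAssumedMinObjectSizeInWords;
}

// SpecializationFor(base, 8)  == base      (2-word object, first slot)
// SpecializationFor(base, 16) == base + 2  (4-word object, third slot)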
@@ -172,6 +157,10 @@
     }
   }
 
+  inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
+    return reinterpret_cast<Callback>(callbacks_[id]);
+  }
+
   inline Callback GetVisitor(Map* map) {
     return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
   }
@@ -236,7 +225,7 @@
   static inline ReturnType Visit(Map* map, HeapObject* object) {
     int object_size = BodyDescriptor::SizeOf(map, object);
     BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         BodyDescriptor::kStartOffset,
         object_size);
@@ -247,7 +236,7 @@
   static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
     ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
     BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         BodyDescriptor::kStartOffset,
         object_size);
@@ -261,7 +250,7 @@
  public:
   static inline ReturnType Visit(Map* map, HeapObject* object) {
     BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         BodyDescriptor::kStartOffset,
         BodyDescriptor::kEndOffset);
@@ -289,63 +278,7 @@
 template<typename StaticVisitor>
 class StaticNewSpaceVisitor : public StaticVisitorBase {
  public:
-  static void Initialize() {
-    table_.Register(kVisitShortcutCandidate,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      ConsString::BodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitConsString,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      ConsString::BodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitSlicedString,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      SlicedString::BodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitFixedArray,
-                    &FlexibleBodyVisitor<StaticVisitor,
-                                         FixedArray::BodyDescriptor,
-                                         int>::Visit);
-
-    table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
-
-    table_.Register(kVisitGlobalContext,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      Context::ScavengeBodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitByteArray, &VisitByteArray);
-
-    table_.Register(kVisitSharedFunctionInfo,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      SharedFunctionInfo::BodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitJSWeakMap, &VisitJSObject);
-
-    table_.Register(kVisitJSRegExp, &VisitJSObject);
-
-    table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
-
-    table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
-
-    table_.Register(kVisitJSFunction,
-                    &JSObjectVisitor::
-                        template VisitSpecialized<JSFunction::kSize>);
-
-    table_.RegisterSpecializations<DataObjectVisitor,
-                                   kVisitDataObject,
-                                   kVisitDataObjectGeneric>();
-    table_.RegisterSpecializations<JSObjectVisitor,
-                                   kVisitJSObject,
-                                   kVisitJSObjectGeneric>();
-    table_.RegisterSpecializations<StructVisitor,
-                                   kVisitStruct,
-                                   kVisitStructGeneric>();
-  }
+  static void Initialize();
 
   static inline int IterateBody(Map* map, HeapObject* obj) {
     return table_.GetVisitor(map)(map, obj);
@@ -379,6 +312,10 @@
         SeqTwoByteStringSize(map->instance_type());
   }
 
+  static inline int VisitFreeSpace(Map* map, HeapObject* object) {
+    return FreeSpace::cast(object)->Size();
+  }
+
   class DataObjectVisitor {
    public:
     template<int object_size>
@@ -410,55 +347,6 @@
   StaticNewSpaceVisitor<StaticVisitor>::table_;
 
 
-void Code::CodeIterateBody(ObjectVisitor* v) {
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
-                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
-  // Use the relocation info pointer before it is visited by
-  // the heap compaction in the next statement.
-  RelocIterator it(this, mode_mask);
-
-  IteratePointer(v, kRelocationInfoOffset);
-  IteratePointer(v, kDeoptimizationDataOffset);
-
-  for (; !it.done(); it.next()) {
-    it.rinfo()->Visit(v);
-  }
-}
-
-
-template<typename StaticVisitor>
-void Code::CodeIterateBody(Heap* heap) {
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
-                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
-  // Use the relocation info pointer before it is visited by
-  // the heap compaction in the next statement.
-  RelocIterator it(this, mode_mask);
-
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
-
-  for (; !it.done(); it.next()) {
-    it.rinfo()->template Visit<StaticVisitor>(heap);
-  }
-}
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_OBJECTS_VISITING_H_
diff --git a/src/objects.cc b/src/objects.cc
index 88ebbf4..64d85a0 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,13 +33,16 @@
 #include "codegen.h"
 #include "debug.h"
 #include "deoptimizer.h"
+#include "date.h"
 #include "elements.h"
 #include "execution.h"
 #include "full-codegen.h"
 #include "hydrogen.h"
 #include "objects-inl.h"
 #include "objects-visiting.h"
+#include "objects-visiting-inl.h"
 #include "macro-assembler.h"
+#include "mark-compact.h"
 #include "safepoint-table.h"
 #include "string-stream.h"
 #include "utils.h"
@@ -53,10 +56,11 @@
 namespace v8 {
 namespace internal {
 
-// Getters and setters are stored in a fixed array property.  These are
-// constants for their indices.
-const int kGetterIndex = 0;
-const int kSetterIndex = 1;
+void PrintElementsKind(FILE* out, ElementsKind kind) {
+  ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+  PrintF(out, "%s", accessor->name());
+}
+
 
 MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
                                                   Object* value) {
@@ -132,34 +136,27 @@
 
 void Object::Lookup(String* name, LookupResult* result) {
   Object* holder = NULL;
-  if (IsSmi()) {
-    Context* global_context = Isolate::Current()->context()->global_context();
-    holder = global_context->number_function()->instance_prototype();
+  if (IsJSReceiver()) {
+    holder = this;
   } else {
-    HeapObject* heap_object = HeapObject::cast(this);
-    if (heap_object->IsJSObject()) {
-      return JSObject::cast(this)->Lookup(name, result);
-    } else if (heap_object->IsJSProxy()) {
-      return result->HandlerResult();
-    }
     Context* global_context = Isolate::Current()->context()->global_context();
-    if (heap_object->IsString()) {
-      holder = global_context->string_function()->instance_prototype();
-    } else if (heap_object->IsHeapNumber()) {
+    if (IsNumber()) {
       holder = global_context->number_function()->instance_prototype();
-    } else if (heap_object->IsBoolean()) {
+    } else if (IsString()) {
+      holder = global_context->string_function()->instance_prototype();
+    } else if (IsBoolean()) {
       holder = global_context->boolean_function()->instance_prototype();
     }
   }
   ASSERT(holder != NULL);  // Cannot handle null or undefined.
-  JSObject::cast(holder)->Lookup(name, result);
+  JSReceiver::cast(holder)->Lookup(name, result);
 }
 
 
 MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
                                              String* name,
                                              PropertyAttributes* attributes) {
-  LookupResult result;
+  LookupResult result(name->GetIsolate());
   Lookup(name, &result);
   MaybeObject* value = GetProperty(receiver, &result, name, attributes);
   ASSERT(*attributes <= ABSENT);
@@ -167,10 +164,9 @@
 }
 
 
-MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
-                                             Object* structure,
-                                             String* name,
-                                             Object* holder) {
+MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
+                                               Object* structure,
+                                               String* name) {
   Isolate* isolate = name->GetIsolate();
   // To accommodate both the old and the new api we switch on the
   // data structure used to store the callbacks.  Eventually foreign
@@ -178,7 +174,7 @@
   if (structure->IsForeign()) {
     AccessorDescriptor* callback =
         reinterpret_cast<AccessorDescriptor*>(
-            Foreign::cast(structure)->address());
+            Foreign::cast(structure)->foreign_address());
     MaybeObject* value = (callback->getter)(receiver, callback->data);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return value;
@@ -191,10 +187,9 @@
     v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
     HandleScope scope(isolate);
     JSObject* self = JSObject::cast(receiver);
-    JSObject* holder_handle = JSObject::cast(holder);
     Handle<String> key(name);
     LOG(isolate, ApiNamedPropertyAccess("load", self, name));
-    CustomArguments args(isolate, data->data(), self, holder_handle);
+    CustomArguments args(isolate, data->data(), self, this);
     v8::AccessorInfo info(args.end());
     v8::Handle<v8::Value> result;
     {
@@ -210,11 +205,11 @@
   }
 
   // __defineGetter__ callback
-  if (structure->IsFixedArray()) {
-    Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
-    if (getter->IsJSFunction()) {
-      return Object::GetPropertyWithDefinedGetter(receiver,
-                                                  JSFunction::cast(getter));
+  if (structure->IsAccessorPair()) {
+    Object* getter = AccessorPair::cast(structure)->getter();
+    if (getter->IsSpecFunction()) {
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
     }
     // Getter is not a function.
     return isolate->heap()->undefined_value();
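// This hunk (and its setter counterpart further down) retires the old
// convention of storing a getter/setter pair in a FixedArray at indices
// 0 and 1 in favour of a dedicated AccessorPair, so the two roles are
// named rather than positional. A toy before/after with placeholder types:

struct Object;

// Before: positional slots in a generic array; callers must remember
// that slot 0 is the getter and slot 1 the setter.
struct FixedPairSketch { Object* slots[2]; };

// After: the roles are part of the type.
struct AccessorPairSketch {
  Object* getter;
  Object* setter;
};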
@@ -225,50 +220,75 @@
 }
 
 
-MaybeObject* Object::GetPropertyWithHandler(Object* receiver_raw,
-                                            String* name_raw,
-                                            Object* handler_raw) {
-  Isolate* isolate = name_raw->GetIsolate();
+MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
+                                             String* name_raw) {
+  Isolate* isolate = GetIsolate();
   HandleScope scope(isolate);
   Handle<Object> receiver(receiver_raw);
   Handle<Object> name(name_raw);
-  Handle<Object> handler(handler_raw);
 
-  // Extract trap function.
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("get");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { receiver, name };
+  Handle<Object> result = CallTrap(
+    "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return Failure::Exception();
-  if (trap->IsUndefined()) {
-    // Get the derived `get' property.
-    trap = isolate->derived_get_trap();
-  }
-
-  // Call trap function.
-  Object** args[] = { receiver.location(), name.location() };
-  bool has_exception;
-  Handle<Object> result =
-      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
-  if (has_exception) return Failure::Exception();
 
   return *result;
 }
 
 
+Handle<Object> Object::GetElement(Handle<Object> object, uint32_t index) {
+  Isolate* isolate = object->IsHeapObject()
+      ? Handle<HeapObject>::cast(object)->GetIsolate()
+      : Isolate::Current();
+  CALL_HEAP_FUNCTION(isolate, object->GetElement(index), Object);
+}
+
+
+MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
+                                            uint32_t index) {
+  String* name;
+  MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+  if (!maybe->To<String>(&name)) return maybe;
+  return GetPropertyWithHandler(receiver, name);
+}
+
+
+MaybeObject* JSProxy::SetElementWithHandler(uint32_t index,
+                                            Object* value,
+                                            StrictModeFlag strict_mode) {
+  String* name;
+  MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+  if (!maybe->To<String>(&name)) return maybe;
+  return SetPropertyWithHandler(name, value, NONE, strict_mode);
+}
+
+
+bool JSProxy::HasElementWithHandler(uint32_t index) {
+  String* name;
+  MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+  if (!maybe->To<String>(&name)) return maybe;
+  return HasPropertyWithHandler(name);
+}
+
+
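+// All three proxy element helpers above reduce an indexed access to the
+// named-property path by stringifying the index first. (Note that
+// HasElementWithHandler's failure branch returns the raw MaybeObject*
+// through a bool return type, relying on pointer-to-bool conversion.)
+// A sketch of the same delegation shape outside V8, with a hypothetical
+// Proxy type:
+//
+//   #include <cstdint>
+//   #include <string>
+//
+//   struct ProxySketch {
+//     bool HasProperty(const std::string& name);  // named-property path
+//
+//     // Elements are just properties whose names are numeric strings,
+//     // so convert the index and delegate.
+//     bool HasElement(uint32_t index) {
+//       return HasProperty(std::to_string(index));
+//     }
+//   };
+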
 MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
-                                                  JSFunction* getter) {
+                                                  JSReceiver* getter) {
   HandleScope scope;
-  Handle<JSFunction> fun(JSFunction::cast(getter));
+  Handle<JSReceiver> fun(getter);
   Handle<Object> self(receiver);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug* debug = fun->GetHeap()->isolate()->debug();
   // Handle stepping into a getter if step into is active.
-  if (debug->StepInActive()) {
-    debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
+  // TODO(rossberg): should this apply to getters that are function proxies?
+  if (debug->StepInActive() && fun->IsJSFunction()) {
+    debug->HandleStepIn(
+        Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
   }
 #endif
+
   bool has_pending_exception;
   Handle<Object> result =
-      Execution::Call(fun, self, 0, NULL, &has_pending_exception);
+      Execution::Call(fun, self, 0, NULL, &has_pending_exception, true);
   // Check for pending exception and return the result.
   if (has_pending_exception) return Failure::Exception();
   return *result;
@@ -290,10 +310,8 @@
           AccessorInfo* info = AccessorInfo::cast(obj);
           if (info->all_can_read()) {
             *attributes = result->GetAttributes();
-            return GetPropertyWithCallback(receiver,
-                                           result->GetCallbackObject(),
-                                           name,
-                                           result->holder());
+            return result->holder()->GetPropertyWithCallback(
+                receiver, result->GetCallbackObject(), name);
           }
         }
         break;
@@ -302,7 +320,7 @@
       case FIELD:
       case CONSTANT_FUNCTION: {
         // Search ALL_CAN_READ accessors in prototype chain.
-        LookupResult r;
+        LookupResult r(GetIsolate());
         result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
         if (r.IsProperty()) {
           return GetPropertyWithFailedAccessCheck(receiver,
@@ -315,7 +333,7 @@
       case INTERCEPTOR: {
         // If the object has an interceptor, try real named properties.
         // No access check in GetPropertyAttributeWithInterceptor.
-        LookupResult r;
+        LookupResult r(GetIsolate());
         result->holder()->LookupRealNamedProperty(name, &r);
         if (r.IsProperty()) {
           return GetPropertyWithFailedAccessCheck(receiver,
@@ -362,7 +380,7 @@
       case CONSTANT_FUNCTION: {
         if (!continue_search) break;
         // Search ALL_CAN_READ accessors in prototype chain.
-        LookupResult r;
+        LookupResult r(GetIsolate());
         result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
         if (r.IsProperty()) {
           return GetPropertyAttributeWithFailedAccessCheck(receiver,
@@ -376,7 +394,7 @@
       case INTERCEPTOR: {
         // If the object has an interceptor, try real named properties.
         // No access check in GetPropertyAttributeWithInterceptor.
-        LookupResult r;
+        LookupResult r(GetIsolate());
         if (continue_search) {
           result->holder()->LookupRealNamedProperty(name, &r);
         } else {
@@ -396,7 +414,7 @@
     }
   }
 
-  GetHeap()->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+  GetIsolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
   return ABSENT;
 }
 
@@ -426,6 +444,16 @@
 }
 
 
+Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object,
+                                               Handle<String> key,
+                                               Handle<Object> value,
+                                               PropertyDetails details) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->SetNormalizedProperty(*key, *value, details),
+                     Object);
+}
+
+
 MaybeObject* JSObject::SetNormalizedProperty(String* name,
                                              Object* value,
                                              PropertyDetails details) {
@@ -486,7 +514,7 @@
       }
       JSGlobalPropertyCell* cell =
           JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
-      cell->set_value(cell->heap()->the_hole_value());
+      cell->set_value(cell->GetHeap()->the_hole_value());
       dictionary->DetailsAtPut(entry, details.AsDeleted());
     } else {
       Object* deleted = dictionary->DeleteProperty(entry, mode);
@@ -520,6 +548,21 @@
 }
 
 
+Handle<Object> Object::GetProperty(Handle<Object> object,
+                                   Handle<Object> receiver,
+                                   LookupResult* result,
+                                   Handle<String> key,
+                                   PropertyAttributes* attributes) {
+  Isolate* isolate = object->IsHeapObject()
+      ? Handle<HeapObject>::cast(object)->GetIsolate()
+      : Isolate::Current();
+  CALL_HEAP_FUNCTION(
+      isolate,
+      object->GetProperty(*receiver, result, *key, attributes),
+      Object);
+}
+
+
 MaybeObject* Object::GetProperty(Object* receiver,
                                  LookupResult* result,
                                  String* name,
@@ -537,7 +580,9 @@
   // holder in the prototype chain.
   // Proxy handlers do not use the proxy's prototype, so we can skip this.
   if (!result->IsHandler()) {
-    Object* last = result->IsProperty() ? result->holder() : heap->null_value();
+    Object* last = result->IsProperty()
+        ? result->holder()
+        : Object::cast(heap->null_value());
     ASSERT(this != this->GetPrototype());
     for (Object* current = this; true; current = current->GetPrototype()) {
       if (current->IsAccessCheckNeeded()) {
@@ -566,30 +611,26 @@
   }
   *attributes = result->GetAttributes();
   Object* value;
-  JSObject* holder = result->holder();
   switch (result->type()) {
     case NORMAL:
-      value = holder->GetNormalizedProperty(result);
+      value = result->holder()->GetNormalizedProperty(result);
       ASSERT(!value->IsTheHole() || result->IsReadOnly());
       return value->IsTheHole() ? heap->undefined_value() : value;
     case FIELD:
-      value = holder->FastPropertyAt(result->GetFieldIndex());
+      value = result->holder()->FastPropertyAt(result->GetFieldIndex());
       ASSERT(!value->IsTheHole() || result->IsReadOnly());
       return value->IsTheHole() ? heap->undefined_value() : value;
     case CONSTANT_FUNCTION:
       return result->GetConstantFunction();
     case CALLBACKS:
-      return GetPropertyWithCallback(receiver,
-                                     result->GetCallbackObject(),
-                                     name,
-                                     holder);
-    case HANDLER: {
-      JSProxy* proxy = JSProxy::cast(this);
-      return GetPropertyWithHandler(receiver, name, proxy->handler());
-    }
+      return result->holder()->GetPropertyWithCallback(
+          receiver, result->GetCallbackObject(), name);
+    case HANDLER:
+      return result->proxy()->GetPropertyWithHandler(receiver, name);
     case INTERCEPTOR: {
       JSObject* recvr = JSObject::cast(receiver);
-      return holder->GetPropertyWithInterceptor(recvr, name, attributes);
+      return result->holder()->GetPropertyWithInterceptor(
+          recvr, name, attributes);
     }
     case MAP_TRANSITION:
     case ELEMENTS_TRANSITION:
@@ -613,28 +654,21 @@
   for (holder = this;
        holder != heap->null_value();
        holder = holder->GetPrototype()) {
-    if (holder->IsSmi()) {
-      Context* global_context = Isolate::Current()->context()->global_context();
-      holder = global_context->number_function()->instance_prototype();
-    } else {
-      HeapObject* heap_object = HeapObject::cast(holder);
-      if (!heap_object->IsJSObject()) {
-        Isolate* isolate = heap->isolate();
-        Context* global_context = isolate->context()->global_context();
-        if (heap_object->IsString()) {
-          holder = global_context->string_function()->instance_prototype();
-        } else if (heap_object->IsHeapNumber()) {
-          holder = global_context->number_function()->instance_prototype();
-        } else if (heap_object->IsBoolean()) {
-          holder = global_context->boolean_function()->instance_prototype();
-        } else if (heap_object->IsJSProxy()) {
-          // TODO(rossberg): do something
-          return heap->undefined_value();  // For now...
-        } else {
-          // Undefined and null have no indexed properties.
-          ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
-          return heap->undefined_value();
-        }
+    if (!holder->IsJSObject()) {
+      Isolate* isolate = heap->isolate();
+      Context* global_context = isolate->context()->global_context();
+      if (holder->IsNumber()) {
+        holder = global_context->number_function()->instance_prototype();
+      } else if (holder->IsString()) {
+        holder = global_context->string_function()->instance_prototype();
+      } else if (holder->IsBoolean()) {
+        holder = global_context->boolean_function()->instance_prototype();
+      } else if (holder->IsJSProxy()) {
+        return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
+      } else {
+        // Undefined and null have no indexed properties.
+        ASSERT(holder->IsUndefined() || holder->IsNull());
+        return heap->undefined_value();
       }
     }
 
@@ -658,10 +692,7 @@
 
     if (js_object->elements() != heap->empty_fixed_array()) {
       MaybeObject* result = js_object->GetElementsAccessor()->Get(
-          js_object->elements(),
-          index,
-          js_object,
-          receiver);
+          receiver, js_object, index);
       if (result != heap->the_hole_value()) return result;
     }
   }
@@ -701,6 +732,49 @@
 }
 
 
+MaybeObject* Object::GetHash(CreationFlag flag) {
+  // The object is either a number, a string, an odd-ball,
+  // a real JS object, or a Harmony proxy.
+  if (IsNumber()) {
+    uint32_t hash = ComputeLongHash(double_to_uint64(Number()));
+    return Smi::FromInt(hash & Smi::kMaxValue);
+  }
+  if (IsString()) {
+    uint32_t hash = String::cast(this)->Hash();
+    return Smi::FromInt(hash);
+  }
+  if (IsOddball()) {
+    uint32_t hash = Oddball::cast(this)->to_string()->Hash();
+    return Smi::FromInt(hash);
+  }
+  if (IsJSReceiver()) {
+    return JSReceiver::cast(this)->GetIdentityHash(flag);
+  }
+
+  UNREACHABLE();
+  return Smi::FromInt(0);
+}
+
+
+bool Object::SameValue(Object* other) {
+  if (other == this) return true;
+  if (!IsHeapObject() || !other->IsHeapObject()) return false;
+
+  // The object is either a number, a string, an odd-ball,
+  // a real JS object, or a Harmony proxy.
+  if (IsNumber() && other->IsNumber()) {
+    double this_value = Number();
+    double other_value = other->Number();
+    return (this_value == other_value) ||
+        (isnan(this_value) && isnan(other_value));
+  }
+  if (IsString() && other->IsString()) {
+    return String::cast(this)->Equals(String::cast(other));
+  }
+  return false;
+}
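+
+// SameValue above deliberately diverges from C++ == for numbers: two NaNs
+// compare as the same value (and, as written, the hunk does not separate
+// +0 from -0 for two heap numbers). A minimal standalone sketch of the
+// numeric branch:
+//
+//   #include <cmath>
+//
+//   bool NumbersSameValue(double a, double b) {
+//     // Unlike built-in ==, any two NaNs are the same value.
+//     return (a == b) || (std::isnan(a) && std::isnan(b));
+//   }
+//
+//   NumbersSameValue(NAN, NAN) -> true, while NAN == NAN -> false.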
+
+
 void Object::ShortPrint(FILE* out) {
   HeapStringAllocator allocator;
   StringStream accumulator(&allocator);
@@ -818,7 +892,7 @@
                     len - first_length);
       }
       cs->set_first(result);
-      cs->set_second(heap->empty_string());
+      cs->set_second(heap->empty_string(), SKIP_WRITE_BARRIER);
       return result;
     }
     default:
@@ -844,39 +918,40 @@
 #endif  // DEBUG
   Heap* heap = GetHeap();
   int size = this->Size();  // Byte size of the original string.
-  if (size < ExternalString::kSize) {
-    // The string is too small to fit an external String in its place. This can
-    // only happen for zero length strings.
+  if (size < ExternalString::kShortSize) {
     return false;
   }
-  ASSERT(size >= ExternalString::kSize);
   bool is_ascii = this->IsAsciiRepresentation();
   bool is_symbol = this->IsSymbol();
-  int length = this->length();
-  int hash_field = this->hash_field();
 
   // Morph the object to an external string by adjusting the map and
   // reinitializing the fields.
-  this->set_map(is_ascii ?
-                heap->external_string_with_ascii_data_map() :
-                heap->external_string_map());
-  ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
-  self->set_length(length);
-  self->set_hash_field(hash_field);
-  self->set_resource(resource);
-  // Additionally make the object into an external symbol if the original string
-  // was a symbol to start with.
-  if (is_symbol) {
-    self->Hash();  // Force regeneration of the hash value.
-    // Now morph this external string into a external symbol.
-    this->set_map(is_ascii ?
-                  heap->external_symbol_with_ascii_data_map() :
-                  heap->external_symbol_map());
+  if (size >= ExternalString::kSize) {
+    this->set_map_no_write_barrier(
+        is_symbol
+            ? (is_ascii ?  heap->external_symbol_with_ascii_data_map()
+                        :  heap->external_symbol_map())
+            : (is_ascii ?  heap->external_string_with_ascii_data_map()
+                        :  heap->external_string_map()));
+  } else {
+    this->set_map_no_write_barrier(
+        is_symbol
+            ? (is_ascii ?  heap->short_external_symbol_with_ascii_data_map()
+                        :  heap->short_external_symbol_map())
+            : (is_ascii ?  heap->short_external_string_with_ascii_data_map()
+                        :  heap->short_external_string_map()));
   }
+  ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
+  self->set_resource(resource);
+  if (is_symbol) self->Hash();  // Force regeneration of the hash value.
 
   // Fill the remainder of the string with dead wood.
   int new_size = this->Size();  // Byte size of the external String object.
   heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+    MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+                                               new_size - size);
+  }
   return true;
 }
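// With the short external maps, the morph above now has three size
// classes instead of two. A hedged sketch of the decision; the byte
// values are illustrative stand-ins, the real ExternalString::kSize and
// kShortSize live elsewhere in objects.h:

enum ExternalizeResult { kTooSmall, kShortExternal, kFullExternal };

const int kIllustrativeFullSize = 16;  // stand-in for ExternalString::kSize
const int kIllustrativeShortSize = 8;  // stand-in for kShortSize

ExternalizeResult ClassifyForExternalization(int object_size) {
  if (object_size < kIllustrativeShortSize) return kTooSmall;      // bail
  if (object_size < kIllustrativeFullSize) return kShortExternal;  // short map
  return kFullExternal;  // room for the full external representation
}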
 
@@ -895,34 +970,33 @@
 #endif  // DEBUG
   Heap* heap = GetHeap();
   int size = this->Size();  // Byte size of the original string.
-  if (size < ExternalString::kSize) {
-    // The string is too small to fit an external String in its place. This can
-    // only happen for zero length strings.
+  if (size < ExternalString::kShortSize) {
     return false;
   }
-  ASSERT(size >= ExternalString::kSize);
   bool is_symbol = this->IsSymbol();
-  int length = this->length();
-  int hash_field = this->hash_field();
 
   // Morph the object to an external string by adjusting the map and
-  // reinitializing the fields.
-  this->set_map(heap->external_ascii_string_map());
-  ExternalAsciiString* self = ExternalAsciiString::cast(this);
-  self->set_length(length);
-  self->set_hash_field(hash_field);
-  self->set_resource(resource);
-  // Additionally make the object into an external symbol if the original string
-  // was a symbol to start with.
-  if (is_symbol) {
-    self->Hash();  // Force regeneration of the hash value.
-    // Now morph this external string into a external symbol.
-    this->set_map(heap->external_ascii_symbol_map());
+  // reinitializing the fields.  Use the short version if space is limited.
+  if (size >= ExternalString::kSize) {
+    this->set_map_no_write_barrier(
+        is_symbol ? heap->external_ascii_symbol_map()
+                  : heap->external_ascii_string_map());
+  } else {
+    this->set_map_no_write_barrier(
+        is_symbol ? heap->short_external_ascii_symbol_map()
+                  : heap->short_external_ascii_string_map());
   }
+  ExternalAsciiString* self = ExternalAsciiString::cast(this);
+  self->set_resource(resource);
+  if (is_symbol) self->Hash();  // Force regeneration of the hash value.
 
   // Fill the remainder of the string with dead wood.
   int new_size = this->Size();  // Byte size of the external String object.
   heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+    MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+                                               new_size - size);
+  }
   return true;
 }
 
@@ -994,12 +1068,11 @@
   switch (map()->instance_type()) {
     case JS_ARRAY_TYPE: {
       double length = JSArray::cast(this)->length()->Number();
-      accumulator->Add("<JS array[%u]>", static_cast<uint32_t>(length));
+      accumulator->Add("<JS Array[%u]>", static_cast<uint32_t>(length));
       break;
     }
     case JS_WEAK_MAP_TYPE: {
-      int elements = JSWeakMap::cast(this)->table()->NumberOfElements();
-      accumulator->Add("<JS WeakMap[%d]>", elements);
+      accumulator->Add("<JS WeakMap>");
       break;
     }
     case JS_REGEXP_TYPE: {
@@ -1027,7 +1100,7 @@
     // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
     default: {
       Map* map_of_this = map();
-      Heap* heap = map_of_this->heap();
+      Heap* heap = GetHeap();
       Object* constructor = map_of_this->constructor();
       bool printed = false;
       if (constructor->IsHeapObject() &&
@@ -1049,7 +1122,6 @@
                        global_object ? "Global Object: " : "",
                        vowel ? "n" : "");
                 accumulator->Put(str);
-                accumulator->Put('>');
                 printed = true;
               }
             }
@@ -1070,8 +1142,28 @@
 }
 
 
+void JSObject::PrintElementsTransition(
+    FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
+    ElementsKind to_kind, FixedArrayBase* to_elements) {
+  if (from_kind != to_kind) {
+    PrintF(file, "elements transition [");
+    PrintElementsKind(file, from_kind);
+    PrintF(file, " -> ");
+    PrintElementsKind(file, to_kind);
+    PrintF(file, "] in ");
+    JavaScriptFrame::PrintTop(file, false, true);
+    PrintF(file, " for ");
+    ShortPrint(file);
+    PrintF(file, " from ");
+    from_elements->ShortPrint(file);
+    PrintF(file, " to ");
+    to_elements->ShortPrint(file);
+    PrintF(file, "\n");
+  }
+}
+
+
 void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
-  // if (!HEAP->InNewSpace(this)) PrintF("*", this);
   Heap* heap = GetHeap();
   if (!heap->Contains(this)) {
     accumulator->Add("!!!INVALID POINTER!!!");
@@ -1094,14 +1186,21 @@
   }
   switch (map()->instance_type()) {
     case MAP_TYPE:
-      accumulator->Add("<Map>");
+      accumulator->Add("<Map(elements=%u)>", Map::cast(this)->elements_kind());
       break;
     case FIXED_ARRAY_TYPE:
       accumulator->Add("<FixedArray[%u]>", FixedArray::cast(this)->length());
       break;
+    case FIXED_DOUBLE_ARRAY_TYPE:
+      accumulator->Add("<FixedDoubleArray[%u]>",
+                       FixedDoubleArray::cast(this)->length());
+      break;
     case BYTE_ARRAY_TYPE:
       accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
       break;
+    case FREE_SPACE_TYPE:
+      accumulator->Add("<FreeSpace[%u]>", FreeSpace::cast(this)->Size());
+      break;
     case EXTERNAL_PIXEL_ARRAY_TYPE:
       accumulator->Add("<ExternalPixelArray[%u]>",
                        ExternalPixelArray::cast(this)->length());
@@ -1240,7 +1339,10 @@
     case JS_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_VALUE_TYPE:
+    case JS_DATE_TYPE:
     case JS_ARRAY_TYPE:
+    case JS_SET_TYPE:
+    case JS_MAP_TYPE:
     case JS_WEAK_MAP_TYPE:
     case JS_REGEXP_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
@@ -1277,6 +1379,7 @@
     case HEAP_NUMBER_TYPE:
     case FILLER_TYPE:
     case BYTE_ARRAY_TYPE:
+    case FREE_SPACE_TYPE:
     case EXTERNAL_PIXEL_ARRAY_TYPE:
     case EXTERNAL_BYTE_ARRAY_TYPE:
     case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
@@ -1508,8 +1611,6 @@
     String* name,
     JSFunction* function,
     PropertyAttributes attributes) {
-  ASSERT(!GetHeap()->InNewSpace(function));
-
   // Allocate new instance descriptors with (name, function) added
   ConstantFunctionDescriptor d(name, function, attributes);
   Object* new_descriptors;
@@ -1533,7 +1634,7 @@
 
   // If the old map is the global object map (from new Object()),
   // then transitions are not added to it, so we are done.
-  Heap* heap = old_map->heap();
+  Heap* heap = GetHeap();
   if (old_map == heap->isolate()->context()->global_context()->
       object_function()->map()) {
     return function;
@@ -1609,10 +1710,10 @@
                                    StrictModeFlag strict_mode) {
   ASSERT(!IsJSGlobalProxy());
   Map* map_of_this = map();
-  Heap* heap = map_of_this->heap();
+  Heap* heap = GetHeap();
   if (!map_of_this->is_extensible()) {
     if (strict_mode == kNonStrictMode) {
-      return heap->undefined_value();
+      return value;
     } else {
       Handle<Object> args[1] = {Handle<String>(name)};
       return heap->isolate()->Throw(
@@ -1624,7 +1725,7 @@
     // Ensure the descriptor array does not get too big.
     if (map_of_this->instance_descriptors()->number_of_descriptors() <
         DescriptorArray::kMaxNumberOfDescriptors) {
-      if (value->IsJSFunction() && !heap->InNewSpace(value)) {
+      if (value->IsJSFunction()) {
         return AddConstantFunctionProperty(name,
                                            JSFunction::cast(value),
                                            attributes);
@@ -1651,13 +1752,21 @@
     PropertyAttributes attributes,
     StrictModeFlag strict_mode) {
   // Check local property, ignore interceptor.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsFound()) {
     // An existing property, a map transition or a null descriptor was
     // found.  Use set property to handle all these cases.
     return SetProperty(&result, name, value, attributes, strict_mode);
   }
+  bool found = false;
+  MaybeObject* result_object;
+  result_object = SetPropertyWithCallbackSetterInPrototypes(name,
+                                                            value,
+                                                            attributes,
+                                                            &found,
+                                                            strict_mode);
+  if (found) return result_object;
   // Add a new real property.
   return AddProperty(name, value, attributes, strict_mode);
 }
@@ -1671,7 +1780,7 @@
   int new_enumeration_index = 0;  // 0 means "Use the next available index."
   if (old_index != -1) {
     // All calls to ReplaceSlowProperty have had all transitions removed.
-    ASSERT(!dictionary->DetailsAt(old_index).IsTransition());
+    ASSERT(!dictionary->ContainsTransition(old_index));
     new_enumeration_index = dictionary->DetailsAt(old_index).index();
   }
 
@@ -1696,7 +1805,7 @@
     return result;
   }
   // Do not add transitions to the map of "new Object()".
-  if (map() == old_map->heap()->isolate()->context()->global_context()->
+  if (map() == GetIsolate()->context()->global_context()->
       object_function()->map()) {
     return result;
   }
@@ -1821,11 +1930,22 @@
 }
 
 
+Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
+                                       Handle<String> key,
+                                       Handle<Object> value,
+                                       PropertyAttributes attributes,
+                                       StrictModeFlag strict_mode) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->SetProperty(*key, *value, attributes, strict_mode),
+                     Object);
+}
+
+
 MaybeObject* JSReceiver::SetProperty(String* name,
                                      Object* value,
                                      PropertyAttributes attributes,
                                      StrictModeFlag strict_mode) {
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookup(name, &result);
   return SetProperty(&result, name, value, attributes, strict_mode);
 }
@@ -1850,7 +1970,7 @@
   if (structure->IsForeign()) {
     AccessorDescriptor* callback =
         reinterpret_cast<AccessorDescriptor*>(
-            Foreign::cast(structure)->address());
+            Foreign::cast(structure)->foreign_address());
     MaybeObject* obj = (callback->setter)(this,  value, callback->data);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (obj->IsFailure()) return obj;
@@ -1878,10 +1998,11 @@
     return *value_handle;
   }
 
-  if (structure->IsFixedArray()) {
-    Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
-    if (setter->IsJSFunction()) {
-     return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+  if (structure->IsAccessorPair()) {
+    Object* setter = AccessorPair::cast(structure)->setter();
+    if (setter->IsSpecFunction()) {
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value);
     } else {
       if (strict_mode == kNonStrictMode) {
         return value;
@@ -1900,22 +2021,24 @@
 }
 
 
-MaybeObject* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter,
-                                                    Object* value) {
+MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
+                                                      Object* value) {
   Isolate* isolate = GetIsolate();
   Handle<Object> value_handle(value, isolate);
-  Handle<JSFunction> fun(JSFunction::cast(setter), isolate);
-  Handle<JSObject> self(this, isolate);
+  Handle<JSReceiver> fun(setter, isolate);
+  Handle<JSReceiver> self(this, isolate);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug* debug = isolate->debug();
   // Handle stepping into a setter if step into is active.
-  if (debug->StepInActive()) {
-    debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
+  // TODO(rossberg): should this apply to setters that are function proxies?
+  if (debug->StepInActive() && fun->IsJSFunction()) {
+    debug->HandleStepIn(
+        Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
   }
 #endif
   bool has_pending_exception;
-  Object** argv[] = { value_handle.location() };
-  Execution::Call(fun, self, 1, argv, &has_pending_exception);
+  Handle<Object> argv[] = { value_handle };
+  Execution::Call(fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
   // Check for pending exception and return the result.
   if (has_pending_exception) return Failure::Exception();
   return *value_handle;
@@ -1928,6 +2051,9 @@
   for (Object* pt = GetPrototype();
        pt != heap->null_value();
        pt = pt->GetPrototype()) {
+    if (pt->IsJSProxy()) {
+      return result->HandlerResult(JSProxy::cast(pt));
+    }
     JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
     if (result->IsProperty()) {
       if (result->type() == CALLBACKS && !result->IsReadOnly()) return;
@@ -1948,6 +2074,16 @@
   for (Object* pt = GetPrototype();
        pt != heap->null_value();
        pt = pt->GetPrototype()) {
+    if (pt->IsJSProxy()) {
+      String* name;
+      MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+      if (!maybe->To<String>(&name)) {
+        *found = true;  // Force abort
+        return maybe;
+      }
+      return JSProxy::cast(pt)->SetPropertyWithHandlerIfDefiningSetter(
+          name, value, NONE, strict_mode, found);
+    }
     if (!JSObject::cast(pt)->HasDictionaryElements()) {
       continue;
     }
@@ -1970,6 +2106,48 @@
   return heap->the_hole_value();
 }
 
+MaybeObject* JSObject::SetPropertyWithCallbackSetterInPrototypes(
+    String* name,
+    Object* value,
+    PropertyAttributes attributes,
+    bool* found,
+    StrictModeFlag strict_mode) {
+  Heap* heap = GetHeap();
+  // We could not find a local property, so let's check whether there is an
+  // accessor that wants to handle the property.
+  LookupResult accessor_result(heap->isolate());
+  LookupCallbackSetterInPrototypes(name, &accessor_result);
+  if (accessor_result.IsFound()) {
+    *found = true;
+    if (accessor_result.type() == CALLBACKS) {
+      return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
+                                     name,
+                                     value,
+                                     accessor_result.holder(),
+                                     strict_mode);
+    } else if (accessor_result.type() == HANDLER) {
+      // There is a proxy in the prototype chain. Invoke its
+      // getPropertyDescriptor trap.
+      bool found = false;
+      // SetPropertyWithHandlerIfDefiningSetter can cause GC;
+      // make sure to use the handlified references after calling
+      // the function.
+      Handle<JSObject> self(this);
+      Handle<String> hname(name);
+      Handle<Object> hvalue(value);
+      MaybeObject* result =
+          accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter(
+              name, value, attributes, strict_mode, &found);
+      if (found) return result;
+      // The proxy does not define the property as an accessor.
+      // Consequently, it has no effect on setting the receiver.
+      return self->AddProperty(*hname, *hvalue, attributes, strict_mode);
+    }
+  }
+  *found = false;
+  return heap->the_hole_value();
+}
+
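
Note how the code above handlifies this, name, and value before the trap
call: SetPropertyWithHandlerIfDefiningSetter can run arbitrary JavaScript and
trigger GC, after which raw pointers may be stale. A rough standalone analogy
in plain C++, with vector reallocation standing in for a moving GC (all names
illustrative, not V8 internals):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
      std::vector<int> heap = { 42 };
      int* raw = &heap[0];         // like a raw Object*: may be invalidated
      std::size_t handle = 0;      // like a Handle: indirection survives moves

      heap.resize(10000);          // like a GC: backing storage may move
      (void)raw;                   // dereferencing raw here would be UB

      assert(heap[handle] == 42);  // the handle still reaches the value
      return 0;
    }
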
 
 void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
   DescriptorArray* descriptors = map()->instance_descriptors();
@@ -1986,7 +2164,8 @@
                               String* name,
                               LookupResult* result) {
   DescriptorArray* descriptors = instance_descriptors();
-  DescriptorLookupCache* cache = heap()->isolate()->descriptor_lookup_cache();
+  DescriptorLookupCache* cache =
+      GetHeap()->isolate()->descriptor_lookup_cache();
   int number = cache->Lookup(descriptors, name);
   if (number == DescriptorLookupCache::kAbsent) {
     number = descriptors->Search(name);
@@ -2000,75 +2179,295 @@
 }
 
 
-MaybeObject* Map::GetElementsTransitionMap(ElementsKind elements_kind,
-                                           bool safe_to_add_transition) {
-  Heap* current_heap = heap();
-  DescriptorArray* descriptors = instance_descriptors();
-  String* elements_transition_sentinel_name = current_heap->empty_symbol();
+static bool ContainsMap(MapHandleList* maps, Handle<Map> map) {
+  ASSERT(!map.is_null());
+  for (int i = 0; i < maps->length(); ++i) {
+    if (!maps->at(i).is_null() && maps->at(i).is_identical_to(map)) return true;
+  }
+  return false;
+}
 
-  if (safe_to_add_transition) {
-    // It's only safe to manipulate the descriptor array if it would be
-    // safe to add a transition.
 
-    ASSERT(!is_shared());  // no transitions can be added to shared maps.
-    // Check if the elements transition already exists.
-    DescriptorLookupCache* cache =
-        current_heap->isolate()->descriptor_lookup_cache();
-    int index = cache->Lookup(descriptors, elements_transition_sentinel_name);
-    if (index == DescriptorLookupCache::kAbsent) {
-      index = descriptors->Search(elements_transition_sentinel_name);
-      cache->Update(descriptors,
-                    elements_transition_sentinel_name,
-                    index);
+template <class T>
+static Handle<T> MaybeNull(T* p) {
+  if (p == NULL) return Handle<T>::null();
+  return Handle<T>(p);
+}
+
+
+Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) {
+  ElementsKind elms_kind = elements_kind();
+  if (elms_kind == FAST_DOUBLE_ELEMENTS) {
+    bool dummy = true;
+    Handle<Map> fast_map =
+        MaybeNull(LookupElementsTransitionMap(FAST_ELEMENTS, &dummy));
+    if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
+      return fast_map;
     }
+    return Handle<Map>::null();
+  }
+  if (elms_kind == FAST_SMI_ONLY_ELEMENTS) {
+    bool dummy = true;
+    Handle<Map> double_map =
+        MaybeNull(LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, &dummy));
+    // In the current implementation, if the DOUBLE map doesn't exist, the
+    // FAST map can't exist either.
+    if (double_map.is_null()) return Handle<Map>::null();
+    Handle<Map> fast_map =
+        MaybeNull(double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
+                                                          &dummy));
+    if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
+      return fast_map;
+    }
+    if (ContainsMap(candidates, double_map)) return double_map;
+  }
+  return Handle<Map>::null();
+}
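
FindTransitionedMap never looks up a direct SMI_ONLY -> FAST transition; it
always goes through FAST_DOUBLE_ELEMENTS first, which keeps the transition
graph a tree. A standalone sketch of that chain in plain C++ (the enum and
helpers are illustrative, not V8 types):

    #include <cstdio>

    enum ElementsKind { SMI_ONLY, DOUBLE, FAST };

    // Each kind has a single successor; only forward queries are valid.
    static ElementsKind NextKind(ElementsKind kind) {
      return kind == SMI_ONLY ? DOUBLE : FAST;
    }

    static int StepsTo(ElementsKind from, ElementsKind to) {
      int steps = 0;
      while (from != to) {
        from = NextKind(from);
        ++steps;
      }
      return steps;
    }

    int main() {
      std::printf("%d\n", StepsTo(SMI_ONLY, FAST));  // 2: via DOUBLE
      return 0;
    }
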
 
-    // If the transition already exists, check the type. If there is a match,
-    // return it.
-    if (index != DescriptorArray::kNotFound) {
-      PropertyDetails details(PropertyDetails(descriptors->GetDetails(index)));
-      if (details.type() == ELEMENTS_TRANSITION &&
-          details.elements_kind() == elements_kind) {
-        return descriptors->GetValue(index);
-      } else {
-        safe_to_add_transition = false;
+static Map* GetElementsTransitionMapFromDescriptor(Object* descriptor_contents,
+                                                   ElementsKind elements_kind) {
+  if (descriptor_contents->IsMap()) {
+    Map* map = Map::cast(descriptor_contents);
+    if (map->elements_kind() == elements_kind) {
+      return map;
+    }
+    return NULL;
+  }
+
+  FixedArray* map_array = FixedArray::cast(descriptor_contents);
+  for (int i = 0; i < map_array->length(); ++i) {
+    Object* current = map_array->get(i);
+    // Skip undefined slots; they are sentinels for reclaimed maps.
+    if (!current->IsUndefined()) {
+      Map* current_map = Map::cast(map_array->get(i));
+      if (current_map->elements_kind() == elements_kind) {
+        return current_map;
       }
     }
   }
 
+  return NULL;
+}
+
+
+static MaybeObject* AddElementsTransitionMapToDescriptor(
+    Object* descriptor_contents,
+    Map* new_map) {
+  // Nothing was in the descriptor for an ELEMENTS_TRANSITION;
+  // simply add the map.
+  if (descriptor_contents == NULL) {
+    return new_map;
+  }
+
+  // There was already a map in the descriptor, create a 2-element FixedArray
+  // to contain the existing map plus the new one.
+  FixedArray* new_array;
+  Heap* heap = new_map->GetHeap();
+  if (descriptor_contents->IsMap()) {
+    // Must tenure, DescriptorArray expects no new-space objects.
+    MaybeObject* maybe_new_array = heap->AllocateFixedArray(2, TENURED);
+    if (!maybe_new_array->To<FixedArray>(&new_array)) {
+      return maybe_new_array;
+    }
+    new_array->set(0, descriptor_contents);
+    new_array->set(1, new_map);
+    return new_array;
+  }
+
+  // The descriptor already contained a list of maps for different
+  // ElementsKinds of ELEMENTS_TRANSITION. First check the existing array for
+  // an undefined slot, and if that's not available, create a FixedArray to
+  // hold the existing maps plus the new one and fill it in.
+  FixedArray* array = FixedArray::cast(descriptor_contents);
+  for (int i = 0; i < array->length(); ++i) {
+    if (array->get(i)->IsUndefined()) {
+      array->set(i, new_map);
+      return array;
+    }
+  }
+
+  // Must tenure, DescriptorArray expects no new-space objects.
+  MaybeObject* maybe_new_array =
+      heap->AllocateFixedArray(array->length() + 1, TENURED);
+  if (!maybe_new_array->To<FixedArray>(&new_array)) {
+    return maybe_new_array;
+  }
+  int i = 0;
+  while (i < array->length()) {
+    new_array->set(i, array->get(i));
+    ++i;
+  }
+  new_array->set(i, new_map);
+  return new_array;
+}
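
AddElementsTransitionMapToDescriptor stores one map directly and upgrades to
a (tenured) FixedArray only when a second kind appears, preferring to reuse
an undefined slot left by a reclaimed map before growing. A standalone sketch
of the reuse-then-grow strategy in plain C++ (std::vector and nullptr stand
in for FixedArray and undefined):

    #include <cstdio>
    #include <vector>

    using MapPtr = const char*;  // illustrative stand-in for Map*

    static std::vector<MapPtr> AddTransition(std::vector<MapPtr> maps,
                                             MapPtr new_map) {
      for (auto& slot : maps) {
        if (slot == nullptr) {   // reclaimed slot: reuse it in place
          slot = new_map;
          return maps;
        }
      }
      maps.push_back(new_map);   // no free slot: grow by one (a copy in V8)
      return maps;
    }

    int main() {
      std::vector<MapPtr> maps = { "smi->double", nullptr };
      maps = AddTransition(maps, "double->fast");
      std::printf("%zu entries\n", maps.size());  // still 2: slot was reused
      return 0;
    }
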
+
+
+String* Map::elements_transition_sentinel_name() {
+  return GetHeap()->empty_symbol();
+}
+
+
+Object* Map::GetDescriptorContents(String* sentinel_name,
+                                   bool* safe_to_add_transition) {
+  // Get the cached index for the descriptors lookup, or find and cache it.
+  DescriptorArray* descriptors = instance_descriptors();
+  DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
+  int index = cache->Lookup(descriptors, sentinel_name);
+  if (index == DescriptorLookupCache::kAbsent) {
+    index = descriptors->Search(sentinel_name);
+    cache->Update(descriptors, sentinel_name, index);
+  }
+  // If the transition already exists, return its descriptor.
+  if (index != DescriptorArray::kNotFound) {
+    PropertyDetails details(descriptors->GetDetails(index));
+    if (details.type() == ELEMENTS_TRANSITION) {
+      return descriptors->GetValue(index);
+    } else {
+      if (safe_to_add_transition != NULL) {
+        *safe_to_add_transition = false;
+      }
+    }
+  }
+  return NULL;
+}
+
+
+Map* Map::LookupElementsTransitionMap(ElementsKind elements_kind,
+                                      bool* safe_to_add_transition) {
+  // Special case: indirect SMI->FAST transition (cf. comment in
+  // AddElementsTransition()).
+  if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      elements_kind == FAST_ELEMENTS) {
+    Map* double_map = this->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS,
+                                                        safe_to_add_transition);
+    if (double_map == NULL) return double_map;
+    return double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
+                                                   safe_to_add_transition);
+  }
+  Object* descriptor_contents = GetDescriptorContents(
+      elements_transition_sentinel_name(), safe_to_add_transition);
+  if (descriptor_contents != NULL) {
+    Map* maybe_transition_map =
+        GetElementsTransitionMapFromDescriptor(descriptor_contents,
+                                               elements_kind);
+    ASSERT(maybe_transition_map == NULL || maybe_transition_map->IsMap());
+    return maybe_transition_map;
+  }
+  return NULL;
+}
+
+
+MaybeObject* Map::AddElementsTransition(ElementsKind elements_kind,
+                                        Map* transitioned_map) {
+  // The map transition graph should be a tree, therefore the transition
+  // from SMI to FAST elements is not done directly, but by going through
+  // DOUBLE elements first.
+  if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      elements_kind == FAST_ELEMENTS) {
+    bool safe_to_add = true;
+    Map* double_map = this->LookupElementsTransitionMap(
+        FAST_DOUBLE_ELEMENTS, &safe_to_add);
+    // This method is only called when safe_to_add_transition has been found
+    // to be true earlier.
+    ASSERT(safe_to_add);
+
+    if (double_map == NULL) {
+      MaybeObject* maybe_map = this->CopyDropTransitions();
+      if (!maybe_map->To(&double_map)) return maybe_map;
+      double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
+      MaybeObject* maybe_double_transition = this->AddElementsTransition(
+          FAST_DOUBLE_ELEMENTS, double_map);
+      if (maybe_double_transition->IsFailure()) return maybe_double_transition;
+    }
+    return double_map->AddElementsTransition(FAST_ELEMENTS, transitioned_map);
+  }
+
+  bool safe_to_add_transition = true;
+  Object* descriptor_contents = GetDescriptorContents(
+      elements_transition_sentinel_name(), &safe_to_add_transition);
+  // This method is only called when safe_to_add_transition has been found
+  // to be true earlier.
+  ASSERT(safe_to_add_transition);
+  MaybeObject* maybe_new_contents =
+      AddElementsTransitionMapToDescriptor(descriptor_contents,
+                                           transitioned_map);
+  Object* new_contents;
+  if (!maybe_new_contents->ToObject(&new_contents)) {
+    return maybe_new_contents;
+  }
+
+  ElementsTransitionDescriptor desc(elements_transition_sentinel_name(),
+                                    new_contents);
+  Object* new_descriptors;
+  MaybeObject* maybe_new_descriptors =
+      instance_descriptors()->CopyInsert(&desc, KEEP_TRANSITIONS);
+  if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
+    return maybe_new_descriptors;
+  }
+  set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+  return this;
+}
+
+
+Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
+                                               ElementsKind to_kind) {
+  Isolate* isolate = object->GetIsolate();
+  CALL_HEAP_FUNCTION(isolate,
+                     object->GetElementsTransitionMap(isolate, to_kind),
+                     Map);
+}
+
+
+MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
+  Map* current_map = map();
+  ElementsKind from_kind = current_map->elements_kind();
+
+  if (from_kind == to_kind) return current_map;
+
+  // Only objects with FastProperties can have DescriptorArrays and can track
+  // element-related maps. Also don't add descriptors to maps that are shared.
+  bool safe_to_add_transition = HasFastProperties() &&
+      !current_map->IsUndefined() &&
+      !current_map->is_shared();
+
+  // Prevent long chains of DICTIONARY -> FAST_ELEMENTS maps caused by objects
+  // with elements that switch back and forth between dictionary and fast
+  // element mode.
+  if (from_kind == DICTIONARY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    safe_to_add_transition = false;
+  }
+
+  if (safe_to_add_transition) {
+    // It's only safe to manipulate the descriptor array if it would be
+    // safe to add a transition.
+    Map* maybe_transition_map = current_map->LookupElementsTransitionMap(
+        to_kind, &safe_to_add_transition);
+    if (maybe_transition_map != NULL) {
+      return maybe_transition_map;
+    }
+  }
+
+  Map* new_map = NULL;
+
   // No transition to an existing map for the given ElementsKind. Make a new
   // one.
-  Object* obj;
-  { MaybeObject* maybe_map = CopyDropTransitions();
-    if (!maybe_map->ToObject(&obj)) return maybe_map;
+  { MaybeObject* maybe_map = current_map->CopyDropTransitions();
+    if (!maybe_map->To(&new_map)) return maybe_map;
   }
-  Map* new_map = Map::cast(obj);
 
-  new_map->set_elements_kind(elements_kind);
-  GetIsolate()->counters()->map_to_external_array_elements()->Increment();
+  new_map->set_elements_kind(to_kind);
 
   // Only remember the map transition if the object's map is NOT equal to the
   // global object_function's map and there is no pre-existing, non-matching
   // elements transition.
-  bool allow_map_transition =
-      safe_to_add_transition &&
-      (GetIsolate()->context()->global_context()->object_function()->map() !=
-       map());
+  Context* global_context = GetIsolate()->context()->global_context();
+  bool allow_map_transition = safe_to_add_transition &&
+      (global_context->object_function()->map() != map());
   if (allow_map_transition) {
-    // Allocate new instance descriptors for the old map with map transition.
-    ElementsTransitionDescriptor desc(elements_transition_sentinel_name,
-                                      Map::cast(new_map),
-                                      elements_kind);
-    Object* new_descriptors;
-    MaybeObject* maybe_new_descriptors = descriptors->CopyInsert(
-        &desc,
-        KEEP_TRANSITIONS);
-    if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
-      return maybe_new_descriptors;
-    }
-    descriptors = DescriptorArray::cast(new_descriptors);
-    set_instance_descriptors(descriptors);
+    MaybeObject* maybe_transition =
+        current_map->AddElementsTransition(to_kind, new_map);
+    if (maybe_transition->IsFailure()) return maybe_transition;
   }
-
   return new_map;
 }
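
GetElementsTransitionMapSlow consults and records transitions only while
safe_to_add_transition holds (fast properties, non-shared map, and not a
DICTIONARY -> FAST switch); otherwise it still produces a fresh map but
leaves the transition table untouched. A standalone sketch of that gating in
plain C++ (cache, key, and value types are illustrative):

    #include <cstdio>
    #include <map>
    #include <string>

    static std::map<std::string, int> g_transitions;  // stand-in cache

    static int GetTransitioned(const std::string& key, bool safe_to_cache) {
      if (safe_to_cache) {
        auto it = g_transitions.find(key);
        if (it != g_transitions.end()) return it->second;  // reuse transition
      }
      int fresh = static_cast<int>(key.size());       // stand-in for a new map
      if (safe_to_cache) g_transitions[key] = fresh;  // remember it
      return fresh;
    }

    int main() {
      std::printf("%d\n", GetTransitioned("smi->fast", true));   // miss, cached
      std::printf("%d\n", GetTransitioned("smi->fast", true));   // hit
      std::printf("%d\n", GetTransitioned("dict->fast", false)); // never cached
      return 0;
    }
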
 
@@ -2079,6 +2478,7 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return result->NotFound();
     ASSERT(proto->IsJSGlobalObject());
+    // A GlobalProxy's prototype should always be a proper JSObject.
     return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
   }
 
@@ -2173,7 +2573,7 @@
         case INTERCEPTOR: {
          // Try to look up real named properties. Note that the only
          // properties that can be set are callbacks marked as ALL_CAN_WRITE
          // on the prototype chain.
-          LookupResult r;
+          LookupResult r(GetIsolate());
           LookupRealNamedProperty(name, &r);
           if (r.IsProperty()) {
             return SetPropertyWithFailedAccessCheck(&r,
@@ -2191,10 +2591,10 @@
     }
   }
 
-  Heap* heap = GetHeap();
-  HandleScope scope(heap->isolate());
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
   Handle<Object> value_handle(value);
-  heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+  isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
   return *value_handle;
 }
 
@@ -2205,7 +2605,7 @@
                                      PropertyAttributes attributes,
                                      StrictModeFlag strict_mode) {
   if (result->IsFound() && result->type() == HANDLER) {
-    return JSProxy::cast(this)->SetPropertyWithHandler(
+    return result->proxy()->SetPropertyWithHandler(
         key, value, attributes, strict_mode);
   } else {
     return JSObject::cast(this)->SetPropertyForResult(
@@ -2219,22 +2619,11 @@
   HandleScope scope(isolate);
   Handle<Object> receiver(this);
   Handle<Object> name(name_raw);
-  Handle<Object> handler(this->handler());
 
-  // Extract trap function.
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("has");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { name };
+  Handle<Object> result = CallTrap(
+    "has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return Failure::Exception();
-  if (trap->IsUndefined()) {
-    trap = isolate->derived_has_trap();
-  }
-
-  // Call trap function.
-  Object** args[] = { name.location() };
-  bool has_exception;
-  Handle<Object> result =
-      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
-  if (has_exception) return Failure::Exception();
 
   return result->ToBoolean()->IsTrue();
 }
@@ -2250,24 +2639,85 @@
   Handle<Object> receiver(this);
   Handle<Object> name(name_raw);
   Handle<Object> value(value_raw);
-  Handle<Object> handler(this->handler());
 
-  // Extract trap function.
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("set");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { receiver, name, value };
+  CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return Failure::Exception();
-  if (trap->IsUndefined()) {
-    trap = isolate->derived_set_trap();
+
+  return *value;
+}
+
+
+MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandlerIfDefiningSetter(
+    String* name_raw,
+    Object* value_raw,
+    PropertyAttributes attributes,
+    StrictModeFlag strict_mode,
+    bool* found) {
+  *found = true;  // except where defined otherwise...
+  Isolate* isolate = GetHeap()->isolate();
+  Handle<JSProxy> proxy(this);
+  Handle<Object> handler(this->handler());  // Trap might morph proxy.
+  Handle<String> name(name_raw);
+  Handle<Object> value(value_raw);
+  Handle<Object> args[] = { name };
+  Handle<Object> result = proxy->CallTrap(
+      "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
+  if (isolate->has_pending_exception()) return Failure::Exception();
+
+  if (!result->IsUndefined()) {
+    // The proxy handler cares about this property.
+    // Check whether it is virtualized as an accessor.
+    // Emulate [[GetProperty]] semantics for proxies.
+    bool has_pending_exception;
+    Handle<Object> argv[] = { result };
+    Handle<Object> desc =
+        Execution::Call(isolate->to_complete_property_descriptor(), result,
+                        ARRAY_SIZE(argv), argv, &has_pending_exception);
+    if (has_pending_exception) return Failure::Exception();
+
+    Handle<String> conf_name =
+        isolate->factory()->LookupAsciiSymbol("configurable_");
+    Handle<Object> configurable(v8::internal::GetProperty(desc, conf_name));
+    ASSERT(!isolate->has_pending_exception());
+    if (configurable->IsFalse()) {
+      Handle<String> trap =
+          isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+      Handle<Object> args[] = { handler, trap, name };
+      Handle<Object> error = isolate->factory()->NewTypeError(
+          "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
+      return isolate->Throw(*error);
+    }
+    ASSERT(configurable->IsTrue());
+
+    // Check for AccessorDescriptor.
+    Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
+    Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
+    ASSERT(!isolate->has_pending_exception());
+    if (!setter->IsUndefined()) {
+      // We have a setter -- invoke it.
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return proxy->SetPropertyWithDefinedSetter(
+          JSReceiver::cast(*setter), *value);
+    } else {
+      Handle<String> get_name = isolate->factory()->LookupAsciiSymbol("get_");
+      Handle<Object> getter(v8::internal::GetProperty(desc, get_name));
+      ASSERT(!isolate->has_pending_exception());
+      if (!getter->IsUndefined()) {
+        // We have a getter but no setter -- the property may not be
+        // written. In strict mode, throw an error.
+        if (strict_mode == kNonStrictMode) return *value;
+        Handle<Object> args[] = { name, proxy };
+        Handle<Object> error = isolate->factory()->NewTypeError(
+            "no_setter_in_callback", HandleVector(args, ARRAY_SIZE(args)));
+        return isolate->Throw(*error);
+      }
+    }
+    // Fall-through.
   }
 
-  // Call trap function.
-  Object** args[] = {
-      receiver.location(), name.location(), value.location()
-  };
-  bool has_exception;
-  Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
-  if (has_exception) return Failure::Exception();
-
+  // The proxy does not define the property as an accessor.
+  *found = false;
   return *value;
 }
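
The trap result drives a three-way decision above: a setter is invoked, a
getter-only property throws in strict mode (and silently absorbs the write
otherwise), and everything else falls through with *found == false so the
ordinary lookup continues. A standalone sketch of that decision tree in plain
C++ (Descriptor and HandleWrite are illustrative stand-ins):

    #include <cstdio>
    #include <functional>

    struct Descriptor {
      std::function<void(int)> set;  // empty == no setter
      bool has_getter = false;
    };

    // Returns true if the descriptor handled the write; *error is set when
    // a getter-only property is written in strict mode.
    static bool HandleWrite(const Descriptor* desc, int value,
                            bool strict, bool* error) {
      *error = false;
      if (desc == nullptr) return false;  // trap returned undefined
      if (desc->set) {                    // accessor with a setter
        desc->set(value);
        return true;
      }
      if (desc->has_getter) {             // getter but no setter
        *error = strict;                  // strict mode: TypeError
        return true;                      // the write is absorbed either way
      }
      return false;                       // data property: fall through
    }

    int main() {
      bool error;
      Descriptor getter_only;
      getter_only.has_getter = true;
      HandleWrite(&getter_only, 1, /*strict=*/true, &error);
      std::printf("strict getter-only write -> error=%d\n", error);  // 1
      return 0;
    }
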
 
@@ -2278,31 +2728,16 @@
   HandleScope scope(isolate);
   Handle<Object> receiver(this);
   Handle<Object> name(name_raw);
-  Handle<Object> handler(this->handler());
 
-  // Extract trap function.
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { name };
+  Handle<Object> result = CallTrap(
+    "delete", Handle<Object>(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return Failure::Exception();
-  if (trap->IsUndefined()) {
-    Handle<Object> args[] = { handler, trap_name };
-    Handle<Object> error = isolate->factory()->NewTypeError(
-        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
-    isolate->Throw(*error);
-    return Failure::Exception();
-  }
-
-  // Call trap function.
-  Object** args[] = { name.location() };
-  bool has_exception;
-  Handle<Object> result =
-      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
-  if (has_exception) return Failure::Exception();
 
   Object* bool_result = result->ToBoolean();
-  if (mode == STRICT_DELETION &&
-      bool_result == isolate->heap()->false_value()) {
-    Handle<Object> args[] = { handler, trap_name };
+  if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
+    Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
+    Handle<Object> args[] = { Handle<Object>(handler()), trap_name };
     Handle<Object> error = isolate->factory()->NewTypeError(
         "handler_failed", HandleVector(args, ARRAY_SIZE(args)));
     isolate->Throw(*error);
@@ -2312,39 +2747,76 @@
 }
 
 
-MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
-    JSReceiver* receiver_raw,
-    String* name_raw,
-    bool* has_exception) {
+MUST_USE_RESULT MaybeObject* JSProxy::DeleteElementWithHandler(
+    uint32_t index,
+    DeleteMode mode) {
   Isolate* isolate = GetIsolate();
   HandleScope scope(isolate);
+  Handle<String> name = isolate->factory()->Uint32ToString(index);
+  return JSProxy::DeletePropertyWithHandler(*name, mode);
+}
+
+
+MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
+    JSReceiver* receiver_raw,
+    String* name_raw) {
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
+  Handle<JSProxy> proxy(this);
+  Handle<Object> handler(this->handler());  // Trap might morph proxy.
   Handle<JSReceiver> receiver(receiver_raw);
   Handle<Object> name(name_raw);
-  Handle<Object> handler(this->handler());
 
-  // Extract trap function.
-  Handle<String> trap_name =
-      isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { name };
+  Handle<Object> result = CallTrap(
+    "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return NONE;
-  if (trap->IsUndefined()) {
-    Handle<Object> args[] = { handler, trap_name };
+
+  if (result->IsUndefined()) return ABSENT;
+
+  bool has_pending_exception;
+  Handle<Object> argv[] = { result };
+  Handle<Object> desc =
+      Execution::Call(isolate->to_complete_property_descriptor(), result,
+                      ARRAY_SIZE(argv), argv, &has_pending_exception);
+  if (has_pending_exception) return NONE;
+
+  // Convert result to PropertyAttributes.
+  Handle<String> enum_n = isolate->factory()->LookupAsciiSymbol("enumerable");
+  Handle<Object> enumerable(v8::internal::GetProperty(desc, enum_n));
+  if (isolate->has_pending_exception()) return NONE;
+  Handle<String> conf_n = isolate->factory()->LookupAsciiSymbol("configurable");
+  Handle<Object> configurable(v8::internal::GetProperty(desc, conf_n));
+  if (isolate->has_pending_exception()) return NONE;
+  Handle<String> writ_n = isolate->factory()->LookupAsciiSymbol("writable");
+  Handle<Object> writable(v8::internal::GetProperty(desc, writ_n));
+  if (isolate->has_pending_exception()) return NONE;
+
+  if (configurable->IsFalse()) {
+    Handle<String> trap =
+        isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+    Handle<Object> args[] = { handler, trap, name };
     Handle<Object> error = isolate->factory()->NewTypeError(
-        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+        "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
     isolate->Throw(*error);
-    *has_exception = true;
     return NONE;
   }
 
-  // Call trap function.
-  Object** args[] = { name.location() };
-  Handle<Object> result =
-      Execution::Call(trap, handler, ARRAY_SIZE(args), args, has_exception);
-  if (has_exception) return NONE;
+  int attributes = NONE;
+  if (enumerable->ToBoolean()->IsFalse()) attributes |= DONT_ENUM;
+  if (configurable->ToBoolean()->IsFalse()) attributes |= DONT_DELETE;
+  if (writable->ToBoolean()->IsFalse()) attributes |= READ_ONLY;
+  return static_cast<PropertyAttributes>(attributes);
+}
 
-  // TODO(rossberg): convert result to PropertyAttributes
-  USE(result);
-  return NONE;
+
+MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
+    JSReceiver* receiver,
+    uint32_t index) {
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
+  Handle<String> name = isolate->factory()->Uint32ToString(index);
+  return GetPropertyAttributeWithHandler(receiver, *name);
 }
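
Instead of the old TODO that returned NONE unconditionally, the handler path
now folds the normalized descriptor's enumerable/configurable/writable flags
into a PropertyAttributes bitmask. A standalone sketch of the mapping in
plain C++ (the enumerator values mirror the usual V8 encoding but are an
assumption here):

    #include <cstdio>

    enum PropertyAttributes {
      NONE = 0,
      READ_ONLY = 1 << 0,
      DONT_ENUM = 1 << 1,
      DONT_DELETE = 1 << 2
    };

    static PropertyAttributes FromDescriptor(bool enumerable,
                                             bool configurable,
                                             bool writable) {
      int attributes = NONE;
      if (!enumerable) attributes |= DONT_ENUM;
      if (!configurable) attributes |= DONT_DELETE;
      if (!writable) attributes |= READ_ONLY;
      return static_cast<PropertyAttributes>(attributes);
    }

    int main() {
      // A frozen-style property: neither writable nor configurable.
      std::printf("%d\n", FromDescriptor(true, false, false));  // 5
      return 0;
    }
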
 
 
@@ -2353,6 +2825,9 @@
   HandleScope scope(isolate);
   Handle<JSProxy> self(this);
 
+  // Save identity hash.
+  MaybeObject* maybe_hash = GetIdentityHash(OMIT_CREATION);
+
   if (IsJSFunctionProxy()) {
     isolate->factory()->BecomeJSFunction(self);
     // Code will be set on the JavaScript side.
@@ -2360,9 +2835,42 @@
     isolate->factory()->BecomeJSObject(self);
   }
   ASSERT(self->IsJSObject());
+
+  // Inherit the identity hash, if one was present.
+  Object* hash;
+  if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) {
+    Handle<JSObject> new_self(JSObject::cast(*self));
+    isolate->factory()->SetIdentityHash(new_self, hash);
+  }
 }
 
 
+MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
+                                                 Handle<Object> derived,
+                                                 int argc,
+                                                 Handle<Object> argv[]) {
+  Isolate* isolate = GetIsolate();
+  Handle<Object> handler(this->handler());
+
+  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol(name);
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  if (isolate->has_pending_exception()) return trap;
+
+  if (trap->IsUndefined()) {
+    if (derived.is_null()) {
+      Handle<Object> args[] = { handler, trap_name };
+      Handle<Object> error = isolate->factory()->NewTypeError(
+        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+      isolate->Throw(*error);
+      return Handle<Object>();
+    }
+    trap = Handle<Object>(derived);
+  }
+
+  bool threw;
+  return Execution::Call(trap, handler, argc, argv, &threw);
+}
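
CallTrap centralizes what the earlier proxy hunks each did inline: fetch the
trap by name from the handler, fall back to the derived default when the
handler omits it, and raise handler_trap_missing only when there is no
fallback. A standalone sketch in plain C++ (std::map and std::function stand
in for handler objects and traps):

    #include <cstdio>
    #include <functional>
    #include <map>
    #include <string>

    using Trap = std::function<bool(const std::string&)>;

    static bool CallTrap(const std::map<std::string, Trap>& handler,
                         const std::string& name, Trap derived,
                         const std::string& arg) {
      auto it = handler.find(name);
      Trap trap = (it != handler.end()) ? it->second : derived;
      if (!trap) {  // neither an installed trap nor a derived fallback
        std::fprintf(stderr, "handler_trap_missing: %s\n", name.c_str());
        return false;
      }
      return trap(arg);
    }

    int main() {
      std::map<std::string, Trap> handler;  // no "has" trap installed
      Trap derived_has = [](const std::string&) { return true; };
      std::printf("%d\n", CallTrap(handler, "has", derived_has, "x"));  // 1
      return 0;
    }
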
+
 
 MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
                                             String* name,
@@ -2387,48 +2895,46 @@
   }
 
   // Check access rights if needed.
-  if (IsAccessCheckNeeded()
-      && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
-    return SetPropertyWithFailedAccessCheck(result,
-                                            name,
-                                            value,
-                                            true,
-                                            strict_mode);
+  if (IsAccessCheckNeeded()) {
+    if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+      return SetPropertyWithFailedAccessCheck(
+          result, name, value, true, strict_mode);
+    }
   }
 
   if (IsJSGlobalProxy()) {
     Object* proto = GetPrototype();
     if (proto->IsNull()) return value;
     ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->SetProperty(
+    return JSObject::cast(proto)->SetPropertyForResult(
         result, name, value, attributes, strict_mode);
   }
 
   if (!result->IsProperty() && !IsJSContextExtensionObject()) {
-    // We could not find a local property so let's check whether there is an
-    // accessor that wants to handle the property.
-    LookupResult accessor_result;
-    LookupCallbackSetterInPrototypes(name, &accessor_result);
-    if (accessor_result.IsProperty()) {
-      return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
-                                     name,
-                                     value,
-                                     accessor_result.holder(),
-                                     strict_mode);
-    }
+    bool found = false;
+    MaybeObject* result_object;
+    result_object = SetPropertyWithCallbackSetterInPrototypes(name,
+                                                              value,
+                                                              attributes,
+                                                              &found,
+                                                              strict_mode);
+    if (found) return result_object;
   }
+
+  // At this point, no GC should have happened, as this would invalidate
+  // 'result', which we cannot handlify!
+
   if (!result->IsFound()) {
     // Neither properties nor transitions found.
     return AddProperty(name, value, attributes, strict_mode);
   }
   if (result->IsReadOnly() && result->IsProperty()) {
     if (strict_mode == kStrictMode) {
-      HandleScope scope(heap->isolate());
-      Handle<String> key(name);
-      Handle<Object> holder(this);
-      Handle<Object> args[2] = { key, holder };
+      Handle<JSObject> self(this);
+      Handle<String> hname(name);
+      Handle<Object> args[] = { hname, self };
       return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
-          "strict_read_only_property", HandleVector(args, 2)));
+          "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
     } else {
       return value;
     }
@@ -2472,7 +2978,6 @@
       ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
       JSFunction* function =
           JSFunction::cast(target_descriptors->GetValue(number));
-      ASSERT(!HEAP->InNewSpace(function));
       if (value == function) {
         set_map(target_map);
         return value;
@@ -2484,10 +2989,11 @@
     case NULL_DESCRIPTOR:
     case ELEMENTS_TRANSITION:
       return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
-    default:
+    case HANDLER:
       UNREACHABLE();
+      return value;
   }
-  UNREACHABLE();
+  UNREACHABLE();  // keep the compiler happy
   return value;
 }
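
Replacing default: with an explicit case HANDLER: above means the compiler,
rather than UNREACHABLE() at runtime, flags any PropertyType enumerator that
a later change forgets to handle. A standalone sketch in plain C++ (the enum
is a trimmed, illustrative subset):

    #include <cstdio>

    enum PropertyType { NORMAL, FIELD, CALLBACKS, HANDLER };

    static const char* Describe(PropertyType type) {
      switch (type) {  // no default: -Wswitch warns on missing enumerators
        case NORMAL:    return "normal";
        case FIELD:     return "field";
        case CALLBACKS: return "callbacks";
        case HANDLER:   return "handler";
      }
      return "unreachable";  // keep the compiler happy
    }

    int main() {
      std::printf("%s\n", Describe(HANDLER));
      return 0;
    }
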
 
@@ -2501,6 +3007,18 @@
 // Note that this method cannot be used to set the prototype of a function
 // because ConvertDescriptorToField() which is called in "case CALLBACKS:"
 // doesn't handle function prototypes correctly.
+Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
+    Handle<JSObject> object,
+    Handle<String> key,
+    Handle<Object> value,
+    PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION(
+    object->GetIsolate(),
+    object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
+    Object);
+}
+
+
 MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
     String* name,
     Object* value,
@@ -2509,12 +3027,12 @@
   // Make sure that the top context does not change when doing callbacks or
   // interceptor calls.
   AssertNoContextChange ncc;
-  LookupResult result;
+  Isolate* isolate = GetIsolate();
+  LookupResult result(isolate);
   LocalLookup(name, &result);
   // Check access rights if needed.
   if (IsAccessCheckNeeded()) {
-    Heap* heap = GetHeap();
-    if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    if (!isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
       return SetPropertyWithFailedAccessCheck(&result,
                                               name,
                                               value,
@@ -2572,10 +3090,11 @@
     case NULL_DESCRIPTOR:
     case ELEMENTS_TRANSITION:
       return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
-    default:
+    case HANDLER:
       UNREACHABLE();
+      return value;
   }
-  UNREACHABLE();
+  UNREACHABLE();  // keep the compiler happy
   return value;
 }
 
@@ -2585,7 +3104,7 @@
       String* name,
       bool continue_search) {
   // Check local property, ignore interceptor.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsProperty()) return result.GetAttributes();
 
@@ -2657,12 +3176,11 @@
       String* key) {
   uint32_t index = 0;
   if (IsJSObject() && key->AsArrayIndex(&index)) {
-    if (JSObject::cast(this)->HasElementWithReceiver(receiver, index))
-      return NONE;
-    return ABSENT;
+    return JSObject::cast(this)->HasElementWithReceiver(receiver, index)
+        ? NONE : ABSENT;
   }
   // Named property.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   Lookup(key, &result);
   return GetPropertyAttribute(receiver, &result, key, true);
 }
@@ -2689,10 +3207,8 @@
       case CALLBACKS:
         return result->GetAttributes();
       case HANDLER: {
-        // TODO(rossberg): propagate exceptions properly.
-        bool has_exception = false;
-        return JSProxy::cast(this)->GetPropertyAttributeWithHandler(
-            receiver, name, &has_exception);
+        return JSProxy::cast(result->proxy())->GetPropertyAttributeWithHandler(
+            receiver, name);
       }
       case INTERCEPTOR:
         return result->holder()->GetPropertyAttributeWithInterceptor(
@@ -2713,7 +3229,7 @@
     return ABSENT;
   }
   // Named property.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookup(name, &result);
   return GetPropertyAttribute(this, &result, name, false);
 }
@@ -2728,7 +3244,9 @@
   if (result->IsMap() &&
       Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
 #ifdef DEBUG
-    Map::cast(result)->SharedMapVerify();
+    if (FLAG_verify_heap) {
+      Map::cast(result)->SharedMapVerify();
+    }
     if (FLAG_enable_slow_asserts) {
       // The cached map should match newly created normalized map bit-by-bit.
       Object* fresh;
@@ -2764,6 +3282,15 @@
 }
 
 
+void JSObject::UpdateMapCodeCache(Handle<JSObject> object,
+                                  Handle<String> name,
+                                  Handle<Code> code) {
+  Isolate* isolate = object->GetIsolate();
+  CALL_HEAP_FUNCTION_VOID(isolate,
+                          object->UpdateMapCodeCache(*name, *code));
+}
+
+
 MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) {
   if (map()->is_shared()) {
     // Fast case maps are never marked as shared.
@@ -2782,6 +3309,15 @@
 }
 
 
+void JSObject::NormalizeProperties(Handle<JSObject> object,
+                                   PropertyNormalizationMode mode,
+                                   int expected_additional_properties) {
+  CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+                          object->NormalizeProperties(
+                              mode, expected_additional_properties));
+}
+
+
 MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
                                            int expected_additional_properties) {
   if (!HasFastProperties()) return this;
@@ -2800,12 +3336,10 @@
   } else {
     property_count += 2;  // Make space for two more properties.
   }
-  Object* obj;
-  { MaybeObject* maybe_obj =
-        StringDictionary::Allocate(property_count);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  StringDictionary* dictionary;
+  { MaybeObject* maybe_dictionary = StringDictionary::Allocate(property_count);
+    if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
   }
-  StringDictionary* dictionary = StringDictionary::cast(obj);
 
   DescriptorArray* descs = map_of_this->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
@@ -2815,36 +3349,31 @@
         PropertyDetails d =
             PropertyDetails(details.attributes(), NORMAL, details.index());
         Object* value = descs->GetConstantFunction(i);
-        Object* result;
-        { MaybeObject* maybe_result =
-              dictionary->Add(descs->GetKey(i), value, d);
-          if (!maybe_result->ToObject(&result)) return maybe_result;
-        }
-        dictionary = StringDictionary::cast(result);
+        MaybeObject* maybe_dictionary =
+            dictionary->Add(descs->GetKey(i), value, d);
+        if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
         break;
       }
       case FIELD: {
         PropertyDetails d =
             PropertyDetails(details.attributes(), NORMAL, details.index());
         Object* value = FastPropertyAt(descs->GetFieldIndex(i));
-        Object* result;
-        { MaybeObject* maybe_result =
-              dictionary->Add(descs->GetKey(i), value, d);
-          if (!maybe_result->ToObject(&result)) return maybe_result;
-        }
-        dictionary = StringDictionary::cast(result);
+        MaybeObject* maybe_dictionary =
+            dictionary->Add(descs->GetKey(i), value, d);
+        if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
         break;
       }
       case CALLBACKS: {
-        PropertyDetails d =
-            PropertyDetails(details.attributes(), CALLBACKS, details.index());
+        if (!descs->IsProperty(i)) break;
         Object* value = descs->GetCallbacksObject(i);
-        Object* result;
-        { MaybeObject* maybe_result =
-              dictionary->Add(descs->GetKey(i), value, d);
-          if (!maybe_result->ToObject(&result)) return maybe_result;
+        if (value->IsAccessorPair()) {
+          MaybeObject* maybe_copy =
+              AccessorPair::cast(value)->CopyWithoutTransitions();
+          if (!maybe_copy->To(&value)) return maybe_copy;
         }
-        dictionary = StringDictionary::cast(result);
+        MaybeObject* maybe_dictionary =
+            dictionary->Add(descs->GetKey(i), value, details);
+        if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
         break;
       }
       case MAP_TRANSITION:
@@ -2853,23 +3382,25 @@
       case INTERCEPTOR:
       case ELEMENTS_TRANSITION:
         break;
-      default:
+      case HANDLER:
+      case NORMAL:
         UNREACHABLE();
+        break;
     }
   }
 
-  Heap* current_heap = map_of_this->heap();
+  Heap* current_heap = GetHeap();
 
   // Copy the next enumeration index from instance descriptor.
   int index = map_of_this->instance_descriptors()->NextEnumerationIndex();
   dictionary->SetNextEnumerationIndex(index);
 
-  { MaybeObject* maybe_obj =
+  Map* new_map;
+  { MaybeObject* maybe_map =
         current_heap->isolate()->context()->global_context()->
         normalized_map_cache()->Get(this, mode);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+    if (!maybe_map->To(&new_map)) return maybe_map;
   }
-  Map* new_map = Map::cast(obj);
 
   // We have now successfully allocated all the necessary objects.
   // Changes can now be made with the guarantee that all of them take effect.
@@ -2880,6 +3411,11 @@
   ASSERT(instance_size_delta >= 0);
   current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
                                      instance_size_delta);
+  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+    MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+                                               -instance_size_delta);
+  }
+
 
   set_map(new_map);
   new_map->clear_instance_descriptors();
@@ -2898,6 +3434,14 @@
 }
 
 
+void JSObject::TransformToFastProperties(Handle<JSObject> object,
+                                         int unused_property_fields) {
+  CALL_HEAP_FUNCTION_VOID(
+      object->GetIsolate(),
+      object->TransformToFastProperties(unused_property_fields));
+}
+
+
 MaybeObject* JSObject::TransformToFastProperties(int unused_property_fields) {
   if (HasFastProperties()) return this;
   ASSERT(!IsGlobalObject());
@@ -2906,6 +3450,14 @@
 }
 
 
+Handle<SeededNumberDictionary> JSObject::NormalizeElements(
+    Handle<JSObject> object) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->NormalizeElements(),
+                     SeededNumberDictionary);
+}
+
+
 MaybeObject* JSObject::NormalizeElements() {
   ASSERT(!HasExternalArrayElements());
 
@@ -2913,13 +3465,14 @@
   FixedArrayBase* array = FixedArrayBase::cast(elements());
   Map* old_map = array->map();
   bool is_arguments =
-      (old_map == old_map->heap()->non_strict_arguments_elements_map());
+      (old_map == old_map->GetHeap()->non_strict_arguments_elements_map());
   if (is_arguments) {
     array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
   }
   if (array->IsDictionary()) return array;
 
   ASSERT(HasFastElements() ||
+         HasFastSmiOnlyElements() ||
          HasFastDoubleElements() ||
          HasFastArgumentsElements());
   // Compute the effective length and allocate a new backing store.
@@ -2954,7 +3507,8 @@
         if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
       }
     } else {
-      ASSERT(old_map->has_fast_elements());
+      ASSERT(old_map->has_fast_elements() ||
+             old_map->has_fast_smi_only_elements());
       value = FixedArray::cast(array)->get(i);
     }
     PropertyDetails details = PropertyDetails(NONE, NORMAL);
@@ -2974,13 +3528,15 @@
     // Set the new map first to satisfy the elements type assert in
     // set_elements().
     Object* new_map;
-    MaybeObject* maybe = map()->GetSlowElementsMap();
+    MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(),
+                                                  DICTIONARY_ELEMENTS);
     if (!maybe->ToObject(&new_map)) return maybe;
     set_map(Map::cast(new_map));
     set_elements(dictionary);
   }
 
-  old_map->isolate()->counters()->elements_to_dictionary()->Increment();
+  old_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
+      Increment();
 
 #ifdef DEBUG
   if (FLAG_trace_normalization) {
@@ -2994,102 +3550,252 @@
 }
 
 
-MaybeObject* JSObject::GetHiddenProperties(HiddenPropertiesFlag flag) {
+Smi* JSReceiver::GenerateIdentityHash() {
   Isolate* isolate = GetIsolate();
-  Heap* heap = isolate->heap();
-  Object* holder = BypassGlobalProxy();
-  if (holder->IsUndefined()) return heap->undefined_value();
-  JSObject* obj = JSObject::cast(holder);
-  if (obj->HasFastProperties()) {
-    // If the object has fast properties, check whether the first slot
-    // in the descriptor array matches the hidden symbol. Since the
-    // hidden symbols hash code is zero (and no other string has hash
-    // code zero) it will always occupy the first entry if present.
-    DescriptorArray* descriptors = obj->map()->instance_descriptors();
-    if ((descriptors->number_of_descriptors() > 0) &&
-        (descriptors->GetKey(0) == heap->hidden_symbol()) &&
-        descriptors->IsProperty(0)) {
-      ASSERT(descriptors->GetType(0) == FIELD);
-      return obj->FastPropertyAt(descriptors->GetFieldIndex(0));
-    }
-  }
-
-  // Only attempt to find the hidden properties in the local object and not
-  // in the prototype chain.
-  if (!obj->HasHiddenPropertiesObject()) {
-    // Hidden properties object not found. Allocate a new hidden properties
-    // object if requested. Otherwise return the undefined value.
-    if (flag == ALLOW_CREATION) {
-      Object* hidden_obj;
-      { MaybeObject* maybe_obj = heap->AllocateJSObject(
-            isolate->context()->global_context()->object_function());
-        if (!maybe_obj->ToObject(&hidden_obj)) return maybe_obj;
-      }
-      // Don't allow leakage of the hidden object through accessors
-      // on Object.prototype.
-      {
-        MaybeObject* maybe_obj =
-            JSObject::cast(hidden_obj)->SetPrototype(heap->null_value(), false);
-        if (maybe_obj->IsFailure()) return maybe_obj;
-      }
-      return obj->SetHiddenPropertiesObject(hidden_obj);
-    } else {
-      return heap->undefined_value();
-    }
-  }
-  return obj->GetHiddenPropertiesObject();
-}
-
-
-MaybeObject* JSObject::GetIdentityHash(HiddenPropertiesFlag flag) {
-  Isolate* isolate = GetIsolate();
-  Object* hidden_props_obj;
-  { MaybeObject* maybe_obj = GetHiddenProperties(flag);
-    if (!maybe_obj->ToObject(&hidden_props_obj)) return maybe_obj;
-  }
-  if (!hidden_props_obj->IsJSObject()) {
-    // We failed to create hidden properties.  That's a detached
-    // global proxy.
-    ASSERT(hidden_props_obj->IsUndefined());
-    return Smi::FromInt(0);
-  }
-  JSObject* hidden_props = JSObject::cast(hidden_props_obj);
-  String* hash_symbol = isolate->heap()->identity_hash_symbol();
-  {
-    // Note that HasLocalProperty() can cause a GC in the general case in the
-    // presence of interceptors.
-    AssertNoAllocation no_alloc;
-    if (hidden_props->HasLocalProperty(hash_symbol)) {
-      MaybeObject* hash = hidden_props->GetProperty(hash_symbol);
-      return Smi::cast(hash->ToObjectChecked());
-    }
-  }
 
   int hash_value;
   int attempts = 0;
   do {
     // Generate a random 32-bit hash value but limit range to fit
     // within a smi.
-    hash_value = V8::Random(isolate) & Smi::kMaxValue;
+    hash_value = V8::RandomPrivate(isolate) & Smi::kMaxValue;
     attempts++;
   } while (hash_value == 0 && attempts < 30);
   hash_value = hash_value != 0 ? hash_value : 1;  // never return 0
 
-  Smi* hash = Smi::FromInt(hash_value);
-  { MaybeObject* result = hidden_props->SetLocalPropertyIgnoreAttributes(
-        hash_symbol,
-        hash,
-        static_cast<PropertyAttributes>(None));
-    if (result->IsFailure()) return result;
+  return Smi::FromInt(hash_value);
+}
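
GenerateIdentityHash masks a random value into smi range and retries up to 30
times before forcing 1, so that 0 stays reserved as the "no hash yet"
sentinel. A standalone sketch in plain C++ (kMaxValue is an assumed stand-in
for Smi::kMaxValue, and std::rand for V8::RandomPrivate):

    #include <cstdio>
    #include <cstdlib>

    static const int kMaxValue = 0x3fffffff;  // assumed 31-bit smi limit

    static int GenerateIdentityHash() {
      int hash;
      int attempts = 0;
      do {
        // Random value limited to nonnegative smi range.
        hash = std::rand() & kMaxValue;
        attempts++;
      } while (hash == 0 && attempts < 30);
      return hash != 0 ? hash : 1;  // never return 0
    }

    int main() {
      std::printf("%d\n", GenerateIdentityHash());
      return 0;
    }
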
+
+
+MaybeObject* JSObject::SetIdentityHash(Object* hash, CreationFlag flag) {
+  MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
+                                         hash);
+  if (maybe->IsFailure()) return maybe;
+  return this;
+}
+
+
+int JSObject::GetIdentityHash(Handle<JSObject> obj) {
+  CALL_AND_RETRY(obj->GetIsolate(),
+                 obj->GetIdentityHash(ALLOW_CREATION),
+                 return Smi::cast(__object__)->value(),
+                 return 0);
+}
+
+
+MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
+  Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_symbol());
+  if (stored_value->IsSmi()) return stored_value;
+
+  // Do not generate permanent identity hash code if not requested.
+  if (flag == OMIT_CREATION) return GetHeap()->undefined_value();
+
+  Smi* hash = GenerateIdentityHash();
+  MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
+                                          hash);
+  if (result->IsFailure()) return result;
+  if (result->ToObjectUnchecked()->IsUndefined()) {
+    // Trying to get hash of detached proxy.
+    return Smi::FromInt(0);
   }
   return hash;
 }
 
 
+MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
+  Object* hash = this->hash();
+  if (!hash->IsSmi() && flag == ALLOW_CREATION) {
+    hash = GenerateIdentityHash();
+    set_hash(hash);
+  }
+  return hash;
+}
+
+
+Object* JSObject::GetHiddenProperty(String* key) {
+  if (IsJSGlobalProxy()) {
+    // For a proxy, use the prototype as target object.
+    Object* proxy_parent = GetPrototype();
+    // If the proxy is detached, return undefined.
+    if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+    ASSERT(proxy_parent->IsJSGlobalObject());
+    return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
+  }
+  ASSERT(!IsJSGlobalProxy());
+  MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
+  ASSERT(!hidden_lookup->IsFailure());  // No failure when passing false as arg.
+  if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) {
+    return GetHeap()->undefined_value();
+  }
+  StringDictionary* dictionary =
+      StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
+  int entry = dictionary->FindEntry(key);
+  if (entry == StringDictionary::kNotFound) return GetHeap()->undefined_value();
+  return dictionary->ValueAt(entry);
+}
+
+
+Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
+                                 Handle<String> key,
+                                 Handle<Object> value) {
+  CALL_HEAP_FUNCTION(obj->GetIsolate(),
+                     obj->SetHiddenProperty(*key, *value),
+                     Object);
+}
+
+
+MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
+  if (IsJSGlobalProxy()) {
+    // For a proxy, use the prototype as target object.
+    Object* proxy_parent = GetPrototype();
+    // If the proxy is detached, return undefined.
+    if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+    ASSERT(proxy_parent->IsJSGlobalObject());
+    return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value);
+  }
+  ASSERT(!IsJSGlobalProxy());
+  MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(true);
+  StringDictionary* dictionary;
+  if (!hidden_lookup->To<StringDictionary>(&dictionary)) return hidden_lookup;
+
+  // If it was found, check if the key is already in the dictionary.
+  int entry = dictionary->FindEntry(key);
+  if (entry != StringDictionary::kNotFound) {
+    // If key was found, just update the value.
+    dictionary->ValueAtPut(entry, value);
+    return this;
+  }
+  // Key was not already in the dictionary, so add the entry.
+  MaybeObject* insert_result = dictionary->Add(key,
+                                               value,
+                                               PropertyDetails(NONE, NORMAL));
+  StringDictionary* new_dict;
+  if (!insert_result->To<StringDictionary>(&new_dict)) return insert_result;
+  if (new_dict != dictionary) {
+    // If adding the key expanded the dictionary (i.e., Add returned a new
+    // dictionary), store it back to the object.
+    MaybeObject* store_result = SetHiddenPropertiesDictionary(new_dict);
+    if (store_result->IsFailure()) return store_result;
+  }
+  // Return this to mark success.
+  return this;
+}
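
SetHiddenProperty updates an existing entry in place and only adds a new one
otherwise, storing the dictionary back when Add() had to reallocate it. A
standalone sketch of the update-or-add split in plain C++; std::map's value
semantics make the store-back step unnecessary, so only the lookup logic is
modeled:

    #include <cstdio>
    #include <map>
    #include <string>

    static void SetHidden(std::map<std::string, int>* dict,
                          const std::string& key, int value) {
      auto it = dict->find(key);
      if (it != dict->end()) {
        it->second = value;       // key present: just update the value
        return;
      }
      dict->emplace(key, value);  // key absent: add a new entry
    }

    int main() {
      std::map<std::string, int> dict;
      SetHidden(&dict, "identity_hash", 7);
      SetHidden(&dict, "identity_hash", 9);        // updates in place
      std::printf("%d\n", dict["identity_hash"]);  // 9
      return 0;
    }
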
+
+
+void JSObject::DeleteHiddenProperty(String* key) {
+  if (IsJSGlobalProxy()) {
+    // For a proxy, use the prototype as target object.
+    Object* proxy_parent = GetPrototype();
+    // If the proxy is detached, return immediately.
+    if (proxy_parent->IsNull()) return;
+    ASSERT(proxy_parent->IsJSGlobalObject());
+    JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
+    return;
+  }
+  MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
+  ASSERT(!hidden_lookup->IsFailure());  // No failure when passing false as arg.
+  if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) return;
+  StringDictionary* dictionary =
+      StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
+  int entry = dictionary->FindEntry(key);
+  if (entry == StringDictionary::kNotFound) {
+    // Key wasn't in dictionary. Deletion is a success.
+    return;
+  }
+  // Key was in the dictionary. Remove it.
+  dictionary->DeleteProperty(entry, JSReceiver::FORCE_DELETION);
+}
+
+
+bool JSObject::HasHiddenProperties() {
+  return GetPropertyAttributePostInterceptor(this,
+                                             GetHeap()->hidden_symbol(),
+                                             false) != ABSENT;
+}
+
+
+MaybeObject* JSObject::GetHiddenPropertiesDictionary(bool create_if_absent) {
+  ASSERT(!IsJSGlobalProxy());
+  if (HasFastProperties()) {
+    // If the object has fast properties, check whether the first slot
+    // in the descriptor array matches the hidden symbol. Since the
+    // hidden symbol's hash code is zero (and no other string has hash
+    // code zero), it will always occupy the first entry if present.
+    DescriptorArray* descriptors = this->map()->instance_descriptors();
+    if ((descriptors->number_of_descriptors() > 0) &&
+        (descriptors->GetKey(0) == GetHeap()->hidden_symbol())) {
+      if (descriptors->GetType(0) == FIELD) {
+        Object* hidden_store =
+            this->FastPropertyAt(descriptors->GetFieldIndex(0));
+        return StringDictionary::cast(hidden_store);
+      } else {
+        ASSERT(descriptors->GetType(0) == NULL_DESCRIPTOR ||
+               descriptors->GetType(0) == MAP_TRANSITION);
+      }
+    }
+  } else {
+    PropertyAttributes attributes;
+    // You can't install a getter on a property indexed by the hidden symbol,
+    // so we can be sure that GetLocalPropertyPostInterceptor returns a real
+    // object.
+    Object* lookup =
+        GetLocalPropertyPostInterceptor(this,
+                                        GetHeap()->hidden_symbol(),
+                                        &attributes)->ToObjectUnchecked();
+    if (!lookup->IsUndefined()) {
+      return StringDictionary::cast(lookup);
+    }
+  }
+  if (!create_if_absent) return GetHeap()->undefined_value();
+  const int kInitialSize = 5;
+  MaybeObject* dict_alloc = StringDictionary::Allocate(kInitialSize);
+  StringDictionary* dictionary;
+  if (!dict_alloc->To<StringDictionary>(&dictionary)) return dict_alloc;
+  MaybeObject* store_result =
+      SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+                                 dictionary,
+                                 DONT_ENUM,
+                                 kNonStrictMode);
+  if (store_result->IsFailure()) return store_result;
+  return dictionary;
+}
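
GetHiddenPropertiesDictionary never allocates on a pure probe
(create_if_absent == false) and installs the backing dictionary lazily on the
first write. A standalone sketch of that contract in plain C++ (unique_ptr
stands in for the hidden-symbol property slot):

    #include <cstdio>
    #include <map>
    #include <memory>
    #include <string>

    using Dict = std::map<std::string, int>;

    static Dict* GetHiddenDict(std::unique_ptr<Dict>* slot,
                               bool create_if_absent) {
      if (*slot) return slot->get();          // already installed
      if (!create_if_absent) return nullptr;  // probe: report "absent"
      slot->reset(new Dict());                // first write: install lazily
      return slot->get();
    }

    int main() {
      std::unique_ptr<Dict> slot;
      std::printf("%p\n", (void*)GetHiddenDict(&slot, false));  // null, no alloc
      GetHiddenDict(&slot, true)->emplace("k", 1);              // allocates
      std::printf("%zu\n", slot->size());                       // 1
      return 0;
    }
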
+
+
+MaybeObject* JSObject::SetHiddenPropertiesDictionary(
+    StringDictionary* dictionary) {
+  ASSERT(!IsJSGlobalProxy());
+  ASSERT(HasHiddenProperties());
+  if (HasFastProperties()) {
+    // If the object has fast properties, check whether the first slot
+    // in the descriptor array matches the hidden symbol. Since the
+    // hidden symbol's hash code is zero (and no other string has hash
+    // code zero), it will always occupy the first entry if present.
+    DescriptorArray* descriptors = this->map()->instance_descriptors();
+    if ((descriptors->number_of_descriptors() > 0) &&
+        (descriptors->GetKey(0) == GetHeap()->hidden_symbol())) {
+      if (descriptors->GetType(0) == FIELD) {
+        this->FastPropertyAtPut(descriptors->GetFieldIndex(0), dictionary);
+        return this;
+      } else {
+        ASSERT(descriptors->GetType(0) == NULL_DESCRIPTOR ||
+               descriptors->GetType(0) == MAP_TRANSITION);
+      }
+    }
+  }
+  MaybeObject* store_result =
+      SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+                                 dictionary,
+                                 DONT_ENUM,
+                                 kNonStrictMode);
+  if (store_result->IsFailure()) return store_result;
+  return this;
+}
+
+
 MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
                                                      DeleteMode mode) {
   // Check local property, ignore interceptor.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookupRealNamedProperty(name, &result);
   if (!result.IsProperty()) return GetHeap()->true_value();
 
@@ -3171,6 +3877,14 @@
 }
 
 
+Handle<Object> JSObject::DeleteElement(Handle<JSObject> obj,
+                                       uint32_t index) {
+  CALL_HEAP_FUNCTION(obj->GetIsolate(),
+                     obj->DeleteElement(index, JSObject::NORMAL_DELETION),
+                     Object);
+}
+
+
 MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
   Isolate* isolate = GetIsolate();
   // Check access rights if needed.
@@ -3199,12 +3913,11 @@
 }
 
 
-MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
-  if (IsJSProxy()) {
-    return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
-  } else {
-    return JSObject::cast(this)->DeleteProperty(name, mode);
-  }
+Handle<Object> JSObject::DeleteProperty(Handle<JSObject> obj,
+                                        Handle<String> prop) {
+  CALL_HEAP_FUNCTION(obj->GetIsolate(),
+                     obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
+                     Object);
 }
 
 
@@ -3231,7 +3944,7 @@
   if (name->AsArrayIndex(&index)) {
     return DeleteElement(index, mode);
   } else {
-    LookupResult result;
+    LookupResult result(isolate);
     LocalLookup(name, &result);
     if (!result.IsProperty()) return isolate->heap()->true_value();
     // Ignore attributes if forcing a deletion.
@@ -3265,10 +3978,27 @@
 }
 
 
+MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) {
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->DeleteElementWithHandler(index, mode);
+  }
+  return JSObject::cast(this)->DeleteElement(index, mode);
+}
+
+
+MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
+  }
+  return JSObject::cast(this)->DeleteProperty(name, mode);
+}
+
+
 bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
                                             ElementsKind kind,
                                             Object* object) {
-  ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
+  ASSERT(kind == FAST_ELEMENTS ||
+         kind == DICTIONARY_ELEMENTS);
   if (kind == FAST_ELEMENTS) {
     int length = IsJSArray()
         ? Smi::cast(JSArray::cast(this)->length())->value()
@@ -3289,7 +4019,7 @@
 // Check whether this object references another object.
 bool JSObject::ReferencesObject(Object* obj) {
   Map* map_of_this = map();
-  Heap* heap = map_of_this->heap();
+  Heap* heap = GetHeap();
   AssertNoAllocation no_alloc;
 
   // Is the object the constructor for this object?
@@ -3324,6 +4054,8 @@
       // Raw pixels and external arrays do not reference other
       // objects.
       break;
+    case FAST_SMI_ONLY_ELEMENTS:
+      break;
     case FAST_ELEMENTS:
     case DICTIONARY_ELEMENTS: {
       FixedArray* elements = FixedArray::cast(this->elements());
@@ -3389,6 +4121,11 @@
 }
 
 
+Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
+}
+
+
 MaybeObject* JSObject::PreventExtensions() {
   Isolate* isolate = GetIsolate();
   if (IsAccessCheckNeeded() &&
@@ -3440,15 +4177,16 @@
 
 
 // Tests for the fast common case for property enumeration:
-// - This object and all prototypes has an enum cache (which means that it has
-//   no interceptors and needs no access checks).
+// - This object and all prototypes have an enum cache (which means that
+//   it is not a proxy, has no interceptors and needs no access checks).
 // - This object has no elements.
 // - No prototype has enumerable properties/elements.
-bool JSObject::IsSimpleEnum() {
+bool JSReceiver::IsSimpleEnum() {
   Heap* heap = GetHeap();
   for (Object* o = this;
        o != heap->null_value();
        o = JSObject::cast(o)->GetPrototype()) {
+    if (!o->IsJSObject()) return false;
     JSObject* curr = JSObject::cast(o);
     if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
     ASSERT(!curr->HasNamedInterceptor());
@@ -3465,11 +4203,14 @@
 }
 
 
-int Map::NumberOfDescribedProperties() {
+int Map::NumberOfDescribedProperties(PropertyAttributes filter) {
   int result = 0;
   DescriptorArray* descs = instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    if (descs->IsProperty(i)) result++;
+    PropertyDetails details(descs->GetDetails(i));
+    if (descs->IsProperty(i) && (details.attributes() & filter) == 0) {
+      result++;
+    }
   }
   return result;
 }
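+// Sketch (not part of this change): because a property is counted only when
+// none of the filter bits are set in its attributes, passing DONT_ENUM
+// counts just the enumerable described properties:
+//
+//   int enumerable = map->NumberOfDescribedProperties(DONT_ENUM);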
@@ -3511,15 +4252,6 @@
 
 
 void JSReceiver::LocalLookup(String* name, LookupResult* result) {
-  if (IsJSProxy()) {
-    result->HandlerResult();
-  } else {
-    JSObject::cast(this)->LocalLookup(name, result);
-  }
-}
-
-
-void JSObject::LocalLookup(String* name, LookupResult* result) {
   ASSERT(name->IsString());
 
   Heap* heap = GetHeap();
@@ -3528,28 +4260,36 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return result->NotFound();
     ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->LocalLookup(name, result);
+    return JSReceiver::cast(proto)->LocalLookup(name, result);
+  }
+
+  if (IsJSProxy()) {
+    result->HandlerResult(JSProxy::cast(this));
+    return;
   }
 
   // Do not use inline caching if the object is a non-global object
   // that requires access checks.
-  if (!IsJSGlobalProxy() && IsAccessCheckNeeded()) {
+  if (IsAccessCheckNeeded()) {
     result->DisallowCaching();
   }
 
+  JSObject* js_object = JSObject::cast(this);
+
   // Check __proto__ before interceptor.
   if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) {
-    result->ConstantResult(this);
+    result->ConstantResult(js_object);
     return;
   }
 
   // Check for lookup interceptor except when bootstrapping.
-  if (HasNamedInterceptor() && !heap->isolate()->bootstrapper()->IsActive()) {
-    result->InterceptorResult(this);
+  if (js_object->HasNamedInterceptor() &&
+      !heap->isolate()->bootstrapper()->IsActive()) {
+    result->InterceptorResult(js_object);
     return;
   }
 
-  LocalLookupRealNamedProperty(name, result);
+  js_object->LocalLookupRealNamedProperty(name, result);
 }
 
 
@@ -3559,7 +4299,7 @@
   for (Object* current = this;
        current != heap->null_value();
        current = JSObject::cast(current)->GetPrototype()) {
-    JSObject::cast(current)->LocalLookup(name, result);
+    JSReceiver::cast(current)->LocalLookup(name, result);
     if (result->IsProperty()) return;
   }
   result->NotFound();
@@ -3570,128 +4310,144 @@
 void JSObject::LookupCallback(String* name, LookupResult* result) {
   Heap* heap = GetHeap();
   for (Object* current = this;
-       current != heap->null_value();
+       current != heap->null_value() && current->IsJSObject();
        current = JSObject::cast(current)->GetPrototype()) {
     JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
-    if (result->IsProperty() && result->type() == CALLBACKS) return;
+    if (result->IsFound() && result->type() == CALLBACKS) return;
   }
   result->NotFound();
 }
 
 
-// Search for a getter or setter in an elements dictionary.  Returns either
-// undefined if the element is read-only, or the getter/setter pair (fixed
-// array) if there is an existing one, or the hole value if the element does
-// not exist or is a normal non-getter/setter data element.
-static Object* FindGetterSetterInDictionary(SeededNumberDictionary* dictionary,
-                                            uint32_t index,
-                                            Heap* heap) {
+// Try to update an accessor in an elements dictionary. Return true if the
+// update succeeded, and false otherwise.
+static bool UpdateGetterSetterInDictionary(
+    SeededNumberDictionary* dictionary,
+    uint32_t index,
+    Object* getter,
+    Object* setter,
+    PropertyAttributes attributes) {
   int entry = dictionary->FindEntry(index);
   if (entry != SeededNumberDictionary::kNotFound) {
     Object* result = dictionary->ValueAt(entry);
     PropertyDetails details = dictionary->DetailsAt(entry);
-    if (details.IsReadOnly()) return heap->undefined_value();
-    if (details.type() == CALLBACKS && result->IsFixedArray()) return result;
+    if (details.type() == CALLBACKS && result->IsAccessorPair()) {
+      ASSERT(!details.IsDontDelete());
+      if (details.attributes() != attributes) {
+        dictionary->DetailsAtPut(entry,
+                                 PropertyDetails(attributes, CALLBACKS, index));
+      }
+      AccessorPair::cast(result)->SetComponents(getter, setter);
+      return true;
+    }
   }
-  return heap->the_hole_value();
+  return false;
 }
 
 
-MaybeObject* JSObject::DefineGetterSetter(String* name,
-                                          PropertyAttributes attributes) {
-  Heap* heap = GetHeap();
-  // Make sure that the top context does not change when doing callbacks or
-  // interceptor calls.
-  AssertNoContextChange ncc;
-
-  // Try to flatten before operating on the string.
-  name->TryFlatten();
-
-  if (!CanSetCallback(name)) {
-    return heap->undefined_value();
-  }
-
-  uint32_t index = 0;
-  bool is_element = name->AsArrayIndex(&index);
-
-  if (is_element) {
-    switch (GetElementsKind()) {
-      case FAST_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS:
-        break;
-      case EXTERNAL_PIXEL_ELEMENTS:
-      case EXTERNAL_BYTE_ELEMENTS:
-      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      case EXTERNAL_SHORT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      case EXTERNAL_INT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      case EXTERNAL_FLOAT_ELEMENTS:
-      case EXTERNAL_DOUBLE_ELEMENTS:
-        // Ignore getters and setters on pixel and external array
-        // elements.
-        return heap->undefined_value();
-      case DICTIONARY_ELEMENTS: {
-        Object* probe =
-            FindGetterSetterInDictionary(element_dictionary(), index, heap);
-        if (!probe->IsTheHole()) return probe;
-        // Otherwise allow to override it.
-        break;
+MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
+                                             Object* getter,
+                                             Object* setter,
+                                             PropertyAttributes attributes) {
+  switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
+    case FAST_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS:
+      break;
+    case EXTERNAL_PIXEL_ELEMENTS:
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      // Ignore getters and setters on pixel and external array elements.
+      return GetHeap()->undefined_value();
+    case DICTIONARY_ELEMENTS:
+      if (UpdateGetterSetterInDictionary(element_dictionary(),
+                                         index,
+                                         getter,
+                                         setter,
+                                         attributes)) {
+        return GetHeap()->undefined_value();
       }
-      case NON_STRICT_ARGUMENTS_ELEMENTS: {
-        // Ascertain whether we have read-only properties or an existing
-        // getter/setter pair in an arguments elements dictionary backing
-        // store.
-        FixedArray* parameter_map = FixedArray::cast(elements());
-        uint32_t length = parameter_map->length();
-        Object* probe =
-            index < (length - 2) ? parameter_map->get(index + 2) : NULL;
-        if (probe == NULL || probe->IsTheHole()) {
-          FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-          if (arguments->IsDictionary()) {
-            SeededNumberDictionary* dictionary =
-                SeededNumberDictionary::cast(arguments);
-            probe = FindGetterSetterInDictionary(dictionary, index, heap);
-            if (!probe->IsTheHole()) return probe;
+      break;
+    case NON_STRICT_ARGUMENTS_ELEMENTS: {
+      // Ascertain whether we have read-only properties or an existing
+      // getter/setter pair in an arguments elements dictionary backing
+      // store.
+      FixedArray* parameter_map = FixedArray::cast(elements());
+      uint32_t length = parameter_map->length();
+      Object* probe =
+          index < (length - 2) ? parameter_map->get(index + 2) : NULL;
+      if (probe == NULL || probe->IsTheHole()) {
+        FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+        if (arguments->IsDictionary()) {
+          SeededNumberDictionary* dictionary =
+              SeededNumberDictionary::cast(arguments);
+          if (UpdateGetterSetterInDictionary(dictionary,
+                                             index,
+                                             getter,
+                                             setter,
+                                             attributes)) {
+            return GetHeap()->undefined_value();
           }
         }
-        break;
       }
+      break;
     }
-  } else {
-    // Lookup the name.
-    LookupResult result;
-    LocalLookup(name, &result);
-    if (result.IsProperty()) {
-      if (result.IsReadOnly()) return heap->undefined_value();
-      if (result.type() == CALLBACKS) {
-        Object* obj = result.GetCallbackObject();
-        // Need to preserve old getters/setters.
-        if (obj->IsFixedArray()) {
-          // Use set to update attributes.
-          return SetPropertyCallback(name, obj, attributes);
+  }
+
+  AccessorPair* accessors;
+  { MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair();
+    if (!maybe_accessors->To(&accessors)) return maybe_accessors;
+  }
+  accessors->SetComponents(getter, setter);
+
+  return SetElementCallback(index, accessors, attributes);
+}
+
+
+MaybeObject* JSObject::DefinePropertyAccessor(String* name,
+                                              Object* getter,
+                                              Object* setter,
+                                              PropertyAttributes attributes) {
+  // Lookup the name.
+  LookupResult result(GetHeap()->isolate());
+  LocalLookupRealNamedProperty(name, &result);
+  if (result.IsFound()) {
+    if (result.type() == CALLBACKS) {
+      ASSERT(!result.IsDontDelete());
+      Object* obj = result.GetCallbackObject();
+      // Need to preserve old getters/setters.
+      if (obj->IsAccessorPair()) {
+        AccessorPair* copy;
+        { MaybeObject* maybe_copy =
+              AccessorPair::cast(obj)->CopyWithoutTransitions();
+          if (!maybe_copy->To(&copy)) return maybe_copy;
         }
+        copy->SetComponents(getter, setter);
+        // Use set to update attributes.
+        return SetPropertyCallback(name, copy, attributes);
       }
     }
   }
 
-  // Allocate the fixed array to hold getter and setter.
-  Object* structure;
-  { MaybeObject* maybe_structure = heap->AllocateFixedArray(2, TENURED);
-    if (!maybe_structure->ToObject(&structure)) return maybe_structure;
+  AccessorPair* accessors;
+  { MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair();
+    if (!maybe_accessors->To(&accessors)) return maybe_accessors;
   }
+  accessors->SetComponents(getter, setter);
 
-  if (is_element) {
-    return SetElementCallback(index, structure, attributes);
-  } else {
-    return SetPropertyCallback(name, structure, attributes);
-  }
+  return SetPropertyCallback(name, accessors, attributes);
 }
 
 
 bool JSObject::CanSetCallback(String* name) {
-  ASSERT(!IsAccessCheckNeeded()
-         || Isolate::Current()->MayNamedAccess(this, name, v8::ACCESS_SET));
+  ASSERT(!IsAccessCheckNeeded() ||
+         GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET));
 
   // Check if there is an API-defined callback object which prohibits
   // callback overwriting in this object or its prototype chain.
@@ -3699,7 +4455,7 @@
   // certain accessors such as window.location should not be allowed
   // to be overwritten because allowing overwriting could potentially
   // cause security problems.
-  LookupResult callback_result;
+  LookupResult callback_result(GetIsolate());
   LookupCallback(name, &callback_result);
   if (callback_result.IsProperty()) {
     Object* obj = callback_result.GetCallbackObject();
@@ -3719,19 +4475,15 @@
   PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
 
   // Normalize elements to make this operation simple.
-  SeededNumberDictionary* dictionary = NULL;
-  { Object* result;
-    MaybeObject* maybe = NormalizeElements();
-    if (!maybe->ToObject(&result)) return maybe;
-    dictionary = SeededNumberDictionary::cast(result);
+  SeededNumberDictionary* dictionary;
+  { MaybeObject* maybe_dictionary = NormalizeElements();
+    if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
   }
   ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
 
   // Update the dictionary with the new CALLBACKS property.
-  { Object* result;
-    MaybeObject* maybe = dictionary->Set(index, structure, details);
-    if (!maybe->ToObject(&result)) return maybe;
-    dictionary = SeededNumberDictionary::cast(result);
+  { MaybeObject* maybe_dictionary = dictionary->Set(index, structure, details);
+    if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
   }
 
   dictionary->set_requires_slow_elements();
@@ -3743,8 +4495,7 @@
     // switch to a direct backing store without the parameter map.  This
     // would allow GC of the context.
     FixedArray* parameter_map = FixedArray::cast(elements());
-    uint32_t length = parameter_map->length();
-    if (index < length - 2) {
+    if (index < static_cast<uint32_t>(parameter_map->length()) - 2) {
       parameter_map->set(index + 2, GetHeap()->the_hole_value());
     }
     parameter_map->set(1, dictionary);
@@ -3752,33 +4503,26 @@
     set_elements(dictionary);
   }
 
-  return structure;
+  return GetHeap()->undefined_value();
 }
 
 
 MaybeObject* JSObject::SetPropertyCallback(String* name,
                                            Object* structure,
                                            PropertyAttributes attributes) {
-  PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
-
-  bool convert_back_to_fast = HasFastProperties() &&
-      (map()->instance_descriptors()->number_of_descriptors()
-          < DescriptorArray::kMaxNumberOfDescriptors);
-
   // Normalize object to make this operation simple.
-  Object* ok;
   { MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
-    if (!maybe_ok->ToObject(&ok)) return maybe_ok;
+    if (maybe_ok->IsFailure()) return maybe_ok;
   }
 
   // For the global object allocate a new map to invalidate the global inline
   // caches which have a global property cell reference directly in the code.
   if (IsGlobalObject()) {
-    Object* new_map;
+    Map* new_map;
     { MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
-      if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
+      if (!maybe_new_map->To(&new_map)) return maybe_new_map;
     }
-    set_map(Map::cast(new_map));
+    set_map(new_map);
     // When running crankshaft, changing the map is not enough. We
     // need to deoptimize all functions that rely on this global
     // object.
@@ -3786,24 +4530,29 @@
   }
 
   // Update the dictionary with the new CALLBACKS property.
-  Object* result;
-  { MaybeObject* maybe_result = SetNormalizedProperty(name, structure, details);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
+  PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+  { MaybeObject* maybe_ok = SetNormalizedProperty(name, structure, details);
+    if (maybe_ok->IsFailure()) return maybe_ok;
   }
 
-  if (convert_back_to_fast) {
-    { MaybeObject* maybe_ok = TransformToFastProperties(0);
-      if (!maybe_ok->ToObject(&ok)) return maybe_ok;
-    }
-  }
-  return result;
+  return GetHeap()->undefined_value();
+}
+
+
+void JSObject::DefineAccessor(Handle<JSObject> object,
+                              Handle<String> name,
+                              Handle<Object> getter,
+                              Handle<Object> setter,
+                              PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION_VOID(
+      object->GetIsolate(),
+      object->DefineAccessor(*name, *getter, *setter, attributes));
 }
+
 
 MaybeObject* JSObject::DefineAccessor(String* name,
-                                      bool is_getter,
-                                      Object* fun,
+                                      Object* getter,
+                                      Object* setter,
                                       PropertyAttributes attributes) {
-  ASSERT(fun->IsJSFunction() || fun->IsUndefined());
   Isolate* isolate = GetIsolate();
   // Check access rights if needed.
   if (IsAccessCheckNeeded() &&
@@ -3816,17 +4565,23 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return this;
     ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->DefineAccessor(name, is_getter,
-                                                 fun, attributes);
+    return JSObject::cast(proto)->DefineAccessor(
+        name, getter, setter, attributes);
   }
 
-  Object* array;
-  { MaybeObject* maybe_array = DefineGetterSetter(name, attributes);
-    if (!maybe_array->ToObject(&array)) return maybe_array;
-  }
-  if (array->IsUndefined()) return array;
-  FixedArray::cast(array)->set(is_getter ? 0 : 1, fun);
-  return this;
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc;
+
+  // Try to flatten before operating on the string.
+  name->TryFlatten();
+
+  if (!CanSetCallback(name)) return isolate->heap()->undefined_value();
+
+  uint32_t index = 0;
+  return name->AsArrayIndex(&index) ?
+      DefineElementAccessor(index, getter, setter, attributes) :
+      DefinePropertyAccessor(name, getter, setter, attributes);
 }
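+// Rough usage sketch (names are illustrative assumptions, not part of this
+// change); either accessor component may be passed as undefined to leave it
+// unset:
+//
+//   MaybeObject* result =
+//       obj->DefineAccessor(name, getter_fun, setter_fun, DONT_ENUM);
+//   if (result->IsFailure()) return result;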
 
 
@@ -3866,6 +4621,7 @@
 
     // Accessors overwrite previous callbacks (cf. with getters/setters).
     switch (GetElementsKind()) {
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
         break;
@@ -3888,24 +4644,22 @@
         break;
     }
 
-    Object* ok;
     { MaybeObject* maybe_ok =
           SetElementCallback(index, info, info->property_attributes());
-      if (!maybe_ok->ToObject(&ok)) return maybe_ok;
+      if (maybe_ok->IsFailure()) return maybe_ok;
     }
   } else {
     // Lookup the name.
-    LookupResult result;
+    LookupResult result(isolate);
     LocalLookup(name, &result);
     // ES5 forbids turning a property into an accessor if it's not
     // configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
     if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
       return isolate->heap()->undefined_value();
     }
-    Object* ok;
     { MaybeObject* maybe_ok =
           SetPropertyCallback(name, info, info->property_attributes());
-      if (!maybe_ok->ToObject(&ok)) return maybe_ok;
+      if (maybe_ok->IsFailure()) return maybe_ok;
     }
   }
 
@@ -3913,7 +4667,7 @@
 }
 
 
-Object* JSObject::LookupAccessor(String* name, bool is_getter) {
+Object* JSObject::LookupAccessor(String* name, AccessorComponent component) {
   Heap* heap = GetHeap();
 
   // Make sure that the top context does not change when doing callbacks or
@@ -3928,7 +4682,6 @@
   }
 
   // Make the lookup and include prototypes.
-  int accessor_index = is_getter ? kGetterIndex : kSetterIndex;
   uint32_t index = 0;
   if (name->AsArrayIndex(&index)) {
     for (Object* obj = this;
@@ -3940,11 +4693,9 @@
         int entry = dictionary->FindEntry(index);
         if (entry != SeededNumberDictionary::kNotFound) {
           Object* element = dictionary->ValueAt(entry);
-          PropertyDetails details = dictionary->DetailsAt(entry);
-          if (details.type() == CALLBACKS) {
-            if (element->IsFixedArray()) {
-              return FixedArray::cast(element)->get(accessor_index);
-            }
+          if (dictionary->DetailsAt(entry).type() == CALLBACKS &&
+              element->IsAccessorPair()) {
+            return AccessorPair::cast(element)->GetComponent(component);
           }
         }
       }
@@ -3953,14 +4704,14 @@
     for (Object* obj = this;
          obj != heap->null_value();
          obj = JSObject::cast(obj)->GetPrototype()) {
-      LookupResult result;
+      LookupResult result(heap->isolate());
       JSObject::cast(obj)->LocalLookup(name, &result);
       if (result.IsProperty()) {
         if (result.IsReadOnly()) return heap->undefined_value();
         if (result.type() == CALLBACKS) {
           Object* obj = result.GetCallbackObject();
-          if (obj->IsFixedArray()) {
-            return FixedArray::cast(obj)->get(accessor_index);
+          if (obj->IsAccessorPair()) {
+            return AccessorPair::cast(obj)->GetComponent(component);
           }
         }
       }
@@ -4061,7 +4812,7 @@
   Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
 
 #ifdef DEBUG
-  if (Map::cast(result)->is_shared()) {
+  if (FLAG_verify_heap && Map::cast(result)->is_shared()) {
     Map::cast(result)->SharedMapVerify();
   }
 #endif
@@ -4084,12 +4835,19 @@
   return new_map;
 }
 
+
+void Map::UpdateCodeCache(Handle<Map> map,
+                          Handle<String> name,
+                          Handle<Code> code) {
+  Isolate* isolate = map->GetIsolate();
+  CALL_HEAP_FUNCTION_VOID(isolate,
+                          map->UpdateCodeCache(*name, *code));
+}
+
 
 MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
   // Allocate the code cache if not present.
   if (code_cache()->IsFixedArray()) {
     Object* result;
-    { MaybeObject* maybe_result = code->heap()->AllocateCodeCache();
+    { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache();
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     set_code_cache(result);
@@ -4127,75 +4885,219 @@
 }
 
 
-void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
-  // Traverse the transition tree without using a stack.  We do this by
-  // reversing the pointers in the maps and descriptor arrays.
-  Map* current = this;
-  Map* meta_map = heap()->meta_map();
-  Object** map_or_index_field = NULL;
-  while (current != meta_map) {
-    DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
-        *RawField(current, Map::kInstanceDescriptorsOrBitField3Offset));
-    if (!d->IsEmpty()) {
-      FixedArray* contents = reinterpret_cast<FixedArray*>(
-          d->get(DescriptorArray::kContentArrayIndex));
-      map_or_index_field = RawField(contents, HeapObject::kMapOffset);
-      Object* map_or_index = *map_or_index_field;
-      bool map_done = true;  // Controls a nested continue statement.
-      for (int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : 0;
-           i < contents->length();
-           i += 2) {
-        PropertyDetails details(Smi::cast(contents->get(i + 1)));
-        if (details.IsTransition()) {
-          // Found a map in the transition array.  We record our progress in
-          // the transition array by recording the current map in the map field
-          // of the next map and recording the index in the transition array in
-          // the map field of the array.
-          Map* next = Map::cast(contents->get(i));
-          next->set_map(current);
-          *map_or_index_field = Smi::FromInt(i + 2);
-          current = next;
-          map_done = false;
+// An iterator over all map transitions in a descriptor array, reusing the map
+// field of the contents array while it is running.
+class IntrusiveMapTransitionIterator {
+ public:
+  explicit IntrusiveMapTransitionIterator(DescriptorArray* descriptor_array)
+      : descriptor_array_(descriptor_array) { }
+
+  void Start() {
+    ASSERT(!IsIterating());
+    if (HasContentArray()) *ContentHeader() = Smi::FromInt(0);
+  }
+
+  bool IsIterating() {
+    return HasContentArray() && (*ContentHeader())->IsSmi();
+  }
+
+  Map* Next() {
+    ASSERT(IsIterating());
+    FixedArray* contents = ContentArray();
+    // Attention, tricky index manipulation ahead: Every entry in the contents
+    // array consists of a value/details pair, so the index is typically even.
+    // An exception is made for CALLBACKS entries: An even index means we look
+    // at its getter, and an odd index means we look at its setter.
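+    // Example: a CALLBACKS entry occupying contents slots {4, 5} is visited
+    // twice, once with index 4 (its getter) and once with index 5 (its
+    // setter); both visits read the details word from slot index|1 (slot 5)
+    // and the AccessorPair from slot index&~1 (slot 4).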
+    int index = Smi::cast(*ContentHeader())->value();
+    while (index < contents->length()) {
+      PropertyDetails details(Smi::cast(contents->get(index | 1)));
+      switch (details.type()) {
+        case MAP_TRANSITION:
+        case CONSTANT_TRANSITION:
+        case ELEMENTS_TRANSITION:
+          // We definitely have a map transition.
+          *ContentHeader() = Smi::FromInt(index + 2);
+          return static_cast<Map*>(contents->get(index));
+        case CALLBACKS: {
+          // We might have a map transition in a getter or in a setter.
+          AccessorPair* accessors =
+              static_cast<AccessorPair*>(contents->get(index & ~1));
+          Object* accessor =
+              ((index & 1) == 0) ? accessors->getter() : accessors->setter();
+          index++;
+          if (accessor->IsMap()) {
+            *ContentHeader() = Smi::FromInt(index);
+            return static_cast<Map*>(accessor);
+          }
           break;
         }
-      }
-      if (!map_done) continue;
-    } else {
-      map_or_index_field = NULL;
-    }
-    // That was the regular transitions, now for the prototype transitions.
-    FixedArray* prototype_transitions =
-        current->unchecked_prototype_transitions();
-    Object** proto_map_or_index_field =
-        RawField(prototype_transitions, HeapObject::kMapOffset);
-    Object* map_or_index = *proto_map_or_index_field;
-    const int start = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
-    int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : start;
-    if (i < prototype_transitions->length()) {
-      // Found a map in the prototype transition array.  Record progress in
-      // an analogous way to the regular transitions array above.
-      Object* perhaps_map = prototype_transitions->get(i);
-      if (perhaps_map->IsMap()) {
-        Map* next = Map::cast(perhaps_map);
-        next->set_map(current);
-        *proto_map_or_index_field =
-            Smi::FromInt(i + kProtoTransitionElementsPerEntry);
-        current = next;
-        continue;
+        case NORMAL:
+        case FIELD:
+        case CONSTANT_FUNCTION:
+        case HANDLER:
+        case INTERCEPTOR:
+        case NULL_DESCRIPTOR:
+          // We definitely have no map transition.
+          index += 2;
+          break;
       }
     }
-    *proto_map_or_index_field = heap()->fixed_array_map();
-    if (map_or_index_field != NULL) {
-      *map_or_index_field = heap()->fixed_array_map();
-    }
+    *ContentHeader() = descriptor_array_->GetHeap()->fixed_array_map();
+    return NULL;
+  }
 
-    // The callback expects a map to have a real map as its map, so we save
-    // the map field, which is being used to track the traversal and put the
-    // correct map (the meta_map) in place while we do the callback.
-    Map* prev = current->map();
-    current->set_map(meta_map);
-    callback(current, data);
-    current = prev;
+ private:
+  bool HasContentArray() {
+    return descriptor_array_->length() > DescriptorArray::kContentArrayIndex;
+  }
+
+  FixedArray* ContentArray() {
+    Object* array = descriptor_array_->get(DescriptorArray::kContentArrayIndex);
+    return static_cast<FixedArray*>(array);
+  }
+
+  Object** ContentHeader() {
+    return HeapObject::RawField(ContentArray(), DescriptorArray::kMapOffset);
+  }
+
+  DescriptorArray* descriptor_array_;
+};
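+
+// Sketch of the intrusive iteration protocol shared with the prototype
+// transition iterator below (|descs| is an assumed DescriptorArray*; this is
+// illustration, not code introduced by this change):
+//
+//   IntrusiveMapTransitionIterator it(descs);
+//   it.Start();               // Hijacks the contents array's map field.
+//   while (it.IsIterating()) {
+//     Map* next = it.Next();  // NULL restores the map field and stops.
+//     if (next == NULL) break;
+//     // ... visit |next| ...
+//   }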
+
+
+// An iterator over all prototype transitions, reusing the map field of the
+// underlying array while it is running.
+class IntrusivePrototypeTransitionIterator {
+ public:
+  explicit IntrusivePrototypeTransitionIterator(FixedArray* proto_trans)
+      : proto_trans_(proto_trans) { }
+
+  void Start() {
+    ASSERT(!IsIterating());
+    if (HasTransitions()) *Header() = Smi::FromInt(0);
+  }
+
+  bool IsIterating() {
+    return HasTransitions() && (*Header())->IsSmi();
+  }
+
+  Map* Next() {
+    ASSERT(IsIterating());
+    int transition_number = Smi::cast(*Header())->value();
+    if (transition_number < NumberOfTransitions()) {
+      *Header() = Smi::FromInt(transition_number + 1);
+      return GetTransition(transition_number);
+    }
+    *Header() = proto_trans_->GetHeap()->fixed_array_map();
+    return NULL;
+  }
+
+ private:
+  bool HasTransitions() {
+    return proto_trans_->length() >= Map::kProtoTransitionHeaderSize;
+  }
+
+  Object** Header() {
+    return HeapObject::RawField(proto_trans_, FixedArray::kMapOffset);
+  }
+
+  int NumberOfTransitions() {
+    Object* num = proto_trans_->get(Map::kProtoTransitionNumberOfEntriesOffset);
+    return Smi::cast(num)->value();
+  }
+
+  Map* GetTransition(int transition_number) {
+    return Map::cast(proto_trans_->get(IndexFor(transition_number)));
+  }
+
+  int IndexFor(int transition_number) {
+    return Map::kProtoTransitionHeaderSize +
+        Map::kProtoTransitionMapOffset +
+        transition_number * Map::kProtoTransitionElementsPerEntry;
+  }
+
+  FixedArray* proto_trans_;
+};
+
+
+// To traverse the transition tree iteratively, we have to store two kinds of
+// information in a map: The parent map in the traversal and which children of a
+// node have already been visited. To do this without additional memory, we
+// temporarily reuse two maps with known values:
+//
+//  (1) The map of the map temporarily holds the parent, and is restored to the
+//      meta map afterwards.
+//
+//  (2) The info which children have already been visited depends on which part
+//      of the map we currently iterate:
+//
+//    (a) If we currently follow normal map transitions, we temporarily store
+//        the current index in the map of the FixedArray of the descriptor
+//        array's contents, and restore it to the fixed array map afterwards.
+//        Note that a single descriptor can have 0, 1, or 2 transitions.
+//
+//    (b) If we currently follow prototype transitions, we temporarily store
+//        the current index in the map of the FixedArray holding the prototype
+//        transitions, and restore it to the fixed array map afterwards.
+//
+// Note that the child iterator is just a concatenation of two iterators: one
+// iterating over map transitions and one iterating over prototype transitions.
+class TraversableMap : public Map {
+ public:
+  // Record the parent in the traversal within this map. Note that this
+  // destroys this map's map!
+  void SetParent(TraversableMap* parent) { set_map_no_write_barrier(parent); }
+
+  // Reset the current map's map, returning the parent previously stored in it.
+  TraversableMap* GetAndResetParent() {
+    TraversableMap* old_parent = static_cast<TraversableMap*>(map());
+    set_map_no_write_barrier(GetHeap()->meta_map());
+    return old_parent;
+  }
+
+  // Start iterating over this map's children, possibly destroying a FixedArray
+  // map (see explanation above).
+  void ChildIteratorStart() {
+    IntrusiveMapTransitionIterator(instance_descriptors()).Start();
+    IntrusivePrototypeTransitionIterator(
+        unchecked_prototype_transitions()).Start();
+  }
+
+  // If we have an unvisited child map, return that one and advance. If we have
+  // none, return NULL and reset any destroyed FixedArray maps.
+  TraversableMap* ChildIteratorNext() {
+    IntrusiveMapTransitionIterator descriptor_iterator(instance_descriptors());
+    if (descriptor_iterator.IsIterating()) {
+      Map* next = descriptor_iterator.Next();
+      if (next != NULL) return static_cast<TraversableMap*>(next);
+    }
+    IntrusivePrototypeTransitionIterator
+        proto_iterator(unchecked_prototype_transitions());
+    if (proto_iterator.IsIterating()) {
+      Map* next = proto_iterator.Next();
+      if (next != NULL) return static_cast<TraversableMap*>(next);
+    }
+    return NULL;
+  }
+};
+
+
+// Traverse the transition tree in postorder without using the C++ stack by
+// doing pointer reversal.
+void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
+  TraversableMap* current = static_cast<TraversableMap*>(this);
+  current->ChildIteratorStart();
+  while (true) {
+    TraversableMap* child = current->ChildIteratorNext();
+    if (child != NULL) {
+      child->ChildIteratorStart();
+      child->SetParent(current);
+      current = child;
+    } else {
+      TraversableMap* parent = current->GetAndResetParent();
+      callback(current, data);
+      if (current == this) break;
+      current = parent;
+    }
   }
 }
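+// Hedged usage sketch: a postorder visit of every map reachable from |root|
+// via transitions, assuming a callback matching the TraverseCallback
+// signature (the names below are illustrative only):
+//
+//   static void CountMap(Map* map, void* data) {
+//     ++*static_cast<int*>(data);
+//   }
+//
+//   int count = 0;
+//   root->TraverseTransitionTree(&CountMap, &count);
+//   // |count| now holds the number of maps in the transition tree.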
 
@@ -4409,7 +5311,7 @@
   MUST_USE_RESULT MaybeObject* AsObject() {
     ASSERT(code_ != NULL);
     Object* obj;
-    { MaybeObject* maybe_obj = code_->heap()->AllocateFixedArray(2);
+    { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     FixedArray* pair = FixedArray::cast(obj);
@@ -4467,13 +5369,22 @@
 void CodeCacheHashTable::RemoveByIndex(int index) {
   ASSERT(index >= 0);
   Heap* heap = GetHeap();
-  set(EntryToIndex(index), heap->null_value());
-  set(EntryToIndex(index) + 1, heap->null_value());
+  set(EntryToIndex(index), heap->the_hole_value());
+  set(EntryToIndex(index) + 1, heap->the_hole_value());
   ElementRemoved();
 }
 
 
-MaybeObject* PolymorphicCodeCache::Update(MapList* maps,
+void PolymorphicCodeCache::Update(Handle<PolymorphicCodeCache> cache,
+                                  MapHandleList* maps,
+                                  Code::Flags flags,
+                                  Handle<Code> code) {
+  Isolate* isolate = cache->GetIsolate();
+  CALL_HEAP_FUNCTION_VOID(isolate, cache->Update(maps, flags, *code));
+}
+
+
+MaybeObject* PolymorphicCodeCache::Update(MapHandleList* maps,
                                           Code::Flags flags,
                                           Code* code) {
   // Initialize cache if necessary.
@@ -4501,13 +5412,14 @@
 }
 
 
-Object* PolymorphicCodeCache::Lookup(MapList* maps, Code::Flags flags) {
+Handle<Object> PolymorphicCodeCache::Lookup(MapHandleList* maps,
+                                            Code::Flags flags) {
   if (!cache()->IsUndefined()) {
     PolymorphicCodeCacheHashTable* hash_table =
         PolymorphicCodeCacheHashTable::cast(cache());
-    return hash_table->Lookup(maps, flags);
+    return Handle<Object>(hash_table->Lookup(maps, flags));
   } else {
-    return GetHeap()->undefined_value();
+    return GetIsolate()->factory()->undefined_value();
   }
 }
 
@@ -4518,12 +5430,12 @@
 class PolymorphicCodeCacheHashTableKey : public HashTableKey {
  public:
   // Callers must ensure that |maps| outlives the newly constructed object.
-  PolymorphicCodeCacheHashTableKey(MapList* maps, int code_flags)
+  PolymorphicCodeCacheHashTableKey(MapHandleList* maps, int code_flags)
       : maps_(maps),
         code_flags_(code_flags) {}
 
   bool IsMatch(Object* other) {
-    MapList other_maps(kDefaultListAllocationSize);
+    MapHandleList other_maps(kDefaultListAllocationSize);
     int other_flags;
     FromObject(other, &other_flags, &other_maps);
     if (code_flags_ != other_flags) return false;
@@ -4539,7 +5451,7 @@
     for (int i = 0; i < maps_->length(); ++i) {
       bool match_found = false;
       for (int j = 0; j < other_maps.length(); ++j) {
-        if (maps_->at(i)->EquivalentTo(other_maps.at(j))) {
+        if (*(maps_->at(i)) == *(other_maps.at(j))) {
           match_found = true;
           break;
         }
@@ -4549,7 +5461,7 @@
     return true;
   }
 
-  static uint32_t MapsHashHelper(MapList* maps, int code_flags) {
+  static uint32_t MapsHashHelper(MapHandleList* maps, int code_flags) {
     uint32_t hash = code_flags;
     for (int i = 0; i < maps->length(); ++i) {
       hash ^= maps->at(i)->Hash();
@@ -4562,7 +5474,7 @@
   }
 
   uint32_t HashForObject(Object* obj) {
-    MapList other_maps(kDefaultListAllocationSize);
+    MapHandleList other_maps(kDefaultListAllocationSize);
     int other_flags;
     FromObject(obj, &other_flags, &other_maps);
     return MapsHashHelper(&other_maps, other_flags);
@@ -4580,29 +5492,32 @@
     FixedArray* list = FixedArray::cast(obj);
     list->set(0, Smi::FromInt(code_flags_));
     for (int i = 0; i < maps_->length(); ++i) {
-      list->set(i + 1, maps_->at(i));
+      list->set(i + 1, *maps_->at(i));
     }
     return list;
   }
 
  private:
-  static MapList* FromObject(Object* obj, int* code_flags, MapList* maps) {
+  static MapHandleList* FromObject(Object* obj,
+                                   int* code_flags,
+                                   MapHandleList* maps) {
     FixedArray* list = FixedArray::cast(obj);
     maps->Rewind(0);
     *code_flags = Smi::cast(list->get(0))->value();
     for (int i = 1; i < list->length(); ++i) {
-      maps->Add(Map::cast(list->get(i)));
+      maps->Add(Handle<Map>(Map::cast(list->get(i))));
     }
     return maps;
   }
 
-  MapList* maps_;  // weak.
+  MapHandleList* maps_;  // weak.
   int code_flags_;
   static const int kDefaultListAllocationSize = kMaxKeyedPolymorphism + 1;
 };
 
 
-Object* PolymorphicCodeCacheHashTable::Lookup(MapList* maps, int code_flags) {
+Object* PolymorphicCodeCacheHashTable::Lookup(MapHandleList* maps,
+                                              int code_flags) {
   PolymorphicCodeCacheHashTableKey key(maps, code_flags);
   int entry = FindEntry(&key);
   if (entry == kNotFound) return GetHeap()->undefined_value();
@@ -4610,7 +5525,7 @@
 }
 
 
-MaybeObject* PolymorphicCodeCacheHashTable::Put(MapList* maps,
+MaybeObject* PolymorphicCodeCacheHashTable::Put(MapHandleList* maps,
                                                 int code_flags,
                                                 Code* code) {
   PolymorphicCodeCacheHashTableKey key(maps, code_flags);
@@ -4634,7 +5549,7 @@
 MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
   ElementsAccessor* accessor = array->GetElementsAccessor();
   MaybeObject* maybe_result =
-      accessor->AddElementsToFixedArray(array->elements(), this, array, array);
+      accessor->AddElementsToFixedArray(array, array, this);
   FixedArray* result;
   if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
 #ifdef DEBUG
@@ -4652,7 +5567,7 @@
 MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
   ElementsAccessor* accessor = ElementsAccessor::ForArray(other);
   MaybeObject* maybe_result =
-      accessor->AddElementsToFixedArray(other, this, NULL, NULL);
+      accessor->AddElementsToFixedArray(NULL, NULL, this, other);
   FixedArray* result;
   if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
 #ifdef DEBUG
@@ -4679,7 +5594,9 @@
   AssertNoAllocation no_gc;
   int len = length();
   if (new_length < len) len = new_length;
-  result->set_map(map());
+  // We are taking the map from the old fixed array, so the map is sure to
+  // be an immortal immutable object.
+  result->set_map_no_write_barrier(map());
   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
   for (int i = 0; i < len; i++) {
     result->set(i, get(i), mode);
@@ -4736,23 +5653,61 @@
 
 
 void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
-                                   FixedArray* new_cache) {
+                                   FixedArray* new_cache,
+                                   Object* new_index_cache) {
   ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength);
+  ASSERT(new_index_cache->IsSmi() || new_index_cache->IsFixedArray());
   if (HasEnumCache()) {
     FixedArray::cast(get(kEnumerationIndexIndex))->
       set(kEnumCacheBridgeCacheIndex, new_cache);
+    FixedArray::cast(get(kEnumerationIndexIndex))->
+      set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
   } else {
     if (IsEmpty()) return;  // Do nothing for empty descriptor array.
     FixedArray::cast(bridge_storage)->
       set(kEnumCacheBridgeCacheIndex, new_cache);
-    fast_set(FixedArray::cast(bridge_storage),
-             kEnumCacheBridgeEnumIndex,
-             get(kEnumerationIndexIndex));
+    FixedArray::cast(bridge_storage)->
+      set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
+    NoWriteBarrierSet(FixedArray::cast(bridge_storage),
+                      kEnumCacheBridgeEnumIndex,
+                      get(kEnumerationIndexIndex));
     set(kEnumerationIndexIndex, bridge_storage);
   }
 }
 
 
+static bool InsertionPointFound(String* key1, String* key2) {
+  return key1->Hash() > key2->Hash() || key1 == key2;
+}
+
+
+void DescriptorArray::CopyFrom(Handle<DescriptorArray> dst,
+                               int dst_index,
+                               Handle<DescriptorArray> src,
+                               int src_index,
+                               const WhitenessWitness& witness) {
+  CALL_HEAP_FUNCTION_VOID(dst->GetIsolate(),
+                          dst->CopyFrom(dst_index, *src, src_index, witness));
+}
+
+
+MaybeObject* DescriptorArray::CopyFrom(int dst_index,
+                                       DescriptorArray* src,
+                                       int src_index,
+                                       const WhitenessWitness& witness) {
+  Object* value = src->GetValue(src_index);
+  PropertyDetails details(src->GetDetails(src_index));
+  if (details.type() == CALLBACKS && value->IsAccessorPair()) {
+    MaybeObject* maybe_copy =
+        AccessorPair::cast(value)->CopyWithoutTransitions();
+    if (!maybe_copy->To(&value)) return maybe_copy;
+  }
+  Descriptor desc(src->GetKey(src_index), value, details);
+  Set(dst_index, &desc, witness);
+  return this;
+}
+
+
 MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
                                          TransitionFlag transition_flag) {
   // Transitions are only kept when inserting another transition.
@@ -4761,38 +5716,26 @@
   // Conversely, we filter after replacing, so replacing a transition and
   // removing all other transitions is not supported.
   bool remove_transitions = transition_flag == REMOVE_TRANSITIONS;
-  ASSERT(remove_transitions == !descriptor->GetDetails().IsTransition());
+  ASSERT(remove_transitions == !descriptor->ContainsTransition());
   ASSERT(descriptor->GetDetails().type() != NULL_DESCRIPTOR);
 
   // Ensure the key is a symbol.
-  Object* result;
   { MaybeObject* maybe_result = descriptor->KeyToSymbol();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
+    if (maybe_result->IsFailure()) return maybe_result;
   }
 
-  int transitions = 0;
-  int null_descriptors = 0;
-  if (remove_transitions) {
-    for (int i = 0; i < number_of_descriptors(); i++) {
-      if (IsTransition(i)) transitions++;
-      if (IsNullDescriptor(i)) null_descriptors++;
-    }
-  } else {
-    for (int i = 0; i < number_of_descriptors(); i++) {
-      if (IsNullDescriptor(i)) null_descriptors++;
-    }
+  int new_size = 0;
+  for (int i = 0; i < number_of_descriptors(); i++) {
+    if (IsNullDescriptor(i)) continue;
+    if (remove_transitions && IsTransitionOnly(i)) continue;
+    new_size++;
   }
-  int new_size = number_of_descriptors() - transitions - null_descriptors;
 
   // If key is in descriptor, we replace it in-place when filtering.
   // Count a null descriptor for key as inserted, not replaced.
   int index = Search(descriptor->GetKey());
-  const bool inserting = (index == kNotFound);
-  const bool replacing = !inserting;
+  const bool replacing = (index != kNotFound);
   bool keep_enumeration_index = false;
-  if (inserting) {
-    ++new_size;
-  }
   if (replacing) {
     // We are replacing an existing descriptor.  We keep the enumeration
     // index of a visible property.
@@ -4807,15 +5750,21 @@
      // a transition that will be replaced.  Adjust count in this case.
       ++new_size;
     }
+  } else {
+    ++new_size;
   }
+
+  DescriptorArray* new_descriptors;
   { MaybeObject* maybe_result = Allocate(new_size);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
+    if (!maybe_result->To(&new_descriptors)) return maybe_result;
   }
-  DescriptorArray* new_descriptors = DescriptorArray::cast(result);
+
+  DescriptorArray::WhitenessWitness witness(new_descriptors);
+
   // Set the enumeration index in the descriptors and set the enumeration index
   // in the result.
   int enumeration_index = NextEnumerationIndex();
-  if (!descriptor->GetDetails().IsTransition()) {
+  if (!descriptor->ContainsTransition()) {
     if (keep_enumeration_index) {
       descriptor->SetEnumerationIndex(
           PropertyDetails(GetDetails(index)).index());
@@ -4828,28 +5777,26 @@
 
   // Copy the descriptors, filtering out transitions and null descriptors,
   // and inserting or replacing a descriptor.
-  uint32_t descriptor_hash = descriptor->GetKey()->Hash();
-  int from_index = 0;
   int to_index = 0;
-
-  for (; from_index < number_of_descriptors(); from_index++) {
-    String* key = GetKey(from_index);
-    if (key->Hash() > descriptor_hash || key == descriptor->GetKey()) {
-      break;
+  int insertion_index = -1;
+  int from_index = 0;
+  while (from_index < number_of_descriptors()) {
+    if (insertion_index < 0 &&
+        InsertionPointFound(GetKey(from_index), descriptor->GetKey())) {
+      insertion_index = to_index++;
+      if (replacing) from_index++;
+    } else {
+      if (!(IsNullDescriptor(from_index) ||
+            (remove_transitions && IsTransitionOnly(from_index)))) {
+        MaybeObject* copy_result =
+            new_descriptors->CopyFrom(to_index++, this, from_index, witness);
+        if (copy_result->IsFailure()) return copy_result;
+      }
+      from_index++;
     }
-    if (IsNullDescriptor(from_index)) continue;
-    if (remove_transitions && IsTransition(from_index)) continue;
-    new_descriptors->CopyFrom(to_index++, this, from_index);
   }
-
-  new_descriptors->Set(to_index++, descriptor);
-  if (replacing) from_index++;
-
-  for (; from_index < number_of_descriptors(); from_index++) {
-    if (IsNullDescriptor(from_index)) continue;
-    if (remove_transitions && IsTransition(from_index)) continue;
-    new_descriptors->CopyFrom(to_index++, this, from_index);
-  }
+  if (insertion_index < 0) insertion_index = to_index++;
+  new_descriptors->Set(insertion_index, descriptor, witness);
 
   ASSERT(to_index == new_descriptors->number_of_descriptors());
   SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
@@ -4859,27 +5806,25 @@
 
 
 MaybeObject* DescriptorArray::RemoveTransitions() {
-  // Remove all transitions and null descriptors. Return a copy of the array
-  // with all transitions removed, or a Failure object if the new array could
-  // not be allocated.
-
-  // Compute the size of the map transition entries to be removed.
-  int num_removed = 0;
-  for (int i = 0; i < number_of_descriptors(); i++) {
-    if (!IsProperty(i)) num_removed++;
-  }
-
   // Allocate the new descriptor array.
-  Object* result;
-  { MaybeObject* maybe_result = Allocate(number_of_descriptors() - num_removed);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
+  int new_number_of_descriptors = 0;
+  for (int i = 0; i < number_of_descriptors(); i++) {
+    if (IsProperty(i)) new_number_of_descriptors++;
   }
-  DescriptorArray* new_descriptors = DescriptorArray::cast(result);
+  DescriptorArray* new_descriptors;
+  { MaybeObject* maybe_result = Allocate(new_number_of_descriptors);
+    if (!maybe_result->To(&new_descriptors)) return maybe_result;
+  }
 
   // Copy the content.
+  DescriptorArray::WhitenessWitness witness(new_descriptors);
   int next_descriptor = 0;
   for (int i = 0; i < number_of_descriptors(); i++) {
-    if (IsProperty(i)) new_descriptors->CopyFrom(next_descriptor++, this, i);
+    if (IsProperty(i)) {
+      MaybeObject* copy_result =
+          new_descriptors->CopyFrom(next_descriptor++, this, i, witness);
+      if (copy_result->IsFailure()) return copy_result;
+    }
   }
   ASSERT(next_descriptor == new_descriptors->number_of_descriptors());
 
@@ -4887,7 +5832,7 @@
 }
 
 
-void DescriptorArray::SortUnchecked() {
+void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
   // In-place heap sort.
   int len = number_of_descriptors();
 
@@ -4908,7 +5853,7 @@
         }
       }
       if (child_hash <= parent_hash) break;
-      Swap(parent_index, child_index);
+      NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
       // Now element at child_index could be < its children.
       parent_index = child_index;  // parent_hash remains correct.
     }
@@ -4917,8 +5862,8 @@
   // Extract elements and create sorted array.
   for (int i = len - 1; i > 0; --i) {
     // Put max element at the back of the array.
-    Swap(0, i);
-    // Sift down the new top element.
+    NoIncrementalWriteBarrierSwapDescriptors(0, i);
+    // Shift down the new top element.
     int parent_index = 0;
     const uint32_t parent_hash = GetKey(parent_index)->Hash();
     const int max_parent_index = (i / 2) - 1;
@@ -4933,15 +5878,15 @@
         }
       }
       if (child_hash <= parent_hash) break;
-      Swap(parent_index, child_index);
+      NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
       parent_index = child_index;
     }
   }
 }
 
 
-void DescriptorArray::Sort() {
-  SortUnchecked();
+void DescriptorArray::Sort(const WhitenessWitness& witness) {
+  SortUnchecked(witness);
   SLOW_ASSERT(IsSortedNoDuplicates());
 }
 
@@ -4991,6 +5936,24 @@
 }
 
 
+MaybeObject* AccessorPair::CopyWithoutTransitions() {
+  Heap* heap = GetHeap();
+  AccessorPair* copy;
+  { MaybeObject* maybe_copy = heap->AllocateAccessorPair();
+    if (!maybe_copy->To(&copy)) return maybe_copy;
+  }
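+  // A Map in a getter or setter slot marks a transition rather than a real
+  // accessor, so the copy replaces it with the hole value (i.e. "absent").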
+  copy->set_getter(getter()->IsMap() ? heap->the_hole_value() : getter());
+  copy->set_setter(setter()->IsMap() ? heap->the_hole_value() : setter());
+  return copy;
+}
+
+
+Object* AccessorPair::GetComponent(AccessorComponent component) {
+  Object* accessor = (component == ACCESSOR_GETTER) ? getter() : setter();
+  return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
+}
+
+
 MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
                                                PretenureFlag pretenure) {
   ASSERT(deopt_entry_count > 0);
@@ -5026,24 +5989,6 @@
 }
 
 
-int String::Utf8Length() {
-  if (IsAsciiRepresentation()) return length();
-  // Attempt to flatten before accessing the string.  It probably
-  // doesn't make Utf8Length faster, but it is very likely that
-  // the string will be accessed later (for example by WriteUtf8)
-  // so it's still a good idea.
-  Heap* heap = GetHeap();
-  TryFlatten();
-  Access<StringInputBuffer> buffer(
-      heap->isolate()->objects_string_input_buffer());
-  buffer->Reset(0, this);
-  int result = 0;
-  while (buffer->has_more())
-    result += unibrow::Utf8::Length(buffer->GetNext());
-  return result;
-}
-
-
 String::FlatContent String::GetFlatContent() {
   int length = this->length();
   StringShape shape(this);
@@ -5070,7 +6015,7 @@
     if (shape.representation_tag() == kSeqStringTag) {
       start = SeqAsciiString::cast(string)->GetChars();
     } else {
-      start = ExternalAsciiString::cast(string)->resource()->data();
+      start = ExternalAsciiString::cast(string)->GetChars();
     }
     return FlatContent(Vector<const char>(start + offset, length));
   } else {
@@ -5079,7 +6024,7 @@
     if (shape.representation_tag() == kSeqStringTag) {
       start = SeqTwoByteString::cast(string)->GetChars();
     } else {
-      start = ExternalTwoByteString::cast(string)->resource()->data();
+      start = ExternalTwoByteString::cast(string)->GetChars();
     }
     return FlatContent(Vector<const uc16>(start + offset, length));
   }
@@ -5105,12 +6050,11 @@
   buffer->Reset(offset, this);
   int character_position = offset;
   int utf8_bytes = 0;
-  while (buffer->has_more()) {
+  int last = unibrow::Utf16::kNoPreviousCharacter;
+  while (buffer->has_more() && character_position++ < offset + length) {
     uint16_t character = buffer->GetNext();
-    if (character_position < offset + length) {
-      utf8_bytes += unibrow::Utf8::Length(character);
-    }
-    character_position++;
+    utf8_bytes += unibrow::Utf8::Length(character, last);
+    last = character;
   }
 
   if (length_return) {
@@ -5124,16 +6068,15 @@
   buffer->Seek(offset);
   character_position = offset;
   int utf8_byte_position = 0;
-  while (buffer->has_more()) {
+  last = unibrow::Utf16::kNoPreviousCharacter;
+  while (buffer->has_more() && character_position++ < offset + length) {
     uint16_t character = buffer->GetNext();
-    if (character_position < offset + length) {
-      if (allow_nulls == DISALLOW_NULLS && character == 0) {
-        character = ' ';
-      }
-      utf8_byte_position +=
-          unibrow::Utf8::Encode(result + utf8_byte_position, character);
+    if (allow_nulls == DISALLOW_NULLS && character == 0) {
+      character = ' ';
     }
-    character_position++;
+    utf8_byte_position +=
+        unibrow::Utf8::Encode(result + utf8_byte_position, character, last);
+    last = character;
   }
   result[utf8_byte_position] = 0;
   return SmartArrayPointer<char>(result);
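
Note: the rewritten loops thread the previous code unit ("last") into
Utf8::Length and Utf8::Encode so that a lead/trail surrogate pair is costed
and encoded as a single 4-byte UTF-8 sequence instead of two 3-byte units.
A self-contained sketch of the length computation under the standard Unicode
ranges (Utf8LengthOfUtf16 is a hypothetical name, not the V8 API; unpaired
surrogates are costed at 3 bytes, mirroring the lenient treatment here):

#include <cstddef>
#include <cstdint>

size_t Utf8LengthOfUtf16(const uint16_t* chars, size_t n) {
  size_t bytes = 0;
  for (size_t i = 0; i < n; ++i) {
    uint16_t c = chars[i];
    if (c < 0x80) {
      bytes += 1;                             // ASCII
    } else if (c < 0x800) {
      bytes += 2;                             // two-byte sequence
    } else if (c >= 0xD800 && c <= 0xDBFF &&  // lead surrogate...
               i + 1 < n &&
               chars[i + 1] >= 0xDC00 && chars[i + 1] <= 0xDFFF) {
      bytes += 4;                             // ...paired: one 4-byte sequence
      ++i;                                    // consume the trail surrogate too
    } else {
      bytes += 3;                             // BMP char or unpaired surrogate
    }
  }
  return bytes;
}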
@@ -5318,44 +6261,26 @@
 }
 
 
-uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
-  ASSERT(index >= 0 && index < length());
-  return resource()->data()[index];
-}
-
-
 const unibrow::byte* ExternalAsciiString::ExternalAsciiStringReadBlock(
       unsigned* remaining,
       unsigned* offset_ptr,
       unsigned max_chars) {
   // Cast const char* to unibrow::byte* (signedness difference).
   const unibrow::byte* b =
-      reinterpret_cast<const unibrow::byte*>(resource()->data()) + *offset_ptr;
+      reinterpret_cast<const unibrow::byte*>(GetChars()) + *offset_ptr;
   *remaining = max_chars;
   *offset_ptr += max_chars;
   return b;
 }
 
 
-const uc16* ExternalTwoByteString::ExternalTwoByteStringGetData(
-      unsigned start) {
-  return resource()->data() + start;
-}
-
-
-uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
-  ASSERT(index >= 0 && index < length());
-  return resource()->data()[index];
-}
-
-
 void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer(
       ReadBlockBuffer* rbb,
       unsigned* offset_ptr,
       unsigned max_chars) {
   unsigned chars_read = 0;
   unsigned offset = *offset_ptr;
-  const uint16_t* data = resource()->data();
+  const uint16_t* data = GetChars();
   while (chars_read < max_chars) {
     uint16_t c = data[offset];
     if (c <= kMaxAsciiCharCode) {
@@ -5401,9 +6326,7 @@
       unsigned max_chars) {
   unsigned capacity = rbb->capacity - rbb->cursor;
   if (max_chars > capacity) max_chars = capacity;
-  memcpy(rbb->util_buffer + rbb->cursor,
-         resource()->data() + *offset_ptr,
-         max_chars);
+  memcpy(rbb->util_buffer + rbb->cursor, GetChars() + *offset_ptr, max_chars);
   rbb->remaining += max_chars;
   *offset_ptr += max_chars;
   rbb->cursor += max_chars;
@@ -5778,13 +6701,13 @@
     switch (StringShape(source).full_representation_tag()) {
       case kAsciiStringTag | kExternalStringTag: {
         CopyChars(sink,
-                  ExternalAsciiString::cast(source)->resource()->data() + from,
+                  ExternalAsciiString::cast(source)->GetChars() + from,
                   to - from);
         return;
       }
       case kTwoByteStringTag | kExternalStringTag: {
         const uc16* data =
-            ExternalTwoByteString::cast(source)->resource()->data();
+            ExternalTwoByteString::cast(source)->GetChars();
         CopyChars(sink,
                   data + from,
                   to - from);
@@ -5822,10 +6745,21 @@
           // Left hand side is longer.  Recurse over right.
           if (to > boundary) {
             String* second = cons_string->second();
-            WriteToFlat(second,
-                        sink + boundary - from,
-                        0,
+            // When repeatedly appending to a string, we get a cons string
+            // that is unbalanced to the left, essentially a list.  We inline
+            // the common case of a sequential ascii right child.
+            if (to - boundary == 1) {
+              sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
+            } else if (second->IsSeqAsciiString()) {
+              CopyChars(sink + boundary - from,
+                        SeqAsciiString::cast(second)->GetChars(),
                         to - boundary);
+            } else {
+              WriteToFlat(second,
+                          sink + boundary - from,
+                          0,
+                          to - boundary);
+            }
             to = boundary;
           }
           source = first;
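
Note: the inlined fast path above exists because repeated appends build a
cons chain that leans left, with a short flat right child at each level. A
toy sketch of why walking that spine iteratively beats recursing into both
children (Cons and Flatten are illustrative stand-ins, not V8's ConsString):

#include <string>
#include <vector>

struct Cons {
  const Cons* first = nullptr;  // long left spine when built by appends
  std::string second;           // short flat right child
  std::string leaf;             // payload when first == nullptr
};

std::string Flatten(const Cons* node) {
  std::vector<const std::string*> rights;
  while (node->first != nullptr) {   // O(1) stack instead of O(depth)
    rights.push_back(&node->second);
    node = node->first;
  }
  std::string out = node->leaf;      // leftmost piece comes first
  for (auto it = rights.rbegin(); it != rights.rend(); ++it) out += **it;
  return out;
}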
@@ -5849,8 +6783,10 @@
   // General slow case check.  We know that the ia and ib iterators
   // have the same length.
   while (ia->has_more()) {
-    uc32 ca = ia->GetNext();
-    uc32 cb = ib->GetNext();
+    uint32_t ca = ia->GetNext();
+    uint32_t cb = ib->GetNext();
+    ASSERT(ca <= unibrow::Utf16::kMaxNonSurrogateCharCode);
+    ASSERT(cb <= unibrow::Utf16::kMaxNonSurrogateCharCode);
     if (ca != cb)
       return false;
   }
@@ -6012,7 +6948,7 @@
   if (StringShape(this).IsSymbol()) return false;
 
   Map* map = this->map();
-  Heap* heap = map->heap();
+  Heap* heap = GetHeap();
   if (map == heap->string_map()) {
     this->set_map(heap->undetectable_string_map());
     return true;
@@ -6033,8 +6969,14 @@
   decoder->Reset(str.start(), str.length());
   int i;
   for (i = 0; i < slen && decoder->has_more(); i++) {
-    uc32 r = decoder->GetNext();
-    if (Get(i) != r) return false;
+    uint32_t r = decoder->GetNext();
+    if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+      if (i == slen - 1) return false;
+      if (Get(i++) != unibrow::Utf16::LeadSurrogate(r)) return false;
+      if (Get(i) != unibrow::Utf16::TrailSurrogate(r)) return false;
+    } else {
+      if (Get(i) != r) return false;
+    }
   }
   return i == slen && !decoder->has_more();
 }
@@ -6164,6 +7106,22 @@
 }
 
 
+void StringHasher::AddSurrogatePair(uc32 c) {
+  uint16_t lead = unibrow::Utf16::LeadSurrogate(c);
+  AddCharacter(lead);
+  uint16_t trail = unibrow::Utf16::TrailSurrogate(c);
+  AddCharacter(trail);
+}
+
+
+void StringHasher::AddSurrogatePairNoIndex(uc32 c) {
+  uint16_t lead = unibrow::Utf16::LeadSurrogate(c);
+  AddCharacterNoIndex(lead);
+  uint16_t trail = unibrow::Utf16::TrailSurrogate(c);
+  AddCharacterNoIndex(trail);
+}
+
+
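
Note: AddSurrogatePair feeds the hash exactly the two code units a UTF-16
string stores, keeping hashes consistent across string representations. For
reference, a sketch of the standard arithmetic behind LeadSurrogate and
TrailSurrogate (SplitSurrogatePair is a hypothetical name):

#include <cassert>
#include <cstdint>

void SplitSurrogatePair(uint32_t code_point,
                        uint16_t* lead, uint16_t* trail) {
  assert(code_point >= 0x10000 && code_point <= 0x10FFFF);
  uint32_t v = code_point - 0x10000;                     // 20 payload bits
  *lead = static_cast<uint16_t>(0xD800 + (v >> 10));     // top 10 bits
  *trail = static_cast<uint16_t>(0xDC00 + (v & 0x3FF));  // low 10 bits
}
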
 uint32_t StringHasher::GetHashField() {
   ASSERT(is_valid());
   if (length_ <= String::kMaxHashCalcLength) {
@@ -6220,66 +7178,155 @@
 }
 
 
+void Map::CreateOneBackPointer(Object* transition_target) {
+  if (!transition_target->IsMap()) return;
+  Map* target = Map::cast(transition_target);
+#ifdef DEBUG
+  // Verify target.
+  Object* source_prototype = prototype();
+  Object* target_prototype = target->prototype();
+  ASSERT(source_prototype->IsJSReceiver() ||
+         source_prototype->IsMap() ||
+         source_prototype->IsNull());
+  ASSERT(target_prototype->IsJSReceiver() ||
+         target_prototype->IsNull());
+  ASSERT(source_prototype->IsMap() ||
+         source_prototype == target_prototype);
+#endif
+  // Point target back to source.  set_prototype() will not let us set
+  // the prototype to a map, as we do here.
+  *RawField(target, kPrototypeOffset) = this;
+}
+
+
 void Map::CreateBackPointers() {
   DescriptorArray* descriptors = instance_descriptors();
   for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
-    if (descriptors->GetType(i) == MAP_TRANSITION ||
-        descriptors->GetType(i) == ELEMENTS_TRANSITION ||
-        descriptors->GetType(i) == CONSTANT_TRANSITION) {
-      // Get target.
-      Map* target = Map::cast(descriptors->GetValue(i));
-#ifdef DEBUG
-      // Verify target.
-      Object* source_prototype = prototype();
-      Object* target_prototype = target->prototype();
-      ASSERT(source_prototype->IsJSObject() ||
-             source_prototype->IsMap() ||
-             source_prototype->IsNull());
-      ASSERT(target_prototype->IsJSObject() ||
-             target_prototype->IsNull());
-      ASSERT(source_prototype->IsMap() ||
-             source_prototype == target_prototype);
-#endif
-      // Point target back to source.  set_prototype() will not let us set
-      // the prototype to a map, as we do here.
-      *RawField(target, kPrototypeOffset) = this;
+    switch (descriptors->GetType(i)) {
+      case MAP_TRANSITION:
+      case CONSTANT_TRANSITION:
+        CreateOneBackPointer(descriptors->GetValue(i));
+        break;
+      case ELEMENTS_TRANSITION: {
+        Object* object = descriptors->GetValue(i);
+        if (object->IsMap()) {
+          CreateOneBackPointer(object);
+        } else {
+          FixedArray* array = FixedArray::cast(object);
+          for (int j = 0; j < array->length(); ++j) {
+            CreateOneBackPointer(array->get(j));
+          }
+        }
+        break;
+      }
+      case CALLBACKS: {
+        Object* object = descriptors->GetValue(i);
+        if (object->IsAccessorPair()) {
+          AccessorPair* accessors = AccessorPair::cast(object);
+          CreateOneBackPointer(accessors->getter());
+          CreateOneBackPointer(accessors->setter());
+        }
+        break;
+      }
+      case NORMAL:
+      case FIELD:
+      case CONSTANT_FUNCTION:
+      case HANDLER:
+      case INTERCEPTOR:
+      case NULL_DESCRIPTOR:
+        break;
     }
   }
 }
 
 
+bool Map::RestoreOneBackPointer(Object* object,
+                                Object* real_prototype,
+                                bool* keep_entry) {
+  if (!object->IsMap()) return false;
+  Map* map = Map::cast(object);
+  if (Marking::MarkBitFrom(map).Get()) {
+    *keep_entry = true;
+    return false;
+  }
+  ASSERT(map->prototype() == this || map->prototype() == real_prototype);
+  // Getter prototype() is read-only, set_prototype() has side effects.
+  *RawField(map, Map::kPrototypeOffset) = real_prototype;
+  return true;
+}
+
+
 void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
-  // Live DescriptorArray objects will be marked, so we must use
-  // low-level accessors to get and modify their data.
-  DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
+  DescriptorArray* d = DescriptorArray::cast(
       *RawField(this, Map::kInstanceDescriptorsOrBitField3Offset));
   if (d->IsEmpty()) return;
   Smi* NullDescriptorDetails =
     PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
-  FixedArray* contents = reinterpret_cast<FixedArray*>(
+  FixedArray* contents = FixedArray::cast(
       d->get(DescriptorArray::kContentArrayIndex));
   ASSERT(contents->length() >= 2);
   for (int i = 0; i < contents->length(); i += 2) {
-    // If the pair (value, details) is a map transition,
-    // check if the target is live.  If not, null the descriptor.
-    // Also drop the back pointer for that map transition, so that this
-    // map is not reached again by following a back pointer from a
-    // non-live object.
+    // If the pair (value, details) is a map transition, check if the target is
+    // live. If not, null the descriptor. Also drop the back pointer for that
+    // map transition, so that this map is not reached again by following a back
+    // pointer from a non-live object.
+    bool keep_entry = false;
     PropertyDetails details(Smi::cast(contents->get(i + 1)));
-    if (details.type() == MAP_TRANSITION ||
-        details.type() == ELEMENTS_TRANSITION ||
-        details.type() == CONSTANT_TRANSITION) {
-      Map* target = reinterpret_cast<Map*>(contents->get(i));
-      ASSERT(target->IsHeapObject());
-      if (!target->IsMarked()) {
-        ASSERT(target->IsMap());
-        contents->set_unchecked(i + 1, NullDescriptorDetails);
-        contents->set_null_unchecked(heap, i);
-        ASSERT(target->prototype() == this ||
-               target->prototype() == real_prototype);
-        // Getter prototype() is read-only, set_prototype() has side effects.
-        *RawField(target, Map::kPrototypeOffset) = real_prototype;
+    switch (details.type()) {
+      case MAP_TRANSITION:
+      case CONSTANT_TRANSITION:
+        RestoreOneBackPointer(contents->get(i), real_prototype, &keep_entry);
+        break;
+      case ELEMENTS_TRANSITION: {
+        Object* object = contents->get(i);
+        if (object->IsMap()) {
+          RestoreOneBackPointer(object, real_prototype, &keep_entry);
+        } else {
+          FixedArray* array = FixedArray::cast(object);
+          for (int j = 0; j < array->length(); ++j) {
+            if (RestoreOneBackPointer(array->get(j),
+                                      real_prototype,
+                                      &keep_entry)) {
+              array->set_undefined(j);
+            }
+          }
+        }
+        break;
       }
+      case CALLBACKS: {
+        Object* object = contents->get(i);
+        if (object->IsAccessorPair()) {
+          AccessorPair* accessors = AccessorPair::cast(object);
+          if (RestoreOneBackPointer(accessors->getter(),
+                                    real_prototype,
+                                    &keep_entry)) {
+            accessors->set_getter(heap->the_hole_value());
+          }
+          if (RestoreOneBackPointer(accessors->setter(),
+                                    real_prototype,
+                                    &keep_entry)) {
+            accessors->set_setter(heap->the_hole_value());
+          }
+        } else {
+          keep_entry = true;
+        }
+        break;
+      }
+      case NORMAL:
+      case FIELD:
+      case CONSTANT_FUNCTION:
+      case HANDLER:
+      case INTERCEPTOR:
+      case NULL_DESCRIPTOR:
+        keep_entry = true;
+        break;
+    }
+    // Make sure that an entry containing only dead transitions gets collected.
+    // What we *really* want to do here is to remove this entry completely,
+    // but for technical reasons we can't do this, so we zero it out instead.
+    if (!keep_entry) {
+      contents->set_unchecked(i + 1, NullDescriptorDetails);
+      contents->set_null_unchecked(heap, i);
     }
   }
 }
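
Note: both CreateBackPointers and ClearNonLiveTransitions now fan out over
three shapes of transition value: a single map, a FixedArray of maps (for
elements transitions), and an AccessorPair whose getter/setter slots may hold
transition maps. A toy sketch of that dispatch using std::variant in place of
tagged heap objects (ToyMap, AccessorSlots, and ForEachTarget are all
hypothetical):

#include <functional>
#include <variant>
#include <vector>

struct ToyMap;  // stand-in for v8::internal::Map

struct AccessorSlots { ToyMap* getter; ToyMap* setter; };  // either may be null

using TransitionValue =
    std::variant<ToyMap*, std::vector<ToyMap*>, AccessorSlots>;

void ForEachTarget(const TransitionValue& value,
                   const std::function<void(ToyMap*)>& visit) {
  if (const auto* single = std::get_if<ToyMap*>(&value)) {
    if (*single != nullptr) visit(*single);
  } else if (const auto* many = std::get_if<std::vector<ToyMap*>>(&value)) {
    for (ToyMap* target : *many) {
      if (target != nullptr) visit(target);
    }
  } else {
    const AccessorSlots& pair = std::get<AccessorSlots>(value);
    if (pair.getter != nullptr) visit(pair.getter);
    if (pair.setter != nullptr) visit(pair.setter);
  }
}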
@@ -6337,6 +7384,57 @@
 }
 
 
+bool SharedFunctionInfo::EnsureCompiled(Handle<SharedFunctionInfo> shared,
+                                        ClearExceptionFlag flag) {
+  return shared->is_compiled() || CompileLazy(shared, flag);
+}
+
+
+static bool CompileLazyHelper(CompilationInfo* info,
+                              ClearExceptionFlag flag) {
+  // Compile the source information to a code object.
+  ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
+  ASSERT(!info->isolate()->has_pending_exception());
+  bool result = Compiler::CompileLazy(info);
+  ASSERT(result != Isolate::Current()->has_pending_exception());
+  if (!result && flag == CLEAR_EXCEPTION) {
+    info->isolate()->clear_pending_exception();
+  }
+  return result;
+}
+
+
+bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
+                                     ClearExceptionFlag flag) {
+  CompilationInfo info(shared);
+  return CompileLazyHelper(&info, flag);
+}
+
+
+bool JSFunction::CompileLazy(Handle<JSFunction> function,
+                             ClearExceptionFlag flag) {
+  bool result = true;
+  if (function->shared()->is_compiled()) {
+    function->ReplaceCode(function->shared()->code());
+    function->shared()->set_code_age(0);
+  } else {
+    CompilationInfo info(function);
+    result = CompileLazyHelper(&info, flag);
+    ASSERT(!result || function->is_compiled());
+  }
+  return result;
+}
+
+
+bool JSFunction::CompileOptimized(Handle<JSFunction> function,
+                                  int osr_ast_id,
+                                  ClearExceptionFlag flag) {
+  CompilationInfo info(function);
+  info.SetOptimizing(osr_ast_id);
+  return CompileLazyHelper(&info, flag);
+}
+
+
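
Note: the three entry points above funnel into one helper: compile on demand,
check the result against the pending-exception state, and optionally clear the
exception for callers that tolerate failure. A reduced sketch of that control
flow (Shared, Compile, and EnsureCompiled here are toy stand-ins, not V8's):

#include <optional>
#include <string>

enum class ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };

struct Shared {
  bool compiled = false;
  std::optional<std::string> pending_exception;
  bool Compile() {  // toy "compiler": always succeeds
    compiled = true;
    return true;
  }
};

bool EnsureCompiled(Shared* shared, ClearExceptionFlag flag) {
  if (shared->compiled) return true;    // fast path: already compiled
  bool ok = shared->Compile();
  if (!ok && flag == ClearExceptionFlag::CLEAR_EXCEPTION) {
    shared->pending_exception.reset();  // swallow the failure
  }
  return ok;
}
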
 bool JSFunction::IsInlineable() {
   if (IsBuiltin()) return false;
   SharedFunctionInfo* shared_info = shared();
@@ -6351,11 +7449,19 @@
 }
 
 
-Object* JSFunction::SetInstancePrototype(Object* value) {
-  ASSERT(value->IsJSObject());
+MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
+  ASSERT(value->IsJSReceiver());
   Heap* heap = GetHeap();
   if (has_initial_map()) {
-    initial_map()->set_prototype(value);
+    // If the function has allocated the initial map
+    // replace it with a copy containing the new prototype.
+    Map* new_map;
+    MaybeObject* maybe_new_map = initial_map()->CopyDropTransitions();
+    if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+    new_map->set_prototype(value);
+    MaybeObject* maybe_object =
+        set_initial_map_and_cache_transitions(new_map);
+    if (maybe_object->IsFailure()) return maybe_object;
   } else {
     // Put the value in the initial map field until an initial map is
     // needed.  At that point, a new initial map is created and the
@@ -6371,20 +7477,19 @@
   ASSERT(should_have_prototype());
   Object* construct_prototype = value;
 
-  // If the value is not a JSObject, store the value in the map's
+  // If the value is not a JSReceiver, store the value in the map's
   // constructor field so it can be accessed.  Also, set the prototype
   // used for constructing objects to the original object prototype.
   // See ECMA-262 13.2.2.
-  if (!value->IsJSObject()) {
+  if (!value->IsJSReceiver()) {
     // Copy the map so this does not affect unrelated functions.
     // Remove map transitions because they point to maps with a
     // different prototype.
-    Object* new_object;
+    Map* new_map;
     { MaybeObject* maybe_new_map = map()->CopyDropTransitions();
-      if (!maybe_new_map->ToObject(&new_object)) return maybe_new_map;
+      if (!maybe_new_map->To(&new_map)) return maybe_new_map;
     }
-    Map* new_map = Map::cast(new_object);
-    Heap* heap = new_map->heap();
+    Heap* heap = new_map->GetHeap();
     set_map(new_map);
     new_map->set_constructor(value);
     new_map->set_non_instance_prototype(true);
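
Note: the important change in SetInstancePrototype is that an existing initial
map is never mutated in place, since unrelated objects may share it; it is
copied without its transitions and the copy gets the new prototype. A toy
illustration of the copy-before-mutate rule (ProtoMap and CopyWithPrototype
are hypothetical):

#include <memory>

struct ProtoMap {
  const void* prototype = nullptr;
  // Transition tables omitted; the real copy drops them because they
  // lead to maps built for the old prototype.
};

std::unique_ptr<ProtoMap> CopyWithPrototype(const ProtoMap& initial,
                                            const void* new_prototype) {
  auto copy = std::make_unique<ProtoMap>(initial);  // clone, don't mutate
  copy->prototype = new_prototype;
  return copy;  // the shared original is left untouched
}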
@@ -6401,21 +7506,21 @@
 
 Object* JSFunction::RemovePrototype() {
   Context* global_context = context()->global_context();
-  Map* no_prototype_map = shared()->strict_mode()
-      ? global_context->strict_mode_function_without_prototype_map()
-      : global_context->function_without_prototype_map();
+  Map* no_prototype_map = shared()->is_classic_mode()
+      ? global_context->function_without_prototype_map()
+      : global_context->strict_mode_function_without_prototype_map();
 
   if (map() == no_prototype_map) {
     // Be idempotent.
     return this;
   }
 
-  ASSERT(!shared()->strict_mode() ||
-         map() == global_context->strict_mode_function_map());
-  ASSERT(shared()->strict_mode() || map() == global_context->function_map());
+  ASSERT(map() == (shared()->is_classic_mode()
+                   ? global_context->function_map()
+                   : global_context->strict_mode_function_map()));
 
   set_map(no_prototype_map);
-  set_prototype_or_initial_map(no_prototype_map->heap()->the_hole_value());
+  set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
   return this;
 }
 
@@ -6440,12 +7545,12 @@
 MaybeObject* Oddball::Initialize(const char* to_string,
                                  Object* to_number,
                                  byte kind) {
-  Object* symbol;
+  String* symbol;
   { MaybeObject* maybe_symbol =
         Isolate::Current()->heap()->LookupAsciiSymbol(to_string);
-    if (!maybe_symbol->ToObject(&symbol)) return maybe_symbol;
+    if (!maybe_symbol->To(&symbol)) return maybe_symbol;
   }
-  set_to_string(String::cast(symbol));
+  set_to_string(symbol);
   set_to_number(to_number);
   set_kind(kind);
   return this;
@@ -6465,13 +7570,10 @@
 }
 
 
-Object* SharedFunctionInfo::GetSourceCode() {
-  Isolate* isolate = GetIsolate();
-  if (!HasSourceCode()) return isolate->heap()->undefined_value();
-  HandleScope scope(isolate);
-  Object* source = Script::cast(script())->source();
-  return *SubString(Handle<String>(String::cast(source), isolate),
-                    start_position(), end_position());
+Handle<Object> SharedFunctionInfo::GetSourceCode() {
+  if (!HasSourceCode()) return GetIsolate()->factory()->undefined_value();
+  Handle<String> source(String::cast(Script::cast(script())->source()));
+  return SubString(source, start_position(), end_position());
 }
 
 
@@ -6519,10 +7621,10 @@
        obj = obj->GetPrototype()) {
     JSObject* js_object = JSObject::cast(obj);
     for (int i = 0; i < this_property_assignments_count(); i++) {
-      LookupResult result;
+      LookupResult result(heap->isolate());
       String* name = GetThisPropertyAssignmentName(i);
       js_object->LocalLookupRealNamedProperty(name, &result);
-      if (result.IsProperty() && result.type() == CALLBACKS) {
+      if (result.IsFound() && result.type() == CALLBACKS) {
         return false;
       }
     }
@@ -6668,7 +7770,7 @@
 }
 
 
-void SharedFunctionInfo::DisableOptimization(JSFunction* function) {
+void SharedFunctionInfo::DisableOptimization() {
   // Disable optimization for the shared function info and mark the
   // code as non-optimizable. The marker on the shared function info
   // is there because we flush non-optimized code thereby losing the
@@ -6684,17 +7786,12 @@
     code()->set_optimizable(false);
   }
   if (FLAG_trace_opt) {
-    PrintF("[disabled optimization for: ");
-    function->PrintName();
-    PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+    PrintF("[disabled optimization for %s]\n", *DebugName()->ToCString());
   }
 }
 
 
 bool SharedFunctionInfo::VerifyBailoutId(int id) {
-  // TODO(srdjan): debugging ARM crashes in hydrogen. OK to disable while
-  // we are always bailing out on ARM.
-
   ASSERT(id != AstNode::kNoNumber);
   Code* unoptimized = code();
   DeoptimizationOutputData* data =
@@ -6708,6 +7805,8 @@
 void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
   ASSERT(!IsInobjectSlackTrackingInProgress());
 
+  if (!FLAG_clever_optimizations) return;
+
   // Only initiate the tracking the first time.
   if (live_objects_may_exist()) return;
   set_live_objects_may_exist(true);
@@ -6723,7 +7822,7 @@
     set_construction_count(kGenerousAllocationCount);
   }
   set_initial_map(map);
-  Builtins* builtins = map->heap()->isolate()->builtins();
+  Builtins* builtins = map->GetHeap()->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
             construct_stub());
   set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
@@ -6743,8 +7842,9 @@
   // then StartInobjectSlackTracking will be called again the next time the
   // constructor is called. The countdown will continue and (possibly after
   // several more GCs) CompleteInobjectSlackTracking will eventually be called.
-  set_initial_map(map->heap()->raw_unchecked_undefined_value());
-  Builtins* builtins = map->heap()->isolate()->builtins();
+  Heap* heap = map->GetHeap();
+  set_initial_map(heap->raw_unchecked_undefined_value());
+  Builtins* builtins = heap->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
             *RawField(this, kConstructStubOffset));
   set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
@@ -6760,7 +7860,7 @@
 
   // Resume inobject slack tracking.
   set_initial_map(map);
-  Builtins* builtins = map->heap()->isolate()->builtins();
+  Builtins* builtins = map->GetHeap()->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
             *RawField(this, kConstructStubOffset));
   set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
@@ -6792,7 +7892,7 @@
   ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
   Map* map = Map::cast(initial_map());
 
-  Heap* heap = map->heap();
+  Heap* heap = map->GetHeap();
   set_initial_map(heap->undefined_value());
   Builtins* builtins = heap->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
@@ -6812,6 +7912,22 @@
 }
 
 
+#define DECLARE_TAG(ignore1, name, ignore2) name,
+const char* const VisitorSynchronization::kTags[
+    VisitorSynchronization::kNumberOfSyncTags] = {
+  VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
+};
+#undef DECLARE_TAG
+
+
+#define DECLARE_TAG(ignore1, ignore2, name) name,
+const char* const VisitorSynchronization::kTagNames[
+    VisitorSynchronization::kNumberOfSyncTags] = {
+  VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
+};
+#undef DECLARE_TAG
+
+
 void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
   Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -6854,8 +7970,18 @@
 }
 
 
+void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
+  ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+  VisitPointer(rinfo->target_object_address());
+}
+
+void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
+  Address* p = rinfo->target_reference_address();
+  VisitExternalReferences(p, p + 1);
+}
+
 void Code::InvalidateRelocation() {
-  set_relocation_info(heap()->empty_byte_array());
+  set_relocation_info(GetHeap()->empty_byte_array());
 }
 
 
@@ -6868,6 +7994,8 @@
 
 
 void Code::CopyFrom(const CodeDesc& desc) {
+  ASSERT(Marking::Color(this) == Marking::WHITE_OBJECT);
+
   // copy code
   memmove(instruction_start(), desc.buffer, desc.instr_size);
 
@@ -6887,16 +8015,17 @@
     RelocInfo::Mode mode = it.rinfo()->rmode();
     if (mode == RelocInfo::EMBEDDED_OBJECT) {
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
-      it.rinfo()->set_target_object(*p);
+      it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER);
     } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-      Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
-      it.rinfo()->set_target_cell(*cell);
+      Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
+      it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER);
     } else if (RelocInfo::IsCodeTarget(mode)) {
       // rewrite code handles in inline cache targets to direct
       // pointers to the first instruction in the code object
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
       Code* code = Code::cast(*p);
-      it.rinfo()->set_target_address(code->instruction_start());
+      it.rinfo()->set_target_address(code->instruction_start(),
+                                     SKIP_WRITE_BARRIER);
     } else {
       it.rinfo()->apply(delta);
     }
@@ -7015,8 +8144,11 @@
         static_cast<Translation::Opcode>(iterator.Next());
     ASSERT(Translation::BEGIN == opcode);
     int frame_count = iterator.Next();
-    PrintF(out, "  %s {count=%d}\n", Translation::StringFor(opcode),
-           frame_count);
+    int jsframe_count = iterator.Next();
+    PrintF(out, "  %s {frame count=%d, js frame count=%d}\n",
+           Translation::StringFor(opcode),
+           frame_count,
+           jsframe_count);
 
     while (iterator.HasNext() &&
            Translation::BEGIN !=
@@ -7028,7 +8160,7 @@
           UNREACHABLE();
           break;
 
-        case Translation::FRAME: {
+        case Translation::JS_FRAME: {
           int ast_id = iterator.Next();
           int function_id = iterator.Next();
           JSFunction* function =
@@ -7040,6 +8172,18 @@
           break;
         }
 
+        case Translation::ARGUMENTS_ADAPTOR_FRAME:
+        case Translation::CONSTRUCT_STUB_FRAME: {
+          int function_id = iterator.Next();
+          JSFunction* function =
+              JSFunction::cast(LiteralArray()->get(function_id));
+          unsigned height = iterator.Next();
+          PrintF(out, "{function=");
+          function->PrintName(out);
+          PrintF(out, ", height=%u}", height);
+          break;
+        }
+
         case Translation::DUPLICATE:
           break;
 
@@ -7163,7 +8307,7 @@
     case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
     case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
   }
-  UNREACHABLE();
+  UNREACHABLE();  // keep the compiler happy
   return NULL;
 }
 
@@ -7272,125 +8416,55 @@
 #endif  // ENABLE_DISASSEMBLER
 
 
-static void CopyFastElementsToFast(FixedArray* source,
-                                   FixedArray* destination,
-                                   WriteBarrierMode mode) {
-  uint32_t count = static_cast<uint32_t>(source->length());
-  for (uint32_t i = 0; i < count; ++i) {
-    destination->set(i, source->get(i), mode);
-  }
-}
-
-
-static void CopySlowElementsToFast(SeededNumberDictionary* source,
-                                   FixedArray* destination,
-                                   WriteBarrierMode mode) {
-  for (int i = 0; i < source->Capacity(); ++i) {
-    Object* key = source->KeyAt(i);
-    if (key->IsNumber()) {
-      uint32_t entry = static_cast<uint32_t>(key->Number());
-      destination->set(entry, source->ValueAt(i), mode);
-    }
-  }
-}
-
-
-MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
-                                                        int length) {
+MaybeObject* JSObject::SetFastElementsCapacityAndLength(
+    int capacity,
+    int length,
+    SetFastElementsCapacityMode set_capacity_mode) {
   Heap* heap = GetHeap();
   // We should never end up here with a pixel or external array.
   ASSERT(!HasExternalArrayElements());
 
   // Allocate a new fast elements backing store.
-  FixedArray* new_elements = NULL;
-  { Object* object;
-    MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(capacity);
-    if (!maybe->ToObject(&object)) return maybe;
-    new_elements = FixedArray::cast(object);
+  FixedArray* new_elements;
+  { MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(capacity);
+    if (!maybe->To(&new_elements)) return maybe;
   }
 
   // Find the new map to use for this object if there is a map change.
   Map* new_map = NULL;
   if (elements()->map() != heap->non_strict_arguments_elements_map()) {
-    Object* object;
-    MaybeObject* maybe = map()->GetFastElementsMap();
-    if (!maybe->ToObject(&object)) return maybe;
-    new_map = Map::cast(object);
+    // The resized array has FAST_SMI_ONLY_ELEMENTS if the capacity mode forces
+    // it, or if it's allowed and the old elements array contained only SMIs.
+    bool has_fast_smi_only_elements =
+        (set_capacity_mode == kForceSmiOnlyElements) ||
+        ((set_capacity_mode == kAllowSmiOnlyElements) &&
+         (elements()->map()->has_fast_smi_only_elements() ||
+          elements() == heap->empty_fixed_array()));
+    ElementsKind elements_kind = has_fast_smi_only_elements
+        ? FAST_SMI_ONLY_ELEMENTS
+        : FAST_ELEMENTS;
+    MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind);
+    if (!maybe->To(&new_map)) return maybe;
   }
 
-  switch (GetElementsKind()) {
-    case FAST_ELEMENTS: {
-      AssertNoAllocation no_gc;
-      WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
-      CopyFastElementsToFast(FixedArray::cast(elements()), new_elements, mode);
-      set_map(new_map);
-      set_elements(new_elements);
-      break;
-    }
-    case DICTIONARY_ELEMENTS: {
-      AssertNoAllocation no_gc;
-      WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
-      CopySlowElementsToFast(SeededNumberDictionary::cast(elements()),
-                             new_elements,
-                             mode);
-      set_map(new_map);
-      set_elements(new_elements);
-      break;
-    }
-    case NON_STRICT_ARGUMENTS_ELEMENTS: {
-      AssertNoAllocation no_gc;
-      WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
-      // The object's map and the parameter map are unchanged, the unaliased
-      // arguments are copied to the new backing store.
-      FixedArray* parameter_map = FixedArray::cast(elements());
-      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-      if (arguments->IsDictionary()) {
-        CopySlowElementsToFast(SeededNumberDictionary::cast(arguments),
-                               new_elements,
-                               mode);
-      } else {
-        CopyFastElementsToFast(arguments, new_elements, mode);
-      }
-      parameter_map->set(1, new_elements);
-      break;
-    }
-    case FAST_DOUBLE_ELEMENTS: {
-      FixedDoubleArray* old_elements = FixedDoubleArray::cast(elements());
-      uint32_t old_length = static_cast<uint32_t>(old_elements->length());
-      // Fill out the new array with this content and array holes.
-      for (uint32_t i = 0; i < old_length; i++) {
-        if (!old_elements->is_the_hole(i)) {
-          Object* obj;
-          // Objects must be allocated in the old object space, since the
-          // overall number of HeapNumbers needed for the conversion might
-          // exceed the capacity of new space, and we would fail repeatedly
-          // trying to convert the FixedDoubleArray.
-          MaybeObject* maybe_value_object =
-              GetHeap()->AllocateHeapNumber(old_elements->get_scalar(i),
-                                            TENURED);
-          if (!maybe_value_object->ToObject(&obj)) return maybe_value_object;
-          // Force write barrier. It's not worth trying to exploit
-          // elems->GetWriteBarrierMode(), since it requires an
-          // AssertNoAllocation stack object that would have to be positioned
-          // after the HeapNumber allocation anyway.
-          new_elements->set(i, obj, UPDATE_WRITE_BARRIER);
-        }
-      }
-      set_map(new_map);
-      set_elements(new_elements);
-      break;
-    }
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-    case EXTERNAL_DOUBLE_ELEMENTS:
-    case EXTERNAL_PIXEL_ELEMENTS:
-      UNREACHABLE();
-      break;
+  FixedArrayBase* old_elements = elements();
+  ElementsKind elements_kind = GetElementsKind();
+  ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
+  ElementsKind to_kind = (elements_kind == FAST_SMI_ONLY_ELEMENTS)
+      ? FAST_SMI_ONLY_ELEMENTS
+      : FAST_ELEMENTS;
+  accessor->CopyElements(this, new_elements, to_kind);
+  if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
+    set_map_and_elements(new_map, new_elements);
+  } else {
+    FixedArray* parameter_map = FixedArray::cast(old_elements);
+    parameter_map->set(1, new_elements);
+  }
+
+  if (FLAG_trace_elements_transitions) {
+    PrintElementsTransition(stdout, elements_kind, old_elements,
+                            GetElementsKind(), new_elements);
   }
 
   // Update the length if necessary.
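
Note: the map selection above reduces to one predicate: the resized store
stays SMI-only when the capacity mode forces it, or when it is allowed and
the old backing store held only SMIs (or was empty). Stated as a standalone
function with toy enums (not the V8 declarations):

enum class CapacityMode { kForceSmiOnly, kAllowSmiOnly, kDontAllowSmiOnly };
enum class Kind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

Kind KindAfterResize(CapacityMode mode,
                     bool old_was_smi_only,
                     bool old_was_empty) {
  bool smi_only =
      mode == CapacityMode::kForceSmiOnly ||
      (mode == CapacityMode::kAllowSmiOnly &&
       (old_was_smi_only || old_was_empty));
  return smi_only ? Kind::FAST_SMI_ONLY_ELEMENTS : Kind::FAST_ELEMENTS;
}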
@@ -7409,41 +8483,33 @@
   // We should never end up here with a pixel or external array.
   ASSERT(!HasExternalArrayElements());
 
-  Object* obj;
+  FixedDoubleArray* elems;
   { MaybeObject* maybe_obj =
         heap->AllocateUninitializedFixedDoubleArray(capacity);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  FixedDoubleArray* elems = FixedDoubleArray::cast(obj);
-
-  { MaybeObject* maybe_obj = map()->GetFastDoubleElementsMap();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  Map* new_map = Map::cast(obj);
-
-  AssertNoAllocation no_gc;
-  switch (GetElementsKind()) {
-    case FAST_ELEMENTS: {
-      elems->Initialize(FixedArray::cast(elements()));
-      break;
-    }
-    case FAST_DOUBLE_ELEMENTS: {
-      elems->Initialize(FixedDoubleArray::cast(elements()));
-      break;
-    }
-    case DICTIONARY_ELEMENTS: {
-      elems->Initialize(SeededNumberDictionary::cast(elements()));
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
+    if (!maybe_obj->To(&elems)) return maybe_obj;
   }
 
-  ASSERT(new_map->has_fast_double_elements());
-  set_map(new_map);
-  ASSERT(elems->IsFixedDoubleArray());
-  set_elements(elems);
+  Map* new_map;
+  { MaybeObject* maybe_obj =
+        GetElementsTransitionMap(heap->isolate(), FAST_DOUBLE_ELEMENTS);
+    if (!maybe_obj->To(&new_map)) return maybe_obj;
+  }
+
+  FixedArrayBase* old_elements = elements();
+  ElementsKind elements_kind = GetElementsKind();
+  ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
+  accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
+  if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
+    set_map_and_elements(new_map, elems);
+  } else {
+    FixedArray* parameter_map = FixedArray::cast(old_elements);
+    parameter_map->set(1, elems);
+  }
+
+  if (FLAG_trace_elements_transitions) {
+    PrintElementsTransition(stdout, elements_kind, old_elements,
+                            FAST_DOUBLE_ELEMENTS, elems);
+  }
 
   if (IsJSArray()) {
     JSArray::cast(this)->set_length(Smi::FromInt(length));
@@ -7453,53 +8519,6 @@
 }
 
 
-MaybeObject* JSObject::SetSlowElements(Object* len) {
-  // We should never end in here with a pixel or external array.
-  ASSERT(!HasExternalArrayElements());
-
-  uint32_t new_length = static_cast<uint32_t>(len->Number());
-
-  switch (GetElementsKind()) {
-    case FAST_ELEMENTS: {
-    case FAST_DOUBLE_ELEMENTS:
-      // Make sure we never try to shrink dense arrays into sparse arrays.
-      ASSERT(static_cast<uint32_t>(
-          FixedArrayBase::cast(elements())->length()) <= new_length);
-      MaybeObject* result = NormalizeElements();
-      if (result->IsFailure()) return result;
-
-      // Update length for JSArrays.
-      if (IsJSArray()) JSArray::cast(this)->set_length(len);
-      break;
-    }
-    case DICTIONARY_ELEMENTS: {
-      if (IsJSArray()) {
-        uint32_t old_length =
-            static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
-        element_dictionary()->RemoveNumberEntries(new_length, old_length),
-        JSArray::cast(this)->set_length(len);
-      }
-      break;
-    }
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      UNIMPLEMENTED();
-      break;
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-    case EXTERNAL_DOUBLE_ELEMENTS:
-    case EXTERNAL_PIXEL_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
-  return this;
-}
-
-
 MaybeObject* JSArray::Initialize(int capacity) {
   Heap* heap = GetHeap();
   ASSERT(capacity >= 0);
@@ -7508,11 +8527,8 @@
   if (capacity == 0) {
     new_elements = heap->empty_fixed_array();
   } else {
-    Object* obj;
-    { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
-      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    }
-    new_elements = FixedArray::cast(obj);
+    MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
+    if (!maybe_obj->To(&new_elements)) return maybe_obj;
   }
   set_elements(new_elements);
   return this;
@@ -7520,162 +8536,15 @@
 
 
 void JSArray::Expand(int required_size) {
-  Handle<JSArray> self(this);
-  Handle<FixedArray> old_backing(FixedArray::cast(elements()));
-  int old_size = old_backing->length();
-  int new_size = required_size > old_size ? required_size : old_size;
-  Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
-  // Can't use this any more now because we may have had a GC!
-  for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
-  self->SetContent(*new_backing);
+  GetIsolate()->factory()->SetElementsCapacityAndLength(
+      Handle<JSArray>(this), required_size, required_size);
 }
 
 
-static Failure* ArrayLengthRangeError(Heap* heap) {
-  HandleScope scope(heap->isolate());
-  return heap->isolate()->Throw(
-      *FACTORY->NewRangeError("invalid_array_length",
-          HandleVector<Object>(NULL, 0)));
-}
-
-
-MaybeObject* JSObject::SetElementsLength(Object* len) {
+MaybeObject* JSArray::SetElementsLength(Object* len) {
   // We should never end up here with a pixel or external array.
   ASSERT(AllowsSetElementsLength());
-
-  MaybeObject* maybe_smi_length = len->ToSmi();
-  Object* smi_length = Smi::FromInt(0);
-  if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
-    const int value = Smi::cast(smi_length)->value();
-    if (value < 0) return ArrayLengthRangeError(GetHeap());
-    ElementsKind elements_kind = GetElementsKind();
-    switch (elements_kind) {
-      case FAST_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS: {
-        int old_capacity = FixedArrayBase::cast(elements())->length();
-        if (value <= old_capacity) {
-          if (IsJSArray()) {
-            Object* obj;
-            if (elements_kind == FAST_ELEMENTS) {
-              MaybeObject* maybe_obj = EnsureWritableFastElements();
-              if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-            }
-            if (2 * value <= old_capacity) {
-              // If more than half the elements won't be used, trim the array.
-              if (value == 0) {
-                initialize_elements();
-              } else {
-                Address filler_start;
-                int filler_size;
-                if (GetElementsKind() == FAST_ELEMENTS) {
-                  FixedArray* fast_elements = FixedArray::cast(elements());
-                  fast_elements->set_length(value);
-                  filler_start = fast_elements->address() +
-                      FixedArray::OffsetOfElementAt(value);
-                  filler_size = (old_capacity - value) * kPointerSize;
-                } else {
-                  ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
-                  FixedDoubleArray* fast_double_elements =
-                      FixedDoubleArray::cast(elements());
-                  fast_double_elements->set_length(value);
-                  filler_start = fast_double_elements->address() +
-                      FixedDoubleArray::OffsetOfElementAt(value);
-                  filler_size = (old_capacity - value) * kDoubleSize;
-                }
-                GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
-              }
-            } else {
-              // Otherwise, fill the unused tail with holes.
-              int old_length = FastD2I(JSArray::cast(this)->length()->Number());
-              if (GetElementsKind() == FAST_ELEMENTS) {
-                FixedArray* fast_elements = FixedArray::cast(elements());
-                for (int i = value; i < old_length; i++) {
-                  fast_elements->set_the_hole(i);
-                }
-              } else {
-                ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
-                FixedDoubleArray* fast_double_elements =
-                    FixedDoubleArray::cast(elements());
-                for (int i = value; i < old_length; i++) {
-                  fast_double_elements->set_the_hole(i);
-                }
-              }
-            }
-            JSArray::cast(this)->set_length(Smi::cast(smi_length));
-          }
-          return this;
-        }
-        int min = NewElementsCapacity(old_capacity);
-        int new_capacity = value > min ? value : min;
-        if (!ShouldConvertToSlowElements(new_capacity)) {
-          MaybeObject* result;
-          if (GetElementsKind() == FAST_ELEMENTS) {
-            result = SetFastElementsCapacityAndLength(new_capacity, value);
-          }  else {
-            ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
-            result = SetFastDoubleElementsCapacityAndLength(new_capacity,
-                                                            value);
-          }
-          if (result->IsFailure()) return result;
-          return this;
-        }
-        break;
-      }
-      case DICTIONARY_ELEMENTS: {
-        if (IsJSArray()) {
-          if (value == 0) {
-            // If the length of a slow array is reset to zero, we clear
-            // the array and flush backing storage. This has the added
-            // benefit that the array returns to fast mode.
-            Object* obj;
-            { MaybeObject* maybe_obj = ResetElements();
-              if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-            }
-          } else {
-            // Remove deleted elements.
-            uint32_t old_length =
-            static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
-            element_dictionary()->RemoveNumberEntries(value, old_length);
-          }
-          JSArray::cast(this)->set_length(Smi::cast(smi_length));
-        }
-        return this;
-      }
-      case NON_STRICT_ARGUMENTS_ELEMENTS:
-      case EXTERNAL_BYTE_ELEMENTS:
-      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      case EXTERNAL_SHORT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      case EXTERNAL_INT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      case EXTERNAL_FLOAT_ELEMENTS:
-      case EXTERNAL_DOUBLE_ELEMENTS:
-      case EXTERNAL_PIXEL_ELEMENTS:
-        UNREACHABLE();
-        break;
-    }
-  }
-
-  // General slow case.
-  if (len->IsNumber()) {
-    uint32_t length;
-    if (len->ToArrayIndex(&length)) {
-      return SetSlowElements(len);
-    } else {
-      return ArrayLengthRangeError(GetHeap());
-    }
-  }
-
-  // len is not a number so make the array size one and
-  // set only element to len.
-  Object* obj;
-  { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  FixedArray::cast(obj)->set(0, len);
-  if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1));
-  set_elements(FixedArray::cast(obj));
-  return this;
+  return GetElementsAccessor()->SetLength(this, len);
 }
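
Note: the removed fast path encoded a shrink heuristic that the new
ElementsAccessor::SetLength still has to honor: if the new length is at most
half the capacity, trim the backing store; otherwise keep the capacity and
fill the unused tail with holes. A sketch with a vector of optionals standing
in for a FixedArray with holes (Backing and ShrinkTo are hypothetical names):

#include <cstddef>
#include <optional>
#include <vector>

using Backing = std::vector<std::optional<int>>;  // nullopt == "the hole"

void ShrinkTo(Backing& elements, size_t new_length) {
  if (2 * new_length <= elements.size()) {
    elements.resize(new_length);      // trim: release the tail storage
    elements.shrink_to_fit();
  } else {
    for (size_t i = new_length; i < elements.size(); ++i) {
      elements[i] = std::nullopt;     // hole-fill the unused tail
    }
  }
}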
 
 
@@ -7718,8 +8587,8 @@
     FixedArray* new_cache;
     // Grow array by factor 2 over and above what we need.
     { MaybeObject* maybe_cache =
-          heap()->AllocateFixedArray(transitions * 2 * step + header);
-      if (!maybe_cache->To<FixedArray>(&new_cache)) return maybe_cache;
+          GetHeap()->AllocateFixedArray(transitions * 2 * step + header);
+      if (!maybe_cache->To(&new_cache)) return maybe_cache;
     }
 
     for (int i = 0; i < capacity * step; i++) {
@@ -7771,7 +8640,7 @@
   // It is sufficient to validate that the receiver is not in the new prototype
   // chain.
   for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
-    if (JSObject::cast(pt) == this) {
+    if (JSReceiver::cast(pt) == this) {
       // Cycle detected.
       HandleScope scope(heap->isolate());
       return heap->isolate()->Throw(
@@ -7786,8 +8655,8 @@
     // hidden and set the new prototype on that object.
     Object* current_proto = real_receiver->GetPrototype();
     while (current_proto->IsJSObject() &&
-          JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
-      real_receiver = JSObject::cast(current_proto);
+          JSReceiver::cast(current_proto)->map()->is_hidden_prototype()) {
+      real_receiver = JSReceiver::cast(current_proto);
       current_proto = current_proto->GetPrototype();
     }
   }
@@ -7820,69 +8689,16 @@
 }
 
 
-bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
-  switch (GetElementsKind()) {
-    case FAST_ELEMENTS: {
-      uint32_t length = IsJSArray() ?
-          static_cast<uint32_t>
-              (Smi::cast(JSArray::cast(this)->length())->value()) :
-          static_cast<uint32_t>(FixedArray::cast(elements())->length());
-      if ((index < length) &&
-          !FixedArray::cast(elements())->get(index)->IsTheHole()) {
-        return true;
-      }
-      break;
-    }
-    case FAST_DOUBLE_ELEMENTS: {
-      uint32_t length = IsJSArray() ?
-          static_cast<uint32_t>
-              (Smi::cast(JSArray::cast(this)->length())->value()) :
-          static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
-      if ((index < length) &&
-          !FixedDoubleArray::cast(elements())->is_the_hole(index)) {
-        return true;
-      }
-      break;
-    }
-    case EXTERNAL_PIXEL_ELEMENTS: {
-      ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
-      if (index < static_cast<uint32_t>(pixels->length())) {
-        return true;
-      }
-      break;
-    }
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-    case EXTERNAL_DOUBLE_ELEMENTS: {
-      ExternalArray* array = ExternalArray::cast(elements());
-      if (index < static_cast<uint32_t>(array->length())) {
-        return true;
-      }
-      break;
-    }
-    case DICTIONARY_ELEMENTS: {
-      if (element_dictionary()->FindEntry(index)
-          != SeededNumberDictionary::kNotFound) {
-        return true;
-      }
-      break;
-    }
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
-
-  // Handle [] on String objects.
-  if (this->IsStringObjectWithCharacterAt(index)) return true;
-
-  Object* pt = GetPrototype();
-  if (pt->IsNull()) return false;
-  return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
+MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
+                                                uint32_t first_arg,
+                                                uint32_t arg_count,
+                                                EnsureElementsMode mode) {
+  // Elements in |Arguments| are ordered backwards (because they're on the
+  // stack), but the method that's called here iterates over them in the
+  // forward direction.
+  return EnsureCanContainElements(
+      args->arguments() - first_arg - (arg_count - 1),
+      arg_count, mode);
 }
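
Note: the pointer arithmetic above compensates for the stack layout, where
argument 0 sits at the highest address and argument i at base - i. To hand a
forward-iterating checker the slice [first, first + count), you pass the
address of its last logical element, which is the lowest address. A two-line
sketch (ForwardSlice is hypothetical):

#include <cstddef>

const int* ForwardSlice(const int* base, size_t first, size_t count) {
  // Lowest address of the slice == its last logical element.
  return base - first - (count - 1);
}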
 
 
@@ -7925,7 +8741,21 @@
     }
     if (!result.IsEmpty()) return true;
   }
-  return holder_handle->HasElementPostInterceptor(*receiver_handle, index);
+
+  if (holder_handle->GetElementsAccessor()->HasElement(
+          *receiver_handle, *holder_handle, index)) {
+    return true;
+  }
+
+  if (holder_handle->IsStringObjectWithCharacterAt(index)) return true;
+  Object* pt = holder_handle->GetPrototype();
+  if (pt->IsJSProxy()) {
+    // We need to follow the spec and simulate a call to [[GetOwnProperty]].
+    return JSProxy::cast(pt)->GetElementAttributeWithHandler(
+        receiver, index) != ABSENT;
+  }
+  if (pt->IsNull()) return false;
+  return JSObject::cast(pt)->HasElementWithReceiver(*receiver_handle, index);
 }
 
 
@@ -7958,6 +8788,7 @@
   }
 
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>
@@ -8034,28 +8865,6 @@
 }
 
 
-bool JSObject::HasElementInElements(FixedArray* elements,
-                                    ElementsKind kind,
-                                    uint32_t index) {
-  ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
-  if (kind == FAST_ELEMENTS) {
-    int length = IsJSArray()
-        ? Smi::cast(JSArray::cast(this)->length())->value()
-        : elements->length();
-    if (index < static_cast<uint32_t>(length) &&
-        !elements->get(index)->IsTheHole()) {
-      return true;
-    }
-  } else {
-    if (SeededNumberDictionary::cast(elements)->FindEntry(index) !=
-        SeededNumberDictionary::kNotFound) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
 bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
   // Check access rights if needed.
   if (IsAccessCheckNeeded()) {
@@ -8071,67 +8880,9 @@
     return HasElementWithInterceptor(receiver, index);
   }
 
-  ElementsKind kind = GetElementsKind();
-  switch (kind) {
-    case FAST_ELEMENTS: {
-      uint32_t length = IsJSArray() ?
-          static_cast<uint32_t>
-              (Smi::cast(JSArray::cast(this)->length())->value()) :
-          static_cast<uint32_t>(FixedArray::cast(elements())->length());
-      if ((index < length) &&
-          !FixedArray::cast(elements())->get(index)->IsTheHole()) return true;
-      break;
-    }
-    case FAST_DOUBLE_ELEMENTS: {
-      uint32_t length = IsJSArray() ?
-          static_cast<uint32_t>
-              (Smi::cast(JSArray::cast(this)->length())->value()) :
-          static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
-      if ((index < length) &&
-          !FixedDoubleArray::cast(elements())->is_the_hole(index)) return true;
-      break;
-    }
-    case EXTERNAL_PIXEL_ELEMENTS: {
-      ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
-      if (index < static_cast<uint32_t>(pixels->length())) {
-        return true;
-      }
-      break;
-    }
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-    case EXTERNAL_DOUBLE_ELEMENTS: {
-      ExternalArray* array = ExternalArray::cast(elements());
-      if (index < static_cast<uint32_t>(array->length())) {
-        return true;
-      }
-      break;
-    }
-    case DICTIONARY_ELEMENTS: {
-      if (element_dictionary()->FindEntry(index)
-          != SeededNumberDictionary::kNotFound) {
-        return true;
-      }
-      break;
-    }
-    case NON_STRICT_ARGUMENTS_ELEMENTS: {
-      FixedArray* parameter_map = FixedArray::cast(elements());
-      uint32_t length = parameter_map->length();
-      Object* probe =
-          (index < length - 2) ? parameter_map->get(index + 2) : NULL;
-      if (probe != NULL && !probe->IsTheHole()) return true;
-
-      // Not a mapped parameter, check the arguments.
-      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-      kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : FAST_ELEMENTS;
-      if (HasElementInElements(arguments, kind, index)) return true;
-      break;
-    }
+  ElementsAccessor* accessor = GetElementsAccessor();
+  if (accessor->HasElement(receiver, this, index)) {
+    return true;
   }
 
   // Handle [] on String objects.
@@ -8139,14 +8890,21 @@
 
   Object* pt = GetPrototype();
   if (pt->IsNull()) return false;
+  if (pt->IsJSProxy()) {
+    // We need to follow the spec and simulate a call to [[GetOwnProperty]].
+    return JSProxy::cast(pt)->GetElementAttributeWithHandler(
+        receiver, index) != ABSENT;
+  }
   return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
 }
 
 
 MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
                                                  Object* value,
+                                                 PropertyAttributes attributes,
                                                  StrictModeFlag strict_mode,
-                                                 bool check_prototype) {
+                                                 bool check_prototype,
+                                                 SetPropertyMode set_mode) {
   Isolate* isolate = GetIsolate();
   // Make sure that the top context does not change when doing
   // callbacks or interceptor calls.
@@ -8174,8 +8932,10 @@
   MaybeObject* raw_result =
       this_handle->SetElementWithoutInterceptor(index,
                                                 *value_handle,
+                                                attributes,
                                                 strict_mode,
-                                                check_prototype);
+                                                check_prototype,
+                                                set_mode);
   RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   return raw_result;
 }
@@ -8213,11 +8973,11 @@
   }
 
   // __defineGetter__ callback
-  if (structure->IsFixedArray()) {
-    Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
-    if (getter->IsJSFunction()) {
-      return Object::GetPropertyWithDefinedGetter(receiver,
-                                                  JSFunction::cast(getter));
+  if (structure->IsAccessorPair()) {
+    Object* getter = AccessorPair::cast(structure)->getter();
+    if (getter->IsSpecFunction()) {
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
     }
     // Getter is not a function.
     return isolate->heap()->undefined_value();
@@ -8270,10 +9030,11 @@
     return *value_handle;
   }
 
-  if (structure->IsFixedArray()) {
-    Handle<Object> setter(FixedArray::cast(structure)->get(kSetterIndex));
-    if (setter->IsJSFunction()) {
-     return SetPropertyWithDefinedSetter(JSFunction::cast(*setter), value);
+  if (structure->IsAccessorPair()) {
+    Handle<Object> setter(AccessorPair::cast(structure)->setter());
+    if (setter->IsSpecFunction()) {
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
     } else {
       if (strict_mode == kNonStrictMode) {
         return value;
@@ -8323,21 +9084,20 @@
                                       Object* value,
                                       StrictModeFlag strict_mode,
                                       bool check_prototype) {
-  ASSERT(HasFastElements() || HasFastArgumentsElements());
+  ASSERT(HasFastTypeElements() ||
+         HasFastArgumentsElements());
 
   FixedArray* backing_store = FixedArray::cast(elements());
   if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
     backing_store = FixedArray::cast(backing_store->get(1));
   } else {
-    Object* writable;
     MaybeObject* maybe = EnsureWritableFastElements();
-    if (!maybe->ToObject(&writable)) return maybe;
-    backing_store = FixedArray::cast(writable);
+    if (!maybe->To(&backing_store)) return maybe;
   }
-  uint32_t length = static_cast<uint32_t>(backing_store->length());
+  uint32_t capacity = static_cast<uint32_t>(backing_store->length());
 
   if (check_prototype &&
-      (index >= length || backing_store->get(index)->IsTheHole())) {
+      (index >= capacity || backing_store->get(index)->IsTheHole())) {
     bool found;
     MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
                                                                    value,
@@ -8346,46 +9106,88 @@
     if (found) return result;
   }
 
-  // Check whether there is extra space in fixed array.
-  if (index < length) {
-    backing_store->set(index, value);
-    if (IsJSArray()) {
-      // Update the length of the array if needed.
-      uint32_t array_length = 0;
-      CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
-      if (index >= array_length) {
-        JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
+  uint32_t new_capacity = capacity;
+  // Check if the length property of this object needs to be updated.
+  uint32_t array_length = 0;
+  bool must_update_array_length = false;
+  if (IsJSArray()) {
+    CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+    if (index >= array_length) {
+      must_update_array_length = true;
+      array_length = index + 1;
+    }
+  }
+  // Check if the capacity of the backing store needs to be increased, or if
+  // a transition to slow elements is necessary.
+  if (index >= capacity) {
+    bool convert_to_slow = true;
+    if ((index - capacity) < kMaxGap) {
+      new_capacity = NewElementsCapacity(index + 1);
+      ASSERT(new_capacity > index);
+      if (!ShouldConvertToSlowElements(new_capacity)) {
+        convert_to_slow = false;
       }
     }
-    return value;
-  }
-
-  // Allow gap in fast case.
-  if ((index - length) < kMaxGap) {
-    // Try allocating extra space.
-    int new_capacity = NewElementsCapacity(index + 1);
-    if (!ShouldConvertToSlowElements(new_capacity)) {
-      ASSERT(static_cast<uint32_t>(new_capacity) > index);
-      Object* new_elements;
-      MaybeObject* maybe =
-          SetFastElementsCapacityAndLength(new_capacity, index + 1);
-      if (!maybe->ToObject(&new_elements)) return maybe;
-      FixedArray::cast(new_elements)->set(index, value);
-      return value;
+    if (convert_to_slow) {
+      MaybeObject* result = NormalizeElements();
+      if (result->IsFailure()) return result;
+      return SetDictionaryElement(index, value, NONE, strict_mode,
+                                  check_prototype);
     }
   }
-
-  // Otherwise default to slow case.
-  MaybeObject* result = NormalizeElements();
-  if (result->IsFailure()) return result;
-  return SetDictionaryElement(index, value, strict_mode, check_prototype);
+  // Convert to fast double elements if appropriate.
+  if (HasFastSmiOnlyElements() && !value->IsSmi() && value->IsNumber()) {
+    MaybeObject* maybe =
+        SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
+    if (maybe->IsFailure()) return maybe;
+    FixedDoubleArray::cast(elements())->set(index, value->Number());
+    return value;
+  }
+  // Change elements kind from SMI_ONLY to generic FAST if necessary.
+  if (HasFastSmiOnlyElements() && !value->IsSmi()) {
+    Map* new_map;
+    { MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
+                                                            FAST_ELEMENTS);
+      if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+    }
+    set_map(new_map);
+    if (FLAG_trace_elements_transitions) {
+      PrintElementsTransition(stdout, FAST_SMI_ONLY_ELEMENTS, elements(),
+                              FAST_ELEMENTS, elements());
+    }
+  }
+  // Increase backing store capacity if that's been decided previously.
+  if (new_capacity != capacity) {
+    FixedArray* new_elements;
+    SetFastElementsCapacityMode set_capacity_mode =
+        value->IsSmi() && HasFastSmiOnlyElements()
+            ? kAllowSmiOnlyElements
+            : kDontAllowSmiOnlyElements;
+    { MaybeObject* maybe =
+          SetFastElementsCapacityAndLength(new_capacity,
+                                           array_length,
+                                           set_capacity_mode);
+      if (!maybe->To(&new_elements)) return maybe;
+    }
+    new_elements->set(index, value);
+    return value;
+  }
+  // Finally, set the new element and length.
+  ASSERT(elements()->IsFixedArray());
+  backing_store->set(index, value);
+  if (must_update_array_length) {
+    JSArray::cast(this)->set_length(Smi::FromInt(array_length));
+  }
+  return value;
 }
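
The rewritten SetFastElement folds the old "extra space / gap / slow case"
branches into one decision made up front. A compact sketch of just that
capacity decision; kMaxGap, the growth rule, and the slow-elements heuristic
are assumed placeholders here, not V8's exact tuning:

  #include <cstdint>
  #include <cstdio>

  const uint32_t kMaxGapModel = 1024;              // assumed, mirrors kMaxGap
  uint32_t NewCapacityModel(uint32_t min) {        // assumed growth rule
    return min + (min >> 1) + 16;
  }
  bool ShouldGoSlowModel(uint32_t new_capacity) {  // assumed heuristic
    return new_capacity > 4096;
  }

  enum StorePlan { kStoreInPlace, kGrowThenStore, kNormalizeToDictionary };

  StorePlan PlanStore(uint32_t index, uint32_t capacity) {
    if (index < capacity) return kStoreInPlace;
    if (index - capacity < kMaxGapModel) {
      uint32_t new_capacity = NewCapacityModel(index + 1);
      if (!ShouldGoSlowModel(new_capacity)) return kGrowThenStore;
    }
    // Too sparse (or too large): normalize to dictionary elements.
    return kNormalizeToDictionary;
  }

  int main() {
    std::printf("%d\n", PlanStore(9, 8));     // 1: small gap, grow in place
    std::printf("%d\n", PlanStore(5000, 8));  // 2: gap > kMaxGap, go slow
    return 0;
  }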
 
 
 MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
                                             Object* value,
+                                            PropertyAttributes attributes,
                                             StrictModeFlag strict_mode,
-                                            bool check_prototype) {
+                                            bool check_prototype,
+                                            SetPropertyMode set_mode) {
   ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
   Isolate* isolate = GetIsolate();
   Heap* heap = isolate->heap();
@@ -8405,20 +9207,40 @@
   if (entry != SeededNumberDictionary::kNotFound) {
     Object* element = dictionary->ValueAt(entry);
     PropertyDetails details = dictionary->DetailsAt(entry);
-    if (details.type() == CALLBACKS) {
+    if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
       return SetElementWithCallback(element, index, value, this, strict_mode);
     } else {
       dictionary->UpdateMaxNumberKey(index);
-      // If put fails in strict mode, throw an exception.
-      if (!dictionary->ValueAtPut(entry, value) && strict_mode == kStrictMode) {
-        Handle<Object> holder(this);
-        Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
-        Handle<Object> args[2] = { number, holder };
-        Handle<Object> error =
-            isolate->factory()->NewTypeError("strict_read_only_property",
-                                             HandleVector(args, 2));
-        return isolate->Throw(*error);
+      // If a value has not been initialized we allow writing to it even if it
+      // is read-only (a declared const that has not been initialized).  If a
+      // value is being defined we skip attribute checks completely.
+      if (set_mode == DEFINE_PROPERTY) {
+        details = PropertyDetails(attributes, NORMAL, details.index());
+        dictionary->DetailsAtPut(entry, details);
+      } else if (details.IsReadOnly() && !element->IsTheHole()) {
+        if (strict_mode == kNonStrictMode) {
+          return isolate->heap()->undefined_value();
+        } else {
+          Handle<Object> holder(this);
+          Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+          Handle<Object> args[2] = { number, holder };
+          Handle<Object> error =
+              isolate->factory()->NewTypeError("strict_read_only_property",
+                                               HandleVector(args, 2));
+          return isolate->Throw(*error);
+        }
       }
+      // Elements of the arguments object in slow mode might be slow aliases.
+      if (is_arguments && element->IsAliasedArgumentsEntry()) {
+        AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(element);
+        Context* context = Context::cast(elements->get(0));
+        int context_index = entry->aliased_context_slot();
+        ASSERT(!context->get(context_index)->IsTheHole());
+        context->set(context_index, value);
+        // For elements that are still writable we keep slow aliasing.
+        if (!details.IsReadOnly()) value = element;
+      }
+      dictionary->ValueAtPut(entry, value);
     }
   } else {
     // Index not already used. Look for an accessor in the prototype chain.
@@ -8445,8 +9267,9 @@
       }
     }
     FixedArrayBase* new_dictionary;
-    MaybeObject* maybe = dictionary->AtNumberPut(index, value);
-    if (!maybe->To<FixedArrayBase>(&new_dictionary)) return maybe;
+    PropertyDetails details = PropertyDetails(attributes, NORMAL);
+    MaybeObject* maybe = dictionary->AddNumberEntry(index, value, details);
+    if (!maybe->To(&new_dictionary)) return maybe;
     if (dictionary != SeededNumberDictionary::cast(new_dictionary)) {
       if (is_arguments) {
         elements->set(1, new_dictionary);
@@ -8472,9 +9295,20 @@
     } else {
       new_length = dictionary->max_number_key() + 1;
     }
-    MaybeObject* result = CanConvertToFastDoubleElements()
+    SetFastElementsCapacityMode set_capacity_mode = FLAG_smi_only_arrays
+        ? kAllowSmiOnlyElements
+        : kDontAllowSmiOnlyElements;
+    bool has_smi_only_elements = false;
+    bool should_convert_to_fast_double_elements =
+        ShouldConvertToFastDoubleElements(&has_smi_only_elements);
+    if (has_smi_only_elements) {
+      set_capacity_mode = kForceSmiOnlyElements;
+    }
+    MaybeObject* result = should_convert_to_fast_double_elements
         ? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
-        : SetFastElementsCapacityAndLength(new_length, new_length);
+        : SetFastElementsCapacityAndLength(new_length,
+                                           new_length,
+                                           set_capacity_mode);
     if (result->IsFailure()) return result;
 #ifdef DEBUG
     if (FLAG_trace_normalization) {
@@ -8494,13 +9328,14 @@
     bool check_prototype) {
   ASSERT(HasFastDoubleElements());
 
-  FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
-  uint32_t elms_length = static_cast<uint32_t>(elms->length());
+  FixedArrayBase* base_elms = FixedArrayBase::cast(elements());
+  uint32_t elms_length = static_cast<uint32_t>(base_elms->length());
 
   // If storing to an element that isn't in the array, pass the store request
   // up the prototype chain before storing in the receiver's elements.
   if (check_prototype &&
-      (index >= elms_length || elms->is_the_hole(index))) {
+      (index >= elms_length ||
+       FixedDoubleArray::cast(base_elms)->is_the_hole(index))) {
     bool found;
     MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
                                                                    value,
@@ -8518,10 +9353,15 @@
     if (IsJSArray()) {
       CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
     }
-    MaybeObject* maybe_obj =
-        SetFastElementsCapacityAndLength(elms_length, length);
+    MaybeObject* maybe_obj = SetFastElementsCapacityAndLength(
+        elms_length,
+        length,
+        kDontAllowSmiOnlyElements);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    return SetFastElement(index, value, strict_mode, check_prototype);
+    return SetFastElement(index,
+                          value,
+                          strict_mode,
+                          check_prototype);
   }
 
   double double_value = value_is_smi
@@ -8530,6 +9370,7 @@
 
   // Check whether there is extra space in the fixed array.
   if (index < elms_length) {
+    FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
     elms->set(index, double_value);
     if (IsJSArray()) {
       // Update the length of the array if needed.
@@ -8568,14 +9409,64 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   ASSERT(HasDictionaryElements());
-  return SetElement(index, value, strict_mode, check_prototype);
+  return SetElement(index, value, NONE, strict_mode, check_prototype);
+}
+
+
+MaybeObject* JSReceiver::SetElement(uint32_t index,
+                                    Object* value,
+                                    PropertyAttributes attributes,
+                                    StrictModeFlag strict_mode,
+                                    bool check_proto) {
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->SetElementWithHandler(
+        index, value, strict_mode);
+  } else {
+    return JSObject::cast(this)->SetElement(
+        index, value, attributes, strict_mode, check_proto);
+  }
+}
+
+
+Handle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
+                                       uint32_t index,
+                                       Handle<Object> value,
+                                       StrictModeFlag strict_mode) {
+  ASSERT(!object->HasExternalArrayElements());
+  CALL_HEAP_FUNCTION(
+      object->GetIsolate(),
+      object->SetElement(index, *value, NONE, strict_mode, false),
+      Object);
+}
+
+
+Handle<Object> JSObject::SetElement(Handle<JSObject> object,
+                                    uint32_t index,
+                                    Handle<Object> value,
+                                    PropertyAttributes attr,
+                                    StrictModeFlag strict_mode,
+                                    SetPropertyMode set_mode) {
+  if (object->HasExternalArrayElements()) {
+    if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
+      bool has_exception;
+      Handle<Object> number = Execution::ToNumber(value, &has_exception);
+      if (has_exception) return Handle<Object>();
+      value = number;
+    }
+  }
+  CALL_HEAP_FUNCTION(
+      object->GetIsolate(),
+      object->SetElement(index, *value, attr, strict_mode, true, set_mode),
+      Object);
 }
 
 
 MaybeObject* JSObject::SetElement(uint32_t index,
                                   Object* value,
+                                  PropertyAttributes attributes,
                                   StrictModeFlag strict_mode,
-                                  bool check_prototype) {
+                                  bool check_prototype,
+                                  SetPropertyMode set_mode) {
   // Check access rights if needed.
   if (IsAccessCheckNeeded()) {
     Heap* heap = GetHeap();
@@ -8593,31 +9484,62 @@
     ASSERT(proto->IsJSGlobalObject());
     return JSObject::cast(proto)->SetElement(index,
                                              value,
+                                             attributes,
                                              strict_mode,
-                                             check_prototype);
+                                             check_prototype,
+                                             set_mode);
+  }
+
+  // Don't allow element properties to be redefined for external arrays.
+  if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
+    Isolate* isolate = GetHeap()->isolate();
+    Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+    Handle<Object> args[] = { Handle<Object>(this), number };
+    Handle<Object> error = isolate->factory()->NewTypeError(
+        "redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
+    return isolate->Throw(*error);
+  }
+
+  // Normalize the elements to enable attributes on the property.
+  if ((attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) {
+    SeededNumberDictionary* dictionary;
+    MaybeObject* maybe_object = NormalizeElements();
+    if (!maybe_object->To(&dictionary)) return maybe_object;
+    // Make sure that we never go back to the fast case.
+    dictionary->set_requires_slow_elements();
   }
 
   // Check for lookup interceptor
   if (HasIndexedInterceptor()) {
     return SetElementWithInterceptor(index,
                                      value,
+                                     attributes,
                                      strict_mode,
-                                     check_prototype);
+                                     check_prototype,
+                                     set_mode);
   }
 
   return SetElementWithoutInterceptor(index,
                                       value,
+                                      attributes,
                                       strict_mode,
-                                      check_prototype);
+                                      check_prototype,
+                                      set_mode);
 }
 
 
 MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
                                                     Object* value,
+                                                    PropertyAttributes attr,
                                                     StrictModeFlag strict_mode,
-                                                    bool check_prototype) {
+                                                    bool check_prototype,
+                                                    SetPropertyMode set_mode) {
+  ASSERT(HasDictionaryElements() ||
+         HasDictionaryArgumentsElements() ||
+         (attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
   Isolate* isolate = GetIsolate();
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
       return SetFastElement(index, value, strict_mode, check_prototype);
     case FAST_DOUBLE_ELEMENTS:
@@ -8662,7 +9584,8 @@
       return array->SetValue(index, value);
     }
     case DICTIONARY_ELEMENTS:
-      return SetDictionaryElement(index, value, strict_mode, check_prototype);
+      return SetDictionaryElement(index, value, attr, strict_mode,
+                                  check_prototype, set_mode);
     case NON_STRICT_ARGUMENTS_ELEMENTS: {
       FixedArray* parameter_map = FixedArray::cast(elements());
       uint32_t length = parameter_map->length();
@@ -8673,17 +9596,23 @@
         int context_index = Smi::cast(probe)->value();
         ASSERT(!context->get(context_index)->IsTheHole());
         context->set(context_index, value);
-        return value;
-      } else {
-        // Object is not mapped, defer to the arguments.
-        FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-        if (arguments->IsDictionary()) {
-          return SetDictionaryElement(index, value, strict_mode,
-                                      check_prototype);
-        } else {
-          return SetFastElement(index, value, strict_mode, check_prototype);
+        // Redefining attributes of an aliased element destroys fast aliasing.
+        if (set_mode == SET_PROPERTY || attr == NONE) return value;
+        parameter_map->set_the_hole(index + 2);
+        // For elements that are still writable we re-establish slow aliasing.
+        if ((attr & READ_ONLY) == 0) {
+          MaybeObject* maybe_entry =
+              isolate->heap()->AllocateAliasedArgumentsEntry(context_index);
+          if (!maybe_entry->ToObject(&value)) return maybe_entry;
         }
       }
+      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+      if (arguments->IsDictionary()) {
+        return SetDictionaryElement(index, value, attr, strict_mode,
+                                    check_prototype, set_mode);
+      } else {
+        return SetFastElement(index, value, strict_mode, check_prototype);
+      }
     }
   }
   // All possible cases have been handled above. Add a return to avoid the
@@ -8693,6 +9622,79 @@
 }
 
 
+Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
+                                                ElementsKind to_kind) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->TransitionElementsKind(to_kind),
+                     Object);
+}
+
+
+MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
+  ElementsKind from_kind = map()->elements_kind();
+
+  Isolate* isolate = GetIsolate();
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+      (to_kind == FAST_ELEMENTS ||
+       elements() == isolate->heap()->empty_fixed_array())) {
+    MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind);
+    Map* new_map;
+    if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+    set_map(new_map);
+    if (FLAG_trace_elements_transitions) {
+      FixedArrayBase* elms = FixedArrayBase::cast(elements());
+      PrintElementsTransition(stdout, from_kind, elms, to_kind, elms);
+    }
+    return this;
+  }
+
+  FixedArrayBase* elms = FixedArrayBase::cast(elements());
+  uint32_t capacity = static_cast<uint32_t>(elms->length());
+  uint32_t length = capacity;
+
+  if (IsJSArray()) {
+    Object* raw_length = JSArray::cast(this)->length();
+    if (raw_length->IsUndefined()) {
+      // If length is undefined, the JSArray is being initialized and has no
+      // elements; assume a length of zero.
+      length = 0;
+    } else {
+      CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+    }
+  }
+
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+      to_kind == FAST_DOUBLE_ELEMENTS) {
+    MaybeObject* maybe_result =
+        SetFastDoubleElementsCapacityAndLength(capacity, length);
+    if (maybe_result->IsFailure()) return maybe_result;
+    return this;
+  }
+
+  if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
+        capacity, length, kDontAllowSmiOnlyElements);
+    if (maybe_result->IsFailure()) return maybe_result;
+    return this;
+  }
+
+  // This method should never be called for any cases other than the ones
+  // handled above.
+  UNREACHABLE();
+  return GetIsolate()->heap()->null_value();
+}
+
+
+// static
+bool Map::IsValidElementsTransition(ElementsKind from_kind,
+                                    ElementsKind to_kind) {
+  return
+      (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+          (to_kind == FAST_DOUBLE_ELEMENTS || to_kind == FAST_ELEMENTS)) ||
+      (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS);
+}
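
The lattice encoded by Map::IsValidElementsTransition is small enough to
restate standalone; the point is that kinds only ever generalize and never
run backwards:

  #include <cstdio>

  enum KindModel { SMI_ONLY, DOUBLE, TAGGED };

  bool IsValidTransitionModel(KindModel from, KindModel to) {
    // Smi-only may widen to double or tagged; double may widen to tagged;
    // nothing ever narrows.
    return (from == SMI_ONLY && (to == DOUBLE || to == TAGGED)) ||
           (from == DOUBLE && to == TAGGED);
  }

  int main() {
    std::printf("%d\n", IsValidTransitionModel(SMI_ONLY, DOUBLE));  // 1
    std::printf("%d\n", IsValidTransitionModel(TAGGED, DOUBLE));    // 0
    return 0;
  }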
+
+
 MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
                                                    Object* value) {
   uint32_t old_len = 0;
@@ -8740,10 +9742,9 @@
 
   Heap* heap = holder_handle->GetHeap();
   ElementsAccessor* handler = holder_handle->GetElementsAccessor();
-  MaybeObject* raw_result = handler->Get(holder_handle->elements(),
-                                         index,
+  MaybeObject* raw_result = handler->Get(*this_handle,
                                          *holder_handle,
-                                         *this_handle);
+                                         index);
   if (raw_result != heap->the_hole_value()) return raw_result;
 
   RETURN_IF_SCHEDULED_EXCEPTION(isolate);
@@ -8781,6 +9782,7 @@
         break;
       }
       // Fall through.
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
       backing_store = FixedArray::cast(backing_store_base);
       *capacity = backing_store->length();
@@ -8874,18 +9876,26 @@
 }
 
 
-bool JSObject::CanConvertToFastDoubleElements() {
+bool JSObject::ShouldConvertToFastDoubleElements(
+    bool* has_smi_only_elements) {
+  *has_smi_only_elements = false;
   if (FLAG_unbox_double_arrays) {
     ASSERT(HasDictionaryElements());
     SeededNumberDictionary* dictionary =
         SeededNumberDictionary::cast(elements());
+    bool found_double = false;
     for (int i = 0; i < dictionary->Capacity(); i++) {
       Object* key = dictionary->KeyAt(i);
       if (key->IsNumber()) {
-        if (!dictionary->ValueAt(i)->IsNumber()) return false;
+        Object* value = dictionary->ValueAt(i);
+        if (!value->IsNumber()) return false;
+        if (!value->IsSmi()) {
+          found_double = true;
+        }
       }
     }
-    return true;
+    *has_smi_only_elements = !found_double;
+    return found_double;
   } else {
     return false;
   }
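
A standalone model of the scan above, under the simplifying assumption that
each dictionary value is just flagged as number/Smi: conversion to a double
array is chosen only when every value is numeric and at least one is a
non-Smi double, while an all-Smi dictionary instead reports
has_smi_only_elements so the caller can force smi-only capacity:

  #include <cstdio>
  #include <vector>

  struct ValueModel { bool is_number; bool is_smi; };

  bool ShouldConvertToDoubleModel(const std::vector<ValueModel>& values,
                                  bool* has_smi_only_elements) {
    *has_smi_only_elements = false;
    bool found_double = false;
    for (size_t i = 0; i < values.size(); ++i) {
      if (!values[i].is_number) return false;  // non-number blocks conversion
      if (!values[i].is_smi) found_double = true;
    }
    *has_smi_only_elements = !found_double;
    return found_double;
  }

  int main() {
    std::vector<ValueModel> values;
    ValueModel smi = { true, true };
    ValueModel dbl = { true, false };
    values.push_back(smi);
    values.push_back(dbl);
    bool smi_only = false;
    std::printf("%d %d\n", ShouldConvertToDoubleModel(values, &smi_only),
                smi_only);  // 1 0: convert to doubles, not smi-only
    return 0;
  }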
@@ -8960,7 +9970,7 @@
     String* name,
     PropertyAttributes* attributes) {
   // Check local property in holder, ignore interceptor.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsProperty()) {
     return GetProperty(receiver, &result, name, attributes);
@@ -8978,7 +9988,7 @@
     String* name,
     PropertyAttributes* attributes) {
   // Check local property in holder, ignore interceptor.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsProperty()) {
     return GetProperty(receiver, &result, name, attributes);
@@ -9029,15 +10039,15 @@
 
 bool JSObject::HasRealNamedProperty(String* key) {
   // Check access rights if needed.
+  Isolate* isolate = GetIsolate();
   if (IsAccessCheckNeeded()) {
-    Heap* heap = GetHeap();
-    if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
-      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+      isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
       return false;
     }
   }
 
-  LookupResult result;
+  LookupResult result(isolate);
   LocalLookupRealNamedProperty(key, &result);
   return result.IsProperty() && (result.type() != INTERCEPTOR);
 }
@@ -9057,6 +10067,7 @@
   if (this->IsStringObjectWithCharacterAt(index)) return true;
 
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>(
@@ -9105,39 +10116,24 @@
 
 bool JSObject::HasRealNamedCallbackProperty(String* key) {
   // Check access rights if needed.
+  Isolate* isolate = GetIsolate();
   if (IsAccessCheckNeeded()) {
-    Heap* heap = GetHeap();
-    if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
-      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+      isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
       return false;
     }
   }
 
-  LookupResult result;
+  LookupResult result(isolate);
   LocalLookupRealNamedProperty(key, &result);
-  return result.IsProperty() && (result.type() == CALLBACKS);
+  return result.IsFound() && (result.type() == CALLBACKS);
 }
 
 
 int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
-  if (HasFastProperties()) {
-    DescriptorArray* descs = map()->instance_descriptors();
-    int result = 0;
-    for (int i = 0; i < descs->number_of_descriptors(); i++) {
-      PropertyDetails details(descs->GetDetails(i));
-      if (details.IsProperty() && (details.attributes() & filter) == 0) {
-        result++;
-      }
-    }
-    return result;
-  } else {
-    return property_dictionary()->NumberOfElementsFilterAttributes(filter);
-  }
-}
-
-
-int JSObject::NumberOfEnumProperties() {
-  return NumberOfLocalProperties(static_cast<PropertyAttributes>(DONT_ENUM));
+  return HasFastProperties() ?
+      map()->NumberOfDescribedProperties(filter) :
+      property_dictionary()->NumberOfElementsFilterAttributes(filter);
 }
 
 
@@ -9147,8 +10143,8 @@
   set(j, temp);
   if (this != numbers) {
     temp = numbers->get(i);
-    numbers->set(i, numbers->get(j));
-    numbers->set(j, temp);
+    numbers->set(i, Smi::cast(numbers->get(j)));
+    numbers->set(j, Smi::cast(temp));
   }
 }
 
@@ -9258,7 +10254,7 @@
 // purpose of this function is to provide reflection information for the object
 // mirrors.
 void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) {
-  ASSERT(storage->length() >= (NumberOfLocalProperties(NONE) - index));
+  ASSERT(storage->length() >= (NumberOfLocalProperties() - index));
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
@@ -9296,6 +10292,7 @@
                                   PropertyAttributes filter) {
   int counter = 0;
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       int length = IsJSArray() ?
           Smi::cast(JSArray::cast(this)->length())->value() :
@@ -9462,70 +10459,87 @@
  public:
   StringSharedKey(String* source,
                   SharedFunctionInfo* shared,
-                  StrictModeFlag strict_mode)
+                  LanguageMode language_mode,
+                  int scope_position)
       : source_(source),
         shared_(shared),
-        strict_mode_(strict_mode) { }
+        language_mode_(language_mode),
+        scope_position_(scope_position) { }
 
   bool IsMatch(Object* other) {
     if (!other->IsFixedArray()) return false;
-    FixedArray* pair = FixedArray::cast(other);
-    SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
+    FixedArray* other_array = FixedArray::cast(other);
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
     if (shared != shared_) return false;
-    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
-        Smi::cast(pair->get(2))->value());
-    if (strict_mode != strict_mode_) return false;
-    String* source = String::cast(pair->get(1));
+    int language_unchecked = Smi::cast(other_array->get(2))->value();
+    ASSERT(language_unchecked == CLASSIC_MODE ||
+           language_unchecked == STRICT_MODE ||
+           language_unchecked == EXTENDED_MODE);
+    LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+    if (language_mode != language_mode_) return false;
+    int scope_position = Smi::cast(other_array->get(3))->value();
+    if (scope_position != scope_position_) return false;
+    String* source = String::cast(other_array->get(1));
     return source->Equals(source_);
   }
 
   static uint32_t StringSharedHashHelper(String* source,
                                          SharedFunctionInfo* shared,
-                                         StrictModeFlag strict_mode) {
+                                         LanguageMode language_mode,
+                                         int scope_position) {
     uint32_t hash = source->Hash();
     if (shared->HasSourceCode()) {
       // Instead of using the SharedFunctionInfo pointer in the hash
       // code computation, we use a combination of the hash of the
-      // script source code and the start and end positions.  We do
-      // this to ensure that the cache entries can survive garbage
+      // script source code and the start position of the calling scope.
+      // We do this to ensure that the cache entries can survive garbage
       // collection.
       Script* script = Script::cast(shared->script());
       hash ^= String::cast(script->source())->Hash();
-      if (strict_mode == kStrictMode) hash ^= 0x8000;
-      hash += shared->start_position();
+      if (language_mode == STRICT_MODE) hash ^= 0x8000;
+      if (language_mode == EXTENDED_MODE) hash ^= 0x0080;
+      hash += scope_position;
     }
     return hash;
   }
 
   uint32_t Hash() {
-    return StringSharedHashHelper(source_, shared_, strict_mode_);
+    return StringSharedHashHelper(
+        source_, shared_, language_mode_, scope_position_);
   }
 
   uint32_t HashForObject(Object* obj) {
-    FixedArray* pair = FixedArray::cast(obj);
-    SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
-    String* source = String::cast(pair->get(1));
-    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
-        Smi::cast(pair->get(2))->value());
-    return StringSharedHashHelper(source, shared, strict_mode);
+    FixedArray* other_array = FixedArray::cast(obj);
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
+    String* source = String::cast(other_array->get(1));
+    int language_unchecked = Smi::cast(other_array->get(2))->value();
+    ASSERT(language_unchecked == CLASSIC_MODE ||
+           language_unchecked == STRICT_MODE ||
+           language_unchecked == EXTENDED_MODE);
+    LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+    int scope_position = Smi::cast(other_array->get(3))->value();
+    return StringSharedHashHelper(
+        source, shared, language_mode, scope_position);
   }
 
   MUST_USE_RESULT MaybeObject* AsObject() {
     Object* obj;
-    { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(3);
+    { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(4);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
-    FixedArray* pair = FixedArray::cast(obj);
-    pair->set(0, shared_);
-    pair->set(1, source_);
-    pair->set(2, Smi::FromInt(strict_mode_));
-    return pair;
+    FixedArray* other_array = FixedArray::cast(obj);
+    other_array->set(0, shared_);
+    other_array->set(1, source_);
+    other_array->set(2, Smi::FromInt(language_mode_));
+    other_array->set(3, Smi::FromInt(scope_position_));
+    return other_array;
   }
 
  private:
   String* source_;
   SharedFunctionInfo* shared_;
-  StrictModeFlag strict_mode_;
+  LanguageMode language_mode_;
+  int scope_position_;
 };
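
The net effect of the key change is that the eval compilation cache now
distinguishes entries by language mode and calling scope position as well as
by source. A simplified model of the hash combination (the real helper also
folds in the script source hash):

  #include <cstdint>
  #include <cstdio>

  enum LanguageModeModel { CLASSIC, STRICT, EXTENDED };

  uint32_t EvalCacheHash(uint32_t source_hash,
                         LanguageModeModel mode,
                         int scope_position) {
    uint32_t hash = source_hash;
    if (mode == STRICT) hash ^= 0x8000;    // same bits as the helper above
    if (mode == EXTENDED) hash ^= 0x0080;
    hash += scope_position;
    return hash;
  }

  int main() {
    // The same source at two scope positions lands in two cache entries.
    std::printf("%u %u\n", EvalCacheHash(0x1234, STRICT, 0),
                EvalCacheHash(0x1234, STRICT, 42));
    return 0;
  }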
 
 
@@ -9583,7 +10597,7 @@
     if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
     unibrow::Utf8InputBuffer<> buffer(string_.start(),
                                       static_cast<unsigned>(string_.length()));
-    chars_ = buffer.Length();
+    chars_ = buffer.Utf16Length();
     hash_field_ = String::ComputeHashField(&buffer, chars_, seed_);
     uint32_t result = hash_field_ >> String::kHashShift;
     ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
@@ -9778,7 +10792,7 @@
     // Transform string to symbol if possible.
     Map* map = heap->SymbolMapForString(string_);
     if (map != NULL) {
-      string_->set_map(map);
+      string_->set_map_no_write_barrier(map);
       ASSERT(string_->IsSymbol());
       return string_;
     }
@@ -9839,7 +10853,7 @@
 
   // Optimized for symbol key. Knowledge of the key type allows:
   // 1. Move the check if the key is a symbol out of the loop.
-  // 2. Avoid comparing hash codes in symbol to symbol comparision.
+  // 2. Avoid comparing hash codes in symbol to symbol comparison.
   // 3. Detect a case when a dictionary key is not a symbol but the key is.
   //    In case of positive result the dictionary key may be replaced by
   //    the symbol with minimal performance penalty. It gives a chance to
@@ -9857,20 +10871,45 @@
     if (element->IsUndefined()) break;  // Empty entry.
     if (key == element) return entry;
     if (!element->IsSymbol() &&
-        !element->IsNull() &&
+        !element->IsTheHole() &&
         String::cast(element)->Equals(key)) {
       // Replace a non-symbol key by the equivalent symbol for faster further
       // lookups.
       set(index, key);
       return entry;
     }
-    ASSERT(element->IsNull() || !String::cast(element)->Equals(key));
+    ASSERT(element->IsTheHole() || !String::cast(element)->Equals(key));
     entry = NextProbe(entry, count++, capacity);
   }
   return kNotFound;
 }
 
 
+bool StringDictionary::ContainsTransition(int entry) {
+  switch (DetailsAt(entry).type()) {
+    case MAP_TRANSITION:
+    case CONSTANT_TRANSITION:
+    case ELEMENTS_TRANSITION:
+      return true;
+    case CALLBACKS: {
+      Object* value = ValueAt(entry);
+      if (!value->IsAccessorPair()) return false;
+      AccessorPair* accessors = AccessorPair::cast(value);
+      return accessors->getter()->IsMap() || accessors->setter()->IsMap();
+    }
+    case NORMAL:
+    case FIELD:
+    case CONSTANT_FUNCTION:
+    case HANDLER:
+    case INTERCEPTOR:
+    case NULL_DESCRIPTOR:
+      return false;
+  }
+  UNREACHABLE();  // Keep the compiler happy.
+  return false;
+}
+
+
 template<typename Shape, typename Key>
 MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
   ASSERT(NumberOfElements() < new_table->Capacity());
@@ -9968,7 +11007,7 @@
   // EnsureCapacity will guarantee the hash table is never full.
   while (true) {
     Object* element = KeyAt(entry);
-    if (element->IsUndefined() || element->IsNull()) break;
+    if (element->IsUndefined() || element->IsTheHole()) break;
     entry = NextProbe(entry, count++, capacity);
   }
   return entry;
@@ -9983,7 +11022,9 @@
 
 template class HashTable<MapCacheShape, HashTableKey*>;
 
-template class HashTable<ObjectHashTableShape, JSObject*>;
+template class HashTable<ObjectHashTableShape<1>, Object*>;
+
+template class HashTable<ObjectHashTableShape<2>, Object*>;
 
 template class Dictionary<StringDictionaryShape, String*>;
 
@@ -10006,6 +11047,9 @@
 template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
     AtPut(uint32_t, Object*);
 
+template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
+    SlowReverseLookup(Object* value);
+
 template Object* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
     SlowReverseLookup(Object* value);
 
@@ -10184,8 +11228,6 @@
 // If the object is in dictionary mode, it is converted to fast elements
 // mode.
 MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
-  ASSERT(!HasExternalArrayElements());
-
   Heap* heap = GetHeap();
 
   if (HasDictionaryElements()) {
@@ -10199,7 +11241,8 @@
     // Convert to fast elements.
 
     Object* obj;
-    { MaybeObject* maybe_obj = map()->GetFastElementsMap();
+    { MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
+                                                        FAST_ELEMENTS);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     Map* new_map = Map::cast(obj);
@@ -10215,13 +11258,16 @@
 
     set_map(new_map);
     set_elements(fast_elements);
+  } else if (HasExternalArrayElements()) {
+    // External arrays cannot have holes or undefined elements.
+    return Smi::FromInt(ExternalArray::cast(elements())->length());
   } else if (!HasFastDoubleElements()) {
     Object* obj;
     { MaybeObject* maybe_obj = EnsureWritableFastElements();
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
   }
-  ASSERT(HasFastElements() || HasFastDoubleElements());
+  ASSERT(HasFastTypeElements() || HasFastDoubleElements());
 
   // Collect holes at the end, undefined before that and the rest at the
   // start, and return the number of non-hole, non-undefined values.
@@ -10443,7 +11489,7 @@
 
 
 MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
-  float cast_value = 0;
+  float cast_value = static_cast<float>(OS::nan_value());
   Heap* heap = GetHeap();
   if (index < static_cast<uint32_t>(length())) {
     if (value->IsSmi()) {
@@ -10453,7 +11499,7 @@
       double double_value = HeapNumber::cast(value)->value();
       cast_value = static_cast<float>(double_value);
     } else {
-      // Clamp undefined to zero (default). All other types have been
+      // Clamp undefined to NaN (default). All other types have been
       // converted to a number type further up in the call chain.
       ASSERT(value->IsUndefined());
     }
@@ -10464,7 +11510,7 @@
 
 
 MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) {
-  double double_value = 0;
+  double double_value = OS::nan_value();
   Heap* heap = GetHeap();
   if (index < static_cast<uint32_t>(length())) {
     if (value->IsSmi()) {
@@ -10473,7 +11519,7 @@
     } else if (value->IsHeapNumber()) {
       double_value = HeapNumber::cast(value)->value();
     } else {
-      // Clamp undefined to zero (default). All other types have been
+      // Clamp undefined to NaN (default). All other types have been
       // converted to a number type further up in the call chain.
       ASSERT(value->IsUndefined());
     }
@@ -10490,6 +11536,16 @@
 }
 
 
+Handle<JSGlobalPropertyCell> GlobalObject::EnsurePropertyCell(
+    Handle<GlobalObject> global,
+    Handle<String> name) {
+  Isolate* isolate = global->GetIsolate();
+  CALL_HEAP_FUNCTION(isolate,
+                     global->EnsurePropertyCell(*name),
+                     JSGlobalPropertyCell);
+}
+
+
 MaybeObject* GlobalObject::EnsurePropertyCell(String* name) {
   ASSERT(!HasFastProperties());
   int entry = property_dictionary()->FindEntry(name);
@@ -10691,8 +11747,12 @@
 
 Object* CompilationCacheTable::LookupEval(String* src,
                                           Context* context,
-                                          StrictModeFlag strict_mode) {
-  StringSharedKey key(src, context->closure()->shared(), strict_mode);
+                                          LanguageMode language_mode,
+                                          int scope_position) {
+  StringSharedKey key(src,
+                      context->closure()->shared(),
+                      language_mode,
+                      scope_position);
   int entry = FindEntry(&key);
   if (entry == kNotFound) return GetHeap()->undefined_value();
   return get(EntryToIndex(entry) + 1);
@@ -10727,10 +11787,12 @@
 
 MaybeObject* CompilationCacheTable::PutEval(String* src,
                                             Context* context,
-                                            SharedFunctionInfo* value) {
+                                            SharedFunctionInfo* value,
+                                            int scope_position) {
   StringSharedKey key(src,
                       context->closure()->shared(),
-                      value->strict_mode() ? kStrictMode : kNonStrictMode);
+                      value->language_mode(),
+                      scope_position);
   Object* obj;
   { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -10774,13 +11836,13 @@
 
 
 void CompilationCacheTable::Remove(Object* value) {
-  Object* null_value = GetHeap()->null_value();
+  Object* the_hole_value = GetHeap()->the_hole_value();
   for (int entry = 0, size = Capacity(); entry < size; entry++) {
     int entry_index = EntryToIndex(entry);
     int value_index = entry_index + 1;
     if (get(value_index) == value) {
-      fast_set(this, entry_index, null_value);
-      fast_set(this, value_index, null_value);
+      NoWriteBarrierSet(this, entry_index, the_hole_value);
+      NoWriteBarrierSet(this, value_index, the_hole_value);
       ElementRemoved();
     }
   }
@@ -10933,30 +11995,6 @@
 }
 
 
-void SeededNumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
-  // Do nothing if the interval [from, to) is empty.
-  if (from >= to) return;
-
-  Heap* heap = GetHeap();
-  int removed_entries = 0;
-  Object* sentinel = heap->null_value();
-  int capacity = Capacity();
-  for (int i = 0; i < capacity; i++) {
-    Object* key = KeyAt(i);
-    if (key->IsNumber()) {
-      uint32_t number = static_cast<uint32_t>(key->Number());
-      if (from <= number && number < to) {
-        SetEntry(i, sentinel, sentinel);
-        removed_entries++;
-      }
-    }
-  }
-
-  // Update the number of elements.
-  ElementsRemoved(removed_entries);
-}
-
-
 template<typename Shape, typename Key>
 Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
                                                JSReceiver::DeleteMode mode) {
@@ -10966,7 +12004,7 @@
   if (details.IsDontDelete() && mode != JSReceiver::FORCE_DELETION) {
     return heap->false_value();
   }
-  SetEntry(entry, heap->null_value(), heap->null_value());
+  SetEntry(entry, heap->the_hole_value(), heap->the_hole_value());
   HashTable<Shape, Key>::ElementRemoved();
   return heap->true_value();
 }
@@ -11098,6 +12136,27 @@
 }
 
 
+Handle<SeededNumberDictionary> SeededNumberDictionary::Set(
+    Handle<SeededNumberDictionary> dictionary,
+    uint32_t index,
+    Handle<Object> value,
+    PropertyDetails details) {
+  CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
+                     dictionary->Set(index, *value, details),
+                     SeededNumberDictionary);
+}
+
+
+Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
+    Handle<UnseededNumberDictionary> dictionary,
+    uint32_t index,
+    Handle<Object> value) {
+  CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
+                     dictionary->Set(index, *value),
+                     UnseededNumberDictionary);
+}
+
+
 MaybeObject* SeededNumberDictionary::Set(uint32_t key,
                                          Object* value,
                                          PropertyDetails details) {
@@ -11279,14 +12338,15 @@
   }
 
   // Allocate the instance descriptor.
-  Object* descriptors_unchecked;
-  { MaybeObject* maybe_descriptors_unchecked =
+  DescriptorArray* descriptors;
+  { MaybeObject* maybe_descriptors =
         DescriptorArray::Allocate(instance_descriptor_length);
-    if (!maybe_descriptors_unchecked->ToObject(&descriptors_unchecked)) {
-      return maybe_descriptors_unchecked;
+    if (!maybe_descriptors->To<DescriptorArray>(&descriptors)) {
+      return maybe_descriptors;
     }
   }
-  DescriptorArray* descriptors = DescriptorArray::cast(descriptors_unchecked);
+
+  DescriptorArray::WhitenessWitness witness(descriptors);
 
   int inobject_props = obj->map()->inobject_properties();
   int number_of_allocated_fields =
@@ -11324,7 +12384,7 @@
                                      JSFunction::cast(value),
                                      details.attributes(),
                                      details.index());
-        descriptors->Set(next_descriptor++, &d);
+        descriptors->Set(next_descriptor++, &d, witness);
       } else if (type == NORMAL) {
         if (current_offset < inobject_props) {
           obj->InObjectPropertyAtPut(current_offset,
@@ -11338,13 +12398,18 @@
                           current_offset++,
                           details.attributes(),
                           details.index());
-        descriptors->Set(next_descriptor++, &d);
+        descriptors->Set(next_descriptor++, &d, witness);
       } else if (type == CALLBACKS) {
+        if (value->IsAccessorPair()) {
+          MaybeObject* maybe_copy =
+              AccessorPair::cast(value)->CopyWithoutTransitions();
+          if (!maybe_copy->To(&value)) return maybe_copy;
+        }
         CallbacksDescriptor d(String::cast(key),
                               value,
                               details.attributes(),
                               details.index());
-        descriptors->Set(next_descriptor++, &d);
+        descriptors->Set(next_descriptor++, &d, witness);
       } else {
         UNREACHABLE();
       }
@@ -11352,7 +12417,7 @@
   }
   ASSERT(current_offset == number_of_fields);
 
-  descriptors->Sort();
+  descriptors->Sort(witness);
   // Allocate new map.
   Object* new_map;
   { MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors();
@@ -11375,20 +12440,84 @@
 }
 
 
-Object* ObjectHashTable::Lookup(JSObject* key) {
+bool ObjectHashSet::Contains(Object* key) {
+  ASSERT(IsKey(key));
+
   // If the object does not have an identity hash, it was never used as a key.
-  MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
-  if (maybe_hash->IsFailure()) return GetHeap()->undefined_value();
+  { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
+    if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return false;
+  }
+  return (FindEntry(key) != kNotFound);
+}
+
+
+MaybeObject* ObjectHashSet::Add(Object* key) {
+  ASSERT(IsKey(key));
+
+  // Make sure the key object has an identity hash code.
+  int hash;
+  { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
+    if (maybe_hash->IsFailure()) return maybe_hash;
+    hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+  }
+  int entry = FindEntry(key);
+
+  // Check whether key is already present.
+  if (entry != kNotFound) return this;
+
+  // Check whether the hash set should be extended and add entry.
+  Object* obj;
+  { MaybeObject* maybe_obj = EnsureCapacity(1, key);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  ObjectHashSet* table = ObjectHashSet::cast(obj);
+  entry = table->FindInsertionEntry(hash);
+  table->set(EntryToIndex(entry), key);
+  table->ElementAdded();
+  return table;
+}
+
+
+MaybeObject* ObjectHashSet::Remove(Object* key) {
+  ASSERT(IsKey(key));
+
+  // If the object does not have an identity hash, it was never used as a key.
+  { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
+    if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return this;
+  }
+  int entry = FindEntry(key);
+
+  // Check whether key is actually present.
+  if (entry == kNotFound) return this;
+
+  // Remove entry and try to shrink this hash set.
+  set_the_hole(EntryToIndex(entry));
+  ElementRemoved();
+  return Shrink(key);
+}
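
All three ObjectHashSet operations lean on the same identity-hash protocol:
reads pass OMIT_CREATION so they never materialize a hash, while Add passes
ALLOW_CREATION. A small model of that protocol, with hypothetical names:

  #include <cstdio>
  #include <map>

  struct IdentityHasher {
    std::map<const void*, int> hashes;
    int next;
    IdentityHasher() : next(1) {}
    bool TryGetHash(const void* obj, int* out) {   // models OMIT_CREATION
      std::map<const void*, int>::iterator it = hashes.find(obj);
      if (it == hashes.end()) return false;
      *out = it->second;
      return true;
    }
    int GetOrCreateHash(const void* obj) {         // models ALLOW_CREATION
      int hash;
      if (!TryGetHash(obj, &hash)) hash = hashes[obj] = next++;
      return hash;
    }
  };

  int main() {
    IdentityHasher hasher;
    int object = 0, hash = 0;
    std::printf("%d\n", hasher.TryGetHash(&object, &hash));  // 0: no hash yet
    hasher.GetOrCreateHash(&object);                         // the Add() path
    std::printf("%d\n", hasher.TryGetHash(&object, &hash));  // 1: hash exists
    return 0;
  }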
+
+
+Object* ObjectHashTable::Lookup(Object* key) {
+  ASSERT(IsKey(key));
+
+  // If the object does not have an identity hash, it was never used as a key.
+  { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
+    if (maybe_hash->ToObjectUnchecked()->IsUndefined()) {
+      return GetHeap()->undefined_value();
+    }
+  }
   int entry = FindEntry(key);
   if (entry == kNotFound) return GetHeap()->undefined_value();
   return get(EntryToIndex(entry) + 1);
 }
 
 
-MaybeObject* ObjectHashTable::Put(JSObject* key, Object* value) {
+MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
+  ASSERT(IsKey(key));
+
   // Make sure the key object has an identity hash code.
   int hash;
-  { MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::ALLOW_CREATION);
+  { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
     if (maybe_hash->IsFailure()) return maybe_hash;
     hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
   }
@@ -11418,16 +12547,16 @@
 }
 
 
-void ObjectHashTable::AddEntry(int entry, JSObject* key, Object* value) {
+void ObjectHashTable::AddEntry(int entry, Object* key, Object* value) {
   set(EntryToIndex(entry), key);
   set(EntryToIndex(entry) + 1, value);
   ElementAdded();
 }
 
 
-void ObjectHashTable::RemoveEntry(int entry, Heap* heap) {
-  set_null(heap, EntryToIndex(entry));
-  set_null(heap, EntryToIndex(entry) + 1);
+void ObjectHashTable::RemoveEntry(int entry) {
+  set_the_hole(EntryToIndex(entry));
+  set_the_hole(EntryToIndex(entry) + 1);
   ElementRemoved();
 }
 
@@ -11682,7 +12811,136 @@
   // Multiple break points.
   return FixedArray::cast(break_point_objects())->length();
 }
-#endif
+#endif  // ENABLE_DEBUGGER_SUPPORT
 
 
+MaybeObject* JSDate::GetField(Object* object, Smi* index) {
+  return JSDate::cast(object)->DoGetField(
+      static_cast<FieldIndex>(index->value()));
+}
+
+
+Object* JSDate::DoGetField(FieldIndex index) {
+  ASSERT(index != kDateValue);
+
+  DateCache* date_cache = GetIsolate()->date_cache();
+
+  if (index < kFirstUncachedField) {
+    Object* stamp = cache_stamp();
+    if (stamp != date_cache->stamp() && stamp->IsSmi()) {
+      // Since the stamp is not NaN, the value is also not NaN.
+      int64_t local_time_ms =
+          date_cache->ToLocal(static_cast<int64_t>(value()->Number()));
+      SetLocalFields(local_time_ms, date_cache);
+    }
+    switch (index) {
+      case kYear: return year();
+      case kMonth: return month();
+      case kDay: return day();
+      case kWeekday: return weekday();
+      case kHour: return hour();
+      case kMinute: return min();
+      case kSecond: return sec();
+      default: UNREACHABLE();
+    }
+  }
+
+  if (index >= kFirstUTCField) {
+    return GetUTCField(index, value()->Number(), date_cache);
+  }
+
+  double time = value()->Number();
+  if (isnan(time)) return GetIsolate()->heap()->nan_value();
+
+  int64_t local_time_ms = date_cache->ToLocal(static_cast<int64_t>(time));
+  int days = DateCache::DaysFromTime(local_time_ms);
+
+  if (index == kDays) return Smi::FromInt(days);
+
+  int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
+  if (index == kMillisecond) return Smi::FromInt(time_in_day_ms % 1000);
+  ASSERT(index == kTimeInDay);
+  return Smi::FromInt(time_in_day_ms);
+}
+
+
+Object* JSDate::GetUTCField(FieldIndex index,
+                            double value,
+                            DateCache* date_cache) {
+  ASSERT(index >= kFirstUTCField);
+
+  if (isnan(value)) return GetIsolate()->heap()->nan_value();
+
+  int64_t time_ms = static_cast<int64_t>(value);
+
+  if (index == kTimezoneOffset) {
+    return Smi::FromInt(date_cache->TimezoneOffset(time_ms));
+  }
+
+  int days = DateCache::DaysFromTime(time_ms);
+
+  if (index == kWeekdayUTC) return Smi::FromInt(date_cache->Weekday(days));
+
+  if (index <= kDayUTC) {
+    int year, month, day;
+    date_cache->YearMonthDayFromDays(days, &year, &month, &day);
+    if (index == kYearUTC) return Smi::FromInt(year);
+    if (index == kMonthUTC) return Smi::FromInt(month);
+    ASSERT(index == kDayUTC);
+    return Smi::FromInt(day);
+  }
+
+  int time_in_day_ms = DateCache::TimeInDay(time_ms, days);
+  switch (index) {
+    case kHourUTC: return Smi::FromInt(time_in_day_ms / (60 * 60 * 1000));
+    case kMinuteUTC: return Smi::FromInt((time_in_day_ms / (60 * 1000)) % 60);
+    case kSecondUTC: return Smi::FromInt((time_in_day_ms / 1000) % 60);
+    case kMillisecondUTC: return Smi::FromInt(time_in_day_ms % 1000);
+    case kDaysUTC: return Smi::FromInt(days);
+    case kTimeInDayUTC: return Smi::FromInt(time_in_day_ms);
+    default: UNREACHABLE();
+  }
+
+  UNREACHABLE();
+  return NULL;
+}
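
The UTC field reads above all reduce to the same split of a timestamp into
whole days plus a time-in-day remainder. A standalone version of that
arithmetic; floor division keeps the remainder non-negative for pre-1970
times:

  #include <cstdint>
  #include <cstdio>

  const int64_t kMsPerDay = 24 * 60 * 60 * 1000;

  void SplitTime(int64_t time_ms, int* hour, int* min, int* sec, int* ms) {
    int64_t days = time_ms / kMsPerDay;
    if (time_ms % kMsPerDay < 0) days--;  // floor, not truncation
    int time_in_day = static_cast<int>(time_ms - days * kMsPerDay);
    *hour = time_in_day / (60 * 60 * 1000);
    *min = (time_in_day / (60 * 1000)) % 60;
    *sec = (time_in_day / 1000) % 60;
    *ms = time_in_day % 1000;
  }

  int main() {
    int h, m, s, ms;
    SplitTime(90061001, &h, &m, &s, &ms);  // 1970-01-02 01:01:01.001 UTC
    std::printf("%02d:%02d:%02d.%03d\n", h, m, s, ms);
    return 0;
  }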
+
+
+void JSDate::SetValue(Object* value, bool is_value_nan) {
+  set_value(value);
+  if (is_value_nan) {
+    HeapNumber* nan = GetIsolate()->heap()->nan_value();
+    set_cache_stamp(nan, SKIP_WRITE_BARRIER);
+    set_year(nan, SKIP_WRITE_BARRIER);
+    set_month(nan, SKIP_WRITE_BARRIER);
+    set_day(nan, SKIP_WRITE_BARRIER);
+    set_hour(nan, SKIP_WRITE_BARRIER);
+    set_min(nan, SKIP_WRITE_BARRIER);
+    set_sec(nan, SKIP_WRITE_BARRIER);
+    set_weekday(nan, SKIP_WRITE_BARRIER);
+  } else {
+    set_cache_stamp(Smi::FromInt(DateCache::kInvalidStamp), SKIP_WRITE_BARRIER);
+  }
+}
+
+
+void JSDate::SetLocalFields(int64_t local_time_ms, DateCache* date_cache) {
+  int days = DateCache::DaysFromTime(local_time_ms);
+  int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
+  int year, month, day;
+  date_cache->YearMonthDayFromDays(days, &year, &month, &day);
+  int weekday = date_cache->Weekday(days);
+  int hour = time_in_day_ms / (60 * 60 * 1000);
+  int min = (time_in_day_ms / (60 * 1000)) % 60;
+  int sec = (time_in_day_ms / 1000) % 60;
+  set_cache_stamp(date_cache->stamp());
+  set_year(Smi::FromInt(year), SKIP_WRITE_BARRIER);
+  set_month(Smi::FromInt(month), SKIP_WRITE_BARRIER);
+  set_day(Smi::FromInt(day), SKIP_WRITE_BARRIER);
+  set_weekday(Smi::FromInt(weekday), SKIP_WRITE_BARRIER);
+  set_hour(Smi::FromInt(hour), SKIP_WRITE_BARRIER);
+  set_min(Smi::FromInt(min), SKIP_WRITE_BARRIER);
+  set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER);
+}
+
 } }  // namespace v8::internal
diff --git a/src/objects.h b/src/objects.h
index 1245ed0..a9cb8e0 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,6 +31,7 @@
 #include "allocation.h"
 #include "builtins.h"
 #include "list.h"
+#include "property-details.h"
 #include "smart-array-pointer.h"
 #include "unicode-inl.h"
 #if V8_TARGET_ARCH_ARM
@@ -38,6 +39,8 @@
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/constants-mips.h"
 #endif
+#include "v8checks.h"
+
 
 //
 // Most object types in the V8 JavaScript are described in this file.
@@ -51,6 +54,8 @@
 //       - JSReceiver  (suitable for property access)
 //         - JSObject
 //           - JSArray
+//           - JSSet
+//           - JSMap
 //           - JSWeakMap
 //           - JSRegExp
 //           - JSFunction
@@ -59,6 +64,7 @@
 //             - JSBuiltinsObject
 //           - JSGlobalProxy
 //           - JSValue
+//             - JSDate
 //           - JSMessageObject
 //         - JSProxy
 //           - JSFunctionProxy
@@ -74,7 +80,7 @@
 //             - MapCache
 //           - Context
 //           - JSFunctionResultCache
-//           - SerializedScopeInfo
+//           - ScopeInfo
 //         - FixedDoubleArray
 //         - ExternalArray
 //           - ExternalPixelArray
@@ -102,6 +108,7 @@
 //       - SharedFunctionInfo
 //       - Struct
 //         - AccessorInfo
+//         - AccessorPair
 //         - AccessCheckInfo
 //         - InterceptorInfo
 //         - CallHandlerInfo
@@ -120,24 +127,17 @@
 //  HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
 //  Failure:    [30 bit signed int] 11
 
-// Ecma-262 3rd 8.6.1
-enum PropertyAttributes {
-  NONE              = v8::None,
-  READ_ONLY         = v8::ReadOnly,
-  DONT_ENUM         = v8::DontEnum,
-  DONT_DELETE       = v8::DontDelete,
-  ABSENT            = 16  // Used in runtime to indicate a property is absent.
-  // ABSENT can never be stored in or returned from a descriptor's attributes
-  // bitfield.  It is only used as a return value meaning the attributes of
-  // a non-existent property.
-};
-
 namespace v8 {
 namespace internal {
 
 enum ElementsKind {
-  // The "fast" kind for tagged values. Must be first to make it possible
-  // to efficiently check maps if they have fast elements.
+  // The "fast" kind for elements that only contain SMI values. Must be first
+  // to make it possible to efficiently check maps for this kind.
+  FAST_SMI_ONLY_ELEMENTS,
+
+  // The "fast" kind for tagged values. Must be second to make it possible to
+  // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
+  // together at once.
   FAST_ELEMENTS,
 
   // The "fast" kind for unwrapped, non-tagged double values.
@@ -160,101 +160,26 @@
   // Derived constants from ElementsKind
   FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
   LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
-  FIRST_ELEMENTS_KIND = FAST_ELEMENTS,
+  FIRST_ELEMENTS_KIND = FAST_SMI_ONLY_ELEMENTS,
   LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
 };
 
-static const int kElementsKindCount =
-    LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
-
-// PropertyDetails captures type and attributes for a property.
-// They are used both in property dictionaries and instance descriptors.
-class PropertyDetails BASE_EMBEDDED {
- public:
-  PropertyDetails(PropertyAttributes attributes,
-                  PropertyType type,
-                  int index = 0) {
-    ASSERT(type != ELEMENTS_TRANSITION);
-    ASSERT(TypeField::is_valid(type));
-    ASSERT(AttributesField::is_valid(attributes));
-    ASSERT(StorageField::is_valid(index));
-
-    value_ = TypeField::encode(type)
-        | AttributesField::encode(attributes)
-        | StorageField::encode(index);
-
-    ASSERT(type == this->type());
-    ASSERT(attributes == this->attributes());
-    ASSERT(index == this->index());
-  }
-
-  PropertyDetails(PropertyAttributes attributes,
-                  PropertyType type,
-                  ElementsKind elements_kind) {
-    ASSERT(type == ELEMENTS_TRANSITION);
-    ASSERT(TypeField::is_valid(type));
-    ASSERT(AttributesField::is_valid(attributes));
-    ASSERT(StorageField::is_valid(static_cast<int>(elements_kind)));
-
-    value_ = TypeField::encode(type)
-        | AttributesField::encode(attributes)
-        | StorageField::encode(static_cast<int>(elements_kind));
-
-    ASSERT(type == this->type());
-    ASSERT(attributes == this->attributes());
-    ASSERT(elements_kind == this->elements_kind());
-  }
-
-  // Conversion for storing details as Object*.
-  explicit inline PropertyDetails(Smi* smi);
-  inline Smi* AsSmi();
-
-  PropertyType type() { return TypeField::decode(value_); }
-
-  bool IsTransition() {
-    PropertyType t = type();
-    ASSERT(t != INTERCEPTOR);
-    return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
-        t == ELEMENTS_TRANSITION;
-  }
-
-  bool IsProperty() {
-    return type() < FIRST_PHANTOM_PROPERTY_TYPE;
-  }
-
-  PropertyAttributes attributes() { return AttributesField::decode(value_); }
-
-  int index() { return StorageField::decode(value_); }
-
-  ElementsKind elements_kind() {
-    ASSERT(type() == ELEMENTS_TRANSITION);
-    return static_cast<ElementsKind>(StorageField::decode(value_));
-  }
-
-  inline PropertyDetails AsDeleted();
-
-  static bool IsValidIndex(int index) {
-    return StorageField::is_valid(index);
-  }
-
-  bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
-  bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
-  bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
-  bool IsDeleted() { return DeletedField::decode(value_) != 0;}
-
-  // Bit fields in value_ (type, shift, size). Must be public so the
-  // constants can be embedded in generated code.
-  class TypeField:       public BitField<PropertyType,       0, 4> {};
-  class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
-  class DeletedField:    public BitField<uint32_t,           7, 1> {};
-  class StorageField:    public BitField<uint32_t,           8, 32-8> {};
-
-  static const int kInitialIndex = 1;
-
- private:
-  uint32_t value_;
+enum CompareMapMode {
+  REQUIRE_EXACT_MAP,
+  ALLOW_ELEMENT_TRANSITION_MAPS
 };
 
+enum KeyedAccessGrowMode {
+  DO_NOT_ALLOW_JSARRAY_GROWTH,
+  ALLOW_JSARRAY_GROWTH
+};
+
+const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
+
+void PrintElementsKind(FILE* out, ElementsKind kind);
+
+inline bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+                                                ElementsKind to_kind);
 
 // Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
 enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
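// Placing FAST_SMI_ONLY_ELEMENTS at 0 and FAST_ELEMENTS at 1 is what lets
// both "fast" kinds be tested together with a single comparison, as the enum
// comments above require. A sketch of the predicate this ordering enables
// (illustrative; the real check lives behind JSObject::HasFastTypeElements):
inline bool IsFastTypeElementsKind(ElementsKind kind) {
  // Relies on FAST_SMI_ONLY_ELEMENTS == 0 and FAST_ELEMENTS == 1.
  return kind <= FAST_ELEMENTS;
}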
@@ -276,8 +201,15 @@
 };
 
 
+// Indicates whether a get method should implicitly create the object looked up.
+enum CreationFlag {
+  ALLOW_CREATION,
+  OMIT_CREATION
+};
+
+
 // Instance size sentinel for objects of variable size.
-static const int kVariableSizeSentinel = 0;
+const int kVariableSizeSentinel = 0;
 
 
 // All Maps have a field instance_type containing a InstanceType.
@@ -291,7 +223,7 @@
 // encoding is considered TWO_BYTE.  It is not mentioned in the name.  ASCII
 // encoding is mentioned explicitly in the name.  Likewise, the default
 // representation is considered sequential.  It is not mentioned in the
-// name.  The other representations (eg, CONS, EXTERNAL) are explicitly
+// name.  The other representations (e.g. CONS, EXTERNAL) are explicitly
 // mentioned.  Finally, the string is either a SYMBOL_TYPE (if it is a
 // symbol) or a STRING_TYPE (if it is not a symbol).
 //
@@ -311,6 +243,9 @@
   V(EXTERNAL_SYMBOL_TYPE)                                                      \
   V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE)                                      \
   V(EXTERNAL_ASCII_SYMBOL_TYPE)                                                \
+  V(SHORT_EXTERNAL_SYMBOL_TYPE)                                                \
+  V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE)                                \
+  V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE)                                          \
   V(STRING_TYPE)                                                               \
   V(ASCII_STRING_TYPE)                                                         \
   V(CONS_STRING_TYPE)                                                          \
@@ -319,6 +254,9 @@
   V(EXTERNAL_STRING_TYPE)                                                      \
   V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE)                                      \
   V(EXTERNAL_ASCII_STRING_TYPE)                                                \
+  V(SHORT_EXTERNAL_STRING_TYPE)                                                \
+  V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE)                                \
+  V(SHORT_EXTERNAL_ASCII_STRING_TYPE)                                          \
   V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE)                                        \
                                                                                \
   V(MAP_TYPE)                                                                  \
@@ -329,6 +267,7 @@
   V(HEAP_NUMBER_TYPE)                                                          \
   V(FOREIGN_TYPE)                                                              \
   V(BYTE_ARRAY_TYPE)                                                           \
+  V(FREE_SPACE_TYPE)                                                           \
   /* Note: the order of these external array */                                \
   /* types is relied upon in */                                                \
   /* Object::IsExternalArray(). */                                             \
@@ -343,6 +282,7 @@
   V(FILLER_TYPE)                                                               \
                                                                                \
   V(ACCESSOR_INFO_TYPE)                                                        \
+  V(ACCESSOR_PAIR_TYPE)                                                        \
   V(ACCESS_CHECK_INFO_TYPE)                                                    \
   V(INTERCEPTOR_INFO_TYPE)                                                     \
   V(CALL_HANDLER_INFO_TYPE)                                                    \
@@ -353,6 +293,8 @@
   V(SCRIPT_TYPE)                                                               \
   V(CODE_CACHE_TYPE)                                                           \
   V(POLYMORPHIC_CODE_CACHE_TYPE)                                               \
+  V(TYPE_FEEDBACK_INFO_TYPE)                                                   \
+  V(ALIASED_ARGUMENTS_ENTRY_TYPE)                                              \
                                                                                \
   V(FIXED_ARRAY_TYPE)                                                          \
   V(FIXED_DOUBLE_ARRAY_TYPE)                                                   \
@@ -361,6 +303,7 @@
   V(JS_MESSAGE_OBJECT_TYPE)                                                    \
                                                                                \
   V(JS_VALUE_TYPE)                                                             \
+  V(JS_DATE_TYPE)                                                              \
   V(JS_OBJECT_TYPE)                                                            \
   V(JS_CONTEXT_EXTENSION_OBJECT_TYPE)                                          \
   V(JS_GLOBAL_OBJECT_TYPE)                                                     \
@@ -418,6 +361,18 @@
     ExternalAsciiString::kSize,                                                \
     external_ascii_symbol,                                                     \
     ExternalAsciiSymbol)                                                       \
+  V(SHORT_EXTERNAL_SYMBOL_TYPE,                                                \
+    ExternalTwoByteString::kShortSize,                                         \
+    short_external_symbol,                                                     \
+    ShortExternalSymbol)                                                       \
+  V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE,                                \
+    ExternalTwoByteString::kShortSize,                                         \
+    short_external_symbol_with_ascii_data,                                     \
+    ShortExternalSymbolWithAsciiData)                                          \
+  V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE,                                          \
+    ExternalAsciiString::kShortSize,                                           \
+    short_external_ascii_symbol,                                               \
+    ShortExternalAsciiSymbol)                                                  \
   V(STRING_TYPE,                                                               \
     kVariableSizeSentinel,                                                     \
     string,                                                                    \
@@ -453,7 +408,19 @@
   V(EXTERNAL_ASCII_STRING_TYPE,                                                \
     ExternalAsciiString::kSize,                                                \
     external_ascii_string,                                                     \
-    ExternalAsciiString)
+    ExternalAsciiString)                                                       \
+  V(SHORT_EXTERNAL_STRING_TYPE,                                                \
+    ExternalTwoByteString::kShortSize,                                         \
+    short_external_string,                                                     \
+    ShortExternalString)                                                       \
+  V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE,                                \
+    ExternalTwoByteString::kShortSize,                                         \
+    short_external_string_with_ascii_data,                                     \
+    ShortExternalStringWithAsciiData)                                          \
+  V(SHORT_EXTERNAL_ASCII_STRING_TYPE,                                          \
+    ExternalAsciiString::kShortSize,                                           \
+    short_external_ascii_string,                                               \
+    ShortExternalAsciiString)
 
 // A struct is a simple object with a set of object-valued fields.  Including an
 // object type in this causes the compiler to generate most of the boilerplate
@@ -466,6 +433,7 @@
 // manually.
 #define STRUCT_LIST_ALL(V)                                                     \
   V(ACCESSOR_INFO, AccessorInfo, accessor_info)                                \
+  V(ACCESSOR_PAIR, AccessorPair, accessor_pair)                                \
   V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info)                     \
   V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info)                       \
   V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info)                     \
@@ -475,7 +443,9 @@
   V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info)                        \
   V(SCRIPT, Script, script)                                                    \
   V(CODE_CACHE, CodeCache, code_cache)                                         \
-  V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache)
+  V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache)      \
+  V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info)                  \
+  V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry)
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 #define STRUCT_LIST_DEBUGGER(V)                                                \
@@ -533,10 +503,15 @@
 STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask) && kSlicedNotConsMask != 0);
 
 // If bit 7 is clear, then bit 3 indicates whether this two-byte
-// string actually contains ascii data.
+// string actually contains ASCII data.
 const uint32_t kAsciiDataHintMask = 0x08;
 const uint32_t kAsciiDataHintTag = 0x08;
 
+// If bit 7 is clear and string representation indicates an external string,
+// then bit 4 indicates whether the data pointer is cached.
+const uint32_t kShortExternalStringMask = 0x10;
+const uint32_t kShortExternalStringTag = 0x10;
+
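// Because the string-shape bits compose by OR-ing, classification is a
// mask-and-compare. A sketch of testing an instance type for the new "short
// external" shape (illustrative; assumes the type is already known to be a
// string):
inline bool IsShortExternalStringType(uint32_t instance_type) {
  return (instance_type & kShortExternalStringMask) == kShortExternalStringTag;
}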
 
 // A ConsString with an empty string as the right side is a candidate
 // for being shortcut by the garbage collector unless it is a
@@ -556,6 +531,13 @@
   ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
   CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
   CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
+  SHORT_EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag |
+                               kExternalStringTag | kShortExternalStringTag,
+  SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
+      kTwoByteStringTag | kSymbolTag | kExternalStringTag |
+      kAsciiDataHintTag | kShortExternalStringTag,
+  SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kAsciiStringTag | kExternalStringTag |
+                                     kSymbolTag | kShortExternalStringTag,
   EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
   EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
       kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
@@ -567,6 +549,13 @@
   CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
   SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
   SLICED_ASCII_STRING_TYPE = kAsciiStringTag | kSlicedStringTag,
+  SHORT_EXTERNAL_STRING_TYPE =
+      kTwoByteStringTag | kExternalStringTag | kShortExternalStringTag,
+  SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
+      kTwoByteStringTag | kExternalStringTag |
+      kAsciiDataHintTag | kShortExternalStringTag,
+  SHORT_EXTERNAL_ASCII_STRING_TYPE =
+      kAsciiStringTag | kExternalStringTag | kShortExternalStringTag,
   EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
   EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
       kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
@@ -585,6 +574,7 @@
   HEAP_NUMBER_TYPE,
   FOREIGN_TYPE,
   BYTE_ARRAY_TYPE,
+  FREE_SPACE_TYPE,
   EXTERNAL_BYTE_ARRAY_TYPE,  // FIRST_EXTERNAL_ARRAY_TYPE
   EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
   EXTERNAL_SHORT_ARRAY_TYPE,
@@ -599,6 +589,7 @@
 
   // Structs.
   ACCESSOR_INFO_TYPE,
+  ACCESSOR_PAIR_TYPE,
   ACCESS_CHECK_INFO_TYPE,
   INTERCEPTOR_INFO_TYPE,
   CALL_HANDLER_INFO_TYPE,
@@ -609,6 +600,8 @@
   SCRIPT_TYPE,
   CODE_CACHE_TYPE,
   POLYMORPHIC_CODE_CACHE_TYPE,
+  TYPE_FEEDBACK_INFO_TYPE,
+  ALIASED_ARGUMENTS_ENTRY_TYPE,
   // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
 // is defined. However, as include/v8.h contains some of the instance type
 // constants, always having them avoids them getting different numbers
@@ -621,24 +614,33 @@
 
   JS_MESSAGE_OBJECT_TYPE,
 
-  JS_VALUE_TYPE,  // FIRST_NON_CALLABLE_OBJECT_TYPE, FIRST_JS_RECEIVER_TYPE
+  // All the following types are subtypes of JSReceiver, which corresponds to
+  // objects in the JS sense. The first and the last type in this range are
+  // the two forms of function. This organization enables using the same
+  // compares for checking the JS_RECEIVER/SPEC_OBJECT range and the
+  // NONCALLABLE_JS_OBJECT range.
+  JS_FUNCTION_PROXY_TYPE,  // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE
+  JS_PROXY_TYPE,  // LAST_JS_PROXY_TYPE
+
+  JS_VALUE_TYPE,  // FIRST_JS_OBJECT_TYPE
+  JS_DATE_TYPE,
   JS_OBJECT_TYPE,
   JS_CONTEXT_EXTENSION_OBJECT_TYPE,
   JS_GLOBAL_OBJECT_TYPE,
   JS_BUILTINS_OBJECT_TYPE,
   JS_GLOBAL_PROXY_TYPE,
   JS_ARRAY_TYPE,
-  JS_PROXY_TYPE,
+  JS_SET_TYPE,
+  JS_MAP_TYPE,
   JS_WEAK_MAP_TYPE,
 
-  JS_REGEXP_TYPE,  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE
+  JS_REGEXP_TYPE,
 
-  JS_FUNCTION_TYPE,  // FIRST_CALLABLE_SPEC_OBJECT_TYPE
-  JS_FUNCTION_PROXY_TYPE,  // LAST_CALLABLE_SPEC_OBJECT_TYPE
+  JS_FUNCTION_TYPE,  // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
 
   // Pseudo-types
   FIRST_TYPE = 0x0,
-  LAST_TYPE = JS_FUNCTION_PROXY_TYPE,
+  LAST_TYPE = JS_FUNCTION_TYPE,
   INVALID_TYPE = FIRST_TYPE - 1,
   FIRST_NONSTRING_TYPE = MAP_TYPE,
   // Boundaries for testing for an external array.
@@ -651,21 +653,27 @@
   // are not continuous in this enum! The enum ranges instead reflect the
   // external class names, where proxies are treated as either ordinary objects,
   // or functions.
-  FIRST_JS_RECEIVER_TYPE = JS_VALUE_TYPE,
+  FIRST_JS_RECEIVER_TYPE = JS_FUNCTION_PROXY_TYPE,
   LAST_JS_RECEIVER_TYPE = LAST_TYPE,
-  // Boundaries for testing the types for which typeof is "object".
-  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_VALUE_TYPE,
-  LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
-  // Boundaries for testing the types for which typeof is "function".
-  FIRST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_TYPE,
-  LAST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_PROXY_TYPE,
+  // Boundaries for testing the types represented as JSObject
+  FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
+  LAST_JS_OBJECT_TYPE = LAST_TYPE,
+  // Boundaries for testing the types represented as JSProxy
+  FIRST_JS_PROXY_TYPE = JS_FUNCTION_PROXY_TYPE,
+  LAST_JS_PROXY_TYPE = JS_PROXY_TYPE,
   // Boundaries for testing whether the type is a JavaScript object.
-  FIRST_SPEC_OBJECT_TYPE = FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
-  LAST_SPEC_OBJECT_TYPE = LAST_CALLABLE_SPEC_OBJECT_TYPE
+  FIRST_SPEC_OBJECT_TYPE = FIRST_JS_RECEIVER_TYPE,
+  LAST_SPEC_OBJECT_TYPE = LAST_JS_RECEIVER_TYPE,
+  // Boundaries for testing the types for which typeof is "object".
+  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_PROXY_TYPE,
+  LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
+  // Note that the types for which typeof is "function" are not continuous.
+  // Define this so that we can put assertions on discrete checks.
+  NUM_OF_CALLABLE_SPEC_OBJECT_TYPES = 2
 };
 
-static const int kExternalArrayTypeCount = LAST_EXTERNAL_ARRAY_TYPE -
-    FIRST_EXTERNAL_ARRAY_TYPE + 1;
+const int kExternalArrayTypeCount =
+    LAST_EXTERNAL_ARRAY_TYPE - FIRST_EXTERNAL_ARRAY_TYPE + 1;
 
 STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
 STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
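// The reordering above makes JSReceiver one contiguous range, so the common
// "is this a JS object in the spec sense" test collapses to a pair of
// comparisons (or one unsigned compare after biasing). A sketch of the range
// checks this layout enables (illustrative helpers, not the V8 macros):
inline bool IsSpecObjectType(InstanceType type) {
  return type >= FIRST_SPEC_OBJECT_TYPE && type <= LAST_SPEC_OBJECT_TYPE;
}
inline bool IsNonCallableSpecObjectType(InstanceType type) {
  // Exactly the sub-range for which typeof yields "object".
  return type >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE &&
         type <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE;
}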
@@ -697,6 +705,7 @@
 class FixedArrayBase;
 class ObjectVisitor;
 class StringStream;
+class Failure;
 
 struct ValueInfo : public Malloced {
   ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -710,7 +719,6 @@
 // A template-ized version of the IsXXX functions.
 template <class C> static inline bool Is(Object* obj);
 
-class Failure;
 
 class MaybeObject BASE_EMBEDDED {
  public:
@@ -748,7 +756,7 @@
   // Prints this object with details.
   inline void Print() {
     Print(stdout);
-  };
+  }
   inline void PrintLn() {
     PrintLn(stdout);
   }
@@ -791,6 +799,7 @@
   V(ExternalDoubleArray)                       \
   V(ExternalPixelArray)                        \
   V(ByteArray)                                 \
+  V(FreeSpace)                                 \
   V(JSReceiver)                                \
   V(JSObject)                                  \
   V(JSContextExtensionObject)                  \
@@ -798,16 +807,18 @@
   V(DescriptorArray)                           \
   V(DeoptimizationInputData)                   \
   V(DeoptimizationOutputData)                  \
+  V(TypeFeedbackCells)                         \
   V(FixedArray)                                \
   V(FixedDoubleArray)                          \
   V(Context)                                   \
   V(GlobalContext)                             \
-  V(SerializedScopeInfo)                       \
+  V(ScopeInfo)                                 \
   V(JSFunction)                                \
   V(Code)                                      \
   V(Oddball)                                   \
   V(SharedFunctionInfo)                        \
   V(JSValue)                                   \
+  V(JSDate)                                    \
   V(JSMessageObject)                           \
   V(StringWrapper)                             \
   V(Foreign)                                   \
@@ -815,6 +826,8 @@
   V(JSArray)                                   \
   V(JSProxy)                                   \
   V(JSFunctionProxy)                           \
+  V(JSSet)                                     \
+  V(JSMap)                                     \
   V(JSWeakMap)                                 \
   V(JSRegExp)                                  \
   V(HashTable)                                 \
@@ -835,6 +848,9 @@
   V(AccessCheckNeeded)                         \
   V(JSGlobalPropertyCell)                      \
 
+
+class JSReceiver;
+
 // Object is the abstract superclass for all classes in the
 // object hierarchy.
 // Object does not use any virtual functions to avoid the
@@ -844,11 +860,15 @@
 class Object : public MaybeObject {
  public:
   // Type testing.
+  bool IsObject() { return true; }
+
 #define IS_TYPE_FUNCTION_DECL(type_)  inline bool Is##type_();
   OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
   HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
 #undef IS_TYPE_FUNCTION_DECL
 
+  inline bool IsFixedArrayBase();
+
   // Returns true if this object is an instance of the specified
   // function template.
   inline bool IsInstanceOf(FunctionTemplateInfo* type);
@@ -859,6 +879,7 @@
 #undef DECLARE_STRUCT_PREDICATE
 
   INLINE(bool IsSpecObject());
+  INLINE(bool IsSpecFunction());
 
   // Oddball testing.
   INLINE(bool IsUndefined());
@@ -867,9 +888,14 @@
   INLINE(bool IsTrue());
   INLINE(bool IsFalse());
   inline bool IsArgumentsMarker();
+  inline bool NonFailureIsHeapObject();
+
+  // Filler objects (fillers and free space objects).
+  inline bool IsFiller();
 
   // Extract the number.
   inline double Number();
+  inline bool IsNaN();
 
   // Returns true if the object is of the correct type to be used as an
   // implementation of a JSObject's elements.
@@ -899,28 +925,41 @@
       Object* receiver,
       String* key,
       PropertyAttributes* attributes);
+
+  static Handle<Object> GetProperty(Handle<Object> object,
+                                    Handle<Object> receiver,
+                                    LookupResult* result,
+                                    Handle<String> key,
+                                    PropertyAttributes* attributes);
+
   MUST_USE_RESULT MaybeObject* GetProperty(Object* receiver,
                                            LookupResult* result,
                                            String* key,
                                            PropertyAttributes* attributes);
-  MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
-                                                       Object* structure,
-                                                       String* name,
-                                                       Object* holder);
-  MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(Object* receiver,
-                                                      String* name,
-                                                      Object* handler);
-  MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
-                                                            JSFunction* getter);
 
-  inline MaybeObject* GetElement(uint32_t index);
+  MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
+                                                            JSReceiver* getter);
+
+  static Handle<Object> GetElement(Handle<Object> object, uint32_t index);
+  MUST_USE_RESULT inline MaybeObject* GetElement(uint32_t index);
   // For use when we know that no exception can be thrown.
   inline Object* GetElementNoExceptionThrown(uint32_t index);
-  MaybeObject* GetElementWithReceiver(Object* receiver, uint32_t index);
+  MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Object* receiver,
+                                                      uint32_t index);
 
   // Return the object's prototype (might be Heap::null_value()).
   Object* GetPrototype();
 
+  // Returns the permanent hash code associated with this object depending on
+  // the actual object type.  Might return a failure if no hash was created
+  // yet, or if creating one caused a GC.
+  MUST_USE_RESULT MaybeObject* GetHash(CreationFlag flag);
+
+  // Checks whether this object has the same value as the given one.  This
+  // function is implemented according to ES5, section 9.12 and can be used
+  // to implement the Harmony "egal" function.
+  bool SameValue(Object* other);
+
   // Tries to convert an object to an array index.  Returns true and sets
   // the output parameter if it succeeds.
   inline bool ToArrayIndex(uint32_t* index);
@@ -986,7 +1025,8 @@
   void SmiVerify();
 #endif
 
-  static const int kMinValue = (-1 << (kSmiValueSize - 1));
+  static const int kMinValue =
+      (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
   static const int kMaxValue = -(kMinValue + 1);
 
  private:
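// The rewritten kMinValue avoids left-shifting a negative value, which is
// undefined behavior in C++; shifting the all-ones unsigned pattern is well
// defined and converts back to the intended most-negative Smi. A quick
// standalone check of the identity (assuming the 32-bit layout where
// kSmiValueSize == 31 and the usual two's-complement conversion back to int):
#include <cassert>
int main() {
  const int kSmiValueSize = 31;
  const int kMinValue =
      static_cast<int>(static_cast<unsigned int>(-1) << (kSmiValueSize - 1));
  const int kMaxValue = -(kMinValue + 1);
  assert(kMinValue == -1073741824);  // -2^30
  assert(kMaxValue == 1073741823);   //  2^30 - 1
  return 0;
}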
@@ -1067,7 +1107,7 @@
 
 
 // Heap objects typically have a map pointer in their first word.  However,
-// during GC other data (eg, mark bits, forwarding addresses) is sometimes
+// during GC other data (e.g. mark bits, forwarding addresses) is sometimes
 // encoded in the first word.  The class MapWord is an abstraction of the
 // value in a heap object's first word.
 class MapWord BASE_EMBEDDED {
@@ -1086,7 +1126,7 @@
 
   // True if this map word is a forwarding address for a scavenge
   // collection.  Only valid during a scavenge collection (specifically,
-  // when all map words are heap object pointers, ie. not during a full GC).
+  // when all map words are heap object pointers, i.e. not during a full GC).
   inline bool IsForwardingAddress();
 
   // Create a map word from a forwarding address.
@@ -1095,101 +1135,13 @@
   // View this map word as a forwarding address.
   inline HeapObject* ToForwardingAddress();
 
-  // Marking phase of full collection: the map word of live objects is
-  // marked, and may be marked as overflowed (eg, the object is live, its
-  // children have not been visited, and it does not fit in the marking
-  // stack).
+  static inline MapWord FromRawValue(uintptr_t value) {
+    return MapWord(value);
+  }
 
-  // True if this map word's mark bit is set.
-  inline bool IsMarked();
-
-  // Return this map word but with its mark bit set.
-  inline void SetMark();
-
-  // Return this map word but with its mark bit cleared.
-  inline void ClearMark();
-
-  // True if this map word's overflow bit is set.
-  inline bool IsOverflowed();
-
-  // Return this map word but with its overflow bit set.
-  inline void SetOverflow();
-
-  // Return this map word but with its overflow bit cleared.
-  inline void ClearOverflow();
-
-
-  // Compacting phase of a full compacting collection: the map word of live
-  // objects contains an encoding of the original map address along with the
-  // forwarding address (represented as an offset from the first live object
-  // in the same page as the (old) object address).
-
-  // Create a map word from a map address and a forwarding address offset.
-  static inline MapWord EncodeAddress(Address map_address, int offset);
-
-  // Return the map address encoded in this map word.
-  inline Address DecodeMapAddress(MapSpace* map_space);
-
-  // Return the forwarding offset encoded in this map word.
-  inline int DecodeOffset();
-
-
-  // During serialization: the map word is used to hold an encoded
-  // address, and possibly a mark bit (set and cleared with SetMark
-  // and ClearMark).
-
-  // Create a map word from an encoded address.
-  static inline MapWord FromEncodedAddress(Address address);
-
-  inline Address ToEncodedAddress();
-
-  // Bits used by the marking phase of the garbage collector.
-  //
-  // The first word of a heap object is normally a map pointer. The last two
-  // bits are tagged as '01' (kHeapObjectTag). We reuse the last two bits to
-  // mark an object as live and/or overflowed:
-  //   last bit = 0, marked as alive
-  //   second bit = 1, overflowed
-  // An object is only marked as overflowed when it is marked as live while
-  // the marking stack is overflowed.
-  static const int kMarkingBit = 0;  // marking bit
-  static const int kMarkingMask = (1 << kMarkingBit);  // marking mask
-  static const int kOverflowBit = 1;  // overflow bit
-  static const int kOverflowMask = (1 << kOverflowBit);  // overflow mask
-
-  // Forwarding pointers and map pointer encoding. On 32 bit all the bits are
-  // used.
-  // +-----------------+------------------+-----------------+
-  // |forwarding offset|page offset of map|page index of map|
-  // +-----------------+------------------+-----------------+
-  //          ^                 ^                  ^
-  //          |                 |                  |
-  //          |                 |          kMapPageIndexBits
-  //          |         kMapPageOffsetBits
-  // kForwardingOffsetBits
-  static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits;
-  static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;
-#ifdef V8_HOST_ARCH_64_BIT
-  static const int kMapPageIndexBits = 16;
-#else
-  // Use all the 32-bits to encode on a 32-bit platform.
-  static const int kMapPageIndexBits =
-      32 - (kMapPageOffsetBits + kForwardingOffsetBits);
-#endif
-
-  static const int kMapPageIndexShift = 0;
-  static const int kMapPageOffsetShift =
-      kMapPageIndexShift + kMapPageIndexBits;
-  static const int kForwardingOffsetShift =
-      kMapPageOffsetShift + kMapPageOffsetBits;
-
-  // Bit masks covering the different parts the encoding.
-  static const uintptr_t kMapPageIndexMask =
-      (1 << kMapPageOffsetShift) - 1;
-  static const uintptr_t kMapPageOffsetMask =
-      ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
-  static const uintptr_t kForwardingOffsetMask =
-      ~(kMapPageIndexMask | kMapPageOffsetMask);
+  inline uintptr_t ToRawValue() {
+    return value_;
+  }
 
  private:
   // HeapObject calls the private constructor and directly reads the value.
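// With the old mark-compact encodings deleted, a MapWord is again just one
// machine word with typed views over it, and FromRawValue/ToRawValue expose
// the untyped round trip. A standalone sketch with a simplified stand-in
// type (illustrative):
#include <cassert>
#include <cstdint>
struct MapWordSketch {
  uintptr_t value_;
  static MapWordSketch FromRawValue(uintptr_t v) {
    MapWordSketch w; w.value_ = v; return w;
  }
  uintptr_t ToRawValue() const { return value_; }
};
int main() {
  MapWordSketch w = MapWordSketch::FromRawValue(0xDEADBEEFu);
  assert(w.ToRawValue() == 0xDEADBEEFu);  // lossless: no bits reserved anymore
  return 0;
}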
@@ -1209,6 +1161,10 @@
   // information.
   inline Map* map();
   inline void set_map(Map* value);
+  // The no-write-barrier version.  This is OK if the object is white and in
+  // new space, or if the value is an immortal immutable object, such as the
+  // maps of primitive (non-JS) objects like strings and heap numbers.
+  inline void set_map_no_write_barrier(Map* value);
 
   // During garbage collection, the map word of a heap object does not
   // necessarily contain a map pointer.
@@ -1216,8 +1172,8 @@
   inline void set_map_word(MapWord map_word);
 
   // The Heap the object was allocated in. Used also to access Isolate.
-  // This method can not be used during GC, it ASSERTs this.
   inline Heap* GetHeap();
+
   // Convenience method to get current isolate. This method can be
   // accessed only when its result is the same as
   // Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
@@ -1246,31 +1202,6 @@
   // GC internal.
   inline int SizeFromMap(Map* map);
 
-  // Support for the marking heap objects during the marking phase of GC.
-  // True if the object is marked live.
-  inline bool IsMarked();
-
-  // Mutate this object's map pointer to indicate that the object is live.
-  inline void SetMark();
-
-  // Mutate this object's map pointer to remove the indication that the
-  // object is live (ie, partially restore the map pointer).
-  inline void ClearMark();
-
-  // True if this object is marked as overflowed.  Overflowed objects have
-  // been reached and marked during marking of the heap, but their children
-  // have not necessarily been marked and they have not been pushed on the
-  // marking stack.
-  inline bool IsOverflowed();
-
-  // Mutate this object's map pointer to indicate that the object is
-  // overflowed.
-  inline void SetOverflow();
-
-  // Mutate this object's map pointer to remove the indication that the
-  // object is overflowed (ie, partially restore the map pointer).
-  inline void ClearOverflow();
-
   // Returns the field at offset in obj, as a read/write Object* reference.
   // Does no checking, and is safe to use during GC, while maps are invalid.
   // Does not invoke write barrier, so should only be assigned to
@@ -1294,18 +1225,14 @@
     HeapObjectPrint(stdout);
   }
   void HeapObjectPrint(FILE* out);
-#endif
-#ifdef DEBUG
-  void HeapObjectVerify();
-  inline void VerifyObjectField(int offset);
-  inline void VerifySmiField(int offset);
-#endif
-
-#ifdef OBJECT_PRINT
   void PrintHeader(FILE* out, const char* id);
 #endif
 
 #ifdef DEBUG
+  void HeapObjectVerify();
+  inline void VerifyObjectField(int offset);
+  inline void VerifySmiField(int offset);
+
   // Verify a pointer is a valid HeapObject pointer that points to object
   // areas in the heap.
   static void VerifyHeapPointer(Object* p);
@@ -1425,6 +1352,30 @@
 };
 
 
+enum EnsureElementsMode {
+  DONT_ALLOW_DOUBLE_ELEMENTS,
+  ALLOW_COPIED_DOUBLE_ELEMENTS,
+  ALLOW_CONVERTED_DOUBLE_ELEMENTS
+};
+
+
+// Indicates whether a property should be set or (re)defined.  Setting a
+// property leaves its attributes unchanged, checks writability, and invokes
+// callbacks.  Defining a property updates its attributes and overrides any
+// callbacks.
+enum SetPropertyMode {
+  SET_PROPERTY,
+  DEFINE_PROPERTY
+};
+
+
+// Indicator for one component of an AccessorPair.
+enum AccessorComponent {
+  ACCESSOR_GETTER,
+  ACCESSOR_SETTER
+};
+
+
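// SET_PROPERTY vs. DEFINE_PROPERTY mirrors the JavaScript distinction
// between assignment (obj.x = v: attributes untouched, writability checked,
// setters called) and Object.defineProperty (attributes rewritten, callbacks
// overridden). A schematic dispatcher over the enum (illustrative, not the
// V8 code path):
inline bool StoreSketch(SetPropertyMode mode,
                        bool writable,
                        bool* attributes_updated) {
  if (mode == SET_PROPERTY) {
    *attributes_updated = false;  // plain assignment never edits attributes
    return writable;              // and fails on read-only properties
  }
  *attributes_updated = true;     // definition rewrites attributes regardless
  return true;
}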
 // JSReceiver includes types on which properties can be defined, i.e.,
 // JSObject and JSProxy.
 class JSReceiver: public HeapObject {
@@ -1438,6 +1389,11 @@
   // Casting.
   static inline JSReceiver* cast(Object* obj);
 
+  static Handle<Object> SetProperty(Handle<JSReceiver> object,
+                                    Handle<String> key,
+                                    Handle<Object> value,
+                                    PropertyAttributes attributes,
+                                    StrictModeFlag strict_mode);
   // Can cause GC.
   MUST_USE_RESULT MaybeObject* SetProperty(String* key,
                                            Object* value,
@@ -1448,8 +1404,22 @@
                                            Object* value,
                                            PropertyAttributes attributes,
                                            StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
+                                                            Object* value);
 
   MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
+  MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
+
+  // Set the index'th array element.
+  // Can cause GC, or return failure if GC is required.
+  MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
+                                          Object* value,
+                                          PropertyAttributes attributes,
+                                          StrictModeFlag strict_mode,
+                                          bool check_prototype);
+
+  // Tests for the fast common case for property enumeration.
+  bool IsSimpleEnum();
 
   // Returns the class name ([[Class]] property in the specification).
   String* class_name();
@@ -1466,6 +1436,7 @@
   // Can cause a GC.
   inline bool HasProperty(String* name);
   inline bool HasLocalProperty(String* name);
+  inline bool HasElement(uint32_t index);
 
   // Return the object's prototype (might be Heap::null_value()).
   inline Object* GetPrototype();
@@ -1474,11 +1445,18 @@
   MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
                                             bool skip_hidden_prototypes);
 
+  // Retrieves a permanent object identity hash code. The undefined value might
+  // be returned in case no hash was created yet and OMIT_CREATION was used.
+  inline MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+
   // Lookup a property.  If found, the result is valid and has
   // detailed information.
   void LocalLookup(String* name, LookupResult* result);
   void Lookup(String* name, LookupResult* result);
 
+ protected:
+  Smi* GenerateIdentityHash();
+
  private:
   PropertyAttributes GetPropertyAttribute(JSReceiver* receiver,
                                           LookupResult* result,
@@ -1525,8 +1503,14 @@
   MUST_USE_RESULT inline MaybeObject* ResetElements();
   inline ElementsKind GetElementsKind();
   inline ElementsAccessor* GetElementsAccessor();
+  inline bool HasFastSmiOnlyElements();
   inline bool HasFastElements();
+  // Returns whether an object has either FAST_ELEMENTS or
+  // FAST_SMI_ONLY_ELEMENTS elements.  TODO(danno): Rename HasFastTypeElements
+  // to HasFastElements() and HasFastElements to HasFastObjectElements.
+  inline bool HasFastTypeElements();
   inline bool HasFastDoubleElements();
+  inline bool HasNonStrictArgumentsElements();
   inline bool HasDictionaryElements();
   inline bool HasExternalPixelElements();
   inline bool HasExternalArrayElements();
@@ -1540,9 +1524,13 @@
   inline bool HasExternalDoubleElements();
   bool HasFastArgumentsElements();
   bool HasDictionaryArgumentsElements();
-  inline bool AllowsSetElementsLength();
   inline SeededNumberDictionary* element_dictionary();  // Gets slow elements.
 
+  inline void set_map_and_elements(
+      Map* map,
+      FixedArrayBase* value,
+      WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
   // Requires: HasFastElements().
   MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
 
@@ -1554,6 +1542,11 @@
   // a dictionary, and it will stay a dictionary.
   MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
 
+  MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
+                                                       Object* structure,
+                                                       String* name);
+
+  // Can cause GC.
   MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
                                            String* key,
                                            Object* value,
@@ -1571,8 +1564,6 @@
       Object* value,
       JSObject* holder,
       StrictModeFlag strict_mode);
-  MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSFunction* setter,
-                                                            Object* value);
   MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
       String* name,
       Object* value,
@@ -1583,6 +1574,14 @@
       Object* value,
       PropertyAttributes attributes,
       StrictModeFlag strict_mode);
+
+  static Handle<Object> SetLocalPropertyIgnoreAttributes(
+      Handle<JSObject> object,
+      Handle<String> key,
+      Handle<Object> value,
+      PropertyAttributes attributes);
+
+  // Can cause GC.
   MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
       String* key,
       Object* value,
@@ -1598,6 +1597,11 @@
 
   // Sets the property value in a normalized object given (key, value, details).
   // Handles the special representation of JS global objects.
+  static Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
+                                              Handle<String> key,
+                                              Handle<Object> value,
+                                              PropertyDetails details);
+
   MUST_USE_RESULT MaybeObject* SetNormalizedProperty(String* name,
                                                      Object* value,
                                                      PropertyDetails details);
@@ -1623,31 +1627,37 @@
       String* name,
       bool continue_search);
 
+  static void DefineAccessor(Handle<JSObject> object,
+                             Handle<String> name,
+                             Handle<Object> getter,
+                             Handle<Object> setter,
+                             PropertyAttributes attributes);
   MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
-                                              bool is_getter,
-                                              Object* fun,
+                                              Object* getter,
+                                              Object* setter,
                                               PropertyAttributes attributes);
-  Object* LookupAccessor(String* name, bool is_getter);
+  Object* LookupAccessor(String* name, AccessorComponent component);
 
   MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
 
   // Used from Object::GetProperty().
-  MaybeObject* GetPropertyWithFailedAccessCheck(
+  MUST_USE_RESULT MaybeObject* GetPropertyWithFailedAccessCheck(
       Object* receiver,
       LookupResult* result,
       String* name,
       PropertyAttributes* attributes);
-  MaybeObject* GetPropertyWithInterceptor(
+  MUST_USE_RESULT MaybeObject* GetPropertyWithInterceptor(
       JSReceiver* receiver,
       String* name,
       PropertyAttributes* attributes);
-  MaybeObject* GetPropertyPostInterceptor(
+  MUST_USE_RESULT MaybeObject* GetPropertyPostInterceptor(
       JSReceiver* receiver,
       String* name,
       PropertyAttributes* attributes);
-  MaybeObject* GetLocalPropertyPostInterceptor(JSReceiver* receiver,
-                                               String* name,
-                                               PropertyAttributes* attributes);
+  MUST_USE_RESULT MaybeObject* GetLocalPropertyPostInterceptor(
+      JSReceiver* receiver,
+      String* name,
+      PropertyAttributes* attributes);
 
   // Returns true if this is an instance of an api function and has
   // been modified since it was created.  May give false positives.
@@ -1660,43 +1670,58 @@
   // Accessors for hidden properties object.
   //
   // Hidden properties are not local properties of the object itself.
-  // Instead they are stored on an auxiliary JSObject stored as a local
+  // Instead they are stored in an auxiliary structure kept as a local
   // property with a special name Heap::hidden_symbol(). But if the
   // receiver is a JSGlobalProxy then the auxiliary object is a property
-  // of its prototype.
-  //
-  // Has/Get/SetHiddenPropertiesObject methods don't allow the holder to be
-  // a JSGlobalProxy. Use BypassGlobalProxy method above to get to the real
-  // holder.
-  //
-  // These accessors do not touch interceptors or accessors.
-  inline bool HasHiddenPropertiesObject();
-  inline Object* GetHiddenPropertiesObject();
-  MUST_USE_RESULT inline MaybeObject* SetHiddenPropertiesObject(
-      Object* hidden_obj);
+  // of its prototype, and if it's a detached proxy, then you can't have
+  // hidden properties.
 
-  // Indicates whether the hidden properties object should be created.
-  enum HiddenPropertiesFlag { ALLOW_CREATION, OMIT_CREATION };
+  // Sets a hidden property on this object. Returns this object if successful,
+  // undefined if called on a detached proxy.
+  static Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
+                                          Handle<String> key,
+                                          Handle<Object> value);
+  // Returns a failure if a GC is required.
+  MUST_USE_RESULT MaybeObject* SetHiddenProperty(String* key, Object* value);
+  // Gets the value of a hidden property with the given key. Returns undefined
+  // if the property doesn't exist (or if called on a detached proxy),
+  // otherwise returns the value set for the key.
+  Object* GetHiddenProperty(String* key);
+  // Deletes a hidden property. Deleting a non-existing property is
+  // considered successful.
+  void DeleteHiddenProperty(String* key);
+  // Returns true if the object has a property with the hidden symbol as name.
+  bool HasHiddenProperties();
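// Hidden properties behave like a per-object side table keyed by strings and
// invisible to normal enumeration, with the forgiving semantics documented
// above (reads of missing keys yield undefined, deletes of missing keys
// succeed). A self-contained schematic of that contract (std::map stands in
// for the hidden-symbol store; illustrative only):
#include <map>
#include <string>
struct HiddenStoreSketch {
  std::map<std::string, int> table;
  void Set(const std::string& key, int value) { table[key] = value; }
  int Get(const std::string& key) const {    // 0 plays the role of undefined
    std::map<std::string, int>::const_iterator it = table.find(key);
    return it == table.end() ? 0 : it->second;
  }
  void Delete(const std::string& key) { table.erase(key); }  // always succeeds
};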
 
-  // Retrieves the hidden properties object.
-  //
-  // The undefined value might be returned in case no hidden properties object
-  // is present and creation was omitted.
-  inline bool HasHiddenProperties();
-  MUST_USE_RESULT MaybeObject* GetHiddenProperties(HiddenPropertiesFlag flag);
+  static int GetIdentityHash(Handle<JSObject> obj);
+  MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+  MUST_USE_RESULT MaybeObject* SetIdentityHash(Object* hash, CreationFlag flag);
 
-  // Retrieves a permanent object identity hash code.
-  //
-  // The identity hash is stored as a hidden property. The undefined value might
-  // be returned in case no hidden properties object is present and creation was
-  // omitted.
-  MUST_USE_RESULT MaybeObject* GetIdentityHash(HiddenPropertiesFlag flag);
-
+  static Handle<Object> DeleteProperty(Handle<JSObject> obj,
+                                       Handle<String> name);
   MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
+
+  static Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
   MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
 
-  // Tests for the fast common case for property enumeration.
-  bool IsSimpleEnum();
+  inline void ValidateSmiOnlyElements();
+
+  // Makes sure that this object can contain HeapObject as elements.
+  MUST_USE_RESULT inline MaybeObject* EnsureCanContainHeapObjectElements();
+
+  // Makes sure that this object can contain the specified elements.
+  MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
+      Object** elements,
+      uint32_t count,
+      EnsureElementsMode mode);
+  MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
+      FixedArrayBase* elements,
+      EnsureElementsMode mode);
+  MUST_USE_RESULT MaybeObject* EnsureCanContainElements(
+      Arguments* arguments,
+      uint32_t first_arg,
+      uint32_t arg_count,
+      EnsureElementsMode mode);
 
   // Do we want to keep the elements in fast case when increasing the
   // capacity?
@@ -1707,11 +1732,11 @@
   // elements.
   bool ShouldConvertToFastElements();
   // Returns true if the elements of JSObject contain only values that can be
-  // represented in a FixedDoubleArray.
-  bool CanConvertToFastDoubleElements();
+  // represented in a FixedDoubleArray, and at least one value that can only
+  // be represented as a double and not a Smi.
+  bool ShouldConvertToFastDoubleElements(bool* has_smi_only_elements);
 
   // Tells whether the index'th element is present.
-  inline bool HasElement(uint32_t index);
   bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
 
   // Computes the new capacity when expanding the elements of a JSObject.
@@ -1741,16 +1766,19 @@
   LocalElementType HasLocalElement(uint32_t index);
 
   bool HasElementWithInterceptor(JSReceiver* receiver, uint32_t index);
-  bool HasElementPostInterceptor(JSReceiver* receiver, uint32_t index);
 
   MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
                                               Object* value,
                                               StrictModeFlag strict_mode,
                                               bool check_prototype);
-  MUST_USE_RESULT MaybeObject* SetDictionaryElement(uint32_t index,
-                                                    Object* value,
-                                                    StrictModeFlag strict_mode,
-                                                    bool check_prototype);
+
+  MUST_USE_RESULT MaybeObject* SetDictionaryElement(
+      uint32_t index,
+      Object* value,
+      PropertyAttributes attributes,
+      StrictModeFlag strict_mode,
+      bool check_prototype,
+      SetPropertyMode set_mode = SET_PROPERTY);
 
   MUST_USE_RESULT MaybeObject* SetFastDoubleElement(
       uint32_t index,
@@ -1758,26 +1786,50 @@
       StrictModeFlag strict_mode,
       bool check_prototype = true);
 
-  // Set the index'th array element.
+  static Handle<Object> SetOwnElement(Handle<JSObject> object,
+                                      uint32_t index,
+                                      Handle<Object> value,
+                                      StrictModeFlag strict_mode);
+
+  // Empty handle is returned if the element cannot be set to the given value.
+  static MUST_USE_RESULT Handle<Object> SetElement(
+      Handle<JSObject> object,
+      uint32_t index,
+      Handle<Object> value,
+      PropertyAttributes attr,
+      StrictModeFlag strict_mode,
+      SetPropertyMode set_mode = SET_PROPERTY);
+
   // A Failure object is returned if GC is needed.
-  MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
-                                          Object* value,
-                                          StrictModeFlag strict_mode,
-                                          bool check_prototype);
+  MUST_USE_RESULT MaybeObject* SetElement(
+      uint32_t index,
+      Object* value,
+      PropertyAttributes attributes,
+      StrictModeFlag strict_mode,
+      bool check_prototype = true,
+      SetPropertyMode set_mode = SET_PROPERTY);
 
   // Returns the index'th element.
   // The undefined object if index is out of bounds.
-  MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
+  MUST_USE_RESULT MaybeObject* GetElementWithInterceptor(Object* receiver,
+                                                         uint32_t index);
+
+  enum SetFastElementsCapacityMode {
+    kAllowSmiOnlyElements,
+    kForceSmiOnlyElements,
+    kDontAllowSmiOnlyElements
+  };
 
   // Replace the elements' backing store with fast elements of the given
   // capacity.  Update the length for JSArrays.  Returns the new backing
   // store.
-  MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
-                                                                int length);
+  MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
+      int capacity,
+      int length,
+      SetFastElementsCapacityMode set_capacity_mode);
   MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
       int capacity,
       int length);
-  MUST_USE_RESULT MaybeObject* SetSlowElements(Object* length);
 
   // Lookup interceptors are used for handling properties controlled by host
   // objects.
@@ -1789,9 +1841,6 @@
   bool HasRealElementProperty(uint32_t index);
   bool HasRealNamedCallbackProperty(String* key);
 
-  // Initializes the array to a certain length
-  MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
-
   // Get the header size for a JSObject.  Used to compute the index of
   // internal fields as well as the number of internal fields.
   inline int GetHeaderSize();
@@ -1800,10 +1849,7 @@
   inline int GetInternalFieldOffset(int index);
   inline Object* GetInternalField(int index);
   inline void SetInternalField(int index, Object* value);
-
-  // Lookup a property.  If found, the result is valid and has
-  // detailed information.
-  void LocalLookup(String* name, LookupResult* result);
+  inline void SetInternalField(int index, Smi* value);
 
   // The following lookup functions skip interceptors.
   void LocalLookupRealNamedProperty(String* name, LookupResult* result);
@@ -1816,9 +1862,7 @@
 
   // Returns the number of properties on this object filtering out properties
   // with the specified attributes (ignoring interceptors).
-  int NumberOfLocalProperties(PropertyAttributes filter);
-  // Returns the number of enumerable properties (ignoring interceptors).
-  int NumberOfEnumProperties();
+  int NumberOfLocalProperties(PropertyAttributes filter = NONE);
   // Fill in details for properties into storage starting at the specified
   // index.
   void GetLocalPropertyNames(FixedArray* storage, int index);
@@ -1860,6 +1904,21 @@
       Object* value,
       PropertyAttributes attributes);
 
+  // Returns a new map with all transitions dropped from the object's current
+  // map and the ElementsKind set.
+  static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
+                                              ElementsKind to_kind);
+  inline MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
+      Isolate* isolate,
+      ElementsKind elements_kind);
+  MUST_USE_RESULT MaybeObject* GetElementsTransitionMapSlow(
+      ElementsKind elements_kind);
+
+  static Handle<Object> TransitionElementsKind(Handle<JSObject> object,
+                                               ElementsKind to_kind);
+
+  MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
+
   // Converts a descriptor of any other type to a real field,
   // backed by the properties array.  Descriptors of visible
   // types, such as CONSTANT_FUNCTION, keep their enumeration order.
@@ -1898,16 +1957,32 @@
   // representation. If the object is expected to have additional properties
   // added this number can be indicated to have the backing store allocated to
   // an initial capacity for holding these properties.
+  static void NormalizeProperties(Handle<JSObject> object,
+                                  PropertyNormalizationMode mode,
+                                  int expected_additional_properties);
+
   MUST_USE_RESULT MaybeObject* NormalizeProperties(
       PropertyNormalizationMode mode,
       int expected_additional_properties);
 
+  // Convert and update the elements backing store to be a
+  // SeededNumberDictionary dictionary.  Returns the backing after conversion.
+  static Handle<SeededNumberDictionary> NormalizeElements(
+      Handle<JSObject> object);
+
   MUST_USE_RESULT MaybeObject* NormalizeElements();
 
+  static void UpdateMapCodeCache(Handle<JSObject> object,
+                                 Handle<String> name,
+                                 Handle<Code> code);
+
   MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(String* name, Code* code);
 
   // Transform slow named properties to fast variants.
   // Returns failure if allocation failed.
+  static void TransformToFastProperties(Handle<JSObject> object,
+                                        int unused_property_fields);
+
   MUST_USE_RESULT MaybeObject* TransformToFastProperties(
       int unused_property_fields);
 
@@ -1923,11 +1998,14 @@
                                        WriteBarrierMode mode
                                        = UPDATE_WRITE_BARRIER);
 
-  // initializes the body after properties slot, properties slot is
-  // initialized by set_properties
-  // Note: this call does not update write barrier, it is caller's
-  // reponsibility to ensure that *v* can be collected without WB here.
-  inline void InitializeBody(int object_size, Object* value);
+  // Initializes the body after the properties slot, which is itself
+  // initialized by set_properties.  Fills the pre-allocated fields with
+  // pre_allocated_value and the rest with filler_value.
+  // Note: this call does not update the write barrier; the caller is
+  // responsible for ensuring that |filler_value| can be collected without
+  // a write barrier here.
+  inline void InitializeBody(Map* map,
+                             Object* pre_allocated_value,
+                             Object* filler_value);
 
   // Check whether this object references another object
   bool ReferencesObject(Object* obj);
@@ -1936,6 +2014,7 @@
   static inline JSObject* cast(Object* obj);
 
   // Disallow further properties to be added to the object.
+  static Handle<Object> PreventExtensions(Handle<JSObject> object);
   MUST_USE_RESULT MaybeObject* PreventExtensions();
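
PreventExtensions now comes in two flavors, a pairing repeated throughout this header: a raw method returning MaybeObject* that can signal allocation failure, and a static wrapper taking handles that hides the failure protocol from callers. A minimal standalone sketch of the pairing; Arena, RawAdd and the retry-once policy are illustrative stand-ins, not V8's actual machinery:

#include <cstdio>
#include <optional>

using MaybeInt = std::optional<int>;     // stand-in for MaybeObject*

struct Arena {
  int budget;                            // pretend allocation budget

  MaybeInt RawAdd(int a, int b) {        // raw API: may signal failure
    if (budget-- <= 0) return std::nullopt;
    return a + b;
  }
  void CollectGarbage() { budget = 8; }  // pretend GC frees space

  // "Handlified" wrapper: shields callers from the failure protocol by
  // collecting and retrying, the way the static Handle<> overloads do.
  static int Add(Arena* arena, int a, int b) {
    MaybeInt result = arena->RawAdd(a, b);
    if (!result) {
      arena->CollectGarbage();
      result = arena->RawAdd(a, b);      // a second failure would be fatal
    }
    return *result;
  }
};

int main() {
  Arena arena{0};                        // force the first attempt to fail
  std::printf("%d\n", Arena::Add(&arena, 2, 3));  // prints 5
}
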
 
 
@@ -1962,6 +2041,10 @@
   void PrintElements(FILE* out);
 #endif
 
+  void PrintElementsTransition(
+      FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
+      ElementsKind to_kind, FixedArrayBase* to_elements);
+
 #ifdef DEBUG
   // Structure for collecting spill information about JSObjects.
   class SpillInformation {
@@ -2036,21 +2119,38 @@
                                                       Object* structure,
                                                       uint32_t index,
                                                       Object* holder);
-  MaybeObject* SetElementWithCallback(Object* structure,
-                                      uint32_t index,
-                                      Object* value,
-                                      JSObject* holder,
-                                      StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* SetElementWithCallback(
+      Object* structure,
+      uint32_t index,
+      Object* value,
+      JSObject* holder,
+      StrictModeFlag strict_mode);
   MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
       uint32_t index,
       Object* value,
+      PropertyAttributes attributes,
       StrictModeFlag strict_mode,
-      bool check_prototype);
+      bool check_prototype,
+      SetPropertyMode set_mode);
   MUST_USE_RESULT MaybeObject* SetElementWithoutInterceptor(
       uint32_t index,
       Object* value,
+      PropertyAttributes attributes,
       StrictModeFlag strict_mode,
-      bool check_prototype);
+      bool check_prototype,
+      SetPropertyMode set_mode);
+
+  // Searches the prototype chain for a callback setter and sets the property
+  // with the setter if it finds one. The '*found' flag indicates whether
+  // a setter was found or not.
+  // This function can cause GC and can return a failure result with
+  // '*found==true'.
+  MUST_USE_RESULT MaybeObject* SetPropertyWithCallbackSetterInPrototypes(
+      String* name,
+      Object* value,
+      PropertyAttributes attributes,
+      bool* found,
+      StrictModeFlag strict_mode);
 
   MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
                                                              DeleteMode mode);
@@ -2065,9 +2165,6 @@
   bool ReferencesObjectFromElements(FixedArray* elements,
                                     ElementsKind kind,
                                     Object* object);
-  bool HasElementInElements(FixedArray* elements,
-                            ElementsKind kind,
-                            uint32_t index);
 
   // Returns true if most of the elements backing storage is used.
   bool HasDenseElements();
@@ -2084,12 +2181,29 @@
       String* name,
       Object* structure,
       PropertyAttributes attributes);
-  MUST_USE_RESULT MaybeObject* DefineGetterSetter(
-      String* name,
+  MUST_USE_RESULT MaybeObject* DefineElementAccessor(
+      uint32_t index,
+      Object* getter,
+      Object* setter,
       PropertyAttributes attributes);
-
+  MUST_USE_RESULT MaybeObject* DefinePropertyAccessor(
+      String* name,
+      Object* getter,
+      Object* setter,
+      PropertyAttributes attributes);
   void LookupInDescriptor(String* name, LookupResult* result);
 
+  // Returns the hidden properties backing store object, currently
+  // a StringDictionary, stored on this object.
+  // If no hidden properties object has been put on this object, returns
+  // undefined, unless create_if_absent is true, in which case a new
+  // dictionary is created, added to this object, and returned.
+  MUST_USE_RESULT MaybeObject* GetHiddenPropertiesDictionary(
+      bool create_if_absent);
+  // Updates the existing hidden properties dictionary.
+  MUST_USE_RESULT MaybeObject* SetHiddenPropertiesDictionary(
+      StringDictionary* dictionary);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
 };
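
MUST_USE_RESULT appears on nearly every fallible method above. A minimal standalone sketch of how such an annotation is commonly defined and what it buys; the macro body shown is the usual GCC/Clang spelling, stated here as an assumption rather than as V8's exact definition:

#include <cstdio>

#if defined(__GNUC__) || defined(__clang__)
#define MUST_USE_RESULT __attribute__((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif

// A fallible call whose result must not be silently dropped.
MUST_USE_RESULT bool Reserve(int n) { return n <= 16; }

int main() {
  // Reserve(4);              // would warn: result unused
  if (!Reserve(4)) return 1;  // checked: compiles quietly
  std::printf("reserved\n");
}

In modern C++ the same effect is spelled [[nodiscard]]; the point is that forgetting to check an allocation failure becomes a compile-time diagnostic instead of a latent crash.
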
 
@@ -2145,6 +2259,9 @@
   // Gives access to raw memory which stores the array's data.
   inline Object** data_start();
 
+  inline Object** GetFirstElementAddress();
+  inline bool ContainsOnlySmisOrHoles();
+
   // Copy operations.
   MUST_USE_RESULT inline MaybeObject* Copy();
   MUST_USE_RESULT MaybeObject* CopySize(int new_length);
@@ -2207,7 +2324,16 @@
  protected:
   // Set operation on FixedArray without using write barriers. Can
   // only be used for storing old space objects or smis.
-  static inline void fast_set(FixedArray* array, int index, Object* value);
+  static inline void NoWriteBarrierSet(FixedArray* array,
+                                       int index,
+                                       Object* value);
+
+  // Set operation on FixedArray without incremental write barrier. Can
+  // only be used if the object is guaranteed to be white (whiteness witness
+  // is present).
+  static inline void NoIncrementalWriteBarrierSet(FixedArray* array,
+                                                  int index,
+                                                  Object* value);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
@@ -2217,19 +2343,19 @@
 // FixedDoubleArray describes fixed-sized arrays with element type double.
 class FixedDoubleArray: public FixedArrayBase {
  public:
-  inline void Initialize(FixedArray* from);
-  inline void Initialize(FixedDoubleArray* from);
-  inline void Initialize(SeededNumberDictionary* from);
-
   // Setter and getter for elements.
   inline double get_scalar(int index);
-  inline MaybeObject* get(int index);
+  inline int64_t get_representation(int index);
+  MUST_USE_RESULT inline MaybeObject* get(int index);
   inline void set(int index, double value);
   inline void set_the_hole(int index);
 
   // Checking for the hole.
   inline bool is_the_hole(int index);
 
+  // Copy operations
+  MUST_USE_RESULT inline MaybeObject* Copy();
+
   // Garbage collection support.
   inline static int SizeFor(int length) {
     return kHeaderSize + length * kDoubleSize;
@@ -2269,6 +2395,9 @@
 };
 
 
+class IncrementalMarking;
+
+
 // DescriptorArrays are fixed arrays used to hold instance descriptors.
 // The format of these objects is:
 // TODO(1399): It should be possible to make room for bit_field3 in the map
@@ -2310,7 +2439,7 @@
   // Set next enumeration index and flush any enum cache.
   void SetNextEnumerationIndex(int value) {
     if (!IsEmpty()) {
-      fast_set(this, kEnumerationIndexIndex, Smi::FromInt(value));
+      set(kEnumerationIndexIndex, Smi::FromInt(value));
     }
   }
   bool HasEnumCache() {
@@ -2331,7 +2460,9 @@
 
   // Initialize or change the enum cache,
   // using the supplied storage for the small "bridge".
-  void SetEnumCache(FixedArray* bridge_storage, FixedArray* new_cache);
+  void SetEnumCache(FixedArray* bridge_storage,
+                    FixedArray* new_cache,
+                    Object* new_index_cache);
 
   // Accessors for fetching instance descriptor at descriptor number.
   inline String* GetKey(int descriptor_number);
@@ -2343,17 +2474,39 @@
   inline Object* GetCallbacksObject(int descriptor_number);
   inline AccessorDescriptor* GetCallbacks(int descriptor_number);
   inline bool IsProperty(int descriptor_number);
-  inline bool IsTransition(int descriptor_number);
+  inline bool IsTransitionOnly(int descriptor_number);
   inline bool IsNullDescriptor(int descriptor_number);
   inline bool IsDontEnum(int descriptor_number);
 
+  class WhitenessWitness {
+   public:
+    inline explicit WhitenessWitness(DescriptorArray* array);
+    inline ~WhitenessWitness();
+
+   private:
+    IncrementalMarking* marking_;
+  };
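
WhitenessWitness is an RAII "witness" token: constructing one establishes a precondition (the array is white to the incremental marker, so write barriers can be skipped), and the mutating APIs demand a const reference to it so callers cannot forget. A standalone sketch of the idiom with illustrative names; the precondition is simplified to a boolean:

#include <cassert>
#include <vector>

class Marking {
 public:
  void Pause() { paused_ = true; }
  void Resume() { paused_ = false; }
  bool paused() const { return paused_; }
 private:
  bool paused_ = false;
};

// RAII witness: constructing it establishes the precondition, and holding
// it is the only way to call the unsafe fast path below.
class Witness {
 public:
  explicit Witness(Marking* marking) : marking_(marking) { marking_->Pause(); }
  ~Witness() { marking_->Resume(); }
  Witness(const Witness&) = delete;
  Witness& operator=(const Witness&) = delete;
 private:
  Marking* marking_;
};

class Buffer {
 public:
  explicit Buffer(int n) : slots_(n) {}
  // Write without a barrier: only callable while a Witness exists.
  void Set(int index, int value, const Witness&) { slots_[index] = value; }
 private:
  std::vector<int> slots_;
};

int main() {
  Marking marking;
  Buffer buffer(4);
  {
    Witness witness(&marking);   // pauses marking for this scope
    buffer.Set(0, 42, witness);  // compiles only with the witness in hand
  }
  assert(!marking.paused());     // marking resumed at scope exit
}
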
+
   // Accessor for complete descriptor.
   inline void Get(int descriptor_number, Descriptor* desc);
-  inline void Set(int descriptor_number, Descriptor* desc);
+  inline void Set(int descriptor_number,
+                  Descriptor* desc,
+                  const WhitenessWitness&);
 
-  // Transfer complete descriptor from another descriptor array to
-  // this one.
-  inline void CopyFrom(int index, DescriptorArray* src, int src_index);
+  // Transfer a complete descriptor from the src descriptor array to the dst
+  // one, dropping map transitions in CALLBACKS.
+  static void CopyFrom(Handle<DescriptorArray> dst,
+                       int dst_index,
+                       Handle<DescriptorArray> src,
+                       int src_index,
+                       const WhitenessWitness& witness);
+
+  // Transfer a complete descriptor from the src descriptor array to this
+  // descriptor array, dropping map transitions in CALLBACKS.
+  MUST_USE_RESULT MaybeObject* CopyFrom(int dst_index,
+                                        DescriptorArray* src,
+                                        int src_index,
+                                        const WhitenessWitness&);
 
   // Copy the descriptor array, insert a new descriptor and optionally
   // remove map transitions.  If the descriptor is already present, it is
@@ -2364,17 +2517,17 @@
   MUST_USE_RESULT MaybeObject* CopyInsert(Descriptor* descriptor,
                                           TransitionFlag transition_flag);
 
-  // Remove all transitions.  Return  a copy of the array with all transitions
-  // removed, or a Failure object if the new array could not be allocated.
+  // Return a copy of the array with all transitions and null descriptors
+  // removed. Return a Failure object in case of an allocation failure.
   MUST_USE_RESULT MaybeObject* RemoveTransitions();
 
   // Sort the instance descriptors by the hash codes of their keys.
   // Does not check for duplicates.
-  void SortUnchecked();
+  void SortUnchecked(const WhitenessWitness&);
 
   // Sort the instance descriptors by the hash codes of their keys.
   // Checks the result for duplicates.
-  void Sort();
+  void Sort(const WhitenessWitness&);
 
   // Search the instance descriptors for given name.
   inline int Search(String* name);
@@ -2413,9 +2566,10 @@
   static const int kFirstIndex = 3;
 
   // The length of the "bridge" to the enum cache.
-  static const int kEnumCacheBridgeLength = 2;
+  static const int kEnumCacheBridgeLength = 3;
   static const int kEnumCacheBridgeEnumIndex = 0;
   static const int kEnumCacheBridgeCacheIndex = 1;
+  static const int kEnumCacheBridgeIndicesCacheIndex = 2;
 
   // Layout description.
   static const int kBitField3StorageOffset = FixedArray::kHeaderSize;
@@ -2449,6 +2603,20 @@
   static const int kMaxNumberOfDescriptors = 1024 + 512;
 
  private:
+  // An entry in a DescriptorArray, represented as an (array, index) pair.
+  class Entry {
+   public:
+    inline explicit Entry(DescriptorArray* descs, int index) :
+        descs_(descs), index_(index) { }
+
+    inline PropertyType type() { return descs_->GetType(index_); }
+    inline Object* GetCallbackObject() { return descs_->GetValue(index_); }
+
+   private:
+    DescriptorArray* descs_;
+    int index_;
+  };
+
   // Conversion from descriptor number to array indices.
   static int ToKeyIndex(int descriptor_number) {
     return descriptor_number+kFirstIndex;
@@ -2467,10 +2635,12 @@
         NULL_DESCRIPTOR;
   }
   // Swap operation on FixedArray without using write barriers.
-  static inline void fast_swap(FixedArray* array, int first, int second);
+  static inline void NoIncrementalWriteBarrierSwap(
+      FixedArray* array, int first, int second);
 
   // Swap descriptor first and second.
-  inline void Swap(int first, int second);
+  inline void NoIncrementalWriteBarrierSwapDescriptors(
+      int first, int second);
 
   FixedArray* GetContentArray() {
     return FixedArray::cast(get(kContentArrayIndex));
@@ -2488,7 +2658,7 @@
 // encountered and stops when unused elements are encountered.
 //
 // - Elements with key == undefined have not been used yet.
-// - Elements with key == null have been deleted.
+// - Elements with key == the_hole have been deleted.
 //
 // The hash table class is parameterized with a Shape and a Key.
 // Shape must be a class with the following interface:
@@ -2523,7 +2693,7 @@
   }
   static uint32_t HashForObject(Key key, Object* object) { return 0; }
   static uint32_t SeededHashForObject(Key key, uint32_t seed, Object* object) {
-    // Won't be called if UsesSeed isn't overridden by child class.
+    ASSERT(UsesSeed);
     return HashForObject(key, object);
   }
 };
@@ -2534,7 +2704,8 @@
   // Wrapper methods
   inline uint32_t Hash(Key key) {
     if (Shape::UsesSeed) {
-      return Shape::SeededHash(key, GetHeap()->HashSeed());
+      return Shape::SeededHash(key,
+          GetHeap()->HashSeed());
     } else {
       return Shape::Hash(key);
     }
@@ -2542,7 +2713,8 @@
 
   inline uint32_t HashForObject(Key key, Object* object) {
     if (Shape::UsesSeed) {
-      return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object);
+      return Shape::SeededHashForObject(key,
+          GetHeap()->HashSeed(), object);
     } else {
       return Shape::HashForObject(key, object);
     }
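
The Hash wrapper above dispatches on Shape::UsesSeed, a compile-time flag, so seeded shapes mix the heap's hash seed into the key while unseeded shapes pay nothing for it. A standalone sketch of the dispatch; the shapes and mixing functions are illustrative:

#include <cstdint>
#include <cstdio>

struct UnseededShape {
  static const bool UsesSeed = false;
  static uint32_t Hash(uint32_t key) { return key * 2654435761u; }
  static uint32_t SeededHash(uint32_t, uint32_t) { return 0; }  // unused
};

struct SeededShape {
  static const bool UsesSeed = true;
  static uint32_t Hash(uint32_t) { return 0; }                  // unused
  static uint32_t SeededHash(uint32_t key, uint32_t seed) {
    return (key ^ seed) * 2654435761u;  // seed thwarts precomputed collisions
  }
};

template <class Shape>
struct Table {
  uint32_t seed;
  uint32_t Hash(uint32_t key) const {
    // Mirrors HashTable::Hash above; the branch folds away at compile time.
    if (Shape::UsesSeed) {
      return Shape::SeededHash(key, seed);
    } else {
      return Shape::Hash(key);
    }
  }
};

int main() {
  Table<SeededShape> seeded{0xBADC0DEu};
  std::printf("%08x\n", seeded.Hash(42));
}
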
@@ -2590,10 +2762,10 @@
   // Returns the key at entry.
   Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
 
-  // Tells whether k is a real key.  Null and undefined are not allowed
+  // Tells whether k is a real key.  The hole and undefined are not allowed
   // as keys and can be used to indicate missing or deleted elements.
   bool IsKey(Object* k) {
-    return !k->IsNull() && !k->IsUndefined();
+    return !k->IsTheHole() && !k->IsUndefined();
   }
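
IsKey reflects the two-sentinel scheme described above: undefined marks a never-used slot and terminates a probe sequence, while the hole marks a deleted slot that probing must skip over, so deletions never break existing probe chains. A standalone sketch with integer sentinels in place of heap values:

#include <cassert>
#include <array>

const int kUnused = -1;   // never occupied: stops probing
const int kDeleted = -2;  // tombstone ("the hole"): probing continues

struct Set {
  std::array<int, 8> slots;
  Set() { slots.fill(kUnused); }

  bool Contains(int key) const {
    for (int i = key & 7; slots[i] != kUnused; i = (i + 1) & 7) {
      if (slots[i] == key) return true;  // kDeleted simply falls through
    }
    return false;
  }
  void Add(int key) {
    int i = key & 7;
    while (slots[i] != kUnused && slots[i] != kDeleted) i = (i + 1) & 7;
    slots[i] = key;
  }
  void Remove(int key) {
    for (int i = key & 7; slots[i] != kUnused; i = (i + 1) & 7) {
      if (slots[i] == key) { slots[i] = kDeleted; return; }
    }
  }
};

int main() {
  Set s;
  s.Add(1); s.Add(9);      // both hash to slot 1; 9 probes past 1
  s.Remove(1);             // leaves a hole, not kUnused
  assert(s.Contains(9));   // still reachable through the hole
}
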
 
   // Garbage collection support.
@@ -2645,12 +2817,12 @@
 
   // Update the number of elements in the hash table.
   void SetNumberOfElements(int nof) {
-    fast_set(this, kNumberOfElementsIndex, Smi::FromInt(nof));
+    set(kNumberOfElementsIndex, Smi::FromInt(nof));
   }
 
   // Update the number of deleted elements in the hash table.
   void SetNumberOfDeletedElements(int nod) {
-    fast_set(this, kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
+    set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
   }
 
   // Sets the capacity of the hash table.
@@ -2660,7 +2832,7 @@
     // and non-zero.
     ASSERT(capacity > 0);
     ASSERT(capacity <= kMaxCapacity);
-    fast_set(this, kCapacityIndex, Smi::FromInt(capacity));
+    set(kCapacityIndex, Smi::FromInt(capacity));
   }
 
 
@@ -2812,22 +2984,12 @@
 
   // Returns the value at entry.
   Object* ValueAt(int entry) {
-    return this->get(HashTable<Shape, Key>::EntryToIndex(entry)+1);
+    return this->get(HashTable<Shape, Key>::EntryToIndex(entry) + 1);
   }
 
   // Set the value for entry.
-  // Returns false if the put wasn't performed due to property being read only.
-  // Returns true on successful put.
-  bool ValueAtPut(int entry, Object* value) {
-    // Check that this value can actually be written.
-    PropertyDetails details = DetailsAt(entry);
-    // If a value has not been initilized we allow writing to it even if
-    // it is read only (a declared const that has not been initialized).
-    if (details.IsReadOnly() && !ValueAt(entry)->IsTheHole()) {
-      return false;
-    }
+  void ValueAtPut(int entry, Object* value) {
     this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 1, value);
-    return true;
   }
 
   // Returns the property details for the property at entry.
@@ -2868,7 +3030,7 @@
 
   // Accessors for next enumeration index.
   void SetNextEnumerationIndex(int index) {
-    this->fast_set(this, kNextEnumerationIndexIndex, Smi::FromInt(index));
+    this->set(kNextEnumerationIndexIndex, Smi::FromInt(index));
   }
 
   int NextEnumerationIndex() {
@@ -2948,9 +3110,11 @@
       JSObject* obj,
       int unused_property_fields);
 
-  // Find entry for key otherwise return kNotFound. Optimzed version of
+  // Find entry for key, otherwise return kNotFound. Optimized version of
   // HashTable::FindEntry.
   int FindEntry(String* key);
+
+  bool ContainsTransition(int entry);
 };
 
 
@@ -2999,6 +3163,13 @@
                                               PropertyDetails details);
 
   // Set an existing entry or add a new one if needed.
+  // Return the updated dictionary.
+  MUST_USE_RESULT static Handle<SeededNumberDictionary> Set(
+      Handle<SeededNumberDictionary> dictionary,
+      uint32_t index,
+      Handle<Object> value,
+      PropertyDetails details);
+
   MUST_USE_RESULT MaybeObject* Set(uint32_t key,
                                    Object* value,
                                    PropertyDetails details);
@@ -3018,9 +3189,6 @@
   // requires_slow_elements returns false.
   inline uint32_t max_number_key();
 
-  // Remove all entries were key is a number and (from <= key && key < to).
-  void RemoveNumberEntries(uint32_t from, uint32_t to);
-
   // Bit masks.
   static const int kRequiresSlowElementsMask = 1;
   static const int kRequiresSlowElementsTagSize = 1;
@@ -3041,24 +3209,51 @@
   MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key, Object* value);
 
   // Set an existing entry or add a new one if needed.
+  // Return the updated dictionary.
+  MUST_USE_RESULT static Handle<UnseededNumberDictionary> Set(
+      Handle<UnseededNumberDictionary> dictionary,
+      uint32_t index,
+      Handle<Object> value);
+
   MUST_USE_RESULT MaybeObject* Set(uint32_t key, Object* value);
 };
 
 
+template <int entrysize>
 class ObjectHashTableShape : public BaseShape<Object*> {
  public:
-  static inline bool IsMatch(JSObject* key, Object* other);
-  static inline uint32_t Hash(JSObject* key);
-  static inline uint32_t HashForObject(JSObject* key, Object* object);
-  MUST_USE_RESULT static inline MaybeObject* AsObject(JSObject* key);
+  static inline bool IsMatch(Object* key, Object* other);
+  static inline uint32_t Hash(Object* key);
+  static inline uint32_t HashForObject(Object* key, Object* object);
+  MUST_USE_RESULT static inline MaybeObject* AsObject(Object* key);
   static const int kPrefixSize = 0;
-  static const int kEntrySize = 2;
+  static const int kEntrySize = entrysize;
 };
 
 
-// ObjectHashTable maps keys that are JavaScript objects to object values by
+// ObjectHashSet holds keys that are arbitrary objects by using the identity
+// hash of the key for hashing purposes.
+class ObjectHashSet: public HashTable<ObjectHashTableShape<1>, Object*> {
+ public:
+  static inline ObjectHashSet* cast(Object* obj) {
+    ASSERT(obj->IsHashTable());
+    return reinterpret_cast<ObjectHashSet*>(obj);
+  }
+
+  // Looks up whether the given key is part of this hash set.
+  bool Contains(Object* key);
+
+  // Adds the given key to this hash set.
+  MUST_USE_RESULT MaybeObject* Add(Object* key);
+
+  // Removes the given key from this hash set.
+  MUST_USE_RESULT MaybeObject* Remove(Object* key);
+};
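
ObjectHashSet keys on the identity hash of arbitrary objects, so membership survives mutation of the key's contents. A standalone sketch of identity hashing with a lazily assigned hash code; the counter-based generator and Obj type are illustrative:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <unordered_set>

struct Obj {
  int payload = 0;
  mutable uint32_t identity_hash = 0;  // 0 means "not assigned yet"
};

static uint32_t NextIdentityHash() {
  static uint32_t counter = 0;
  return ++counter * 2654435761u;      // odd multiplier spreads the bits
}

struct IdentityHash {
  std::size_t operator()(const Obj* obj) const {
    if (obj->identity_hash == 0) obj->identity_hash = NextIdentityHash();
    return obj->identity_hash;
  }
};

// Keys compare by pointer identity; payload equality is irrelevant.
using ObjSet = std::unordered_set<const Obj*, IdentityHash>;

int main() {
  Obj a, b;
  ObjSet set;
  set.insert(&a);
  a.payload = 99;               // mutating the key does not disturb the set
  assert(set.count(&a) == 1);
  assert(set.count(&b) == 0);   // b has its own identity
}
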
+
+
+// ObjectHashTable maps keys that are arbitrary objects to object values by
 // using the identity hash of the key for hashing purposes.
-class ObjectHashTable: public HashTable<ObjectHashTableShape, JSObject*> {
+class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
  public:
   static inline ObjectHashTable* cast(Object* obj) {
     ASSERT(obj->IsHashTable());
@@ -3067,18 +3262,17 @@
 
   // Looks up the value associated with the given key. The undefined value is
   // returned in case the key is not present.
-  Object* Lookup(JSObject* key);
+  Object* Lookup(Object* key);
 
   // Adds (or overwrites) the value associated with the given key. Mapping a
   // key to the undefined value causes removal of the whole entry.
-  MUST_USE_RESULT MaybeObject* Put(JSObject* key, Object* value);
+  MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
 
  private:
   friend class MarkCompactCollector;
 
-  void AddEntry(int entry, JSObject* key, Object* value);
-  void RemoveEntry(int entry, Heap* heap);
-  inline void RemoveEntry(int entry);
+  void AddEntry(int entry, Object* key, Object* value);
+  void RemoveEntry(int entry);
 
   // Returns the index to the value of an entry.
   static inline int EntryToValueIndex(int entry) {
@@ -3125,6 +3319,207 @@
 };
 
 
+// ScopeInfo represents information about different scopes of a source
+// program and the allocation of the scope's variables. Scope information
+// is stored in a compressed form in ScopeInfo objects and is used
+// at runtime (stack dumps, deoptimization, etc.).
+
+// This object provides quick access to scope info details for runtime
+// routines.
+class ScopeInfo : public FixedArray {
+ public:
+  static inline ScopeInfo* cast(Object* object);
+
+  // Return the type of this scope.
+  ScopeType Type();
+
+  // Does this scope call eval?
+  bool CallsEval();
+
+  // Return the language mode of this scope.
+  LanguageMode language_mode();
+
+  // Does this scope make a non-strict eval call?
+  bool CallsNonStrictEval() {
+    return CallsEval() && (language_mode() == CLASSIC_MODE);
+  }
+
+  // Return the total number of locals allocated on the stack and in the
+  // context. This includes the parameters that are allocated in the context.
+  int LocalCount();
+
+  // Return the number of stack slots for code. This number consists of two
+  // parts:
+  //  1. One stack slot per stack allocated local.
+  //  2. One stack slot for the function name if it is stack allocated.
+  int StackSlotCount();
+
+  // Return the number of context slots for code if a context is allocated.
+  // This number consists of three parts:
+  //  1. Size of fixed header for every context: Context::MIN_CONTEXT_SLOTS
+  //  2. One context slot per context-allocated local.
+  //  3. One context slot for the function name if it is context-allocated.
+  // Parameters allocated in the context count as context-allocated locals.
+  // If no contexts are allocated for this scope, ContextLength returns 0.
+  int ContextLength();
+
+  // Is this scope the scope of a named function expression?
+  bool HasFunctionName();
+
+  // Return true if this scope has heap-allocated (context) locals.
+  bool HasHeapAllocatedLocals();
+
+  // Return true if a context is allocated for this scope.
+  bool HasContext();
+
+  // Return the function_name if present.
+  String* FunctionName();
+
+  // Return the name of the given parameter.
+  String* ParameterName(int var);
+
+  // Return the name of the given local.
+  String* LocalName(int var);
+
+  // Return the name of the given stack local.
+  String* StackLocalName(int var);
+
+  // Return the name of the given context local.
+  String* ContextLocalName(int var);
+
+  // Return the mode of the given context local.
+  VariableMode ContextLocalMode(int var);
+
+  // Return the initialization flag of the given context local.
+  InitializationFlag ContextLocalInitFlag(int var);
+
+  // Lookup support for serialized scope info. Returns the stack slot index
+  // for a given slot name if the slot is present; otherwise returns a value
+  // < 0. The name must be a symbol (canonicalized).
+  int StackSlotIndex(String* name);
+
+  // Lookup support for serialized scope info. Returns the
+  // context slot index for a given slot name if the slot is present; otherwise
+  // returns a value < 0. The name must be a symbol (canonicalized).
+  // If the slot is present and mode != NULL, sets *mode to the corresponding
+  // mode for that variable.
+  int ContextSlotIndex(String* name,
+                       VariableMode* mode,
+                       InitializationFlag* init_flag);
+
+  // Lookup support for serialized scope info. Returns the
+  // parameter index for a given parameter name if the parameter is present;
+  // otherwise returns a value < 0. The name must be a symbol (canonicalized).
+  int ParameterIndex(String* name);
+
+  // Lookup support for serialized scope info. Returns the
+  // function context slot index if the function name is present (named
+  // function expressions, only), otherwise returns a value < 0. The name
+  // must be a symbol (canonicalized).
+  int FunctionContextSlotIndex(String* name, VariableMode* mode);
+
+  static Handle<ScopeInfo> Create(Scope* scope);
+
+  // Serializes empty scope info.
+  static ScopeInfo* Empty();
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+  // The layout of the static part of a ScopeInfo is as follows. Each entry is
+  // numeric and occupies one array slot.
+  // 1. A set of properties of the scope
+  // 2. The number of parameters. This only applies to function scopes. For
+  //    non-function scopes this is 0.
+  // 3. The number of non-parameter variables allocated on the stack.
+  // 4. The number of non-parameter and parameter variables allocated in the
+  //    context.
+#define FOR_EACH_NUMERIC_FIELD(V)          \
+  V(Flags)                                 \
+  V(ParameterCount)                        \
+  V(StackLocalCount)                       \
+  V(ContextLocalCount)
+
+#define FIELD_ACCESSORS(name)                            \
+  void Set##name(int value) {                            \
+    set(k##name, Smi::FromInt(value));                   \
+  }                                                      \
+  int name() {                                           \
+    if (length() > 0) {                                  \
+      return Smi::cast(get(k##name))->value();           \
+    } else {                                             \
+      return 0;                                          \
+    }                                                    \
+  }
+  FOR_EACH_NUMERIC_FIELD(FIELD_ACCESSORS)
+#undef FIELD_ACCESSORS
+
+ private:
+  enum {
+#define DECL_INDEX(name) k##name,
+  FOR_EACH_NUMERIC_FIELD(DECL_INDEX)
+#undef DECL_INDEX
+#undef FOR_EACH_NUMERIC_FIELD
+  kVariablePartIndex
+  };
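
FOR_EACH_NUMERIC_FIELD is an X-macro: the single field list is expanded twice, once into the accessor bodies above and once into the index enum, so the two can never drift apart. A standalone sketch of the same pattern with illustrative field names:

#include <cassert>

#define FOR_EACH_FIELD(V) \
  V(Width)                \
  V(Height)

class Box {
 public:
#define FIELD_ACCESSORS(name)                              \
  void Set##name(int value) { data_[k##name] = value; }    \
  int name() const { return data_[k##name]; }
  FOR_EACH_FIELD(FIELD_ACCESSORS)
#undef FIELD_ACCESSORS

 private:
  enum {
#define DECL_INDEX(name) k##name,
    FOR_EACH_FIELD(DECL_INDEX)
#undef DECL_INDEX
    kFieldCount
  };
  int data_[kFieldCount] = {};
};

int main() {
  Box box;
  box.SetWidth(3);
  box.SetHeight(4);
  assert(box.Width() * box.Height() == 12);
}
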
+
+  // The layout of the variable part of a ScopeInfo is as follows:
+  // 1. ParameterEntries:
+  //    This part stores the names of the parameters for function scopes. One
+  //    slot is used per parameter, so in total this part occupies
+  //    ParameterCount() slots in the array. For other scopes than function
+  //    scopes ParameterCount() is 0.
+  // 2. StackLocalEntries:
+  //    Contains the names of local variables that are allocated on the stack,
+  //    in increasing order of the stack slot index. One slot is used per stack
+  //    local, so in total this part occupies StackLocalCount() slots in the
+  //    array.
+  // 3. ContextLocalNameEntries:
+  //    Contains the names of local variables and parameters that are allocated
+  //    in the context. They are stored in increasing order of the context slot
+  //    index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
+  //    context local, so in total this part occupies ContextLocalCount() slots
+  //    in the array.
+  // 4. ContextLocalInfoEntries:
+  //    Contains the variable modes and initialization flags corresponding to
+  //    the context locals in ContextLocalNameEntries. One slot is used per
+  //    context local, so in total this part occupies ContextLocalCount()
+  //    slots in the array.
+  // 5. FunctionNameEntryIndex:
+  //    If the scope belongs to a named function expression this part contains
+  //    information about the function variable. It always occupies two array
+  //    slots:  a. The name of the function variable.
+  //            b. The context or stack slot index for the variable.
+  int ParameterEntriesIndex();
+  int StackLocalEntriesIndex();
+  int ContextLocalNameEntriesIndex();
+  int ContextLocalInfoEntriesIndex();
+  int FunctionNameEntryIndex();
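
Each of these *Index() helpers locates a section of the variable part by summing the lengths of all preceding sections, exactly as the layout comment above spells out. A standalone sketch of that offset arithmetic with illustrative counts; kVariablePartIndex is 4 here because the numeric header holds four slots:

#include <cassert>

struct Layout {
  enum { kVariablePartIndex = 4 };  // slots 0-3 hold the numeric header

  int parameter_count;
  int stack_local_count;
  int context_local_count;

  int ParameterEntriesIndex() const { return kVariablePartIndex; }
  int StackLocalEntriesIndex() const {
    return ParameterEntriesIndex() + parameter_count;
  }
  int ContextLocalNameEntriesIndex() const {
    return StackLocalEntriesIndex() + stack_local_count;
  }
  int ContextLocalInfoEntriesIndex() const {
    return ContextLocalNameEntriesIndex() + context_local_count;
  }
  int FunctionNameEntryIndex() const {
    return ContextLocalInfoEntriesIndex() + context_local_count;
  }
};

int main() {
  Layout layout{2, 3, 1};
  assert(layout.StackLocalEntriesIndex() == 6);         // 4 + 2 parameters
  assert(layout.ContextLocalNameEntriesIndex() == 9);   // + 3 stack locals
  assert(layout.ContextLocalInfoEntriesIndex() == 10);  // + 1 name slot
  assert(layout.FunctionNameEntryIndex() == 11);        // + 1 info slot
}
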
+
+  // Location of the function variable for named function expressions.
+  enum FunctionVariableInfo {
+    NONE,     // No function name present.
+    STACK,    // Function name is saved in a stack slot.
+    CONTEXT,  // Function name is saved in a context slot.
+    UNUSED    // Function name is present but never referenced.
+  };
+
+  // Properties of scopes.
+  class TypeField:             public BitField<ScopeType,            0, 3> {};
+  class CallsEvalField:        public BitField<bool,                 3, 1> {};
+  class LanguageModeField:     public BitField<LanguageMode,         4, 2> {};
+  class FunctionVariableField: public BitField<FunctionVariableInfo, 6, 2> {};
+  class FunctionVariableMode:  public BitField<VariableMode,         8, 3> {};
+
+  // BitFields representing the encoded information for context locals in the
+  // ContextLocalInfoEntries part.
+  class ContextLocalMode:      public BitField<VariableMode,         0, 3> {};
+  class ContextLocalInitFlag:  public BitField<InitializationFlag,   3, 1> {};
+};
+
+
 // The cache for maps used by normalized (dictionary mode) objects.
 // Such maps do not have property descriptors, so a typical program
 // needs very limited number of distinct normalized maps.
@@ -3146,11 +3541,12 @@
 };
 
 
-// ByteArray represents fixed sized byte arrays.  Used by the outside world,
-// such as PCRE, and also by the memory allocator and garbage collector to
-// fill in free blocks in the heap.
+// ByteArray represents fixed sized byte arrays.  Used for the relocation info
+// that is attached to code objects.
 class ByteArray: public FixedArrayBase {
  public:
+  inline int Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
+
   // Setter and getter.
   inline byte get(int index);
   inline void set(int index, byte value);
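
Size() above rounds the byte payload up to a pointer-size boundary so heap iteration can step word by word. A standalone sketch of the usual power-of-two RoundUp; the mask formulation is an assumption about the helper, which this diff does not show:

#include <cassert>
#include <cstddef>

// align must be a power of two for the mask trick to hold.
inline std::size_t RoundUp(std::size_t x, std::size_t align) {
  return (x + align - 1) & ~(align - 1);
}

int main() {
  const std::size_t kPointerSize = sizeof(void*);
  // A 4-byte header plus 5 payload bytes occupies 9 bytes, padded up to
  // the next boundary so the heap stays iterable word by word.
  assert(RoundUp(4 + 5, 8) == 16);
  assert(RoundUp(17, kPointerSize) % kPointerSize == 0);
}
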
@@ -3207,6 +3603,41 @@
 };
 
 
+// FreeSpace represents fixed sized areas of the heap that are not currently in
+// use.  Used by the heap and GC.
+class FreeSpace: public HeapObject {
+ public:
+  // [size]: size of the free space including the header.
+  inline int size();
+  inline void set_size(int value);
+
+  inline int Size() { return size(); }
+
+  // Casting.
+  static inline FreeSpace* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void FreeSpacePrint() {
+    FreeSpacePrint(stdout);
+  }
+  void FreeSpacePrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void FreeSpaceVerify();
+#endif
+
+  // Layout description.
+  // Size is smi tagged when it is stored.
+  static const int kSizeOffset = HeapObject::kHeaderSize;
+  static const int kHeaderSize = kSizeOffset + kPointerSize;
+
+  static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
+};
+
+
 // An ExternalArray represents a fixed-size array of primitive values
 // which live outside the JavaScript heap. Its subclasses are used to
 // implement the CanvasArray types being defined in the WebGL
@@ -3257,7 +3688,7 @@
 
   // Setter and getter.
   inline uint8_t get_scalar(int index);
-  inline MaybeObject* get(int index);
+  MUST_USE_RESULT inline MaybeObject* get(int index);
   inline void set(int index, uint8_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber and
@@ -3286,12 +3717,12 @@
  public:
   // Setter and getter.
   inline int8_t get_scalar(int index);
-  inline MaybeObject* get(int index);
+  MUST_USE_RESULT inline MaybeObject* get(int index);
   inline void set(int index, int8_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MaybeObject* SetValue(uint32_t index, Object* value);
+  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalByteArray* cast(Object* obj);
@@ -3315,12 +3746,12 @@
  public:
   // Setter and getter.
   inline uint8_t get_scalar(int index);
-  inline MaybeObject* get(int index);
+  MUST_USE_RESULT inline MaybeObject* get(int index);
   inline void set(int index, uint8_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MaybeObject* SetValue(uint32_t index, Object* value);
+  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalUnsignedByteArray* cast(Object* obj);
@@ -3344,12 +3775,12 @@
  public:
   // Setter and getter.
   inline int16_t get_scalar(int index);
-  inline MaybeObject* get(int index);
+  MUST_USE_RESULT inline MaybeObject* get(int index);
   inline void set(int index, int16_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MaybeObject* SetValue(uint32_t index, Object* value);
+  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalShortArray* cast(Object* obj);
@@ -3373,12 +3804,12 @@
  public:
   // Setter and getter.
   inline uint16_t get_scalar(int index);
-  inline MaybeObject* get(int index);
+  MUST_USE_RESULT inline MaybeObject* get(int index);
   inline void set(int index, uint16_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MaybeObject* SetValue(uint32_t index, Object* value);
+  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalUnsignedShortArray* cast(Object* obj);
@@ -3402,12 +3833,12 @@
  public:
   // Setter and getter.
   inline int32_t get_scalar(int index);
-  inline MaybeObject* get(int index);
+  MUST_USE_RESULT inline MaybeObject* get(int index);
   inline void set(int index, int32_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MaybeObject* SetValue(uint32_t index, Object* value);
+  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalIntArray* cast(Object* obj);
@@ -3431,12 +3862,12 @@
  public:
   // Setter and getter.
   inline uint32_t get_scalar(int index);
-  inline MaybeObject* get(int index);
+  MUST_USE_RESULT inline MaybeObject* get(int index);
   inline void set(int index, uint32_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MaybeObject* SetValue(uint32_t index, Object* value);
+  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalUnsignedIntArray* cast(Object* obj);
@@ -3460,12 +3891,12 @@
  public:
   // Setter and getter.
   inline float get_scalar(int index);
-  inline MaybeObject* get(int index);
+  MUST_USE_RESULT inline MaybeObject* get(int index);
   inline void set(int index, float value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MaybeObject* SetValue(uint32_t index, Object* value);
+  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalFloatArray* cast(Object* obj);
@@ -3489,12 +3920,12 @@
  public:
   // Setter and getter.
   inline double get_scalar(int index);
-  inline MaybeObject* get(int index);
+  MUST_USE_RESULT inline MaybeObject* get(int index);
   inline void set(int index, double value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MaybeObject* SetValue(uint32_t index, Object* value);
+  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalDoubleArray* cast(Object* obj);
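
Each external array's SetValue "applies the correct conversion from Smi, HeapNumber and undefined". For a pixel-style uint8 array that conversion clamps to [0, 255], rounds to nearest, and maps undefined to zero; a standalone sketch under those assumptions, with a plain Value struct standing in for tagged values:

#include <cassert>
#include <cmath>
#include <cstdint>

struct Value {
  bool is_undefined;
  double number;  // meaningful only when !is_undefined
};

uint8_t ToClampedByte(const Value& v) {
  if (v.is_undefined) return 0;
  double d = v.number;
  if (!(d > 0)) return 0;      // also catches NaN
  if (d >= 255) return 255;
  return static_cast<uint8_t>(std::lrint(d));  // round to nearest
}

int main() {
  assert(ToClampedByte({false, -4.0}) == 0);
  assert(ToClampedByte({false, 300.0}) == 255);
  assert(ToClampedByte({false, 1.5}) == 2);    // ties round to even
  assert(ToClampedByte({true, 0.0}) == 0);
}
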
@@ -3553,11 +3984,6 @@
   DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
   DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
 
-  // Unchecked accessor to be used during GC.
-  FixedArray* UncheckedLiteralArray() {
-    return reinterpret_cast<FixedArray*>(get(kLiteralArrayIndex));
-  }
-
 #undef DEFINE_ELEMENT_ACCESSORS
 
   // Accessors for elements of the ith deoptimization entry.
@@ -3632,8 +4058,48 @@
 };
 
 
-class SafepointEntry;
+// Forward declaration.
+class JSGlobalPropertyCell;
 
+// TypeFeedbackCells is a fixed array used to hold the association between
+// cache cells and AST ids for code generated by the full compiler.
+// The format of these objects is
+//   [i * 2]: Global property cell of ith cache cell.
+//   [i * 2 + 1]: Ast ID for ith cache cell.
+class TypeFeedbackCells: public FixedArray {
+ public:
+  int CellCount() { return length() / 2; }
+  static int LengthOfFixedArray(int cell_count) { return cell_count * 2; }
+
+  // Accessors for AST ids associated with cache values.
+  inline Smi* AstId(int index);
+  inline void SetAstId(int index, Smi* id);
+
+  // Accessors for global property cells holding the cache values.
+  inline JSGlobalPropertyCell* Cell(int index);
+  inline void SetCell(int index, JSGlobalPropertyCell* cell);
+
+  // The object that indicates an uninitialized cache.
+  static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
+
+  // The object that indicates a megamorphic state.
+  static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+
+  // A raw version of the uninitialized sentinel that's safe to read during
+  // garbage collection (e.g., for patching the cache).
+  static inline Object* RawUninitializedSentinel(Heap* heap);
+
+  // Casting.
+  static inline TypeFeedbackCells* cast(Object* obj);
+
+  static const int kForInFastCaseMarker = 0;
+  static const int kForInSlowCaseMarker = 1;
+};
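
TypeFeedbackCells interleaves two logical arrays in one backing store, with the cell at index 2*i and its AST id at 2*i+1, as the layout comment above states. A standalone sketch of that layout, with plain ints standing in for cells and Smis:

#include <cassert>
#include <vector>

class PairArray {
 public:
  explicit PairArray(int cell_count) : data_(LengthOfArray(cell_count)) {}

  static int LengthOfArray(int cell_count) { return cell_count * 2; }
  int CellCount() const { return static_cast<int>(data_.size()) / 2; }

  int Cell(int i) const { return data_[i * 2]; }
  void SetCell(int i, int v) { data_[i * 2] = v; }

  int AstId(int i) const { return data_[i * 2 + 1]; }
  void SetAstId(int i, int v) { data_[i * 2 + 1] = v; }

 private:
  std::vector<int> data_;
};

int main() {
  PairArray cells(2);
  cells.SetCell(1, 7);
  cells.SetAstId(1, 42);
  assert(cells.CellCount() == 2);
  assert(cells.Cell(1) == 7 && cells.AstId(1) == 42);
}

Interleaving keeps each pair on the same cache line and lets one allocation grow both sequences together.
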
+
+
+// Forward declarations.
+class SafepointEntry;
+class TypeFeedbackInfo;
 
 // Code describes objects with on-the-fly generated machine code.
 class Code: public HeapObject {
@@ -3699,14 +4165,25 @@
   DECL_ACCESSORS(relocation_info, ByteArray)
   void InvalidateRelocation();
 
+  // [handler_table]: Fixed array containing offsets of exception handlers.
+  DECL_ACCESSORS(handler_table, FixedArray)
+
   // [deoptimization_data]: Array containing data for deopt.
   DECL_ACCESSORS(deoptimization_data, FixedArray)
 
-  // [code_flushing_candidate]: Field only used during garbage
-  // collection to hold code flushing candidates. The contents of this
+  // [type_feedback_info]: Struct containing type feedback information.
+  // Will contain either a TypeFeedbackInfo object, or undefined.
+  DECL_ACCESSORS(type_feedback_info, Object)
+
+  // [gc_metadata]: Field used to hold GC-related metadata. The contents of this
   // field do not have to be traced during garbage collection since
   // it is only used by the garbage collector itself.
-  DECL_ACCESSORS(next_code_flushing_candidate, Object)
+  DECL_ACCESSORS(gc_metadata, Object)
+
+  // [ic_age]: Inline caching age: the value of the Heap::global_ic_age
+  // at the moment when this object was created.
+  inline void set_ic_age(int count);
+  inline int ic_age();
 
   // Unchecked accessors to be used during GC.
   inline ByteArray* unchecked_relocation_info();
@@ -3742,6 +4219,11 @@
   inline int major_key();
   inline void set_major_key(int value);
 
+  // For stubs, tells whether they should always exist, so that they can be
+  // called from other stubs.
+  inline bool is_pregenerated();
+  inline void set_is_pregenerated(bool value);
+
   // [optimizable]: For FUNCTION kind, tells if it is optimizable.
   inline bool optimizable();
   inline void set_optimizable(bool value);
@@ -3756,6 +4238,16 @@
   inline bool has_debug_break_slots();
   inline void set_has_debug_break_slots(bool value);
 
+  // [is_compiled_optimizable]: For FUNCTION kind, tells if it has
+  // been compiled with IsOptimizing set to true.
+  inline bool is_compiled_optimizable();
+  inline void set_compiled_optimizable(bool value);
+
+  // [has_self_optimization_header]: For FUNCTION kind, tells if it has
+  // a self-optimization header.
+  inline bool has_self_optimization_header();
+  inline void set_self_optimization_header(bool value);
+
   // [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
   // how long the function has been marked for OSR and therefore which
   // level of loop nesting we are willing to do on-stack replacement
@@ -3801,6 +4293,11 @@
   inline byte to_boolean_state();
   inline void set_to_boolean_state(byte value);
 
+  // [has_function_cache]: For kind STUB, tells whether a function cache is
+  // passed to the stub.
+  inline bool has_function_cache();
+  inline void set_has_function_cache(bool flag);
+
   // Get the safepoint entry for the given pc.
   SafepointEntry GetSafepointEntry(Address pc);
 
@@ -3811,6 +4308,28 @@
   // Find the first map in an IC stub.
   Map* FindFirstMap();
 
+  class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
+  class ExtraICStateKeyedAccessGrowMode:
+      public BitField<KeyedAccessGrowMode, 1, 1> {};  // NOLINT
+
+  static const int kExtraICStateGrowModeShift = 1;
+
+  static inline StrictModeFlag GetStrictMode(ExtraICState extra_ic_state) {
+    return ExtraICStateStrictMode::decode(extra_ic_state);
+  }
+
+  static inline KeyedAccessGrowMode GetKeyedAccessGrowMode(
+      ExtraICState extra_ic_state) {
+    return ExtraICStateKeyedAccessGrowMode::decode(extra_ic_state);
+  }
+
+  static inline ExtraICState ComputeExtraICState(
+      KeyedAccessGrowMode grow_mode,
+      StrictModeFlag strict_mode) {
+    return ExtraICStateKeyedAccessGrowMode::encode(grow_mode) |
+        ExtraICStateStrictMode::encode(strict_mode);
+  }
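
ComputeExtraICState above shows the BitField pattern in action: each field is a typed window of bits in a single word, with static encode/decode so call sites never shift or mask by hand. A standalone sketch of a minimal BitField and the same composition; the enum values are illustrative:

#include <cassert>
#include <cstdint>

// Each field is a typed window of bits inside one 32-bit word.
template <class T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
};

enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };
enum GrowMode { kNoGrow = 0, kAllowGrow = 1 };

typedef BitField<StrictModeFlag, 0, 1> StrictModeField;
typedef BitField<GrowMode, 1, 1> GrowModeField;

// Mirrors the shape of ComputeExtraICState above: or the fields together.
static uint32_t ComputeState(GrowMode grow, StrictModeFlag strict) {
  return GrowModeField::encode(grow) | StrictModeField::encode(strict);
}

int main() {
  uint32_t state = ComputeState(kAllowGrow, kStrictMode);
  assert(StrictModeField::decode(state) == kStrictMode);
  assert(GrowModeField::decode(state) == kAllowGrow);
  assert(state == 3u);  // bit 0: strict, bit 1: grow
}
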
+
   // Flags operations.
   static inline Flags ComputeFlags(
       Kind kind,
@@ -3905,10 +4424,6 @@
   void CodeVerify();
 #endif
 
-  // Returns the isolate/heap this code object belongs to.
-  inline Isolate* isolate();
-  inline Heap* heap();
-
   // Max loop nesting marker used to postpone OSR. We don't take loop
   // nesting that is deeper than 5 levels into account.
   static const int kMaxLoopNestingMarker = 6;
@@ -3916,13 +4431,15 @@
   // Layout description.
   static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
   static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
+  static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
   static const int kDeoptimizationDataOffset =
-      kRelocationInfoOffset + kPointerSize;
-  static const int kNextCodeFlushingCandidateOffset =
+      kHandlerTableOffset + kPointerSize;
+  static const int kTypeFeedbackInfoOffset =
       kDeoptimizationDataOffset + kPointerSize;
-  static const int kFlagsOffset =
-      kNextCodeFlushingCandidateOffset + kPointerSize;
-
+  static const int kGCMetadataOffset = kTypeFeedbackInfoOffset + kPointerSize;
+  static const int kICAgeOffset =
+      kGCMetadataOffset + kPointerSize;
+  static const int kFlagsOffset = kICAgeOffset + kIntSize;
   static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
   static const int kKindSpecificFlagsSize = 2 * kIntSize;
 
@@ -3944,11 +4461,14 @@
   static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
   static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
   static const int kToBooleanTypeOffset = kStubMajorKeyOffset + 1;
+  static const int kHasFunctionCacheOffset = kStubMajorKeyOffset + 1;
 
   static const int kFullCodeFlags = kOptimizableOffset + 1;
   class FullCodeFlagsHasDeoptimizationSupportField:
       public BitField<bool, 0, 1> {};  // NOLINT
   class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
+  class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
+  class FullCodeFlagsHasSelfOptimizationHeader: public BitField<bool, 3, 1> {};
 
   static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
 
@@ -3960,14 +4480,16 @@
   // Flags layout.  BitField<type, shift, size>.
   class ICStateField: public BitField<InlineCacheState, 0, 3> {};
   class TypeField: public BitField<PropertyType, 3, 4> {};
-  class KindField: public BitField<Kind, 7, 4> {};
-  class CacheHolderField: public BitField<InlineCacheHolderFlag, 11, 1> {};
+  class CacheHolderField: public BitField<InlineCacheHolderFlag, 7, 1> {};
+  class KindField: public BitField<Kind, 8, 4> {};
   class ExtraICStateField: public BitField<ExtraICState, 12, 2> {};
+  class IsPregeneratedField: public BitField<bool, 14, 1> {};
 
   // Signed field cannot be encoded using the BitField class.
-  static const int kArgumentsCountShift = 14;
+  static const int kArgumentsCountShift = 15;
   static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
 
+  // This constant should be encodable in an ARM instruction.
   static const int kFlagsNotUsedInLookup =
       TypeField::kMask | CacheHolderField::kMask;
 
@@ -4101,8 +4623,12 @@
         (bit_field2() & kElementsKindMask) >> kElementsKindShift);
   }
 
+  // Tells whether the instance has fast elements that are only Smis.
+  inline bool has_fast_smi_only_elements() {
+    return elements_kind() == FAST_SMI_ONLY_ELEMENTS;
+  }
+
   // Tells whether the instance has fast elements.
-  // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS.
   inline bool has_fast_elements() {
     return elements_kind() == FAST_ELEMENTS;
   }
@@ -4111,6 +4637,10 @@
     return elements_kind() == FAST_DOUBLE_ELEMENTS;
   }
 
+  inline bool has_non_strict_arguments_elements() {
+    return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+  }
+
   inline bool has_external_array_elements() {
     ElementsKind kind(elements_kind());
     return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
@@ -4121,6 +4651,14 @@
     return elements_kind() == DICTIONARY_ELEMENTS;
   }
 
+  inline bool has_slow_elements_kind() {
+    return elements_kind() == DICTIONARY_ELEMENTS
+        || elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+  }
+
+  static bool IsValidElementsTransition(ElementsKind from_kind,
+                                        ElementsKind to_kind);
+
   // Tells whether the map is attached to SharedFunctionInfo
   // (for inobject slack tracking).
   inline void set_attached_to_shared_function_info(bool value);
@@ -4169,6 +4707,7 @@
   //    1 + 2 * i: prototype
   //    2 + 2 * i: target map
   DECL_ACCESSORS(prototype_transitions, FixedArray)
+
   inline FixedArray* unchecked_prototype_transitions();
 
   static const int kProtoTransitionHeaderSize = 1;
@@ -4178,14 +4717,14 @@
   static const int kProtoTransitionMapOffset = 1;
 
   inline int NumberOfProtoTransitions() {
-    FixedArray* cache = unchecked_prototype_transitions();
+    FixedArray* cache = prototype_transitions();
     if (cache->length() == 0) return 0;
     return
         Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
   }
 
   inline void SetNumberOfProtoTransitions(int value) {
-    FixedArray* cache = unchecked_prototype_transitions();
+    FixedArray* cache = prototype_transitions();
     ASSERT(cache->length() != 0);
     cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
                          Smi::FromInt(value));
@@ -4207,35 +4746,15 @@
   // instance descriptors.
   MUST_USE_RESULT MaybeObject* CopyDropTransitions();
 
-  // Returns this map if it already has elements that are fast, otherwise
-  // returns a copy of the map, with all transitions dropped from the
-  // descriptors and the ElementsKind set to FAST_ELEMENTS.
-  MUST_USE_RESULT inline MaybeObject* GetFastElementsMap();
-
-  // Returns this map if it already has fast elements that are doubles,
-  // otherwise returns a copy of the map, with all transitions dropped from the
-  // descriptors and the ElementsKind set to FAST_DOUBLE_ELEMENTS.
-  MUST_USE_RESULT inline MaybeObject* GetFastDoubleElementsMap();
-
-  // Returns this map if already has dictionary elements, otherwise returns a
-  // copy of the map, with all transitions dropped from the descriptors and the
-  // ElementsKind set to DICTIONARY_ELEMENTS.
-  MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap();
-
-  // Returns a new map with all transitions dropped from the descriptors and the
-  // ElementsKind set.
-  MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
-      ElementsKind elements_kind,
-      bool safe_to_add_transition);
-
   // Returns the property index for name (only valid for FAST MODE).
   int PropertyIndexFor(String* name);
 
   // Returns the next free property index (only valid for FAST MODE).
   int NextFreePropertyIndex();
 
-  // Returns the number of properties described in instance_descriptors.
-  int NumberOfDescribedProperties();
+  // Returns the number of properties described in instance_descriptors,
+  // filtering out properties with the specified attributes.
+  int NumberOfDescribedProperties(PropertyAttributes filter = NONE);
 
   // Casting.
   static inline Map* cast(Object* obj);
@@ -4249,6 +4768,9 @@
   inline void ClearCodeCache(Heap* heap);
 
   // Update code cache.
+  static void UpdateCodeCache(Handle<Map> map,
+                              Handle<String> name,
+                              Handle<Code> code);
   MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code);
 
   // Returns the found code or undefined if absent.
@@ -4266,12 +4788,21 @@
   // This is undone in MarkCompactCollector::ClearNonLiveTransitions().
   void CreateBackPointers();
 
+  void CreateOneBackPointer(Object* transition_target);
+
   // Set all map transitions from this map to dead maps to null.
   // Also, restore the original prototype on the targets of these
   // transitions, so that we do not process this map again while
   // following back pointers.
   void ClearNonLiveTransitions(Heap* heap, Object* real_prototype);
 
+  // Restore a possible back pointer in the prototype field of object.
+  // Return true in that case and false otherwise. Set *keep_entry to
+  // true when a live map transition has been found.
+  bool RestoreOneBackPointer(Object* object,
+                             Object* real_prototype,
+                             bool* keep_entry);
+
   // Computes a hash value for this map, to be used in HashTables and such.
   int Hash();
 
@@ -4281,11 +4812,30 @@
   // The "shared" flags of both this map and |other| are ignored.
   bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
 
-  // Returns true if this map and |other| describe equivalent objects.
-  // The "shared" flags of both this map and |other| are ignored.
-  bool EquivalentTo(Map* other) {
-    return EquivalentToForNormalization(other, KEEP_INOBJECT_PROPERTIES);
-  }
+  // Returns the contents of this map's descriptor array for the given string.
+  // May return NULL. |safe_to_add_transitions| is set to false and NULL
+  // is returned if adding transitions is not allowed.
+  Object* GetDescriptorContents(String* sentinel_name,
+                                bool* safe_to_add_transitions);
+
+  // Returns the map that this map transitions to if its elements_kind
+  // is changed to |elements_kind|, or NULL if no such map is cached yet.
+  // |safe_to_add_transitions| is set to false if adding transitions is not
+  // allowed.
+  Map* LookupElementsTransitionMap(ElementsKind elements_kind,
+                                   bool* safe_to_add_transition);
+
+  // Adds an entry to this map's descriptor array for a transition to
+  // |transitioned_map| when its elements_kind is changed to |elements_kind|.
+  MUST_USE_RESULT MaybeObject* AddElementsTransition(
+      ElementsKind elements_kind, Map* transitioned_map);
+
+  // Returns the transitioned map for this map with the most generic
+  // elements_kind that's found in |candidates|, or null handle if no match is
+  // found at all.
+  Handle<Map> FindTransitionedMap(MapHandleList* candidates);
+  Map* FindTransitionedMap(MapList* candidates);
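
FindTransitionedMap picks the candidate with the most generic elements kind, and IsValidElementsTransition only permits moves toward more generic kinds: a smi-only backing store may widen to doubles or objects, never the reverse. A standalone sketch of that ordering, treating the fast kinds as a simple linear lattice, which is itself a simplifying assumption:

#include <cassert>
#include <vector>

enum ElementsKind {
  FAST_SMI_ONLY_ELEMENTS = 0,
  FAST_DOUBLE_ELEMENTS = 1,
  FAST_ELEMENTS = 2  // arbitrary objects: the most general fast kind
};

bool IsMoreGeneralThan(ElementsKind a, ElementsKind b) { return a > b; }

bool IsValidTransition(ElementsKind from, ElementsKind to) {
  return IsMoreGeneralThan(to, from);  // never narrow an object's elements
}

ElementsKind FindMostGeneral(const std::vector<ElementsKind>& candidates) {
  ElementsKind best = FAST_SMI_ONLY_ELEMENTS;
  for (ElementsKind k : candidates) {
    if (IsMoreGeneralThan(k, best)) best = k;
  }
  return best;
}

int main() {
  std::vector<ElementsKind> candidates = {FAST_DOUBLE_ELEMENTS,
                                          FAST_ELEMENTS};
  assert(FindMostGeneral(candidates) == FAST_ELEMENTS);
  assert(IsValidTransition(FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS));
  assert(!IsValidTransition(FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS));
}
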
+
 
   // Dispatched behavior.
 #ifdef OBJECT_PRINT
@@ -4302,10 +4852,6 @@
   inline int visitor_id();
   inline void set_visitor_id(int visitor_id);
 
-  // Returns the isolate/heap this map belongs to.
-  inline Isolate* isolate();
-  inline Heap* heap();
-
   typedef void (*TraverseCallback)(Map* map, void* data);
 
   void TraverseTransitionTree(TraverseCallback callback, void* data);
@@ -4314,7 +4860,8 @@
 
   Object* GetPrototypeTransition(Object* prototype);
 
-  MaybeObject* PutPrototypeTransition(Object* prototype, Map* map);
+  MUST_USE_RESULT MaybeObject* PutPrototypeTransition(Object* prototype,
+                                                      Map* map);
 
   static const int kMaxPreAllocatedPropertyFields = 255;
 
@@ -4342,7 +4889,7 @@
   static const int kSize = MAP_POINTER_ALIGN(kPadStart);
 
   // Layout of pointer fields. Heap iteration code relies on them
-  // being continiously allocated.
+  // being continuously allocated.
   static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
   static const int kPointerFieldsEndOffset =
       Map::kPrototypeTransitionsOffset + kPointerSize;
@@ -4382,7 +4929,7 @@
   static const int kStringWrapperSafeForDefaultValueOf = 2;
   static const int kAttachedToSharedFunctionInfo = 3;
   // No bits can be used after kElementsKindFirstBit, they are all reserved for
-  // storing ElementKind.  for anything other than storing the ElementKind.
+  // storing the ElementsKind.
   static const int kElementsKindShift = 4;
   static const int kElementsKindBitCount = 4;
 
@@ -4391,6 +4938,9 @@
       ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
   static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
       (FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
+  static const int8_t kMaximumBitField2FastSmiOnlyElementValue =
+      static_cast<int8_t>((FAST_SMI_ONLY_ELEMENTS + 1) <<
+                          Map::kElementsKindShift) - 1;
 
   // Bit positions for bit field 3
   static const int kIsShared = 0;
@@ -4405,6 +4955,7 @@
                               kSize> BodyDescriptor;
 
  private:
+  String* elements_transition_sentinel_name();
   DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
 };
 
@@ -4435,6 +4986,12 @@
     COMPILATION_TYPE_EVAL = 1
   };
 
+  // Script compilation state.
+  enum CompilationState {
+    COMPILATION_STATE_INITIAL = 0,
+    COMPILATION_STATE_COMPILED = 1
+  };
+
   // [source]: the script source.
   DECL_ACCESSORS(source, Object)
 
@@ -4466,6 +5023,9 @@
   // [compilation]: how the script was compiled.
   DECL_ACCESSORS(compilation_type, Smi)
 
+  // [compilation_state]: determines whether the script has already been
+  // compiled.
+  DECL_ACCESSORS(compilation_state, Smi)
+
   // [line_ends]: FixedArray of line ends positions.
   DECL_ACCESSORS(line_ends, Object)
 
@@ -4502,7 +5062,9 @@
   static const int kWrapperOffset = kContextOffset + kPointerSize;
   static const int kTypeOffset = kWrapperOffset + kPointerSize;
   static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
-  static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize;
+  static const int kCompilationStateOffset =
+      kCompilationTypeOffset + kPointerSize;
+  static const int kLineEndsOffset = kCompilationStateOffset + kPointerSize;
   static const int kIdOffset = kLineEndsOffset + kPointerSize;
   static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
   static const int kEvalFrominstructionsOffsetOffset =
@@ -4546,7 +5108,10 @@
   V(Math, atan, MathATan)                           \
   V(Math, exp, MathExp)                             \
   V(Math, sqrt, MathSqrt)                           \
-  V(Math, pow, MathPow)
+  V(Math, pow, MathPow)                             \
+  V(Math, random, MathRandom)                       \
+  V(Math, max, MathMax)                             \
+  V(Math, min, MathMin)
 
 
 enum BuiltinFunctionId {
@@ -4572,7 +5137,7 @@
   DECL_ACCESSORS(code, Code)
 
   // [scope_info]: Scope info.
-  DECL_ACCESSORS(scope_info, SerializedScopeInfo)
+  DECL_ACCESSORS(scope_info, ScopeInfo)
 
   // [construct stub]: Code stub for constructing instances of this function.
   DECL_ACCESSORS(construct_stub, Code)
@@ -4760,8 +5325,14 @@
 
   // A counter used to determine when to stress the deoptimizer with a
   // deopt.
-  inline Smi* deopt_counter();
-  inline void set_deopt_counter(Smi* counter);
+  inline int deopt_counter();
+  inline void set_deopt_counter(int counter);
+
+  inline int profiler_ticks();
+  inline void set_profiler_ticks(int ticks);
+
+  inline int ast_node_count();
+  inline void set_ast_node_count(int count);
 
   // Add information on assignments of the form this.x = ...;
   void SetThisPropertyAssignmentsInfo(
@@ -4794,8 +5365,20 @@
   // spending time attempting to optimize it again.
   DECL_BOOLEAN_ACCESSORS(optimization_disabled)
 
-  // Indicates whether the function is a strict mode function.
-  DECL_BOOLEAN_ACCESSORS(strict_mode)
+  // Indicates the language mode of the function's code as defined by the
+  // current harmony drafts for the next ES language standard. Possible
+  // values are:
+  // 1. CLASSIC_MODE - Unrestricted syntax and semantics, same as in ES5.
+  // 2. STRICT_MODE - Restricted syntax and semantics, same as in ES5.
+  // 3. EXTENDED_MODE - Only available under the harmony flag, not part of ES5.
+  inline LanguageMode language_mode();
+  inline void set_language_mode(LanguageMode language_mode);
+
+  // Indicates whether the language mode of this function is CLASSIC_MODE.
+  inline bool is_classic_mode();
+
+  // Indicates whether the language mode of this function is EXTENDED_MODE.
+  inline bool is_extended_mode();
 
   // False if the function definitely does not allocate an arguments object.
   DECL_BOOLEAN_ACCESSORS(uses_arguments)
@@ -4823,6 +5406,15 @@
   // through the API, which does not change this flag).
   DECL_BOOLEAN_ACCESSORS(is_anonymous)
 
+  // Indicates whether this is a function or top-level/eval code.
+  DECL_BOOLEAN_ACCESSORS(is_function)
+
+  // Indicates that the function cannot be optimized.
+  DECL_BOOLEAN_ACCESSORS(dont_optimize)
+
+  // Indicates that the function cannot be inlined.
+  DECL_BOOLEAN_ACCESSORS(dont_inline)
+
   // Indicates whether or not the code in the shared function support
   // deoptimization.
   inline bool has_deoptimization_support();
@@ -4831,9 +5423,8 @@
   void EnableDeoptimizationSupport(Code* recompiled);
 
   // Disable (further) attempted optimization of all functions sharing this
-  // shared function info.  The function is the one we actually tried to
-  // optimize.
-  void DisableOptimization(JSFunction* function);
+  // shared function info.
+  void DisableOptimization();
 
   // Look up the bailout ID and ASSERT that it exists in the non-optimized
   // code, returns whether it asserted (i.e., always true if assertions are
@@ -4860,7 +5451,7 @@
 
   // [source code]: Source code for the function.
   bool HasSourceCode();
-  Object* GetSourceCode();
+  Handle<Object> GetSourceCode();
 
   inline int opt_count();
   inline void set_opt_count(int opt_count);
@@ -4887,6 +5478,13 @@
   void SharedFunctionInfoVerify();
 #endif
 
+  // Helpers to compile the shared code; each returns true on success and
+  // false on failure (e.g., stack overflow during compilation).
+  static bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
+                             ClearExceptionFlag flag);
+  static bool CompileLazy(Handle<SharedFunctionInfo> shared,
+                          ClearExceptionFlag flag);
+
   // Casting.
   static inline SharedFunctionInfo* cast(Object* obj);
 
@@ -4910,12 +5508,12 @@
       kInferredNameOffset + kPointerSize;
   static const int kThisPropertyAssignmentsOffset =
       kInitialMapOffset + kPointerSize;
-  static const int kDeoptCounterOffset =
+  static const int kProfilerTicksOffset =
       kThisPropertyAssignmentsOffset + kPointerSize;
 #if V8_HOST_ARCH_32_BIT
   // Smi fields.
   static const int kLengthOffset =
-      kDeoptCounterOffset + kPointerSize;
+      kProfilerTicksOffset + kPointerSize;
   static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
   static const int kExpectedNofPropertiesOffset =
       kFormalParameterCountOffset + kPointerSize;
@@ -4933,8 +5531,11 @@
       kCompilerHintsOffset + kPointerSize;
   static const int kOptCountOffset =
       kThisPropertyAssignmentsCountOffset + kPointerSize;
+  static const int kAstNodeCountOffset = kOptCountOffset + kPointerSize;
+  static const int kDeoptCounterOffset =
+      kAstNodeCountOffset + kPointerSize;
   // Total size.
-  static const int kSize = kOptCountOffset + kPointerSize;
+  static const int kSize = kDeoptCounterOffset + kPointerSize;
 #else
   // The only reason to use smi fields instead of int fields
   // is to allow iteration without maps decoding during
@@ -4946,7 +5547,7 @@
   // word is not set and thus this word cannot be treated as pointer
   // to HeapObject during old space traversal.
   static const int kLengthOffset =
-      kDeoptCounterOffset + kPointerSize;
+      kProfilerTicksOffset + kPointerSize;
   static const int kFormalParameterCountOffset =
       kLengthOffset + kIntSize;
 
@@ -4970,8 +5571,11 @@
   static const int kOptCountOffset =
       kThisPropertyAssignmentsCountOffset + kIntSize;
 
+  static const int kAstNodeCountOffset = kOptCountOffset + kIntSize;
+  static const int kDeoptCounterOffset = kAstNodeCountOffset + kIntSize;
+
   // Total size.
-  static const int kSize = kOptCountOffset + kIntSize;
+  static const int kSize = kDeoptCounterOffset + kIntSize;
 
 #endif
 
@@ -5011,12 +5615,16 @@
     kCodeAgeShift,
     kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
     kStrictModeFunction,
+    kExtendedModeFunction,
     kUsesArguments,
     kHasDuplicateParameters,
     kNative,
     kBoundFunction,
     kIsAnonymous,
     kNameShouldPrintAsAnonymous,
+    kIsFunction,
+    kDontOptimize,
+    kDontInline,
     kCompilerHintsCount  // Pseudo entry
   };
 
@@ -5037,22 +5645,30 @@
  public:
   // Constants for optimizing codegen for strict mode functions and
   // native tests.
-  // Allows to use byte-widgh instructions.
+  // Allows using byte-width instructions.
   static const int kStrictModeBitWithinByte =
       (kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
 
+  static const int kExtendedModeBitWithinByte =
+      (kExtendedModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
+
   static const int kNativeBitWithinByte =
       (kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
 
 #if __BYTE_ORDER == __LITTLE_ENDIAN
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
       (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+  static const int kExtendedModeByteOffset = kCompilerHintsOffset +
+      (kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
   static const int kNativeByteOffset = kCompilerHintsOffset +
       (kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
 #elif __BYTE_ORDER == __BIG_ENDIAN
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
       (kCompilerHintsSize - 1) -
       ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+  static const int kExtendedModeByteOffset = kCompilerHintsOffset +
+      (kCompilerHintsSize - 1) -
+      ((kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
   static const int kNativeByteOffset = kCompilerHintsOffset +
       (kCompilerHintsSize - 1) -
       ((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
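+
+  // Worked example with hypothetical values: if kStrictModeFunction == 11
+  // and kCompilerHintsSmiTagSize == 1, the flag occupies absolute bit
+  // 11 + 1 == 12 of the hints word, i.e. bit 12 % 8 == 4 within byte
+  // 12 / 8 == 1, so kStrictModeBitWithinByte == 4 and, on little-endian
+  // targets, kStrictModeByteOffset == kCompilerHintsOffset + 1.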
@@ -5108,6 +5724,14 @@
   // recompiled the next time it is executed.
   void MarkForLazyRecompilation();
 
+  // Helpers to compile this function; each returns true on success and
+  // false on failure (e.g., stack overflow during compilation).
+  static bool CompileLazy(Handle<JSFunction> function,
+                          ClearExceptionFlag flag);
+  static bool CompileOptimized(Handle<JSFunction> function,
+                               int osr_ast_id,
+                               ClearExceptionFlag flag);
+
   // Tells whether or not the function is already marked for lazy
   // recompilation.
   inline bool IsMarkedForLazyRecompilation();
@@ -5115,7 +5739,8 @@
   // Check whether or not this function is inlineable.
   bool IsInlineable();
 
-  // [literals]: Fixed array holding the materialized literals.
+  // [literals_or_bindings]: Fixed array holding either
+  // the materialized literals or the bindings of a bound function.
   //
   // If the function contains object, regexp or array literals, the
   // literals array prefix contains the object, regexp, and array
@@ -5124,11 +5749,23 @@
   // or array functions.  Performing a dynamic lookup, we might end up
   // using the functions from a new context that we should not have
   // access to.
-  DECL_ACCESSORS(literals, FixedArray)
+  //
+  // On bound functions, the array is a (copy-on-write) fixed-array containing
+  // the function that was bound, the bound this-value, and any bound
+  // arguments. Bound functions never contain literals.
+  DECL_ACCESSORS(literals_or_bindings, FixedArray)
+
+  inline FixedArray* literals();
+  inline void set_literals(FixedArray* literals);
+
+  inline FixedArray* function_bindings();
+  inline void set_function_bindings(FixedArray* bindings);
 
   // The initial map for an object created by this constructor.
   inline Map* initial_map();
   inline void set_initial_map(Map* value);
+  MUST_USE_RESULT inline MaybeObject* set_initial_map_and_cache_transitions(
+      Map* value);
   inline bool has_initial_map();
 
   // Get and set the prototype property on a JSFunction. If the
@@ -5139,7 +5776,7 @@
   inline bool has_instance_prototype();
   inline Object* prototype();
   inline Object* instance_prototype();
-  Object* SetInstancePrototype(Object* value);
+  MUST_USE_RESULT MaybeObject* SetInstancePrototype(Object* value);
   MUST_USE_RESULT MaybeObject* SetPrototype(Object* value);
 
   // After prototype is removed, it will not be created when accessed, and
@@ -5212,6 +5849,11 @@
   static const int kLiteralsPrefixSize = 1;
   static const int kLiteralGlobalContextIndex = 0;
 
+  // Layout of the bound-function binding array.
+  static const int kBoundFunctionIndex = 0;
+  static const int kBoundThisIndex = 1;
+  static const int kBoundArgumentsStartIndex = 2;
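+  // For example, for f.bind(o, 1, 2) the bindings array holds
+  // [f, o, 1, 2].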
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
 };
@@ -5256,7 +5898,6 @@
 
 // Forward declaration.
 class JSBuiltinsObject;
-class JSGlobalPropertyCell;
 
 // Common super class for JavaScript global objects and the special
 // builtins global objects.
@@ -5284,6 +5925,11 @@
   }
 
   // Ensure that the global object has a cell for the given property name.
+  static Handle<JSGlobalPropertyCell> EnsurePropertyCell(
+      Handle<GlobalObject> global,
+      Handle<String> name);
+  // TODO(kmillikin): This function can be eliminated once the stub cache is
+  // fully handlified (and the static helper can be written directly).
   MUST_USE_RESULT MaybeObject* EnsurePropertyCell(String* name);
 
   // Casting.
@@ -5296,8 +5942,6 @@
   static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize;
 
  private:
-  friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
-
   DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject);
 };
 
@@ -5376,7 +6020,7 @@
 };
 
 
-// Representation for JS Wrapper objects, String, Number, Boolean, Date, etc.
+// Representation for JS Wrapper objects, String, Number, Boolean, etc.
 class JSValue: public JSObject {
  public:
   // [value]: the object being wrapped.
@@ -5405,6 +6049,106 @@
 };
 
 
+class DateCache;
+
+// Representation for JS date objects.
+class JSDate: public JSObject {
+ public:
+  // If one component is NaN, all of them are, indicating a NaN time value.
+  // [value]: the time value.
+  DECL_ACCESSORS(value, Object)
+  // [year]: caches year. Either undefined, smi, or NaN.
+  DECL_ACCESSORS(year, Object)
+  // [month]: caches month. Either undefined, smi, or NaN.
+  DECL_ACCESSORS(month, Object)
+  // [day]: caches day. Either undefined, smi, or NaN.
+  DECL_ACCESSORS(day, Object)
+  // [weekday]: caches day of week. Either undefined, smi, or NaN.
+  DECL_ACCESSORS(weekday, Object)
+  // [hour]: caches hours. Either undefined, smi, or NaN.
+  DECL_ACCESSORS(hour, Object)
+  // [min]: caches minutes. Either undefined, smi, or NaN.
+  DECL_ACCESSORS(min, Object)
+  // [sec]: caches seconds. Either undefined, smi, or NaN.
+  DECL_ACCESSORS(sec, Object)
+  // [cache stamp]: sample of the date cache stamp at the
+  // moment when local fields were cached.
+  DECL_ACCESSORS(cache_stamp, Object)
+
+  // Casting.
+  static inline JSDate* cast(Object* obj);
+
+  // Returns the date field with the specified index.
+  // See FieldIndex for the list of date fields.
+  static MaybeObject* GetField(Object* date, Smi* index);
+
+  void SetValue(Object* value, bool is_value_nan);
+
+
+  // Dispatched behavior.
+#ifdef OBJECT_PRINT
+  inline void JSDatePrint() {
+    JSDatePrint(stdout);
+  }
+  void JSDatePrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void JSDateVerify();
+#endif
+  // The order is important. It must be kept in sync with date macros
+  // in macros.py.
+  enum FieldIndex {
+    kDateValue,
+    kYear,
+    kMonth,
+    kDay,
+    kWeekday,
+    kHour,
+    kMinute,
+    kSecond,
+    kFirstUncachedField,
+    kMillisecond = kFirstUncachedField,
+    kDays,
+    kTimeInDay,
+    kFirstUTCField,
+    kYearUTC = kFirstUTCField,
+    kMonthUTC,
+    kDayUTC,
+    kWeekdayUTC,
+    kHourUTC,
+    kMinuteUTC,
+    kSecondUTC,
+    kMillisecondUTC,
+    kDaysUTC,
+    kTimeInDayUTC,
+    kTimezoneOffset
+  };
+
+  // Layout description.
+  static const int kValueOffset = JSObject::kHeaderSize;
+  static const int kYearOffset = kValueOffset + kPointerSize;
+  static const int kMonthOffset = kYearOffset + kPointerSize;
+  static const int kDayOffset = kMonthOffset + kPointerSize;
+  static const int kWeekdayOffset = kDayOffset + kPointerSize;
+  static const int kHourOffset = kWeekdayOffset + kPointerSize;
+  static const int kMinOffset = kHourOffset + kPointerSize;
+  static const int kSecOffset = kMinOffset + kPointerSize;
+  static const int kCacheStampOffset = kSecOffset + kPointerSize;
+  static const int kSize = kCacheStampOffset + kPointerSize;
+
+ private:
+  inline Object* DoGetField(FieldIndex index);
+
+  Object* GetUTCField(FieldIndex index, double value, DateCache* date_cache);
+
+  // Computes and caches the cacheable fields of the date.
+  inline void SetLocalFields(int64_t local_time_ms, DateCache* date_cache);
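+
+  // Illustrative read path (a sketch, not the exact code): for a cached
+  // component such as kYear, DoGetField() compares cache_stamp() against
+  // the current DateCache stamp, recomputes via SetLocalFields() on a
+  // mismatch, and then returns the cached slot; indices at or beyond
+  // kFirstUncachedField always go through GetUTCField().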
+
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSDate);
+};
+
+
 // Representation of message objects used for error reporting through
 // the API. The messages are formatted in JavaScript so this object is
 // a real JavaScript object. The information used for formatting the
@@ -5640,13 +6384,19 @@
  public:
   // Find cached value for a string key, otherwise return null.
   Object* Lookup(String* src);
-  Object* LookupEval(String* src, Context* context, StrictModeFlag strict_mode);
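+  // scope_position below is part of the eval cache key because the same
+  // eval source can resolve variables differently at different positions
+  // in the scope chain.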
+  Object* LookupEval(String* src,
+                     Context* context,
+                     LanguageMode language_mode,
+                     int scope_position);
   Object* LookupRegExp(String* source, JSRegExp::Flags flags);
-  MaybeObject* Put(String* src, Object* value);
-  MaybeObject* PutEval(String* src,
-                       Context* context,
-                       SharedFunctionInfo* value);
-  MaybeObject* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value);
+  MUST_USE_RESULT MaybeObject* Put(String* src, Object* value);
+  MUST_USE_RESULT MaybeObject* PutEval(String* src,
+                                       Context* context,
+                                       SharedFunctionInfo* value,
+                                       int scope_position);
+  MUST_USE_RESULT MaybeObject* PutRegExp(String* src,
+                                         JSRegExp::Flags flags,
+                                         FixedArray* value);
 
   // Remove given value from cache.
   void Remove(Object* value);
@@ -5758,10 +6508,17 @@
  public:
   DECL_ACCESSORS(cache, Object)
 
-  MUST_USE_RESULT MaybeObject* Update(MapList* maps,
+  static void Update(Handle<PolymorphicCodeCache> cache,
+                     MapHandleList* maps,
+                     Code::Flags flags,
+                     Handle<Code> code);
+
+  MUST_USE_RESULT MaybeObject* Update(MapHandleList* maps,
                                       Code::Flags flags,
                                       Code* code);
-  Object* Lookup(MapList* maps, Code::Flags flags);
+
+  // Returns an undefined value if the entry is not found.
+  Handle<Object> Lookup(MapHandleList* maps, Code::Flags flags);
 
   static inline PolymorphicCodeCache* cast(Object* obj);
 
@@ -5786,8 +6543,11 @@
 class PolymorphicCodeCacheHashTable
     : public HashTable<CodeCacheHashTableShape, HashTableKey*> {
  public:
-  Object* Lookup(MapList* maps, int code_kind);
-  MUST_USE_RESULT MaybeObject* Put(MapList* maps, int code_kind, Code* code);
+  Object* Lookup(MapHandleList* maps, int code_kind);
+
+  MUST_USE_RESULT MaybeObject* Put(MapHandleList* maps,
+                                   int code_kind,
+                                   Code* code);
 
   static inline PolymorphicCodeCacheHashTable* cast(Object* obj);
 
@@ -5797,6 +6557,73 @@
 };
 
 
+class TypeFeedbackInfo: public Struct {
+ public:
+  inline int ic_total_count();
+  inline void set_ic_total_count(int count);
+
+  inline int ic_with_typeinfo_count();
+  inline void set_ic_with_typeinfo_count(int count);
+
+  DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
+
+  static inline TypeFeedbackInfo* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void TypeFeedbackInfoPrint() {
+    TypeFeedbackInfoPrint(stdout);
+  }
+  void TypeFeedbackInfoPrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void TypeFeedbackInfoVerify();
+#endif
+
+  static const int kIcTotalCountOffset = HeapObject::kHeaderSize;
+  static const int kIcWithTypeinfoCountOffset =
+      kIcTotalCountOffset + kPointerSize;
+  static const int kTypeFeedbackCellsOffset =
+      kIcWithTypeinfoCountOffset + kPointerSize;
+  static const int kSize = kTypeFeedbackCellsOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackInfo);
+};
+
+
+// Representation of a slow alias as part of non-strict arguments objects.
+// For fast aliases (if HasNonStrictArgumentsElements()):
+// - the parameter map contains an index into the context
+// - all attributes of the element have default values
+// For slow aliases (if HasDictionaryArgumentsElements()):
+// - the parameter map contains no fast alias mapping (i.e. the hole)
+// - this struct (in the slow backing store) contains an index into the context
+// - all attributes are available as part of the property details
+class AliasedArgumentsEntry: public Struct {
+ public:
+  inline int aliased_context_slot();
+  inline void set_aliased_context_slot(int count);
+
+  static inline AliasedArgumentsEntry* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void AliasedArgumentsEntryPrint() {
+    AliasedArgumentsEntryPrint(stdout);
+  }
+  void AliasedArgumentsEntryPrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void AliasedArgumentsEntryVerify();
+#endif
+
+  static const int kAliasedContextSlot = HeapObject::kHeaderSize;
+  static const int kSize = kAliasedContextSlot + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AliasedArgumentsEntry);
+};
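+
+// For example, in the non-strict function
+//   function f(x) { arguments[0] = 1; return x; }  // f(0) returns 1
+// arguments[0] aliases the variable x; when x is context-allocated, the
+// slot index stored above is x's index in that context.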
+
+
 enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
 enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
 
@@ -5810,12 +6637,17 @@
   inline bool has_trivial_hash();
 
   // Add a character to the hash and update the array index calculation.
-  inline void AddCharacter(uc32 c);
+  inline void AddCharacter(uint32_t c);
 
   // Adds a character to the hash but does not update the array index
   // calculation.  This can only be called when it has been verified
   // that the input is not an array index.
-  inline void AddCharacterNoIndex(uc32 c);
+  inline void AddCharacterNoIndex(uint32_t c);
+
+  // Add a character above 0xffff as a surrogate pair.  These can get into
+  // the hasher through the routines that take a UTF-8 string and make a symbol.
+  void AddSurrogatePair(uc32 c);
+  void AddSurrogatePairNoIndex(uc32 c);
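+  // (For example, U+1F600 is added as the surrogate pair 0xD83D, 0xDE00.)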
 
   // Returns the value to store in the hash field of a string with
   // the given length and contents.
@@ -6033,7 +6865,7 @@
   inline String* GetUnderlying();
 
   // Mark the string as an undetectable object. It only applies to
-  // ascii and two byte string types.
+  // ASCII and two byte string types.
   bool MarkAsUndetectable();
 
   // Return a substring.
@@ -6065,8 +6897,6 @@
       RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
       int* length_output = 0);
 
-  int Utf8Length();
-
   // Return a 16 bit Unicode representation of the string.
   // The string should be nearly flat, otherwise the performance of
   // this method may be very bad.  Setting robustness_flag to
@@ -6129,13 +6959,10 @@
   // value into an array index.
   static const int kMaxArrayIndexSize = 10;
 
-  // Max ascii char code.
+  // Max ASCII char code.
   static const int kMaxAsciiCharCode = unibrow::Utf8::kMaxOneByteChar;
   static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar;
-  static const int kMaxUC16CharCode = 0xffff;
-
-  // Minimum length for a cons string.
-  static const int kMinNonFlatLength = 13;
+  static const int kMaxUtf16CodeUnit = 0xffff;
 
   // Mask constant for checking if a string has a computed hash code
   // and if it is an array index.  The least significant bit indicates
@@ -6307,13 +7134,16 @@
   // Casting.
   static inline SeqString* cast(Object* obj);
 
+  // Layout description.
+  static const int kHeaderSize = String::kSize;
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
 };
 
 
-// The AsciiString class captures sequential ascii string objects.
-// Each character in the AsciiString is an ascii character.
+// The SeqAsciiString class captures sequential ASCII string objects.
+// Each character in a SeqAsciiString is an ASCII character.
 class SeqAsciiString: public SeqString {
  public:
   static const bool kHasAsciiEncoding = true;
@@ -6340,12 +7170,8 @@
     return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
   }
 
-  // Layout description.
-  static const int kHeaderSize = String::kSize;
-  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
-
   // Maximal memory usage for a single sequential ASCII string.
-  static const int kMaxSize = 512 * MB;
+  static const int kMaxSize = 512 * MB - 1;
   // Maximal length of a single sequential ASCII string.
   // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
   static const int kMaxLength = (kMaxSize - kHeaderSize);
@@ -6394,12 +7220,8 @@
     return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
   }
 
-  // Layout description.
-  static const int kHeaderSize = String::kSize;
-  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
-
   // Maximal memory usage for a single sequential two-byte string.
-  static const int kMaxSize = 512 * MB;
+  static const int kMaxSize = 512 * MB - 1;
   // Maximal length of a single sequential two-byte string.
   // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
   static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
@@ -6543,7 +7365,12 @@
 
   // Layout description.
   static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
-  static const int kSize = kResourceOffset + kPointerSize;
+  static const int kShortSize = kResourceOffset + kPointerSize;
+  static const int kResourceDataOffset = kResourceOffset + kPointerSize;
+  static const int kSize = kResourceDataOffset + kPointerSize;
+
+  // Returns whether the external string is short (data pointer not cached).
+  inline bool is_short();
 
   STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset);
 
@@ -6561,11 +7388,19 @@
   typedef v8::String::ExternalAsciiStringResource Resource;
 
   // The underlying resource.
-  inline Resource* resource();
-  inline void set_resource(Resource* buffer);
+  inline const Resource* resource();
+  inline void set_resource(const Resource* buffer);
+
+  // Update the pointer cache to the external character array.
+  // The cached pointer is always valid, as the external character array does
+  // not move during its lifetime.  Deserialization is the only exception,
+  // after which the pointer cache has to be refreshed.
+  inline void update_data_cache();
+
+  inline const char* GetChars();
 
   // Dispatched behavior.
-  uint16_t ExternalAsciiStringGet(int index);
+  inline uint16_t ExternalAsciiStringGet(int index);
 
   // Casting.
   static inline ExternalAsciiString* cast(Object* obj);
@@ -6598,14 +7433,22 @@
   typedef v8::String::ExternalStringResource Resource;
 
   // The underlying string resource.
-  inline Resource* resource();
-  inline void set_resource(Resource* buffer);
+  inline const Resource* resource();
+  inline void set_resource(const Resource* buffer);
+
+  // Update the pointer cache to the external character array.
+  // The cached pointer is always valid, as the external character array does
+  // not move during its lifetime.  Deserialization is the only exception,
+  // after which the pointer cache has to be refreshed.
+  inline void update_data_cache();
+
+  inline const uint16_t* GetChars();
 
   // Dispatched behavior.
-  uint16_t ExternalTwoByteStringGet(int index);
+  inline uint16_t ExternalTwoByteStringGet(int index);
 
   // For regexp code.
-  const uint16_t* ExternalTwoByteStringGetData(unsigned start);
+  inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
 
   // Casting.
   static inline ExternalTwoByteString* cast(Object* obj);
@@ -6785,10 +7628,6 @@
                               kValueOffset + kPointerSize,
                               kSize> BodyDescriptor;
 
-  // Returns the isolate/heap this cell object belongs to.
-  inline Isolate* isolate();
-  inline Heap* heap();
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
 };
@@ -6800,25 +7639,56 @@
   // [handler]: The handler property.
   DECL_ACCESSORS(handler, Object)
 
+  // [hash]: The hash code property (undefined if not initialized yet).
+  DECL_ACCESSORS(hash, Object)
+
   // Casting.
   static inline JSProxy* cast(Object* obj);
 
   bool HasPropertyWithHandler(String* name);
+  bool HasElementWithHandler(uint32_t index);
+
+  MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(
+      Object* receiver,
+      String* name);
+  MUST_USE_RESULT MaybeObject* GetElementWithHandler(
+      Object* receiver,
+      uint32_t index);
 
   MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
       String* name,
       Object* value,
       PropertyAttributes attributes,
       StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* SetElementWithHandler(
+      uint32_t index,
+      Object* value,
+      StrictModeFlag strict_mode);
+
+  // If the handler defines an accessor property, invoke its setter
+  // (or throw if only a getter exists) and set *found to true;
+  // otherwise set *found to false.
+  MUST_USE_RESULT MaybeObject* SetPropertyWithHandlerIfDefiningSetter(
+      String* name,
+      Object* value,
+      PropertyAttributes attributes,
+      StrictModeFlag strict_mode,
+      bool* found);
 
   MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
       String* name,
       DeleteMode mode);
+  MUST_USE_RESULT MaybeObject* DeleteElementWithHandler(
+      uint32_t index,
+      DeleteMode mode);
 
   MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
       JSReceiver* receiver,
-      String* name,
-      bool* has_exception);
+      String* name);
+  MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler(
+      JSReceiver* receiver,
+      uint32_t index);
+
+  MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
 
   // Turn this into an (empty) JSObject.
   void Fix();
@@ -6826,6 +7696,13 @@
   // Initializes the body after the handler slot.
   inline void InitializeBody(int object_size, Object* value);
 
+  // Invoke a trap by name. If the trap does not exist on this's handler,
+  // but derived_trap is non-NULL, invoke that instead.  May cause GC.
+  Handle<Object> CallTrap(const char* name,
+                          Handle<Object> derived_trap,
+                          int argc,
+                          Handle<Object> args[]);
+
   // Dispatched behavior.
 #ifdef OBJECT_PRINT
   inline void JSProxyPrint() {
@@ -6841,7 +7718,8 @@
   // size as a virgin JSObject. This is essential for becoming a JSObject
   // upon freeze.
   static const int kHandlerOffset = HeapObject::kHeaderSize;
-  static const int kPaddingOffset = kHandlerOffset + kPointerSize;
+  static const int kHashOffset = kHandlerOffset + kPointerSize;
+  static const int kPaddingOffset = kHashOffset + kPointerSize;
   static const int kSize = JSObject::kHeaderSize;
   static const int kHeaderSize = kPaddingOffset;
   static const int kPaddingSize = kSize - kPaddingOffset;
@@ -6849,7 +7727,7 @@
   STATIC_CHECK(kPaddingSize >= 0);
 
   typedef FixedBodyDescriptor<kHandlerOffset,
-                              kHandlerOffset + kPointerSize,
+                              kPaddingOffset,
                               kSize> BodyDescriptor;
 
  private:
@@ -6880,7 +7758,7 @@
 #endif
 
   // Layout description.
-  static const int kCallTrapOffset = kHandlerOffset + kPointerSize;
+  static const int kCallTrapOffset = JSProxy::kPaddingOffset;
   static const int kConstructTrapOffset = kCallTrapOffset + kPointerSize;
   static const int kPaddingOffset = kConstructTrapOffset + kPointerSize;
   static const int kSize = JSFunction::kSize;
@@ -6897,18 +7775,69 @@
 };
 
 
+// The JSSet describes ECMAScript Harmony sets.
+class JSSet: public JSObject {
+ public:
+  // [table]: the backing hash table containing keys.
+  DECL_ACCESSORS(table, Object)
+
+  // Casting.
+  static inline JSSet* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void JSSetPrint() {
+    JSSetPrint(stdout);
+  }
+  void JSSetPrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void JSSetVerify();
+#endif
+
+  static const int kTableOffset = JSObject::kHeaderSize;
+  static const int kSize = kTableOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet);
+};
+
+
+// The JSMap describes ECMAScript Harmony maps.
+class JSMap: public JSObject {
+ public:
+  // [table]: the backing hash table mapping keys to values.
+  DECL_ACCESSORS(table, Object)
+
+  // Casting.
+  static inline JSMap* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void JSMapPrint() {
+    JSMapPrint(stdout);
+  }
+  void JSMapPrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void JSMapVerify();
+#endif
+
+  static const int kTableOffset = JSObject::kHeaderSize;
+  static const int kSize = kTableOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
+};
+
+
 // The JSWeakMap describes ECMAScript Harmony weak maps.
 class JSWeakMap: public JSObject {
  public:
   // [table]: the backing hash table mapping keys to values.
-  DECL_ACCESSORS(table, ObjectHashTable)
+  DECL_ACCESSORS(table, Object)
 
   // [next]: linked list of encountered weak maps during GC.
   DECL_ACCESSORS(next, Object)
 
-  // Unchecked accessors to be used during GC.
-  inline ObjectHashTable* unchecked_table();
-
   // Casting.
   static inline JSWeakMap* cast(Object* obj);
 
@@ -6937,8 +7866,8 @@
 class Foreign: public HeapObject {
  public:
   // [address]: field containing the address.
-  inline Address address();
-  inline void set_address(Address value);
+  inline Address foreign_address();
+  inline void set_foreign_address(Address value);
 
   // Casting.
   static inline Foreign* cast(Object* obj);
@@ -6961,10 +7890,10 @@
 
   // Layout description.
 
-  static const int kAddressOffset = HeapObject::kHeaderSize;
-  static const int kSize = kAddressOffset + kPointerSize;
+  static const int kForeignAddressOffset = HeapObject::kHeaderSize;
+  static const int kSize = kForeignAddressOffset + kPointerSize;
 
-  STATIC_CHECK(kAddressOffset == Internals::kForeignAddressOffset);
+  STATIC_CHECK(kForeignAddressOffset == Internals::kForeignAddressOffset);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
@@ -6993,8 +7922,12 @@
   // capacity is non-zero.
   MUST_USE_RESULT MaybeObject* Initialize(int capacity);
 
+  // Initializes the array to a certain length.
+  inline bool AllowsSetElementsLength();
+  MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
+
   // Set the content of the array to the content of storage.
-  inline void SetContent(FixedArray* storage);
+  MUST_USE_RESULT inline MaybeObject* SetContent(FixedArrayBase* storage);
 
   // Casting.
   static inline JSArray* cast(Object* obj);
@@ -7109,6 +8042,59 @@
 };
 
 
+// Support for JavaScript accessors: A pair of a getter and a setter. Each
+// accessor can either be
+//   * a pointer to a JavaScript function or proxy: a real accessor
+//   * undefined: considered an accessor by the spec, too, strangely enough
+//   * the hole: an accessor which has not been set
+//   * a pointer to a map: a transition used to ensure map sharing
+class AccessorPair: public Struct {
+ public:
+  DECL_ACCESSORS(getter, Object)
+  DECL_ACCESSORS(setter, Object)
+
+  static inline AccessorPair* cast(Object* obj);
+
+  MUST_USE_RESULT MaybeObject* CopyWithoutTransitions();
+
+  // Note: Returns undefined instead of the hole.
+  Object* GetComponent(AccessorComponent component);
+
+  // Set both components, skipping arguments that are JavaScript null.
+  void SetComponents(Object* getter, Object* setter) {
+    if (!getter->IsNull()) set_getter(getter);
+    if (!setter->IsNull()) set_setter(setter);
+  }
+
+  bool ContainsAccessor() {
+    return IsJSAccessor(getter()) || IsJSAccessor(setter());
+  }
+
+#ifdef OBJECT_PRINT
+  void AccessorPairPrint(FILE* out = stdout);
+#endif
+#ifdef DEBUG
+  void AccessorPairVerify();
+#endif
+
+  static const int kGetterOffset = HeapObject::kHeaderSize;
+  static const int kSetterOffset = kGetterOffset + kPointerSize;
+  static const int kSize = kSetterOffset + kPointerSize;
+
+ private:
+  // Strangely enough, in addition to functions and harmony proxies, the spec
+  // requires us to consider undefined as a kind of accessor, too:
+  //    var obj = {};
+  //    Object.defineProperty(obj, "foo", {get: undefined});
+  //    assertTrue("foo" in obj);
+  bool IsJSAccessor(Object* obj) {
+    return obj->IsSpecFunction() || obj->IsUndefined();
+  }
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorPair);
+};
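+
+// For example, after Object.defineProperty(obj, "foo", {get: f}) the pair
+// holds getter == f and setter == the hole, so asking GetComponent() for
+// the setter component yields undefined rather than the hole.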
+
+
 class AccessCheckInfo: public Struct {
  public:
   DECL_ACCESSORS(named_callback, Object)
@@ -7209,8 +8195,8 @@
   static const int kTagOffset          = HeapObject::kHeaderSize;
   static const int kPropertyListOffset = kTagOffset + kPointerSize;
   static const int kHeaderSize         = kPropertyListOffset + kPointerSize;
- protected:
-  friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
+
+ private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
 };
 
@@ -7476,6 +8462,34 @@
 #undef DECL_BOOLEAN_ACCESSORS
 #undef DECL_ACCESSORS
 
+#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V)                            \
+  V(kSymbolTable, "symbol_table", "(Symbols)")                          \
+  V(kExternalStringsTable, "external_strings_table", "(External strings)") \
+  V(kStrongRootList, "strong_root_list", "(Strong roots)")              \
+  V(kSymbol, "symbol", "(Symbol)")                                      \
+  V(kBootstrapper, "bootstrapper", "(Bootstrapper)")                    \
+  V(kTop, "top", "(Isolate)")                                           \
+  V(kRelocatable, "relocatable", "(Relocatable)")                       \
+  V(kDebug, "debug", "(Debugger)")                                      \
+  V(kCompilationCache, "compilationcache", "(Compilation cache)")       \
+  V(kHandleScope, "handlescope", "(Handle scope)")                      \
+  V(kBuiltins, "builtins", "(Builtins)")                                \
+  V(kGlobalHandles, "globalhandles", "(Global handles)")                \
+  V(kThreadManager, "threadmanager", "(Thread manager)")                \
+  V(kExtensions, "Extensions", "(Extensions)")
+
+class VisitorSynchronization : public AllStatic {
+ public:
+#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+  enum SyncTag {
+    VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_ENUM)
+    kNumberOfSyncTags
+  };
+#undef DECLARE_ENUM
+
+  static const char* const kTags[kNumberOfSyncTags];
+  static const char* const kTagNames[kNumberOfSyncTags];
+};
 
 // Abstract base class for visiting, and optionally modifying, the
 // pointers contained in Objects. Used in GC and serialization/deserialization.
@@ -7514,11 +8528,16 @@
   // Handy shorthand for visiting a single pointer.
   virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
 
+  // Visits a pointer embedded in a code object.
+  virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
+
   // Visits a contiguous array of external references (references to the C++
   // heap) in the half-open range [start, end). Any or all of the values
   // may be modified on return.
   virtual void VisitExternalReferences(Address* start, Address* end) {}
 
+  virtual void VisitExternalReference(RelocInfo* rinfo);
+
   inline void VisitExternalReference(Address* p) {
     VisitExternalReferences(p, p + 1);
   }
@@ -7526,13 +8545,10 @@
   // Visits a handle that has an embedder-assigned class ID.
   virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
 
-#ifdef DEBUG
   // Intended for serialization/deserialization checking: insert, or
   // check for the presence of, a tag at this position in the stream.
-  virtual void Synchronize(const char* tag) {}
-#else
-  inline void Synchronize(const char* tag) {}
-#endif
+  // Also used for marking up GC roots in heap snapshots.
+  virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
 };
 
 
diff --git a/src/once.cc b/src/once.cc
new file mode 100644
index 0000000..37fe369
--- /dev/null
+++ b/src/once.cc
@@ -0,0 +1,77 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "once.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sched.h>
+#endif
+
+#include "atomicops.h"
+#include "checks.h"
+
+namespace v8 {
+namespace internal {
+
+void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
+  AtomicWord state = Acquire_Load(once);
+  // Fast path. The provided function was already executed.
+  if (state == ONCE_STATE_DONE) {
+    return;
+  }
+
+  // The function execution has not completed yet. The once object can be in one
+  // of the two following states:
+  //   - UNINITIALIZED: We are the first thread calling this function.
+  //   - EXECUTING_FUNCTION: Another thread is already executing the function.
+  //
+  // First, try to change the state from UNINITIALIZED to EXECUTING_FUNCTION
+  // atomically.
+  state = Acquire_CompareAndSwap(
+      once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_FUNCTION);
+  if (state == ONCE_STATE_UNINITIALIZED) {
+    // We are the first thread to call this function, so we have to call the
+    // function.
+    init_func(arg);
+    Release_Store(once, ONCE_STATE_DONE);
+  } else {
+    // Another thread has already started executing the function. We need to
+    // wait until it completes the initialization.
+    while (state == ONCE_STATE_EXECUTING_FUNCTION) {
+#ifdef _WIN32
+      ::Sleep(0);
+#else
+      sched_yield();
+#endif
+      state = Acquire_Load(once);
+    }
+  }
+}
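+
+// State machine, for reference:
+//   UNINITIALIZED --(first caller's CAS)--> EXECUTING_FUNCTION --> DONE
+// Late callers either observe DONE on the fast path above or yield until
+// the first caller publishes DONE with release semantics.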
+
+} }  // namespace v8::internal
diff --git a/src/once.h b/src/once.h
new file mode 100644
index 0000000..a44b8fa
--- /dev/null
+++ b/src/once.h
@@ -0,0 +1,123 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// emulates google3/base/once.h
+//
+// This header is intended to be included only by v8's internal code. Users
+// should not use this directly.
+//
+// This is basically a portable version of pthread_once().
+//
+// This header declares:
+// * A type called OnceType.
+// * A macro V8_DECLARE_ONCE() which declares a (global) variable of type
+//   OnceType.
+// * A function CallOnce(OnceType* once, void (*init_func)()).
+//   This function, when invoked multiple times given the same OnceType object,
+//   will invoke init_func on the first call only, and will make sure none of
+//   the calls return before that first call to init_func has finished.
+//
+// Additionally, the following features are supported:
+// * A macro V8_ONCE_INIT which is expanded into the expression used to
+//   initialize an OnceType. This is only useful when clients embed an
+//   OnceType into a structure of their own and want to initialize it
+//   statically.
+// * The user can provide a parameter which CallOnce() forwards, by pointer,
+//   to the user-provided function when it is called. Usage example:
+//     CallOnce(&my_once, &MyFunctionExpectingIntArgument, &my_int);
+// * This implementation guarantees that OnceType is a POD (i.e. no static
+//   initializer generated).
+//
+// This implements a way to perform lazy initialization.  It's more efficient
+// than using mutexes as no lock is needed if initialization has already
+// happened.
+//
+// Example usage:
+//   void Init();
+//   V8_DECLARE_ONCE(once_init);
+//
+//   // Calls Init() exactly once.
+//   void InitOnce() {
+//     CallOnce(&once_init, &Init);
+//   }
+//
+// Note that if CallOnce() is called before main() has begun, it must
+// only be called by the thread that will eventually call main() -- that is,
+// the thread that performs dynamic initialization.  In general this is a safe
+// assumption since people don't usually construct threads before main() starts,
+// but it is technically not guaranteed.  Unfortunately, Win32 provides no way
+// whatsoever to statically-initialize its synchronization primitives, so our
+// only choice is to assume that dynamic initialization is single-threaded.
+
+#ifndef V8_ONCE_H_
+#define V8_ONCE_H_
+
+#include "atomicops.h"
+
+namespace v8 {
+namespace internal {
+
+typedef AtomicWord OnceType;
+
+#define V8_ONCE_INIT 0
+
+#define V8_DECLARE_ONCE(NAME) ::v8::internal::OnceType NAME
+
+enum {
+  ONCE_STATE_UNINITIALIZED = 0,
+  ONCE_STATE_EXECUTING_FUNCTION = 1,
+  ONCE_STATE_DONE = 2
+};
+
+typedef void (*NoArgFunction)();
+typedef void (*PointerArgFunction)(void* arg);
+
+template <typename T>
+struct OneArgFunction {
+  typedef void (*type)(T);
+};
+
+void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg);
+
+inline void CallOnce(OnceType* once, NoArgFunction init_func) {
+  if (Acquire_Load(once) != ONCE_STATE_DONE) {
+    CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), NULL);
+  }
+}
+
+
+template <typename Arg>
+inline void CallOnce(OnceType* once,
+                     typename OneArgFunction<Arg*>::type init_func,
+                     Arg* arg) {
+  if (Acquire_Load(once) != ONCE_STATE_DONE) {
+    CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
+        static_cast<void*>(arg));
+  }
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_ONCE_H_
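
A minimal usage sketch for the header above; every name except
V8_DECLARE_ONCE, CallOnce, and the v8::internal namespace is hypothetical:

    #include "once.h"

    namespace v8 {
    namespace internal {

    V8_DECLARE_ONCE(table_once);
    static int table_size = 0;

    // One-time initializer; CallOnce forwards its third argument by pointer.
    static void InitTable(int* size) { *size = 128; }

    // Safe to call concurrently: InitTable() runs exactly once, and no
    // caller returns before it has finished.
    void EnsureTable() { CallOnce(&table_once, &InitTable, &table_size); }

    } }  // namespace v8::internal
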
diff --git a/src/parser.cc b/src/parser.cc
index 90d5c91..da68041 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,7 +28,7 @@
 #include "v8.h"
 
 #include "api.h"
-#include "ast-inl.h"
+#include "ast.h"
 #include "bootstrapper.h"
 #include "char-predicates-inl.h"
 #include "codegen.h"
@@ -258,7 +258,7 @@
           scanner().literal_ascii_string());
     } else {
       return isolate()->factory()->LookupTwoByteSymbol(
-          scanner().literal_uc16_string());
+          scanner().literal_utf16_string());
     }
   }
   return LookupCachedSymbol(symbol_id);
@@ -279,7 +279,7 @@
           scanner().literal_ascii_string());
     } else {
       result = isolate()->factory()->LookupTwoByteSymbol(
-          scanner().literal_uc16_string());
+          scanner().literal_utf16_string());
     }
     symbol_cache_.at(symbol_id) = result;
     return result;
@@ -407,9 +407,9 @@
 }
 
 
-Scope* Parser::NewScope(Scope* parent, Scope::Type type, bool inside_with) {
+Scope* Parser::NewScope(Scope* parent, ScopeType type) {
   Scope* result = new(zone()) Scope(parent, type);
-  result->Initialize(inside_with);
+  result->Initialize();
   return result;
 }
 
@@ -459,90 +459,53 @@
 
 
 // ----------------------------------------------------------------------------
-// LexicalScope is a support class to facilitate manipulation of the
-// Parser's scope stack. The constructor sets the parser's top scope
-// to the incoming scope, and the destructor resets it.
-//
-// Additionally, it stores transient information used during parsing.
-// These scopes are not kept around after parsing or referenced by syntax
-// trees so they can be stack-allocated and hence used by the pre-parser.
+// FunctionState and BlockState together implement the parser's scope stack.
+// The parser's current scope is in top_scope_.  The BlockState and
+// FunctionState constructors push on the scope stack and the destructors
+// pop.  They are also used to hold the parser's per-function and per-block
+// state.
 
-class LexicalScope BASE_EMBEDDED {
+class Parser::BlockState BASE_EMBEDDED {
  public:
-  LexicalScope(Parser* parser, Scope* scope, Isolate* isolate);
-  ~LexicalScope();
-
-  int NextMaterializedLiteralIndex() {
-    int next_index =
-        materialized_literal_count_ + JSFunction::kLiteralsPrefixSize;
-    materialized_literal_count_++;
-    return next_index;
-  }
-  int materialized_literal_count() { return materialized_literal_count_; }
-
-  void SetThisPropertyAssignmentInfo(
-      bool only_simple_this_property_assignments,
-      Handle<FixedArray> this_property_assignments) {
-    only_simple_this_property_assignments_ =
-        only_simple_this_property_assignments;
-    this_property_assignments_ = this_property_assignments;
-  }
-  bool only_simple_this_property_assignments() {
-    return only_simple_this_property_assignments_;
-  }
-  Handle<FixedArray> this_property_assignments() {
-    return this_property_assignments_;
+  BlockState(Parser* parser, Scope* scope)
+      : parser_(parser),
+        outer_scope_(parser->top_scope_) {
+    parser->top_scope_ = scope;
   }
 
-  void AddProperty() { expected_property_count_++; }
-  int expected_property_count() { return expected_property_count_; }
+  ~BlockState() { parser_->top_scope_ = outer_scope_; }
 
  private:
-  // Captures the number of literals that need materialization in the
-  // function.  Includes regexp literals, and boilerplate for object
-  // and array literals.
-  int materialized_literal_count_;
-
-  // Properties count estimation.
-  int expected_property_count_;
-
-  // Keeps track of assignments to properties of this. Used for
-  // optimizing constructors.
-  bool only_simple_this_property_assignments_;
-  Handle<FixedArray> this_property_assignments_;
-
-  // Bookkeeping
   Parser* parser_;
-  // Previous values
-  LexicalScope* lexical_scope_parent_;
-  Scope* previous_scope_;
-  int previous_with_nesting_level_;
-  unsigned previous_ast_node_id_;
+  Scope* outer_scope_;
 };
 
 
-LexicalScope::LexicalScope(Parser* parser, Scope* scope, Isolate* isolate)
-  : materialized_literal_count_(0),
-    expected_property_count_(0),
-    only_simple_this_property_assignments_(false),
-    this_property_assignments_(isolate->factory()->empty_fixed_array()),
-    parser_(parser),
-    lexical_scope_parent_(parser->lexical_scope_),
-    previous_scope_(parser->top_scope_),
-    previous_with_nesting_level_(parser->with_nesting_level_),
-    previous_ast_node_id_(isolate->ast_node_id()) {
+Parser::FunctionState::FunctionState(Parser* parser,
+                                     Scope* scope,
+                                     Isolate* isolate)
+    : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
+      next_handler_index_(0),
+      expected_property_count_(0),
+      only_simple_this_property_assignments_(false),
+      this_property_assignments_(isolate->factory()->empty_fixed_array()),
+      parser_(parser),
+      outer_function_state_(parser->current_function_state_),
+      outer_scope_(parser->top_scope_),
+      saved_ast_node_id_(isolate->ast_node_id()),
+      factory_(isolate) {
   parser->top_scope_ = scope;
-  parser->lexical_scope_ = this;
-  parser->with_nesting_level_ = 0;
+  parser->current_function_state_ = this;
   isolate->set_ast_node_id(AstNode::kDeclarationsId + 1);
 }
 
 
-LexicalScope::~LexicalScope() {
-  parser_->top_scope_ = previous_scope_;
-  parser_->lexical_scope_ = lexical_scope_parent_;
-  parser_->with_nesting_level_ = previous_with_nesting_level_;
-  parser_->isolate()->set_ast_node_id(previous_ast_node_id_);
+Parser::FunctionState::~FunctionState() {
+  parser_->top_scope_ = outer_scope_;
+  parser_->current_function_state_ = outer_function_state_;
+  if (outer_function_state_ != NULL) {
+    parser_->isolate()->set_ast_node_id(saved_ast_node_id_);
+  }
 }
 
 
@@ -570,34 +533,40 @@
 // Implementation of Parser
 
 Parser::Parser(Handle<Script> script,
-               bool allow_natives_syntax,
+               int parser_flags,
                v8::Extension* extension,
                ScriptDataImpl* pre_data)
     : isolate_(script->GetIsolate()),
       symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
       script_(script),
       scanner_(isolate_->unicode_cache()),
+      reusable_preparser_(NULL),
       top_scope_(NULL),
-      with_nesting_level_(0),
-      lexical_scope_(NULL),
+      current_function_state_(NULL),
       target_stack_(NULL),
-      allow_natives_syntax_(allow_natives_syntax),
       extension_(extension),
       pre_data_(pre_data),
       fni_(NULL),
+      allow_natives_syntax_((parser_flags & kAllowNativesSyntax) != 0),
+      allow_lazy_((parser_flags & kAllowLazy) != 0),
+      allow_modules_((parser_flags & kAllowModules) != 0),
       stack_overflow_(false),
-      parenthesized_function_(false),
-      harmony_block_scoping_(false) {
-  AstNode::ResetIds();
+      parenthesized_function_(false) {
+  isolate_->set_ast_node_id(0);
+  if ((parser_flags & kLanguageModeMask) == EXTENDED_MODE) {
+    scanner().SetHarmonyScoping(true);
+  }
+  if ((parser_flags & kAllowModules) != 0) {
+    scanner().SetHarmonyModules(true);
+  }
 }
 
 
-FunctionLiteral* Parser::ParseProgram(Handle<String> source,
-                                      bool in_global_context,
-                                      StrictModeFlag strict_mode) {
+FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
   ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
 
   HistogramTimerScope timer(isolate()->counters()->parse());
+  Handle<String> source(String::cast(script_->source()));
   isolate()->counters()->total_parse_size()->Increment(source->length());
   fni_ = new(zone()) FuncNameInferrer(isolate());
 
@@ -607,68 +576,74 @@
     // Notice that the stream is destroyed at the end of the branch block.
     // The last line of the blocks can't be moved outside, even though they're
     // identical calls.
-    ExternalTwoByteStringUC16CharacterStream stream(
+    ExternalTwoByteStringUtf16CharacterStream stream(
         Handle<ExternalTwoByteString>::cast(source), 0, source->length());
     scanner_.Initialize(&stream);
-    return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
+    return DoParseProgram(info, source, &zone_scope);
   } else {
-    GenericStringUC16CharacterStream stream(source, 0, source->length());
+    GenericStringUtf16CharacterStream stream(source, 0, source->length());
     scanner_.Initialize(&stream);
-    return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
+    return DoParseProgram(info, source, &zone_scope);
   }
 }
 
 
-FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
-                                        bool in_global_context,
-                                        StrictModeFlag strict_mode,
+FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
+                                        Handle<String> source,
                                         ZoneScope* zone_scope) {
+  ASSERT(top_scope_ == NULL);
   ASSERT(target_stack_ == NULL);
   if (pre_data_ != NULL) pre_data_->Initialize();
 
   // Compute the parsing mode.
-  mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
+  mode_ = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
   if (allow_natives_syntax_ || extension_ != NULL) mode_ = PARSE_EAGERLY;
 
-  Scope::Type type =
-    in_global_context
-      ? Scope::GLOBAL_SCOPE
-      : Scope::EVAL_SCOPE;
   Handle<String> no_name = isolate()->factory()->empty_symbol();
 
   FunctionLiteral* result = NULL;
-  { Scope* scope = NewScope(top_scope_, type, inside_with());
-    LexicalScope lexical_scope(this, scope, isolate());
-    if (strict_mode == kStrictMode) {
-      top_scope_->EnableStrictMode();
+  { Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+    info->SetGlobalScope(scope);
+    if (info->is_eval()) {
+      Handle<SharedFunctionInfo> shared = info->shared_info();
+      if (!info->is_global() && (shared.is_null() || shared->is_function())) {
+        scope = Scope::DeserializeScopeChain(*info->calling_context(), scope);
+      }
+      if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
+        scope = NewScope(scope, EVAL_SCOPE);
+      }
     }
+    scope->set_start_position(0);
+    scope->set_end_position(source->length());
+    FunctionState function_state(this, scope, isolate());
+    top_scope_->SetLanguageMode(info->language_mode());
     ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
     bool ok = true;
     int beg_loc = scanner().location().beg_pos;
-    ParseSourceElements(body, Token::EOS, &ok);
-    if (ok && top_scope_->is_strict_mode()) {
+    ParseSourceElements(body, Token::EOS, info->is_eval(), &ok);
+    if (ok && !top_scope_->is_classic_mode()) {
       CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
     }
 
-    if (ok && harmony_block_scoping_) {
-      CheckConflictingVarDeclarations(scope, &ok);
+    if (ok && is_extended_mode()) {
+      CheckConflictingVarDeclarations(top_scope_, &ok);
     }
 
     if (ok) {
-      result = new(zone()) FunctionLiteral(
-          isolate(),
+      result = factory()->NewFunctionLiteral(
           no_name,
           top_scope_,
           body,
-          lexical_scope.materialized_literal_count(),
-          lexical_scope.expected_property_count(),
-          lexical_scope.only_simple_this_property_assignments(),
-          lexical_scope.this_property_assignments(),
+          function_state.materialized_literal_count(),
+          function_state.expected_property_count(),
+          function_state.handler_count(),
+          function_state.only_simple_this_property_assignments(),
+          function_state.this_property_assignments(),
           0,
-          0,
-          source->length(),
+          FunctionLiteral::kNoDuplicateParameters,
           FunctionLiteral::ANONYMOUS_EXPRESSION,
-          false);  // Does not have duplicate parameters.
+          FunctionLiteral::kGlobalOrEval);
+      result->set_ast_properties(factory()->visitor()->ast_properties());
     } else if (stack_overflow_) {
       isolate()->StackOverflow();
     }
@@ -683,6 +658,7 @@
   return result;
 }
 
+
 FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
   ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
   HistogramTimerScope timer(isolate()->counters()->parse_lazy());
@@ -693,16 +669,16 @@
   // Initialize parser state.
   source->TryFlatten();
   if (source->IsExternalTwoByteString()) {
-    ExternalTwoByteStringUC16CharacterStream stream(
+    ExternalTwoByteStringUtf16CharacterStream stream(
         Handle<ExternalTwoByteString>::cast(source),
         shared_info->start_position(),
         shared_info->end_position());
     FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
     return result;
   } else {
-    GenericStringUC16CharacterStream stream(source,
-                                            shared_info->start_position(),
-                                            shared_info->end_position());
+    GenericStringUtf16CharacterStream stream(source,
+                                             shared_info->start_position(),
+                                             shared_info->end_position());
     FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
     return result;
   }
@@ -710,10 +686,11 @@
 
 
 FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
-                                   UC16CharacterStream* source,
+                                   Utf16CharacterStream* source,
                                    ZoneScope* zone_scope) {
   Handle<SharedFunctionInfo> shared_info = info->shared_info();
   scanner_.Initialize(source);
+  ASSERT(top_scope_ == NULL);
   ASSERT(target_stack_ == NULL);
 
   Handle<String> name(String::cast(shared_info->name()));
@@ -727,16 +704,17 @@
 
   {
     // Parse the function literal.
-    Scope* scope = NewScope(top_scope_, Scope::GLOBAL_SCOPE, inside_with());
+    Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+    info->SetGlobalScope(scope);
     if (!info->closure().is_null()) {
-      scope = Scope::DeserializeScopeChain(info, scope);
+      scope = Scope::DeserializeScopeChain(info->closure()->context(), scope);
     }
-    LexicalScope lexical_scope(this, scope, isolate());
-
-    if (shared_info->strict_mode()) {
-      top_scope_->EnableStrictMode();
-    }
-
+    FunctionState function_state(this, scope, isolate());
+    ASSERT(scope->language_mode() != STRICT_MODE || !info->is_classic_mode());
+    ASSERT(scope->language_mode() != EXTENDED_MODE ||
+           info->is_extended_mode());
+    ASSERT(info->language_mode() == shared_info->language_mode());
+    scope->SetLanguageMode(shared_info->language_mode());
     FunctionLiteral::Type type = shared_info->is_expression()
         ? (shared_info->is_anonymous()
               ? FunctionLiteral::ANONYMOUS_EXPRESSION
@@ -783,6 +761,12 @@
 }
 
 
+void Parser::ReportMessage(const char* type, Vector<Handle<String> > args) {
+  Scanner::Location source_location = scanner().location();
+  ReportMessageAt(source_location, type, args);
+}
+
+
 void Parser::ReportMessageAt(Scanner::Location source_location,
                              const char* type,
                              Vector<const char*> args) {
@@ -817,10 +801,6 @@
   isolate()->Throw(*result, &location);
 }
 
-void Parser::SetHarmonyBlockScoping(bool block_scoping) {
-  scanner().SetHarmonyBlockScoping(block_scoping);
-  harmony_block_scoping_ = block_scoping;
-}
 
 // Base class containing common code for the different finder classes used by
 // the parser.
@@ -957,17 +937,18 @@
 };
 
 
-// A ThisNamedPropertyAssigmentFinder finds and marks statements of the form
+// A ThisNamedPropertyAssignmentFinder finds and marks statements of the form
 // this.x = ...;, where x is a named property. It also determines whether a
 // function contains only assignments of this type.
-class ThisNamedPropertyAssigmentFinder : public ParserFinder {
+class ThisNamedPropertyAssignmentFinder : public ParserFinder {
  public:
-  explicit ThisNamedPropertyAssigmentFinder(Isolate* isolate)
+  explicit ThisNamedPropertyAssignmentFinder(Isolate* isolate)
       : isolate_(isolate),
         only_simple_this_property_assignments_(true),
-        names_(NULL),
-        assigned_arguments_(NULL),
-        assigned_constants_(NULL) {}
+        names_(0),
+        assigned_arguments_(0),
+        assigned_constants_(0) {
+  }
 
   void Update(Scope* scope, Statement* stat) {
     // Bail out if function already has property assignment that are
@@ -994,19 +975,17 @@
   // Returns a fixed array containing three elements for each assignment of the
   // form this.x = y;
   Handle<FixedArray> GetThisPropertyAssignments() {
-    if (names_ == NULL) {
+    if (names_.is_empty()) {
       return isolate_->factory()->empty_fixed_array();
     }
-    ASSERT(names_ != NULL);
-    ASSERT(assigned_arguments_ != NULL);
-    ASSERT_EQ(names_->length(), assigned_arguments_->length());
-    ASSERT_EQ(names_->length(), assigned_constants_->length());
+    ASSERT_EQ(names_.length(), assigned_arguments_.length());
+    ASSERT_EQ(names_.length(), assigned_constants_.length());
     Handle<FixedArray> assignments =
-        isolate_->factory()->NewFixedArray(names_->length() * 3);
-    for (int i = 0; i < names_->length(); i++) {
-      assignments->set(i * 3, *names_->at(i));
-      assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_->at(i)));
-      assignments->set(i * 3 + 2, *assigned_constants_->at(i));
+        isolate_->factory()->NewFixedArray(names_.length() * 3);
+    for (int i = 0; i < names_.length(); ++i) {
+      assignments->set(i * 3, *names_[i]);
+      assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_[i]));
+      assignments->set(i * 3 + 2, *assigned_constants_[i]);
     }
     return assignments;
   }
@@ -1063,18 +1042,37 @@
     AssignmentFromSomethingElse();
   }
 
+
+  // We will potentially reorder the property assignments, so they must be
+  // simple enough that the ordering does not matter.
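+  //
+  // Illustrative example: in
+  //   function P(a) { this.x = a; this.y = 0; }
+  // both assignments are simple, so recording them in either order is safe.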
   void AssignmentFromParameter(Handle<String> name, int index) {
-    EnsureAllocation();
-    names_->Add(name);
-    assigned_arguments_->Add(index);
-    assigned_constants_->Add(isolate_->factory()->undefined_value());
+    EnsureInitialized();
+    for (int i = 0; i < names_.length(); ++i) {
+      if (name->Equals(*names_[i])) {
+        assigned_arguments_[i] = index;
+        assigned_constants_[i] = isolate_->factory()->undefined_value();
+        return;
+      }
+    }
+    names_.Add(name);
+    assigned_arguments_.Add(index);
+    assigned_constants_.Add(isolate_->factory()->undefined_value());
   }
 
   void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
-    EnsureAllocation();
-    names_->Add(name);
-    assigned_arguments_->Add(-1);
-    assigned_constants_->Add(value);
+    EnsureInitialized();
+    for (int i = 0; i < names_.length(); ++i) {
+      if (name->Equals(*names_[i])) {
+        assigned_arguments_[i] = -1;
+        assigned_constants_[i] = value;
+        return;
+      }
+    }
+    names_.Add(name);
+    assigned_arguments_.Add(-1);
+    assigned_constants_.Add(value);
   }
 
   void AssignmentFromSomethingElse() {
@@ -1082,49 +1080,30 @@
     only_simple_this_property_assignments_ = false;
   }
 
-  void EnsureAllocation() {
-    if (names_ == NULL) {
-      ASSERT(assigned_arguments_ == NULL);
-      ASSERT(assigned_constants_ == NULL);
-      Zone* zone = isolate_->zone();
-      names_ = new(zone) ZoneStringList(4);
-      assigned_arguments_ = new(zone) ZoneList<int>(4);
-      assigned_constants_ = new(zone) ZoneObjectList(4);
+  void EnsureInitialized() {
+    if (names_.capacity() == 0) {
+      ASSERT(assigned_arguments_.capacity() == 0);
+      ASSERT(assigned_constants_.capacity() == 0);
+      names_.Initialize(4);
+      assigned_arguments_.Initialize(4);
+      assigned_constants_.Initialize(4);
     }
   }
 
   Isolate* isolate_;
   bool only_simple_this_property_assignments_;
-  ZoneStringList* names_;
-  ZoneList<int>* assigned_arguments_;
-  ZoneObjectList* assigned_constants_;
+  ZoneStringList names_;
+  ZoneList<int> assigned_arguments_;
+  ZoneObjectList assigned_constants_;
 };
 
 
-Statement* Parser::ParseSourceElement(ZoneStringList* labels,
-                                      bool* ok) {
-  if (peek() == Token::FUNCTION) {
-    // FunctionDeclaration is only allowed in the context of SourceElements
-    // (Ecma 262 5th Edition, clause 14):
-    // SourceElement:
-    //    Statement
-    //    FunctionDeclaration
-    // Common language extension is to allow function declaration in place
-    // of any statement. This language extension is disabled in strict mode.
-    return ParseFunctionDeclaration(ok);
-  } else if (peek() == Token::LET) {
-    return ParseVariableStatement(kSourceElement, ok);
-  } else {
-    return ParseStatement(labels, ok);
-  }
-}
-
-
 void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
                                   int end_token,
+                                  bool is_eval,
                                   bool* ok) {
   // SourceElements ::
-  //   (Statement)* <end_token>
+  //   (ModuleElement)* <end_token>
 
   // Allocate a target stack to use for this set of source
   // elements. This way, all scripts and functions get their own
@@ -1134,7 +1113,7 @@
 
   ASSERT(processor != NULL);
   InitializationBlockFinder block_finder(top_scope_, target_stack_);
-  ThisNamedPropertyAssigmentFinder this_property_assignment_finder(isolate());
+  ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate());
   bool directive_prologue = true;     // Parsing directive prologue.
 
   while (peek() != end_token) {
@@ -1143,7 +1122,7 @@
     }
 
     Scanner::Location token_loc = scanner().peek_location();
-    Statement* stat = ParseSourceElement(NULL, CHECK_OK);
+    Statement* stat = ParseModuleElement(NULL, CHECK_OK);
     if (stat == NULL || stat->IsEmpty()) {
       directive_prologue = false;   // End of directive prologue.
       continue;
@@ -1151,8 +1130,8 @@
 
     if (directive_prologue) {
       // A shot at a directive.
-      ExpressionStatement *e_stat;
-      Literal *literal;
+      ExpressionStatement* e_stat;
+      Literal* literal;
       // Still processing directive prologue?
       if ((e_stat = stat->AsExpressionStatement()) != NULL &&
           (literal = e_stat->expression()->AsLiteral()) != NULL &&
@@ -1160,11 +1139,24 @@
         Handle<String> directive = Handle<String>::cast(literal->handle());
 
         // Check "use strict" directive (ES5 14.1).
-        if (!top_scope_->is_strict_mode() &&
+        if (top_scope_->is_classic_mode() &&
             directive->Equals(isolate()->heap()->use_strict()) &&
             token_loc.end_pos - token_loc.beg_pos ==
               isolate()->heap()->use_strict()->length() + 2) {
-          top_scope_->EnableStrictMode();
+          // TODO(mstarzinger): Global strict eval calls need their own scope
+          // as specified in ES5 10.4.2(3). The correct fix would be to always
+          // add this scope in DoParseProgram(), but that requires adaptations
+          // all over the code base, so we go with a quick-fix for now.
+          if (is_eval && !top_scope_->is_eval_scope()) {
+            ASSERT(top_scope_->is_global_scope());
+            Scope* scope = NewScope(top_scope_, EVAL_SCOPE);
+            scope->set_start_position(top_scope_->start_position());
+            scope->set_end_position(top_scope_->end_position());
+            top_scope_ = scope;
+          }
+          // TODO(ES6): Fix entering extended mode, once it is specified.
+          top_scope_->SetLanguageMode(FLAG_harmony_scoping
+                                      ? EXTENDED_MODE : STRICT_MODE);
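+          // (Illustrative: the source-length check above adds 2 for the
+          // quotes, so only the literal spelling "use strict" qualifies;
+          // escaped forms such as "use\x20strict" span more characters
+          // and do not enable strict mode, per ES5 14.1.)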
           // "use strict" is the only directive for now.
           directive_prologue = false;
         }
@@ -1188,15 +1180,394 @@
         this_property_assignment_finder.only_simple_this_property_assignments()
         && top_scope_->declarations()->length() == 0;
     if (only_simple_this_property_assignments) {
-      lexical_scope_->SetThisPropertyAssignmentInfo(
+      current_function_state_->SetThisPropertyAssignmentInfo(
           only_simple_this_property_assignments,
           this_property_assignment_finder.GetThisPropertyAssignments());
     }
   }
+
   return 0;
 }
 
 
+Statement* Parser::ParseModuleElement(ZoneStringList* labels,
+                                      bool* ok) {
+  // (Ecma 262 5th Edition, clause 14):
+  // SourceElement:
+  //    Statement
+  //    FunctionDeclaration
+  //
+  // In harmony mode we additionally allow the following productions
+  // ModuleElement:
+  //    LetDeclaration
+  //    ConstDeclaration
+  //    ModuleDeclaration
+  //    ImportDeclaration
+  //    ExportDeclaration
+
+  switch (peek()) {
+    case Token::FUNCTION:
+      return ParseFunctionDeclaration(NULL, ok);
+    case Token::LET:
+    case Token::CONST:
+      return ParseVariableStatement(kModuleElement, NULL, ok);
+    case Token::IMPORT:
+      return ParseImportDeclaration(ok);
+    case Token::EXPORT:
+      return ParseExportDeclaration(ok);
+    default: {
+      Statement* stmt = ParseStatement(labels, CHECK_OK);
+      // Handle 'module' as a context-sensitive keyword.
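+      // Illustrative example: 'module m {}' on one line starts a module
+      // declaration, whereas a line terminator after 'module' leaves it
+      // a plain identifier expression (ASI applies).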
+      if (FLAG_harmony_modules &&
+          peek() == Token::IDENTIFIER &&
+          !scanner().HasAnyLineTerminatorBeforeNext() &&
+          stmt != NULL) {
+        ExpressionStatement* estmt = stmt->AsExpressionStatement();
+        if (estmt != NULL &&
+            estmt->expression()->AsVariableProxy() != NULL &&
+            estmt->expression()->AsVariableProxy()->name()->Equals(
+                isolate()->heap()->module_symbol()) &&
+            !scanner().literal_contains_escapes()) {
+          return ParseModuleDeclaration(NULL, ok);
+        }
+      }
+      return stmt;
+    }
+  }
+}
+
+
+Block* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
+  // ModuleDeclaration:
+  //    'module' Identifier Module
+
+  // Create new block with one expected declaration.
+  Block* block = factory()->NewBlock(NULL, 1, true);
+  Handle<String> name = ParseIdentifier(CHECK_OK);
+
+#ifdef DEBUG
+  if (FLAG_print_interface_details)
+    PrintF("# Module %s...\n", name->ToAsciiArray());
+#endif
+
+  Module* module = ParseModule(CHECK_OK);
+  VariableProxy* proxy = NewUnresolved(name, LET, module->interface());
+  Declaration* declaration =
+      factory()->NewModuleDeclaration(proxy, module, top_scope_);
+  Declare(declaration, true, CHECK_OK);
+
+#ifdef DEBUG
+  if (FLAG_print_interface_details)
+    PrintF("# Module %s.\n", name->ToAsciiArray());
+
+  if (FLAG_print_interfaces) {
+    PrintF("module %s : ", name->ToAsciiArray());
+    module->interface()->Print();
+  }
+#endif
+
+  // TODO(rossberg): Add initialization statement to block.
+
+  if (names) names->Add(name);
+  return block;
+}
+
+
+Module* Parser::ParseModule(bool* ok) {
+  // Module:
+  //    '{' ModuleElement '}'
+  //    '=' ModulePath ';'
+  //    'at' String ';'
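+  //
+  // Illustrative examples (as the tail of a ModuleDeclaration):
+  //   module m { let x; }
+  //   module m = other.nested;
+  //   module m at "lib/m.js";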
+
+  switch (peek()) {
+    case Token::LBRACE:
+      return ParseModuleLiteral(ok);
+
+    case Token::ASSIGN: {
+      Expect(Token::ASSIGN, CHECK_OK);
+      Module* result = ParseModulePath(CHECK_OK);
+      ExpectSemicolon(CHECK_OK);
+      return result;
+    }
+
+    default: {
+      ExpectContextualKeyword("at", CHECK_OK);
+      Module* result = ParseModuleUrl(CHECK_OK);
+      ExpectSemicolon(CHECK_OK);
+      return result;
+    }
+  }
+}
+
+
+Module* Parser::ParseModuleLiteral(bool* ok) {
+  // Module:
+  //    '{' ModuleElement '}'
+
+  // Construct block expecting 16 statements.
+  Block* body = factory()->NewBlock(NULL, 16, false);
+#ifdef DEBUG
+  if (FLAG_print_interface_details) PrintF("# Literal ");
+#endif
+  Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+
+  Expect(Token::LBRACE, CHECK_OK);
+  scope->set_start_position(scanner().location().beg_pos);
+  scope->SetLanguageMode(EXTENDED_MODE);
+
+  {
+    BlockState block_state(this, scope);
+    TargetCollector collector;
+    Target target(&this->target_stack_, &collector);
+    Target target_body(&this->target_stack_, body);
+    InitializationBlockFinder block_finder(top_scope_, target_stack_);
+
+    while (peek() != Token::RBRACE) {
+      Statement* stat = ParseModuleElement(NULL, CHECK_OK);
+      if (stat && !stat->IsEmpty()) {
+        body->AddStatement(stat);
+        block_finder.Update(stat);
+      }
+    }
+  }
+
+  Expect(Token::RBRACE, CHECK_OK);
+  scope->set_end_position(scanner().location().end_pos);
+  body->set_block_scope(scope);
+
+  scope->interface()->Freeze(ok);
+  ASSERT(*ok);
+  return factory()->NewModuleLiteral(body, scope->interface());
+}
+
+
+Module* Parser::ParseModulePath(bool* ok) {
+  // ModulePath:
+  //    Identifier
+  //    ModulePath '.' Identifier
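+  //
+  // e.g. 'outer.inner.leaf' (illustrative); each '.' step must name a
+  // member of the interface accumulated so far, as checked below.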
+
+  Module* result = ParseModuleVariable(CHECK_OK);
+  while (Check(Token::PERIOD)) {
+    Handle<String> name = ParseIdentifierName(CHECK_OK);
+#ifdef DEBUG
+    if (FLAG_print_interface_details)
+      PrintF("# Path .%s ", name->ToAsciiArray());
+#endif
+    Module* member = factory()->NewModulePath(result, name);
+    result->interface()->Add(name, member->interface(), ok);
+    if (!*ok) {
+#ifdef DEBUG
+      if (FLAG_print_interfaces) {
+        PrintF("PATH TYPE ERROR at '%s'\n", name->ToAsciiArray());
+        PrintF("result: ");
+        result->interface()->Print();
+        PrintF("member: ");
+        member->interface()->Print();
+      }
+#endif
+      ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
+      return NULL;
+    }
+    result = member;
+  }
+
+  return result;
+}
+
+
+Module* Parser::ParseModuleVariable(bool* ok) {
+  // ModulePath:
+  //    Identifier
+
+  Handle<String> name = ParseIdentifier(CHECK_OK);
+#ifdef DEBUG
+  if (FLAG_print_interface_details)
+    PrintF("# Module variable %s ", name->ToAsciiArray());
+#endif
+  VariableProxy* proxy = top_scope_->NewUnresolved(
+      factory(), name, scanner().location().beg_pos, Interface::NewModule());
+
+  return factory()->NewModuleVariable(proxy);
+}
+
+
+Module* Parser::ParseModuleUrl(bool* ok) {
+  // Module:
+  //    String
+
+  Expect(Token::STRING, CHECK_OK);
+  Handle<String> symbol = GetSymbol(CHECK_OK);
+
+  // TODO(ES6): Request JS resource from environment...
+
+#ifdef DEBUG
+  if (FLAG_print_interface_details) PrintF("# Url ");
+#endif
+  return factory()->NewModuleUrl(symbol);
+}
+
+
+Module* Parser::ParseModuleSpecifier(bool* ok) {
+  // ModuleSpecifier:
+  //    String
+  //    ModulePath
+
+  if (peek() == Token::STRING) {
+    return ParseModuleUrl(ok);
+  } else {
+    return ParseModulePath(ok);
+  }
+}
+
+
+Block* Parser::ParseImportDeclaration(bool* ok) {
+  // ImportDeclaration:
+  //    'import' IdentifierName (',' IdentifierName)* 'from' ModuleSpecifier ';'
+  //
+  // TODO(ES6): implement destructuring ImportSpecifiers
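+  //
+  // Illustrative examples:
+  //   import x, y from m;
+  //   import z from "lib/m.js";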
+
+  Expect(Token::IMPORT, CHECK_OK);
+  ZoneStringList names(1);
+
+  Handle<String> name = ParseIdentifierName(CHECK_OK);
+  names.Add(name);
+  while (peek() == Token::COMMA) {
+    Consume(Token::COMMA);
+    name = ParseIdentifierName(CHECK_OK);
+    names.Add(name);
+  }
+
+  ExpectContextualKeyword("from", CHECK_OK);
+  Module* module = ParseModuleSpecifier(CHECK_OK);
+  ExpectSemicolon(CHECK_OK);
+
+  // Generate a separate declaration for each identifier.
+  // TODO(ES6): once we implement destructuring, make that one declaration.
+  Block* block = factory()->NewBlock(NULL, 1, true);
+  for (int i = 0; i < names.length(); ++i) {
+#ifdef DEBUG
+    if (FLAG_print_interface_details)
+      PrintF("# Import %s ", names[i]->ToAsciiArray());
+#endif
+    Interface* interface = Interface::NewUnknown();
+    module->interface()->Add(names[i], interface, ok);
+    if (!*ok) {
+#ifdef DEBUG
+      if (FLAG_print_interfaces) {
+        PrintF("IMPORT TYPE ERROR at '%s'\n", names[i]->ToAsciiArray());
+        PrintF("module: ");
+        module->interface()->Print();
+      }
+#endif
+      ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
+      return NULL;
+    }
+    VariableProxy* proxy = NewUnresolved(names[i], LET, interface);
+    Declaration* declaration =
+        factory()->NewImportDeclaration(proxy, module, top_scope_);
+    Declare(declaration, true, CHECK_OK);
+    // TODO(rossberg): Add initialization statement to block.
+  }
+
+  return block;
+}
+
+
+Statement* Parser::ParseExportDeclaration(bool* ok) {
+  // ExportDeclaration:
+  //    'export' Identifier (',' Identifier)* ';'
+  //    'export' VariableDeclaration
+  //    'export' FunctionDeclaration
+  //    'export' ModuleDeclaration
+  //
+  // TODO(ES6): implement destructuring ExportSpecifiers
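+  //
+  // Illustrative examples:
+  //   export a, b;
+  //   export var c = 1;
+  //   export function f() {}
+  //   export module m {}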
+
+  Expect(Token::EXPORT, CHECK_OK);
+
+  Statement* result = NULL;
+  ZoneStringList names(1);
+  switch (peek()) {
+    case Token::IDENTIFIER: {
+      Handle<String> name = ParseIdentifier(CHECK_OK);
+      // Handle 'module' as a context-sensitive keyword.
+      if (!name->IsEqualTo(CStrVector("module"))) {
+        names.Add(name);
+        while (peek() == Token::COMMA) {
+          Consume(Token::COMMA);
+          name = ParseIdentifier(CHECK_OK);
+          names.Add(name);
+        }
+        ExpectSemicolon(CHECK_OK);
+        result = factory()->NewEmptyStatement();
+      } else {
+        result = ParseModuleDeclaration(&names, CHECK_OK);
+      }
+      break;
+    }
+
+    case Token::FUNCTION:
+      result = ParseFunctionDeclaration(&names, CHECK_OK);
+      break;
+
+    case Token::VAR:
+    case Token::LET:
+    case Token::CONST:
+      result = ParseVariableStatement(kModuleElement, &names, CHECK_OK);
+      break;
+
+    default:
+      *ok = false;
+      ReportUnexpectedToken(scanner().current_token());
+      return NULL;
+  }
+
+  // Extract declared names into export declarations and interface.
+  Interface* interface = top_scope_->interface();
+  for (int i = 0; i < names.length(); ++i) {
+#ifdef DEBUG
+    if (FLAG_print_interface_details)
+      PrintF("# Export %s ", names[i]->ToAsciiArray());
+#endif
+    Interface* inner = Interface::NewUnknown();
+    interface->Add(names[i], inner, CHECK_OK);
+    VariableProxy* proxy = NewUnresolved(names[i], LET, inner);
+    USE(proxy);
+    // TODO(rossberg): Rethink whether we actually need to store export
+    // declarations (for compilation?).
+    // ExportDeclaration* declaration =
+    //     factory()->NewExportDeclaration(proxy, top_scope_);
+    // top_scope_->AddDeclaration(declaration);
+  }
+
+  ASSERT(result != NULL);
+  return result;
+}
+
+
+Statement* Parser::ParseBlockElement(ZoneStringList* labels,
+                                     bool* ok) {
+  // (Ecma 262 5th Edition, clause 14):
+  // SourceElement:
+  //    Statement
+  //    FunctionDeclaration
+  //
+  // In harmony mode we additionally allow the following productions
+  // BlockElement (aka SourceElement):
+  //    LetDeclaration
+  //    ConstDeclaration
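+  //
+  // e.g. (illustrative): { let x; const y = 0; function f() {} }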
+
+  switch (peek()) {
+    case Token::FUNCTION:
+      return ParseFunctionDeclaration(NULL, ok);
+    case Token::LET:
+    case Token::CONST:
+      return ParseVariableStatement(kModuleElement, NULL, ok);
+    default:
+      return ParseStatement(labels, ok);
+  }
+}
+
+
 Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
   // Statement ::
   //   Block
@@ -1230,13 +1601,14 @@
       return ParseBlock(labels, ok);
 
     case Token::CONST:  // fall through
+    case Token::LET:
     case Token::VAR:
-      stmt = ParseVariableStatement(kStatement, ok);
+      stmt = ParseVariableStatement(kStatement, NULL, ok);
       break;
 
     case Token::SEMICOLON:
       Next();
-      return EmptyStatement();
+      return factory()->NewEmptyStatement();
 
     case Token::IF:
       stmt = ParseIfStatement(labels, ok);
@@ -1284,7 +1656,7 @@
       // one must take great care not to treat it as a
       // fall-through. It is much easier just to wrap the entire
       // try-statement in a statement block and put the labels there
-      Block* result = new(zone()) Block(isolate(), labels, 1, false);
+      Block* result = factory()->NewBlock(labels, 1, false);
       Target target(&this->target_stack_, result);
       TryStatement* statement = ParseTryStatement(CHECK_OK);
       if (statement) {
@@ -1295,15 +1667,20 @@
     }
 
     case Token::FUNCTION: {
-      // In strict mode, FunctionDeclaration is only allowed in the context
-      // of SourceElements.
-      if (top_scope_->is_strict_mode()) {
+      // FunctionDeclaration is only allowed in the context of SourceElements
+      // (Ecma 262 5th Edition, clause 14):
+      // SourceElement:
+      //    Statement
+      //    FunctionDeclaration
+      // A common language extension is to allow function declarations in
+      // place of any statement. This extension is disabled in strict mode.
+      if (!top_scope_->is_classic_mode()) {
         ReportMessageAt(scanner().peek_location(), "strict_function",
                         Vector<const char*>::empty());
         *ok = false;
         return NULL;
       }
-      return ParseFunctionDeclaration(ok);
+      return ParseFunctionDeclaration(NULL, ok);
     }
 
     case Token::DEBUGGER:
@@ -1320,15 +1697,24 @@
 }
 
 
-VariableProxy* Parser::Declare(Handle<String> name,
-                               Variable::Mode mode,
-                               FunctionLiteral* fun,
-                               bool resolve,
-                               bool* ok) {
-  Variable* var = NULL;
+VariableProxy* Parser::NewUnresolved(
+    Handle<String> name, VariableMode mode, Interface* interface) {
   // If we are inside a function, a declaration of a var/const variable is a
   // truly local variable, and the scope of the variable is always the function
   // scope.
+  // Let/const variables in harmony mode are always added to the immediately
+  // enclosing scope.
+  return DeclarationScope(mode)->NewUnresolved(
+      factory(), name, scanner().location().beg_pos, interface);
+}
+
+
+void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
+  VariableProxy* proxy = declaration->proxy();
+  Handle<String> name = proxy->name();
+  VariableMode mode = declaration->mode();
+  Scope* declaration_scope = DeclarationScope(mode);
+  Variable* var = NULL;
 
   // If a function scope exists, then we can statically declare this
   // variable and also set its mode. In any case, a Declaration node
@@ -1338,17 +1724,19 @@
   // to the calling function context.
   // Similarly, strict mode eval scope does not leak variable declarations to
   // the caller's scope so we declare all locals, too.
-
-  Scope* declaration_scope = mode == Variable::LET ? top_scope_
-      : top_scope_->DeclarationScope();
+  // Also for block scoped let/const bindings the variable can be
+  // statically declared.
   if (declaration_scope->is_function_scope() ||
-      declaration_scope->is_strict_mode_eval_scope() ||
-      declaration_scope->is_block_scope()) {
+      declaration_scope->is_strict_or_extended_eval_scope() ||
+      declaration_scope->is_block_scope() ||
+      declaration_scope->is_module_scope() ||
+      declaration->AsModuleDeclaration() != NULL) {
     // Declare the variable in the function scope.
     var = declaration_scope->LocalLookup(name);
     if (var == NULL) {
       // Declare the name.
-      var = declaration_scope->DeclareLocal(name, mode);
+      var = declaration_scope->DeclareLocal(
+          name, mode, declaration->initialization(), proxy->interface());
     } else {
       // The name was declared in this scope before; check for conflicting
       // re-declarations. We have a conflict if either of the declarations is
@@ -1361,12 +1749,13 @@
       //
       // because the var declaration is hoisted to the function scope where 'x'
       // is already bound.
-      if ((mode != Variable::VAR) || (var->mode() != Variable::VAR)) {
+      if ((mode != VAR) || (var->mode() != VAR)) {
         // We only have vars, consts and lets in declarations.
-        ASSERT(var->mode() == Variable::VAR ||
-               var->mode() == Variable::CONST ||
-               var->mode() == Variable::LET);
-        if (harmony_block_scoping_) {
+        ASSERT(var->mode() == VAR ||
+               var->mode() == CONST ||
+               var->mode() == CONST_HARMONY ||
+               var->mode() == LET);
+        if (is_extended_mode()) {
           // In harmony mode we treat re-declarations as early errors. See
           // ES5 16 for a definition of early errors.
           SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
@@ -1374,10 +1763,10 @@
           Vector<const char*> args(elms, 2);
           ReportMessage("redeclaration", args);
           *ok = false;
-          return NULL;
+          return;
         }
-        const char* type = (var->mode() == Variable::VAR) ? "var" :
-                           (var->mode() == Variable::CONST) ? "const" : "let";
+        const char* type = (var->mode() == VAR)
+            ? "var" : var->is_const_mode() ? "const" : "let";
         Handle<String> type_string =
             isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
         Expression* expression =
@@ -1404,20 +1793,33 @@
   // semantic issue as long as we keep the source order, but it may be
   // a performance issue since it may lead to repeated
   // Runtime::DeclareContextSlot() calls.
-  VariableProxy* proxy = declaration_scope->NewUnresolved(
-      name, false, scanner().location().beg_pos);
-  declaration_scope->AddDeclaration(
-      new(zone()) Declaration(proxy, mode, fun, top_scope_));
+  declaration_scope->AddDeclaration(declaration);
 
-  // For global const variables we bind the proxy to a variable.
-  if (mode == Variable::CONST && declaration_scope->is_global_scope()) {
+  if ((mode == CONST || mode == CONST_HARMONY) &&
+      declaration_scope->is_global_scope()) {
+    // For global const variables we bind the proxy to a variable.
     ASSERT(resolve);  // should be set by all callers
     Variable::Kind kind = Variable::NORMAL;
     var = new(zone()) Variable(declaration_scope,
                                name,
-                               Variable::CONST,
+                               mode,
                                true,
-                               kind);
+                               kind,
+                               kNeedsInitialization);
+  } else if (declaration_scope->is_eval_scope() &&
+             declaration_scope->is_classic_mode()) {
+    // For variable declarations in a non-strict eval scope the proxy is bound
+    // to a lookup variable to force a dynamic declaration using the
+    // DeclareContextSlot runtime function.
+    Variable::Kind kind = Variable::NORMAL;
+    var = new(zone()) Variable(declaration_scope,
+                               name,
+                               mode,
+                               true,
+                               kind,
+                               declaration->initialization());
+    var->AllocateTo(Variable::LOOKUP, -1);
+    resolve = true;
   }
 
   // If requested and we have a local variable, bind the proxy to the variable
@@ -1444,9 +1846,30 @@
   // initialization code. Thus, inside the 'with' statement, we need
   // both access to the static and the dynamic context chain; the
   // runtime needs to provide both.
-  if (resolve && var != NULL) proxy->BindTo(var);
+  if (resolve && var != NULL) {
+    proxy->BindTo(var);
 
-  return proxy;
+    if (FLAG_harmony_modules) {
+      bool ok;
+#ifdef DEBUG
+      if (FLAG_print_interface_details)
+        PrintF("# Declare %s\n", var->name()->ToAsciiArray());
+#endif
+      proxy->interface()->Unify(var->interface(), &ok);
+      if (!ok) {
+#ifdef DEBUG
+        if (FLAG_print_interfaces) {
+          PrintF("DECLARE TYPE ERROR\n");
+          PrintF("proxy: ");
+          proxy->interface()->Print();
+          PrintF("var: ");
+          var->interface()->Print();
+        }
+#endif
+        ReportMessage("module_type_error", Vector<Handle<String> >(&name, 1));
+      }
+    }
+  }
 }
 
 
@@ -1473,7 +1896,7 @@
   // isn't lazily compiled. The extension structures are only
   // accessible while parsing the first time, not when reparsing
   // because of lazy compilation.
-  top_scope_->DeclarationScope()->ForceEagerCompilation();
+  DeclarationScope(VAR)->ForceEagerCompilation();
 
   // Compute the function template for the native function.
   v8::Handle<v8::FunctionTemplate> fun_template =
@@ -1487,7 +1910,7 @@
   Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
   Handle<SharedFunctionInfo> shared =
       isolate()->factory()->NewSharedFunctionInfo(name, literals, code,
-          Handle<SerializedScopeInfo>(fun->shared()->scope_info()));
+          Handle<ScopeInfo>(fun->shared()->scope_info()));
   shared->set_construct_stub(*construct_stub);
 
   // Copy the function data to the shared function info.
@@ -1497,16 +1920,20 @@
 
   // TODO(1240846): It's weird that native function declarations are
   // introduced dynamically when we meet their declarations, whereas
-  // other functions are setup when entering the surrounding scope.
+  // other functions are set up when entering the surrounding scope.
+  VariableProxy* proxy = NewUnresolved(name, VAR);
+  Declaration* declaration =
+      factory()->NewVariableDeclaration(proxy, VAR, top_scope_);
+  Declare(declaration, true, CHECK_OK);
   SharedFunctionInfoLiteral* lit =
-      new(zone()) SharedFunctionInfoLiteral(isolate(), shared);
-  VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
-  return new(zone()) ExpressionStatement(new(zone()) Assignment(
-      isolate(), Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
+      factory()->NewSharedFunctionInfoLiteral(shared);
+  return factory()->NewExpressionStatement(
+      factory()->NewAssignment(
+          Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition));
 }
 
 
-Statement* Parser::ParseFunctionDeclaration(bool* ok) {
+Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
   // FunctionDeclaration ::
   //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
   Expect(Token::FUNCTION, CHECK_OK);
@@ -1522,14 +1949,18 @@
   // Even if we're not at the top-level of the global or a function
   // scope, we treat it as such and introduce the function with its
   // initial value upon entering the corresponding scope.
-  Variable::Mode mode = harmony_block_scoping_ ? Variable::LET : Variable::VAR;
-  Declare(name, mode, fun, true, CHECK_OK);
-  return EmptyStatement();
+  VariableMode mode = is_extended_mode() ? LET : VAR;
+  VariableProxy* proxy = NewUnresolved(name, mode);
+  Declaration* declaration =
+      factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_);
+  Declare(declaration, true, CHECK_OK);
+  if (names) names->Add(name);
+  return factory()->NewEmptyStatement();
 }
 
 
 Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
-  if (harmony_block_scoping_) return ParseScopedBlock(labels, ok);
+  if (top_scope_->is_extended_mode()) return ParseScopedBlock(labels, ok);
 
   // Block ::
   //   '{' Statement* '}'
@@ -1538,7 +1969,7 @@
   // (ECMA-262, 3rd, 12.2)
   //
   // Construct block expecting 16 statements.
-  Block* result = new(zone()) Block(isolate(), labels, 16, false);
+  Block* result = factory()->NewBlock(labels, 16, false);
   Target target(&this->target_stack_, result);
   Expect(Token::LBRACE, CHECK_OK);
   InitializationBlockFinder block_finder(top_scope_, target_stack_);
@@ -1555,27 +1986,26 @@
 
 
 Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
+  // The harmony mode uses block elements instead of statements.
+  //
+  // Block ::
+  //   '{' BlockElement* '}'
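+  //
+  // e.g. (illustrative): { let x = 1; { let x = 2; } } gives each
+  // '{ ... }' its own block scope.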
+
   // Construct block expecting 16 statements.
-  Block* body = new(zone()) Block(isolate(), labels, 16, false);
-  Scope* saved_scope = top_scope_;
-  Scope* block_scope = NewScope(top_scope_,
-                                Scope::BLOCK_SCOPE,
-                                inside_with());
-  if (top_scope_->is_strict_mode()) {
-    block_scope->EnableStrictMode();
-  }
-  top_scope_ = block_scope;
+  Block* body = factory()->NewBlock(labels, 16, false);
+  Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
 
   // Parse the statements and collect escaping labels.
-  TargetCollector collector;
-  Target target(&this->target_stack_, &collector);
   Expect(Token::LBRACE, CHECK_OK);
-  {
+  block_scope->set_start_position(scanner().location().beg_pos);
+  { BlockState block_state(this, block_scope);
+    TargetCollector collector;
+    Target target(&this->target_stack_, &collector);
     Target target_body(&this->target_stack_, body);
     InitializationBlockFinder block_finder(top_scope_, target_stack_);
 
     while (peek() != Token::RBRACE) {
-      Statement* stat = ParseSourceElement(NULL, CHECK_OK);
+      Statement* stat = ParseBlockElement(NULL, CHECK_OK);
       if (stat && !stat->IsEmpty()) {
         body->AddStatement(stat);
         block_finder.Update(stat);
@@ -1583,8 +2013,7 @@
     }
   }
   Expect(Token::RBRACE, CHECK_OK);
-  top_scope_ = saved_scope;
-
+  block_scope->set_end_position(scanner().location().end_pos);
   block_scope = block_scope->FinalizeBlockScope();
   body->set_block_scope(block_scope);
   return body;
@@ -1592,14 +2021,14 @@
 
 
 Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
+                                      ZoneStringList* names,
                                       bool* ok) {
   // VariableStatement ::
   //   VariableDeclarations ';'
 
   Handle<String> ignore;
-  Block* result = ParseVariableDeclarations(var_context,
-                                            &ignore,
-                                            CHECK_OK);
+  Block* result =
+      ParseVariableDeclarations(var_context, NULL, names, &ignore, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
   return result;
 }
@@ -1612,17 +2041,30 @@
 
 
 // If the variable declaration declares exactly one non-const
-// variable, then *var is set to that variable. In all other cases,
-// *var is untouched; in particular, it is the caller's responsibility
+// variable, then *out is set to that variable. In all other cases,
+// *out is untouched; in particular, it is the caller's responsibility
 // to initialize it properly. This mechanism is used for the parsing
 // of 'for-in' loops.
-Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
-                                         Handle<String>* out,
-                                         bool* ok) {
+Block* Parser::ParseVariableDeclarations(
+    VariableDeclarationContext var_context,
+    VariableDeclarationProperties* decl_props,
+    ZoneStringList* names,
+    Handle<String>* out,
+    bool* ok) {
   // VariableDeclarations ::
-  //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
-
-  Variable::Mode mode = Variable::VAR;
+  //   ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
+  //
+  // The ES6 Draft Rev3 specifies the following grammar for const declarations
+  //
+  // ConstDeclaration ::
+  //   const ConstBinding (',' ConstBinding)* ';'
+  // ConstBinding ::
+  //   Identifier '=' AssignmentExpression
+  //
+  // TODO(ES6):
+  // ConstBinding ::
+  //   BindingPattern '=' AssignmentExpression
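+  //
+  // Illustrative examples by language mode:
+  //   classic:   const x;       // allowed, legacy const semantics
+  //   strict:    const x = 1;   // syntax error ("strict_const")
+  //   extended:  const x = 1;   // CONST_HARMONY; initializer required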
+  VariableMode mode = VAR;
   // True if the binding needs initialization. 'let' and 'const' declared
   // bindings are created uninitialized by their declaration nodes and
   // need initialization. 'var' declared bindings are always initialized
@@ -1633,34 +2075,67 @@
   if (peek() == Token::VAR) {
     Consume(Token::VAR);
   } else if (peek() == Token::CONST) {
+    // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
+    //
+    // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
+    //
+    // * It is a Syntax Error if the code that matches this production is not
+    //   contained in extended code.
+    //
+    // However, disallowing const in classic mode will break compatibility with
+    // existing pages. Therefore we keep allowing const with the old
+    // non-harmony semantics in classic mode.
     Consume(Token::CONST);
-    if (top_scope_->is_strict_mode()) {
-      ReportMessage("strict_const", Vector<const char*>::empty());
+    switch (top_scope_->language_mode()) {
+      case CLASSIC_MODE:
+        mode = CONST;
+        init_op = Token::INIT_CONST;
+        break;
+      case STRICT_MODE:
+        ReportMessage("strict_const", Vector<const char*>::empty());
+        *ok = false;
+        return NULL;
+      case EXTENDED_MODE:
+        if (var_context == kStatement) {
+          // In extended mode 'const' declarations are only allowed in source
+          // element positions.
+          ReportMessage("unprotected_const", Vector<const char*>::empty());
+          *ok = false;
+          return NULL;
+        }
+        mode = CONST_HARMONY;
+        init_op = Token::INIT_CONST_HARMONY;
+    }
+    is_const = true;
+    needs_init = true;
+  } else if (peek() == Token::LET) {
+    // ES6 Draft Rev4 section 12.2.1:
+    //
+    // LetDeclaration : let LetBindingList ;
+    //
+    // * It is a Syntax Error if the code that matches this production is not
+    //   contained in extended code.
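+    // (Illustrative: outside extended mode 'let' is reported as
+    // "illegal_let" below; in statement position, e.g. 'if (c) let x;',
+    // it is reported as "unprotected_let".)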
+    if (!is_extended_mode()) {
+      ReportMessage("illegal_let", Vector<const char*>::empty());
       *ok = false;
       return NULL;
     }
-    mode = Variable::CONST;
-    is_const = true;
-    needs_init = true;
-    init_op = Token::INIT_CONST;
-  } else if (peek() == Token::LET) {
     Consume(Token::LET);
-    if (var_context != kSourceElement &&
-        var_context != kForStatement) {
-      ASSERT(var_context == kStatement);
+    if (var_context == kStatement) {
+      // Let declarations are only allowed in source element positions.
       ReportMessage("unprotected_let", Vector<const char*>::empty());
       *ok = false;
       return NULL;
     }
-    mode = Variable::LET;
+    mode = LET;
     needs_init = true;
     init_op = Token::INIT_LET;
   } else {
     UNREACHABLE();  // by current callers
   }
 
-  Scope* declaration_scope = mode == Variable::LET
-      ? top_scope_ : top_scope_->DeclarationScope();
+  Scope* declaration_scope = DeclarationScope(mode);
+
   // The scope of a var/const declared variable anywhere inside a function
   // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
   // transform a source-level var/const declaration into a (Function)
@@ -1674,7 +2149,7 @@
   // is inside an initializer block, it is ignored.
   //
   // Create new block with one expected declaration.
-  Block* block = new(zone()) Block(isolate(), NULL, 1, true);
+  Block* block = factory()->NewBlock(NULL, 1, true);
   int nvars = 0;  // the number of variables declared
   Handle<String> name;
   do {
@@ -1686,7 +2161,7 @@
     if (fni_ != NULL) fni_->PushVariableName(name);
 
     // Strict mode variables may not be named eval or arguments
-    if (declaration_scope->is_strict_mode() && IsEvalOrArguments(name)) {
+    if (!declaration_scope->is_classic_mode() && IsEvalOrArguments(name)) {
       ReportMessage("strict_var_name", Vector<const char*>::empty());
       *ok = false;
       return NULL;
@@ -1704,8 +2179,13 @@
     // If we have a const declaration, in an inner scope, the proxy is always
     // bound to the declared variable (independent of possibly surrounding with
     // statements).
-    Declare(name, mode, NULL, is_const /* always bound for CONST! */,
-            CHECK_OK);
+    // For let/const declarations in harmony mode, we can also immediately
+    // pre-resolve the proxy because it resides in the same scope as the
+    // declaration.
+    VariableProxy* proxy = NewUnresolved(name, mode);
+    Declaration* declaration =
+        factory()->NewVariableDeclaration(proxy, mode, top_scope_);
+    Declare(declaration, mode != VAR, CHECK_OK);
     nvars++;
     if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
       ReportMessageAt(scanner().location(), "too_many_variables",
@@ -1713,6 +2193,7 @@
       *ok = false;
       return NULL;
     }
+    if (names) names->Add(name);
 
     // Parse initialization expression if present and/or needed. A
     // declaration of the form:
@@ -1744,7 +2225,8 @@
     Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
     Expression* value = NULL;
     int position = -1;
-    if (peek() == Token::ASSIGN) {
+    // Harmony consts have non-optional initializers.
+    if (peek() == Token::ASSIGN || mode == CONST_HARMONY) {
       Expect(Token::ASSIGN, CHECK_OK);
       position = scanner().location().beg_pos;
       value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
@@ -1756,6 +2238,12 @@
       } else {
         fni_->RemoveLastFunction();
       }
+      if (decl_props != NULL) *decl_props = kHasInitializers;
+    }
+
+    // Record the end position of the initializer.
+    if (proxy->var() != NULL) {
+      proxy->var()->set_initializer_position(scanner().location().end_pos);
     }
 
     // Make sure that 'const x' and 'let x' initialize 'x' to undefined.
@@ -1782,12 +2270,11 @@
     // declaration statement has been executed. This is important in
     // browsers where the global object (window) has lots of
     // properties defined in prototype objects.
-
     if (initialization_scope->is_global_scope()) {
       // Compute the arguments for the runtime call.
       ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3);
       // We have at least 1 parameter.
-      arguments->Add(NewLiteral(name));
+      arguments->Add(factory()->NewLiteral(name));
       CallRuntime* initialize;
 
       if (is_const) {
@@ -1798,19 +2285,15 @@
         // and add it to the initialization statement block.
         // Note that the function does different things depending on
         // the number of arguments (1 or 2).
-        initialize =
-            new(zone()) CallRuntime(
-                isolate(),
-                isolate()->factory()->InitializeConstGlobal_symbol(),
-                Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
-                arguments);
+        initialize = factory()->NewCallRuntime(
+            isolate()->factory()->InitializeConstGlobal_symbol(),
+            Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
+            arguments);
       } else {
         // Add strict mode.
         // We may want to pass singleton to avoid Literal allocations.
-        StrictModeFlag flag = initialization_scope->is_strict_mode()
-            ? kStrictMode
-            : kNonStrictMode;
-        arguments->Add(NewNumberLiteral(flag));
+        LanguageMode language_mode = initialization_scope->language_mode();
+        arguments->Add(factory()->NewNumberLiteral(language_mode));
 
         // Be careful not to assign a value to the global variable if
         // we're in a with. The initialization value should not
@@ -1825,39 +2308,42 @@
         // and add it to the initialization statement block.
         // Note that the function does different things depending on
         // the number of arguments (2 or 3).
-        initialize =
-            new(zone()) CallRuntime(
-                isolate(),
-                isolate()->factory()->InitializeVarGlobal_symbol(),
-                Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
-                arguments);
+        initialize = factory()->NewCallRuntime(
+            isolate()->factory()->InitializeVarGlobal_symbol(),
+            Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
+            arguments);
       }
 
-      block->AddStatement(new(zone()) ExpressionStatement(initialize));
+      block->AddStatement(factory()->NewExpressionStatement(initialize));
+    } else if (needs_init) {
+      // Constant initializations always assign to the declared constant which
+      // is always at the function scope level. This is only relevant for
+      // dynamically looked-up variables and constants (the start context for
+      // constant lookups is always the function context, while it is the top
+      // context for var declared variables). Sigh...
+      // For 'let' and 'const' declared variables in harmony mode the
+      // initialization also always assigns to the declared variable.
+      ASSERT(proxy != NULL);
+      ASSERT(proxy->var() != NULL);
+      ASSERT(value != NULL);
+      Assignment* assignment =
+          factory()->NewAssignment(init_op, proxy, value, position);
+      block->AddStatement(factory()->NewExpressionStatement(assignment));
+      value = NULL;
     }
 
     // Add an assignment node to the initialization statement block if we still
-    // have a pending initialization value. We must distinguish between
-    // different kinds of declarations: 'var' initializations are simply
-    // assignments (with all the consequences if they are inside a 'with'
-    // statement - they may change a 'with' object property). Constant
-    // initializations always assign to the declared constant which is
-    // always at the function scope level. This is only relevant for
-    // dynamically looked-up variables and constants (the start context
-    // for constant lookups is always the function context, while it is
-    // the top context for var declared variables). Sigh...
-    // For 'let' declared variables the initialization is in the same scope
-    // as the declaration. Thus dynamic lookups are unnecessary even if the
-    // block scope is inside a with.
+    // have a pending initialization value.
     if (value != NULL) {
-      bool in_with = mode == Variable::VAR ? inside_with() : false;
+      ASSERT(mode == VAR);
+      // 'var' initializations are simply assignments (with all the consequences
+      // if they are inside a 'with' statement - they may change a 'with' object
+      // property).
       VariableProxy* proxy =
-          initialization_scope->NewUnresolved(name, in_with);
+          initialization_scope->NewUnresolved(factory(), name);
       Assignment* assignment =
-          new(zone()) Assignment(isolate(), init_op, proxy, value, position);
-      if (block) {
-        block->AddStatement(new(zone()) ExpressionStatement(assignment));
-      }
+          factory()->NewAssignment(init_op, proxy, value, position);
+      block->AddStatement(factory()->NewExpressionStatement(assignment));
     }
 
     if (fni_ != NULL) fni_->Leave();
@@ -1935,9 +2421,18 @@
     return ParseNativeDeclaration(ok);
   }
 
-  // Parsed expression statement.
-  ExpectSemicolon(CHECK_OK);
-  return new(zone()) ExpressionStatement(expr);
+  // Parsed expression statement, or the context-sensitive 'module' keyword.
+  // Only expect semicolon in the former case.
+  if (!FLAG_harmony_modules ||
+      peek() != Token::IDENTIFIER ||
+      scanner().HasAnyLineTerminatorBeforeNext() ||
+      expr->AsVariableProxy() == NULL ||
+      !expr->AsVariableProxy()->name()->Equals(
+          isolate()->heap()->module_symbol()) ||
+      scanner().literal_contains_escapes()) {
+    ExpectSemicolon(CHECK_OK);
+  }
+  return factory()->NewExpressionStatement(expr);
 }
 
 
@@ -1955,10 +2450,9 @@
     Next();
     else_statement = ParseStatement(labels, CHECK_OK);
   } else {
-    else_statement = EmptyStatement();
+    else_statement = factory()->NewEmptyStatement();
   }
-  return new(zone()) IfStatement(
-      isolate(), condition, then_statement, else_statement);
+  return factory()->NewIfStatement(condition, then_statement, else_statement);
 }
 
 
@@ -1988,7 +2482,7 @@
     return NULL;
   }
   ExpectSemicolon(CHECK_OK);
-  return new(zone()) ContinueStatement(target);
+  return factory()->NewContinueStatement(target);
 }
 
 
@@ -2006,7 +2500,8 @@
   // Parse labeled break statements that target themselves into
   // empty statements, e.g. 'l1: l2: l3: break l2;'
   if (!label.is_null() && ContainsLabel(labels, label)) {
-    return EmptyStatement();
+    ExpectSemicolon(CHECK_OK);
+    return factory()->NewEmptyStatement();
   }
   BreakableStatement* target = NULL;
   target = LookupBreakTarget(label, CHECK_OK);
@@ -2023,7 +2518,7 @@
     return NULL;
   }
   ExpectSemicolon(CHECK_OK);
-  return new(zone()) BreakStatement(target);
+  return factory()->NewBreakStatement(target);
 }
 
 
@@ -2043,11 +2538,11 @@
       tok == Token::RBRACE ||
       tok == Token::EOS) {
     ExpectSemicolon(CHECK_OK);
-    result = new(zone()) ReturnStatement(GetLiteralUndefined());
+    result = factory()->NewReturnStatement(GetLiteralUndefined());
   } else {
     Expression* expr = ParseExpression(true, CHECK_OK);
     ExpectSemicolon(CHECK_OK);
-    result = new(zone()) ReturnStatement(expr);
+    result = factory()->NewReturnStatement(expr);
   }
 
   // An ECMAScript program is considered syntactically incorrect if it
@@ -2060,7 +2555,7 @@
       declaration_scope->is_eval_scope()) {
     Handle<String> type = isolate()->factory()->illegal_return_symbol();
     Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
-    return new(zone()) ExpressionStatement(throw_error);
+    return factory()->NewExpressionStatement(throw_error);
   }
   return result;
 }
@@ -2072,7 +2567,7 @@
 
   Expect(Token::WITH, CHECK_OK);
 
-  if (top_scope_->is_strict_mode()) {
+  if (!top_scope_->is_classic_mode()) {
     ReportMessage("strict_mode_with", Vector<const char*>::empty());
     *ok = false;
     return NULL;
@@ -2082,11 +2577,15 @@
   Expression* expr = ParseExpression(true, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
 
-  ++with_nesting_level_;
   top_scope_->DeclarationScope()->RecordWithStatement();
-  Statement* stmt = ParseStatement(labels, CHECK_OK);
-  --with_nesting_level_;
-  return new(zone()) WithStatement(expr, stmt);
+  Scope* with_scope = NewScope(top_scope_, WITH_SCOPE);
+  Statement* stmt;
+  { BlockState block_state(this, with_scope);
+    with_scope->set_start_position(scanner().peek_location().beg_pos);
+    stmt = ParseStatement(labels, CHECK_OK);
+    with_scope->set_end_position(scanner().location().end_pos);
+  }
+  return factory()->NewWithStatement(expr, stmt);
 }
 
 
@@ -2128,7 +2627,7 @@
   // SwitchStatement ::
   //   'switch' '(' Expression ')' '{' CaseClause* '}'
 
-  SwitchStatement* statement = new(zone()) SwitchStatement(isolate(), labels);
+  SwitchStatement* statement = factory()->NewSwitchStatement(labels);
   Target target(&this->target_stack_, statement);
 
   Expect(Token::SWITCH, CHECK_OK);
@@ -2164,8 +2663,7 @@
   Expression* exception = ParseExpression(true, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
 
-  return new(zone()) ExpressionStatement(
-      new(zone()) Throw(isolate(), exception, pos));
+  return factory()->NewExpressionStatement(factory()->NewThrow(exception, pos));
 }
 
 
@@ -2210,9 +2708,11 @@
     Consume(Token::CATCH);
 
     Expect(Token::LPAREN, CHECK_OK);
+    catch_scope = NewScope(top_scope_, CATCH_SCOPE);
+    catch_scope->set_start_position(scanner().location().beg_pos);
     name = ParseIdentifier(CHECK_OK);
 
-    if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
+    if (!top_scope_->is_classic_mode() && IsEvalOrArguments(name)) {
       ReportMessage("strict_catch_variable", Vector<const char*>::empty());
       *ok = false;
       return NULL;
@@ -2222,22 +2722,16 @@
 
     if (peek() == Token::LBRACE) {
       Target target(&this->target_stack_, &catch_collector);
-      catch_scope = NewScope(top_scope_, Scope::CATCH_SCOPE, inside_with());
-      if (top_scope_->is_strict_mode()) {
-        catch_scope->EnableStrictMode();
-      }
-      Variable::Mode mode = harmony_block_scoping_
-          ? Variable::LET : Variable::VAR;
-      catch_variable = catch_scope->DeclareLocal(name, mode);
+      VariableMode mode = is_extended_mode() ? LET : VAR;
+      catch_variable =
+          catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
 
-      Scope* saved_scope = top_scope_;
-      top_scope_ = catch_scope;
+      BlockState block_state(this, catch_scope);
       catch_block = ParseBlock(NULL, CHECK_OK);
-      top_scope_ = saved_scope;
     } else {
       Expect(Token::LBRACE, CHECK_OK);
     }
-
+    catch_scope->set_end_position(scanner().location().end_pos);
     tok = peek();
   }
 
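
Both the with-statement and the catch-block hunks replace a manual
save/restore of top_scope_ with a BlockState object, so the scope chain is
restored even when a CHECK_OK macro returns early. A minimal sketch of
that RAII pattern, using toy Parser and Scope types rather than V8's real
classes:

    struct Scope { Scope* outer; };

    struct Parser { Scope* top_scope_; };

    // Constructing a BlockState pushes a scope; the destructor pops it,
    // which is what makes early returns inside the block safe.
    class BlockState {
     public:
      BlockState(Parser* parser, Scope* scope)
          : parser_(parser), outer_scope_(parser->top_scope_) {
        parser->top_scope_ = scope;
      }
      ~BlockState() { parser_->top_scope_ = outer_scope_; }

     private:
      Parser* parser_;
      Scope* outer_scope_;
    };

    int main() {
      Parser p;
      Scope outer = {0};
      Scope inner = {&outer};
      p.top_scope_ = &outer;
      {
        BlockState state(&p, &inner);
        // ... parse the catch block with p.top_scope_ == &inner ...
      }
      return p.top_scope_ == &outer ? 0 : 1;  // scope restored on exit
    }
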
@@ -2255,13 +2749,11 @@
   if (catch_block != NULL && finally_block != NULL) {
     // If we have both, create an inner try/catch.
     ASSERT(catch_scope != NULL && catch_variable != NULL);
-    TryCatchStatement* statement =
-        new(zone()) TryCatchStatement(try_block,
-                                      catch_scope,
-                                      catch_variable,
-                                      catch_block);
+    int index = current_function_state_->NextHandlerIndex();
+    TryCatchStatement* statement = factory()->NewTryCatchStatement(
+        index, try_block, catch_scope, catch_variable, catch_block);
     statement->set_escaping_targets(try_collector.targets());
-    try_block = new(zone()) Block(isolate(), NULL, 1, false);
+    try_block = factory()->NewBlock(NULL, 1, false);
     try_block->AddStatement(statement);
     catch_block = NULL;  // Clear to indicate it's been handled.
   }
@@ -2270,14 +2762,13 @@
   if (catch_block != NULL) {
     ASSERT(finally_block == NULL);
     ASSERT(catch_scope != NULL && catch_variable != NULL);
-    result =
-        new(zone()) TryCatchStatement(try_block,
-                                      catch_scope,
-                                      catch_variable,
-                                      catch_block);
+    int index = current_function_state_->NextHandlerIndex();
+    result = factory()->NewTryCatchStatement(
+        index, try_block, catch_scope, catch_variable, catch_block);
   } else {
     ASSERT(finally_block != NULL);
-    result = new(zone()) TryFinallyStatement(try_block, finally_block);
+    int index = current_function_state_->NextHandlerIndex();
+    result = factory()->NewTryFinallyStatement(index, try_block, finally_block);
     // Combine the jump targets of the try block and the possible catch block.
     try_collector.targets()->AddAll(*catch_collector.targets());
   }
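
Note that the rewritten code never builds a combined try/catch/finally
statement: when both clauses are present, the try/catch is nested inside a
fresh block and the outer statement keeps only the finally clause, with
each resulting statement drawing its own handler index from the enclosing
function state. A toy sketch of the nesting decision (simplified AST
shapes, not V8's node classes):

    #include <cassert>
    #include <memory>

    struct Stmt {
      enum Kind { PLAIN, TRY_CATCH, TRY_FINALLY } kind;
      std::unique_ptr<Stmt> body;     // the protected block
      std::unique_ptr<Stmt> handler;  // the catch or finally block
      explicit Stmt(Kind k) : kind(k) {}
    };

    std::unique_ptr<Stmt> MakeTry(std::unique_ptr<Stmt> try_block,
                                  std::unique_ptr<Stmt> catch_block,
                                  std::unique_ptr<Stmt> finally_block) {
      if (catch_block && finally_block) {
        // Both clauses present: wrap the try/catch, and let the outer
        // statement own only the finally clause.
        auto inner = std::make_unique<Stmt>(Stmt::TRY_CATCH);
        inner->body = std::move(try_block);
        inner->handler = std::move(catch_block);
        try_block = std::move(inner);
        catch_block.reset();
      }
      auto outer = std::make_unique<Stmt>(
          finally_block ? Stmt::TRY_FINALLY : Stmt::TRY_CATCH);
      outer->body = std::move(try_block);
      outer->handler =
          finally_block ? std::move(finally_block) : std::move(catch_block);
      return outer;
    }

    int main() {
      auto result = MakeTry(std::make_unique<Stmt>(Stmt::PLAIN),
                            std::make_unique<Stmt>(Stmt::PLAIN),
                            std::make_unique<Stmt>(Stmt::PLAIN));
      assert(result->kind == Stmt::TRY_FINALLY);
      assert(result->body->kind == Stmt::TRY_CATCH);
      return 0;
    }
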
@@ -2292,7 +2783,7 @@
   // DoStatement ::
   //   'do' Statement 'while' '(' Expression ')' ';'
 
-  DoWhileStatement* loop = new(zone()) DoWhileStatement(isolate(), labels);
+  DoWhileStatement* loop = factory()->NewDoWhileStatement(labels);
   Target target(&this->target_stack_, loop);
 
   Expect(Token::DO, CHECK_OK);
@@ -2323,7 +2814,7 @@
   // WhileStatement ::
   //   'while' '(' Expression ')' Statement
 
-  WhileStatement* loop = new(zone()) WhileStatement(isolate(), labels);
+  WhileStatement* loop = factory()->NewWhileStatement(labels);
   Target target(&this->target_stack_, loop);
 
   Expect(Token::WHILE, CHECK_OK);
@@ -2343,17 +2834,23 @@
 
   Statement* init = NULL;
 
+  // Create an in-between scope for let-bound iteration variables.
+  Scope* saved_scope = top_scope_;
+  Scope* for_scope = NewScope(top_scope_, BLOCK_SCOPE);
+  top_scope_ = for_scope;
+
   Expect(Token::FOR, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
+  for_scope->set_start_position(scanner().location().beg_pos);
   if (peek() != Token::SEMICOLON) {
     if (peek() == Token::VAR || peek() == Token::CONST) {
       Handle<String> name;
       Block* variable_statement =
-          ParseVariableDeclarations(kForStatement, &name, CHECK_OK);
+          ParseVariableDeclarations(kForStatement, NULL, NULL, &name, CHECK_OK);
 
       if (peek() == Token::IN && !name.is_null()) {
-        VariableProxy* each = top_scope_->NewUnresolved(name, inside_with());
-        ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
+        VariableProxy* each = top_scope_->NewUnresolved(factory(), name);
+        ForInStatement* loop = factory()->NewForInStatement(labels);
         Target target(&this->target_stack_, loop);
 
         Expect(Token::IN, CHECK_OK);
@@ -2362,15 +2859,71 @@
 
         Statement* body = ParseStatement(NULL, CHECK_OK);
         loop->Initialize(each, enumerable, body);
-        Block* result = new(zone()) Block(isolate(), NULL, 2, false);
+        Block* result = factory()->NewBlock(NULL, 2, false);
         result->AddStatement(variable_statement);
         result->AddStatement(loop);
+        top_scope_ = saved_scope;
+        for_scope->set_end_position(scanner().location().end_pos);
+        for_scope = for_scope->FinalizeBlockScope();
+        ASSERT(for_scope == NULL);
         // Parsed for-in loop w/ variable/const declaration.
         return result;
       } else {
         init = variable_statement;
       }
+    } else if (peek() == Token::LET) {
+      Handle<String> name;
+      VariableDeclarationProperties decl_props = kHasNoInitializers;
+      Block* variable_statement =
+         ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name,
+                                   CHECK_OK);
+      bool accept_IN = !name.is_null() && decl_props != kHasInitializers;
+      if (peek() == Token::IN && accept_IN) {
+        // Rewrite a for-in statement of the form
+        //
+        //   for (let x in e) b
+        //
+        // into
+        //
+        //   <let x' be a temporary variable>
+        //   for (x' in e) {
+        //     let x;
+        //     x = x';
+        //     b;
+        //   }
 
+        // TODO(keuchel): Move the temporary variable to the block scope, after
+        // implementing stack allocated block scoped variables.
+        Variable* temp = top_scope_->DeclarationScope()->NewTemporary(name);
+        VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
+        VariableProxy* each = top_scope_->NewUnresolved(factory(), name);
+        ForInStatement* loop = factory()->NewForInStatement(labels);
+        Target target(&this->target_stack_, loop);
+
+        Expect(Token::IN, CHECK_OK);
+        Expression* enumerable = ParseExpression(true, CHECK_OK);
+        Expect(Token::RPAREN, CHECK_OK);
+
+        Statement* body = ParseStatement(NULL, CHECK_OK);
+        Block* body_block = factory()->NewBlock(NULL, 3, false);
+        Assignment* assignment = factory()->NewAssignment(
+            Token::ASSIGN, each, temp_proxy, RelocInfo::kNoPosition);
+        Statement* assignment_statement =
+            factory()->NewExpressionStatement(assignment);
+        body_block->AddStatement(variable_statement);
+        body_block->AddStatement(assignment_statement);
+        body_block->AddStatement(body);
+        loop->Initialize(temp_proxy, enumerable, body_block);
+        top_scope_ = saved_scope;
+        for_scope->set_end_position(scanner().location().end_pos);
+        for_scope = for_scope->FinalizeBlockScope();
+        body_block->set_block_scope(for_scope);
+        // Parsed for-in loop w/ let declaration.
+        return loop;
+
+      } else {
+        init = variable_statement;
+      }
     } else {
       Expression* expression = ParseExpression(false, CHECK_OK);
       if (peek() == Token::IN) {
@@ -2383,7 +2936,7 @@
               isolate()->factory()->invalid_lhs_in_for_in_symbol();
           expression = NewThrowReferenceError(type);
         }
-        ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
+        ForInStatement* loop = factory()->NewForInStatement(labels);
         Target target(&this->target_stack_, loop);
 
         Expect(Token::IN, CHECK_OK);
@@ -2392,17 +2945,21 @@
 
         Statement* body = ParseStatement(NULL, CHECK_OK);
         if (loop) loop->Initialize(expression, enumerable, body);
+        top_scope_ = saved_scope;
+        for_scope->set_end_position(scanner().location().end_pos);
+        for_scope = for_scope->FinalizeBlockScope();
+        ASSERT(for_scope == NULL);
         // Parsed for-in loop.
         return loop;
 
       } else {
-        init = new(zone()) ExpressionStatement(expression);
+        init = factory()->NewExpressionStatement(expression);
       }
     }
   }
 
   // Standard 'for' loop
-  ForStatement* loop = new(zone()) ForStatement(isolate(), labels);
+  ForStatement* loop = factory()->NewForStatement(labels);
   Target target(&this->target_stack_, loop);
 
   // Parsed initializer at this point.
@@ -2417,13 +2974,36 @@
   Statement* next = NULL;
   if (peek() != Token::RPAREN) {
     Expression* exp = ParseExpression(true, CHECK_OK);
-    next = new(zone()) ExpressionStatement(exp);
+    next = factory()->NewExpressionStatement(exp);
   }
   Expect(Token::RPAREN, CHECK_OK);
 
   Statement* body = ParseStatement(NULL, CHECK_OK);
-  if (loop) loop->Initialize(init, cond, next, body);
-  return loop;
+  top_scope_ = saved_scope;
+  for_scope->set_end_position(scanner().location().end_pos);
+  for_scope = for_scope->FinalizeBlockScope();
+  if (for_scope != NULL) {
+    // Rewrite a for statement of the form
+    //
+    //   for (let x = i; c; n) b
+    //
+    // into
+    //
+    //   {
+    //     let x = i;
+    //     for (; c; n) b
+    //   }
+    ASSERT(init != NULL);
+    Block* result = factory()->NewBlock(NULL, 2, false);
+    result->AddStatement(init);
+    result->AddStatement(loop);
+    result->set_block_scope(for_scope);
+    if (loop) loop->Initialize(NULL, cond, next, body);
+    return result;
+  } else {
+    if (loop) loop->Initialize(init, cond, next, body);
+    return loop;
+  }
 }
 
 
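
ParseForStatement now opens a block scope before the parenthesis so that
let-bound iteration variables have somewhere to live, and
FinalizeBlockScope() dissolves that scope again when nothing was lexically
declared in it; the NULL results asserted above are the ordinary var and
expression cases. A sketch of that open-eagerly, discard-if-unused idea
(toy Scope; the method's behavior is an assumption inferred from the calls
above):

    #include <cassert>

    struct Scope {
      Scope* outer;
      int lexical_declarations;
      explicit Scope(Scope* o) : outer(o), lexical_declarations(0) {}

      // Keep the scope only if it binds something; otherwise report NULL
      // so the caller emits the loop without a wrapper block.
      Scope* FinalizeBlockScope() {
        return lexical_declarations > 0 ? this : 0;
      }
    };

    int main() {
      Scope global(0);

      Scope plain_for(&global);   // for (var i = 0; ...; ...) ...
      assert(plain_for.FinalizeBlockScope() == 0);

      Scope let_for(&global);     // for (let i = 0; ...; ...) ...
      let_for.lexical_declarations = 1;
      assert(let_for.FinalizeBlockScope() == &let_for);
      return 0;
    }
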
@@ -2438,8 +3018,8 @@
     Expect(Token::COMMA, CHECK_OK);
     int position = scanner().location().beg_pos;
     Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-    result = new(zone()) BinaryOperation(
-        isolate(), Token::COMMA, result, right, position);
+    result =
+        factory()->NewBinaryOperation(Token::COMMA, result, right, position);
   }
   return result;
 }
@@ -2470,10 +3050,11 @@
     expression = NewThrowReferenceError(type);
   }
 
-  if (top_scope_->is_strict_mode()) {
+  if (!top_scope_->is_classic_mode()) {
     // Assignment to eval or arguments is disallowed in strict mode.
     CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
   }
+  MarkAsLValue(expression);
 
   Token::Value op = Next();  // Get assignment operator.
   int pos = scanner().location().beg_pos;
@@ -2489,13 +3070,13 @@
       property != NULL &&
       property->obj()->AsVariableProxy() != NULL &&
       property->obj()->AsVariableProxy()->is_this()) {
-    lexical_scope_->AddProperty();
+    current_function_state_->AddProperty();
   }
 
   // If we assign a function literal to a property we pretenure the
   // literal so it can be added as a constant function property.
   if (property != NULL && right->AsFunctionLiteral() != NULL) {
-    right->AsFunctionLiteral()->set_pretenure(true);
+    right->AsFunctionLiteral()->set_pretenure();
   }
 
   if (fni_ != NULL) {
@@ -2513,7 +3094,7 @@
     fni_->Leave();
   }
 
-  return new(zone()) Assignment(isolate(), op, expression, right, pos);
+  return factory()->NewAssignment(op, expression, right, pos);
 }
 
 
@@ -2535,8 +3116,8 @@
   Expect(Token::COLON, CHECK_OK);
   int right_position = scanner().peek_location().beg_pos;
   Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-  return new(zone()) Conditional(
-      isolate(), expression, left, right, left_position, right_position);
+  return factory()->NewConditional(
+      expression, left, right, left_position, right_position);
 }
 
 
@@ -2567,41 +3148,47 @@
 
         switch (op) {
           case Token::ADD:
-            x = NewNumberLiteral(x_val + y_val);
+            x = factory()->NewNumberLiteral(x_val + y_val);
             continue;
           case Token::SUB:
-            x = NewNumberLiteral(x_val - y_val);
+            x = factory()->NewNumberLiteral(x_val - y_val);
             continue;
           case Token::MUL:
-            x = NewNumberLiteral(x_val * y_val);
+            x = factory()->NewNumberLiteral(x_val * y_val);
             continue;
           case Token::DIV:
-            x = NewNumberLiteral(x_val / y_val);
+            x = factory()->NewNumberLiteral(x_val / y_val);
             continue;
-          case Token::BIT_OR:
-            x = NewNumberLiteral(DoubleToInt32(x_val) | DoubleToInt32(y_val));
+          case Token::BIT_OR: {
+            int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
+            x = factory()->NewNumberLiteral(value);
             continue;
-          case Token::BIT_AND:
-            x = NewNumberLiteral(DoubleToInt32(x_val) & DoubleToInt32(y_val));
+          }
+          case Token::BIT_AND: {
+            int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
+            x = factory()->NewNumberLiteral(value);
             continue;
-          case Token::BIT_XOR:
-            x = NewNumberLiteral(DoubleToInt32(x_val) ^ DoubleToInt32(y_val));
+          }
+          case Token::BIT_XOR: {
+            int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
+            x = factory()->NewNumberLiteral(value);
             continue;
+          }
           case Token::SHL: {
             int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
-            x = NewNumberLiteral(value);
+            x = factory()->NewNumberLiteral(value);
             continue;
           }
           case Token::SHR: {
             uint32_t shift = DoubleToInt32(y_val) & 0x1f;
             uint32_t value = DoubleToUint32(x_val) >> shift;
-            x = NewNumberLiteral(value);
+            x = factory()->NewNumberLiteral(value);
             continue;
           }
           case Token::SAR: {
             uint32_t shift = DoubleToInt32(y_val) & 0x1f;
             int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
-            x = NewNumberLiteral(value);
+            x = factory()->NewNumberLiteral(value);
             continue;
           }
           default:
@@ -2620,15 +3207,15 @@
           case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
           default: break;
         }
-        x = NewCompareNode(cmp, x, y, position);
+        x = factory()->NewCompareOperation(cmp, x, y, position);
         if (cmp != op) {
           // The comparison was negated - add a NOT.
-          x = new(zone()) UnaryOperation(isolate(), Token::NOT, x, position);
+          x = factory()->NewUnaryOperation(Token::NOT, x, position);
         }
 
       } else {
         // We have a "normal" binary operation.
-        x = new(zone()) BinaryOperation(isolate(), op, x, y, position);
+        x = factory()->NewBinaryOperation(op, x, y, position);
       }
     }
   }
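
The folded SHL, SHR, and SAR cases above are only sound because the parser
applies the same ES5 conversions the generated code would: operands pass
through ToInt32/ToUint32 and the shift count is masked to five bits. A
runnable check of those identities; DoubleToInt32 and DoubleToUint32 are
re-derived below from the ES5 definitions, which is an assumption about
V8's helpers of the same names:

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // ES5 ToUint32: truncate toward zero, then wrap modulo 2^32.
    uint32_t DoubleToUint32(double v) {
      if (!std::isfinite(v)) return 0;
      double m = std::fmod(std::trunc(v), 4294967296.0);
      if (m < 0) m += 4294967296.0;
      return static_cast<uint32_t>(m);
    }

    // ES5 ToInt32 is ToUint32 reinterpreted as a signed 32-bit value.
    int32_t DoubleToInt32(double v) {
      return static_cast<int32_t>(DoubleToUint32(v));
    }

    // Assumes arithmetic right shift of negative values, as on V8 targets.
    int32_t ArithmeticShiftRight(int32_t x, uint32_t shift) {
      return x >> shift;
    }

    int main() {
      // SHL: 1 << 33 folds to 2 because the count is masked to 5 bits.
      std::cout << (DoubleToInt32(1) << (DoubleToInt32(33) & 0x1f)) << "\n";
      // SHR: -1 >>> 0 folds to 4294967295 via the unsigned conversion.
      std::cout << (DoubleToUint32(-1) >> (DoubleToInt32(0) & 0x1f)) << "\n";
      // SAR: -8 >> 1 folds to -4, preserving the sign bit.
      std::cout << ArithmeticShiftRight(DoubleToInt32(-8), 1) << "\n";
      return 0;
    }
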
@@ -2636,27 +3223,6 @@
 }
 
 
-Expression* Parser::NewCompareNode(Token::Value op,
-                                   Expression* x,
-                                   Expression* y,
-                                   int position) {
-  ASSERT(op != Token::NE && op != Token::NE_STRICT);
-  if (op == Token::EQ || op == Token::EQ_STRICT) {
-    bool is_strict = (op == Token::EQ_STRICT);
-    Literal* x_literal = x->AsLiteral();
-    if (x_literal != NULL && x_literal->IsNull()) {
-      return new(zone()) CompareToNull(isolate(), is_strict, y);
-    }
-
-    Literal* y_literal = y->AsLiteral();
-    if (y_literal != NULL && y_literal->IsNull()) {
-      return new(zone()) CompareToNull(isolate(), is_strict, x);
-    }
-  }
-  return new(zone()) CompareOperation(isolate(), op, x, y, position);
-}
-
-
 Expression* Parser::ParseUnaryExpression(bool* ok) {
   // UnaryExpression ::
   //   PostfixExpression
@@ -2682,7 +3248,7 @@
         // Convert the literal to a boolean condition and negate it.
         bool condition = literal->ToBoolean()->IsTrue();
         Handle<Object> result(isolate()->heap()->ToBoolean(!condition));
-        return NewLiteral(result);
+        return factory()->NewLiteral(result);
       } else if (literal->IsNumber()) {
         // Compute some expressions involving only number literals.
         double value = literal->Number();
@@ -2690,9 +3256,9 @@
           case Token::ADD:
             return expression;
           case Token::SUB:
-            return NewNumberLiteral(-value);
+            return factory()->NewNumberLiteral(-value);
           case Token::BIT_NOT:
-            return NewNumberLiteral(~DoubleToInt32(value));
+            return factory()->NewNumberLiteral(~DoubleToInt32(value));
           default:
             break;
         }
@@ -2700,7 +3266,7 @@
     }
 
     // "delete identifier" is a syntax error in strict mode.
-    if (op == Token::DELETE && top_scope_->is_strict_mode()) {
+    if (op == Token::DELETE && !top_scope_->is_classic_mode()) {
       VariableProxy* operand = expression->AsVariableProxy();
       if (operand != NULL && !operand->is_this()) {
         ReportMessage("strict_delete", Vector<const char*>::empty());
@@ -2709,7 +3275,7 @@
       }
     }
 
-    return new(zone()) UnaryOperation(isolate(), op, expression, position);
+    return factory()->NewUnaryOperation(op, expression, position);
 
   } else if (Token::IsCountOp(op)) {
     op = Next();
@@ -2724,17 +3290,17 @@
       expression = NewThrowReferenceError(type);
     }
 
-    if (top_scope_->is_strict_mode()) {
+    if (!top_scope_->is_classic_mode()) {
       // Prefix expression operand in strict mode may not be eval or arguments.
       CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
     }
+    MarkAsLValue(expression);
 
     int position = scanner().location().beg_pos;
-    return new(zone()) CountOperation(isolate(),
-                                      op,
-                                      true /* prefix */,
-                                      expression,
-                                      position);
+    return factory()->NewCountOperation(op,
+                                        true /* prefix */,
+                                        expression,
+                                        position);
 
   } else {
     return ParsePostfixExpression(ok);
@@ -2759,19 +3325,19 @@
       expression = NewThrowReferenceError(type);
     }
 
-    if (top_scope_->is_strict_mode()) {
+    if (!top_scope_->is_classic_mode()) {
       // Postfix expression operand in strict mode may not be eval or arguments.
       CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
     }
+    MarkAsLValue(expression);
 
     Token::Value next = Next();
     int position = scanner().location().beg_pos;
     expression =
-        new(zone()) CountOperation(isolate(),
-                                   next,
-                                   false /* postfix */,
-                                   expression,
-                                   position);
+        factory()->NewCountOperation(next,
+                                     false /* postfix */,
+                                     expression,
+                                     position);
   }
   return expression;
 }
@@ -2794,37 +3360,40 @@
         Consume(Token::LBRACK);
         int pos = scanner().location().beg_pos;
         Expression* index = ParseExpression(true, CHECK_OK);
-        result = new(zone()) Property(isolate(), result, index, pos);
+        result = factory()->NewProperty(result, index, pos);
         Expect(Token::RBRACK, CHECK_OK);
         break;
       }
 
       case Token::LPAREN: {
-        int pos = scanner().location().beg_pos;
+        int pos;
+        if (scanner().current_token() == Token::IDENTIFIER) {
+          // For a call of an identifier we want to report the position of
+          // the identifier as the position of the call in the stack trace.
+          pos = scanner().location().beg_pos;
+        } else {
+          // For other kinds of calls we record the position of the parenthesis
+          // as the position of the call.  This is extremely important for
+          // expressions of the form function(){...}(), whose call position
+          // must not point to the closing brace; otherwise it would intersect
+          // with the positions recorded for the function literal and confuse
+          // the debugger.
+          pos = scanner().peek_location().beg_pos;
+        }
         ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
 
         // Keep track of eval() calls since they disable all local variable
         // optimizations.
         // The calls that need special treatment are the
-        // direct (i.e. not aliased) eval calls. These calls are all of the
-        // form eval(...) with no explicit receiver object where eval is not
-        // declared in the current scope chain.
+        // direct eval calls. These calls are all of the form eval(...), with
+        // no explicit receiver.
         // These calls are marked as potentially direct eval calls. Whether
         // they are actually direct calls to eval is determined at run time.
-        // TODO(994): In ES5, it doesn't matter if the "eval" var is declared
-        // in the local scope chain. It only matters that it's called "eval",
-        // is called without a receiver and it refers to the original eval
-        // function.
         VariableProxy* callee = result->AsVariableProxy();
         if (callee != NULL &&
             callee->IsVariable(isolate()->factory()->eval_symbol())) {
-          Handle<String> name = callee->name();
-          Variable* var = top_scope_->Lookup(name);
-          if (var == NULL) {
-            top_scope_->DeclarationScope()->RecordEvalCall();
-          }
+          top_scope_->DeclarationScope()->RecordEvalCall();
         }
-        result = NewCall(result, args, pos);
+        result = factory()->NewCall(result, args, pos);
         break;
       }
 
@@ -2832,10 +3401,8 @@
         Consume(Token::PERIOD);
         int pos = scanner().location().beg_pos;
         Handle<String> name = ParseIdentifierName(CHECK_OK);
-        result = new(zone()) Property(isolate(),
-                                      result,
-                                      NewLiteral(name),
-                                      pos);
+        result =
+            factory()->NewProperty(result, factory()->NewLiteral(name), pos);
         if (fni_ != NULL) fni_->PushLiteralName(name);
         break;
       }
@@ -2871,10 +3438,8 @@
 
   if (!stack->is_empty()) {
     int last = stack->pop();
-    result = new(zone()) CallNew(isolate(),
-                                 result,
-                                 new(zone()) ZoneList<Expression*>(0),
-                                 last);
+    result = factory()->NewCallNew(
+        result, new(zone()) ZoneList<Expression*>(0), last);
   }
   return result;
 }
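
The LPAREN case above chooses the recorded call position in two ways, as
its comment explains; the rule is small enough to state as a function. A
toy sketch with illustrative names only:

    #include <cassert>

    enum Token { IDENTIFIER, RBRACE, OTHER };

    struct Scanner {
      Token current;        // token immediately before the '('
      int current_beg_pos;  // where that token starts
      int peek_beg_pos;     // where the '(' starts
    };

    int CallPosition(const Scanner& s) {
      // A call through an identifier reports the identifier: for f(x)
      // the stack trace should point at f.
      if (s.current == IDENTIFIER) return s.current_beg_pos;
      // Anything else reports the '(' so that function(){...}() gets a
      // position outside the function literal's own source range.
      return s.peek_beg_pos;
    }

    int main() {
      Scanner named = {IDENTIFIER, 10, 11};  // f(
      Scanner iife = {RBRACE, 15, 16};       // function(){...}(
      assert(CallPosition(named) == 10);
      assert(CallPosition(iife) == 16);
      return 0;
    }
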
@@ -2926,7 +3491,7 @@
         Consume(Token::LBRACK);
         int pos = scanner().location().beg_pos;
         Expression* index = ParseExpression(true, CHECK_OK);
-        result = new(zone()) Property(isolate(), result, index, pos);
+        result = factory()->NewProperty(result, index, pos);
         if (fni_ != NULL) {
           if (index->IsPropertyName()) {
             fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
@@ -2942,10 +3507,8 @@
         Consume(Token::PERIOD);
         int pos = scanner().location().beg_pos;
         Handle<String> name = ParseIdentifierName(CHECK_OK);
-        result = new(zone()) Property(isolate(),
-                                      result,
-                                      NewLiteral(name),
-                                      pos);
+        result =
+            factory()->NewProperty(result, factory()->NewLiteral(name), pos);
         if (fni_ != NULL) fni_->PushLiteralName(name);
         break;
       }
@@ -2954,7 +3517,7 @@
         // Consume one of the new prefixes (already parsed).
         ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
         int last = stack->pop();
-        result = new(zone()) CallNew(isolate(), result, args, last);
+        result = factory()->NewCallNew(result, args, last);
         break;
       }
       default:
@@ -2973,7 +3536,7 @@
 
   Expect(Token::DEBUGGER, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
-  return new(zone()) DebuggerStatement();
+  return factory()->NewDebuggerStatement();
 }
 
 
@@ -2999,9 +3562,9 @@
       return ReportMessage("unexpected_reserved",
                            Vector<const char*>::empty());
     case Token::FUTURE_STRICT_RESERVED_WORD:
-      return ReportMessage(top_scope_->is_strict_mode() ?
-                               "unexpected_strict_reserved" :
-                               "unexpected_token_identifier",
+      return ReportMessage(top_scope_->is_classic_mode() ?
+                               "unexpected_token_identifier" :
+                               "unexpected_strict_reserved",
                            Vector<const char*>::empty());
     default:
       const char* name = Token::String(token);
@@ -3038,35 +3601,37 @@
   switch (peek()) {
     case Token::THIS: {
       Consume(Token::THIS);
-      result = new(zone()) VariableProxy(isolate(), top_scope_->receiver());
+      result = factory()->NewVariableProxy(top_scope_->receiver());
       break;
     }
 
     case Token::NULL_LITERAL:
       Consume(Token::NULL_LITERAL);
-      result = new(zone()) Literal(
-          isolate(), isolate()->factory()->null_value());
+      result = factory()->NewLiteral(isolate()->factory()->null_value());
       break;
 
     case Token::TRUE_LITERAL:
       Consume(Token::TRUE_LITERAL);
-      result = new(zone()) Literal(
-          isolate(), isolate()->factory()->true_value());
+      result = factory()->NewLiteral(isolate()->factory()->true_value());
       break;
 
     case Token::FALSE_LITERAL:
       Consume(Token::FALSE_LITERAL);
-      result = new(zone()) Literal(
-          isolate(), isolate()->factory()->false_value());
+      result = factory()->NewLiteral(isolate()->factory()->false_value());
       break;
 
     case Token::IDENTIFIER:
     case Token::FUTURE_STRICT_RESERVED_WORD: {
       Handle<String> name = ParseIdentifier(CHECK_OK);
       if (fni_ != NULL) fni_->PushVariableName(name);
-      result = top_scope_->NewUnresolved(name,
-                                         inside_with(),
-                                         scanner().location().beg_pos);
+      // The name may refer to a module instance object, so its type is unknown.
+#ifdef DEBUG
+      if (FLAG_print_interface_details)
+        PrintF("# Variable %s ", name->ToAsciiArray());
+#endif
+      Interface* interface = Interface::NewUnknown();
+      result = top_scope_->NewUnresolved(
+          factory(), name, scanner().location().beg_pos, interface);
       break;
     }
 
@@ -3076,14 +3641,14 @@
       double value = StringToDouble(isolate()->unicode_cache(),
                                     scanner().literal_ascii_string(),
                                     ALLOW_HEX | ALLOW_OCTALS);
-      result = NewNumberLiteral(value);
+      result = factory()->NewNumberLiteral(value);
       break;
     }
 
     case Token::STRING: {
       Consume(Token::STRING);
       Handle<String> symbol = GetSymbol(CHECK_OK);
-      result = NewLiteral(symbol);
+      result = factory()->NewLiteral(symbol);
       if (fni_ != NULL) fni_->PushLiteralName(symbol);
       break;
     }
@@ -3181,11 +3746,14 @@
   Expect(Token::RBRACK, CHECK_OK);
 
   // Update the scope information before the pre-parsing bailout.
-  int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
+  int literal_index = current_function_state_->NextMaterializedLiteralIndex();
 
-  // Allocate a fixed array with all the literals.
-  Handle<FixedArray> literals =
+  // Allocate a fixed array to hold all the object literals.
+  Handle<FixedArray> object_literals =
       isolate()->factory()->NewFixedArray(values->length(), TENURED);
+  Handle<FixedDoubleArray> double_literals;
+  ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
+  bool has_only_undefined_values = true;
 
   // Fill in the literals.
   bool is_simple = true;
@@ -3197,21 +3765,85 @@
     }
     Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
     if (boilerplate_value->IsUndefined()) {
-      literals->set_the_hole(i);
+      object_literals->set_the_hole(i);
+      if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+        double_literals->set_the_hole(i);
+      }
       is_simple = false;
     } else {
-      literals->set(i, *boilerplate_value);
+      // Examine each literal element, and adjust the ElementsKind if the
+      // literal element is not of a type that can be stored in the current
+      // ElementsKind.  Start with FAST_SMI_ONLY_ELEMENTS, and transition to
+      // FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary.  Always remember
+      // the tagged value, no matter what the ElementsKind is, in case we
+      // ultimately end up in FAST_ELEMENTS.
+      has_only_undefined_values = false;
+      object_literals->set(i, *boilerplate_value);
+      if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+        // Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
+        // FAST_ELEMENTS is required.
+        if (!boilerplate_value->IsSmi()) {
+          if (boilerplate_value->IsNumber() && FLAG_smi_only_arrays) {
+            // Allocate a double array on the FAST_DOUBLE_ELEMENTS transition to
+            // avoid over-allocating in TENURED space.
+            double_literals = isolate()->factory()->NewFixedDoubleArray(
+                values->length(), TENURED);
+            // Copy the contents of the FAST_SMI_ONLY_ELEMENTS array to the
+            // FAST_DOUBLE_ELEMENTS array so that they are in sync.
+            for (int j = 0; j < i; ++j) {
+              Object* smi_value = object_literals->get(j);
+              if (smi_value->IsTheHole()) {
+                double_literals->set_the_hole(j);
+              } else {
+                double_literals->set(j, Smi::cast(smi_value)->value());
+              }
+            }
+            double_literals->set(i, boilerplate_value->Number());
+            elements_kind = FAST_DOUBLE_ELEMENTS;
+          } else {
+            elements_kind = FAST_ELEMENTS;
+          }
+        }
+      } else if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+        // Continue to store double values into FAST_DOUBLE_ELEMENTS arrays
+        // until the first value is seen that can't be stored as a double.
+        if (boilerplate_value->IsNumber()) {
+          double_literals->set(i, boilerplate_value->Number());
+        } else {
+          elements_kind = FAST_ELEMENTS;
+        }
+      }
     }
   }
 
+  // Very small array literals that don't have a concrete hint about their type
+  // from a constant value should default to the slow case to avoid lots of
+  // elements transitions on really small objects.
+  if (has_only_undefined_values && values->length() <= 2) {
+    elements_kind = FAST_ELEMENTS;
+  }
+
   // Simple and shallow arrays can be lazily copied, so we transform the
   // elements array to a copy-on-write array.
-  if (is_simple && depth == 1 && values->length() > 0) {
-    literals->set_map(isolate()->heap()->fixed_cow_array_map());
+  if (is_simple && depth == 1 && values->length() > 0 &&
+      elements_kind != FAST_DOUBLE_ELEMENTS) {
+    object_literals->set_map(isolate()->heap()->fixed_cow_array_map());
   }
 
-  return new(zone()) ArrayLiteral(
-      isolate(), literals, values, literal_index, is_simple, depth);
+  Handle<FixedArrayBase> element_values = elements_kind == FAST_DOUBLE_ELEMENTS
+      ? Handle<FixedArrayBase>(double_literals)
+      : Handle<FixedArrayBase>(object_literals);
+
+  // Remember both the literal's constant values and the ElementsKind
+  // in a 2-element FixedArray.
+  Handle<FixedArray> literals =
+      isolate()->factory()->NewFixedArray(2, TENURED);
+
+  literals->set(0, Smi::FromInt(elements_kind));
+  literals->set(1, *element_values);
+
+  return factory()->NewArrayLiteral(
+      literals, values, literal_index, is_simple, depth);
 }
 
 
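
The rewritten array-literal code computes the most specific ElementsKind
the constant values permit, moving only downward from
FAST_SMI_ONLY_ELEMENTS through FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS, and
then stores the final kind alongside the constants in the two-element
literals array. A sketch of that one-way lattice (holes, the small-literal
heuristic, and the copy-on-write case are deliberately left out):

    #include <cassert>

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS,  // only small integers seen so far
      FAST_DOUBLE_ELEMENTS,    // all numbers, but not all smis
      FAST_ELEMENTS            // at least one non-number value
    };

    // Each literal element can move the kind down the lattice, never up.
    ElementsKind Transition(ElementsKind kind, bool is_smi, bool is_number) {
      if (kind == FAST_SMI_ONLY_ELEMENTS && !is_smi)
        return is_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      if (kind == FAST_DOUBLE_ELEMENTS && !is_number)
        return FAST_ELEMENTS;
      return kind;
    }

    int main() {
      // [1, 2.5, "x"]: smi-only, then double, then generic elements.
      ElementsKind k = FAST_SMI_ONLY_ELEMENTS;
      k = Transition(k, true, true);    // 1
      assert(k == FAST_SMI_ONLY_ELEMENTS);
      k = Transition(k, false, true);   // 2.5
      assert(k == FAST_DOUBLE_ELEMENTS);
      k = Transition(k, false, false);  // "x"
      assert(k == FAST_ELEMENTS);
      return 0;
    }
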
@@ -3283,19 +3915,13 @@
   return isolate()->factory()->undefined_value();
 }
 
-// Defined in ast.cc
-bool IsEqualString(void* first, void* second);
-bool IsEqualNumber(void* first, void* second);
-
-
 // Validation per 11.1.5 Object Initialiser
 class ObjectLiteralPropertyChecker {
  public:
-  ObjectLiteralPropertyChecker(Parser* parser, bool strict) :
-    props(&IsEqualString),
-    elems(&IsEqualNumber),
+  ObjectLiteralPropertyChecker(Parser* parser, LanguageMode language_mode) :
+    props_(Literal::Match),
     parser_(parser),
-    strict_(strict) {
+    language_mode_(language_mode) {
   }
 
   void CheckProperty(
@@ -3322,10 +3948,9 @@
     }
   }
 
-  HashMap props;
-  HashMap elems;
+  HashMap props_;
   Parser* parser_;
-  bool strict_;
+  LanguageMode language_mode_;
 };
 
 
@@ -3333,49 +3958,14 @@
     ObjectLiteral::Property* property,
     Scanner::Location loc,
     bool* ok) {
-
   ASSERT(property != NULL);
-
-  Literal *lit = property->key();
-  Handle<Object> handle = lit->handle();
-
-  uint32_t hash;
-  HashMap* map;
-  void* key;
-
-  if (handle->IsSymbol()) {
-    Handle<String> name(String::cast(*handle));
-    if (name->AsArrayIndex(&hash)) {
-      Handle<Object> key_handle = FACTORY->NewNumberFromUint(hash);
-      key = key_handle.location();
-      map = &elems;
-    } else {
-      key = handle.location();
-      hash = name->Hash();
-      map = &props;
-    }
-  } else if (handle->ToArrayIndex(&hash)) {
-    key = handle.location();
-    map = &elems;
-  } else {
-    ASSERT(handle->IsNumber());
-    double num = handle->Number();
-    char arr[100];
-    Vector<char> buffer(arr, ARRAY_SIZE(arr));
-    const char* str = DoubleToCString(num, buffer);
-    Handle<String> name = FACTORY->NewStringFromAscii(CStrVector(str));
-    key = name.location();
-    hash = name->Hash();
-    map = &props;
-  }
-
-  // Lookup property previously defined, if any.
-  HashMap::Entry* entry = map->Lookup(key, hash, true);
+  Literal* literal = property->key();
+  HashMap::Entry* entry = props_.Lookup(literal, literal->Hash(), true);
   intptr_t prev = reinterpret_cast<intptr_t> (entry->value);
   intptr_t curr = GetPropertyKind(property);
 
-  // Duplicate data properties are illegal in strict mode.
-  if (strict_ && (curr & prev & kData) != 0) {
+  // Duplicate data properties are illegal in strict or extended mode.
+  if (language_mode_ != CLASSIC_MODE && (curr & prev & kData) != 0) {
     parser_->ReportMessageAt(loc, "strict_duplicate_property",
                              Vector<const char*>::empty());
     *ok = false;
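
The simplified CheckProperty above folds every occurrence of a property
name into a single hash-map entry whose value is a bitmask of the kinds
seen so far, so a strict-mode duplicate is just two kData bits colliding.
A self-contained sketch of the same accumulation; the bit names and values
are assumptions mirroring the kData test above:

    #include <cassert>
    #include <string>
    #include <unordered_map>

    enum PropertyKind { kData = 1, kGetter = 2, kSetter = 4 };

    class DuplicateChecker {
     public:
      // Returns false when a data property is redefined in strict (or
      // extended) mode; accessors of different kinds may share a name.
      bool Check(const std::string& name, int kind, bool strict) {
        int& seen = seen_[name];  // zero-initialized on first lookup
        bool ok = !(strict && (seen & kind & kData) != 0);
        seen |= kind;
        return ok;
      }

     private:
      std::unordered_map<std::string, int> seen_;
    };

    int main() {
      DuplicateChecker checker;
      assert(checker.Check("a", kData, true));    // { a: 1,
      assert(!checker.Check("a", kData, true));   //   a: 2 }  rejected
      assert(checker.Check("b", kGetter, true));  // get b() {},
      assert(checker.Check("b", kSetter, true));  // set b(v) {}  fine
      return 0;
    }
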
@@ -3486,11 +4076,9 @@
                              RelocInfo::kNoPosition,
                              FunctionLiteral::ANONYMOUS_EXPRESSION,
                              CHECK_OK);
-    // Allow any number of parameters for compatiabilty with JSC.
+    // Allow any number of parameters for compatibility with JSC.
     // Specification only allows zero parameters for get and one for set.
-    ObjectLiteral::Property* property =
-        new(zone()) ObjectLiteral::Property(is_getter, value);
-    return property;
+    return factory()->NewObjectLiteralProperty(is_getter, value);
   } else {
     ReportUnexpectedToken(next);
     *ok = false;
@@ -3511,7 +4099,7 @@
   int number_of_boilerplate_properties = 0;
   bool has_function = false;
 
-  ObjectLiteralPropertyChecker checker(this, top_scope_->is_strict_mode());
+  ObjectLiteralPropertyChecker checker(this, top_scope_->language_mode());
 
   Expect(Token::LBRACE, CHECK_OK);
 
@@ -3555,7 +4143,7 @@
         }
         // Failed to parse as get/set property, so it's just a property
         // called "get" or "set".
-        key = NewLiteral(id);
+        key = factory()->NewLiteral(id);
         break;
       }
       case Token::STRING: {
@@ -3564,10 +4152,10 @@
         if (fni_ != NULL) fni_->PushLiteralName(string);
         uint32_t index;
         if (!string.is_null() && string->AsArrayIndex(&index)) {
-          key = NewNumberLiteral(index);
+          key = factory()->NewNumberLiteral(index);
           break;
         }
-        key = NewLiteral(string);
+        key = factory()->NewLiteral(string);
         break;
       }
       case Token::NUMBER: {
@@ -3576,14 +4164,14 @@
         double value = StringToDouble(isolate()->unicode_cache(),
                                       scanner().literal_ascii_string(),
                                       ALLOW_HEX | ALLOW_OCTALS);
-        key = NewNumberLiteral(value);
+        key = factory()->NewNumberLiteral(value);
         break;
       }
       default:
         if (Token::IsKeyword(next)) {
           Consume(next);
           Handle<String> string = GetSymbol(CHECK_OK);
-          key = NewLiteral(string);
+          key = factory()->NewLiteral(string);
         } else {
           // Unexpected token.
           Token::Value next = Next();
@@ -3597,13 +4185,15 @@
     Expression* value = ParseAssignmentExpression(true, CHECK_OK);
 
     ObjectLiteral::Property* property =
-        new(zone()) ObjectLiteral::Property(key, value);
+        new(zone()) ObjectLiteral::Property(key, value, isolate());
 
-    // Mark object literals that contain function literals and pretenure the
-    // literal so it can be added as a constant function property.
-    if (value->AsFunctionLiteral() != NULL) {
+    // Mark top-level object literals that contain function literals and
+    // pretenure the literal so it can be added as a constant function
+    // property.
+    if (top_scope_->DeclarationScope()->is_global_scope() &&
+        value->AsFunctionLiteral() != NULL) {
       has_function = true;
-      value->AsFunctionLiteral()->set_pretenure(true);
+      value->AsFunctionLiteral()->set_pretenure();
     }
 
     // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
@@ -3623,7 +4213,7 @@
   Expect(Token::RBRACE, CHECK_OK);
 
   // Computation of literal_index must happen before the pre-parse bailout.
-  int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
+  int literal_index = current_function_state_->NextMaterializedLiteralIndex();
 
   Handle<FixedArray> constant_properties = isolate()->factory()->NewFixedArray(
       number_of_boilerplate_properties * 2, TENURED);
@@ -3636,14 +4226,13 @@
                                        &is_simple,
                                        &fast_elements,
                                        &depth);
-  return new(zone()) ObjectLiteral(isolate(),
-                                   constant_properties,
-                                   properties,
-                                   literal_index,
-                                   is_simple,
-                                   fast_elements,
-                                   depth,
-                                   has_function);
+  return factory()->NewObjectLiteral(constant_properties,
+                                     properties,
+                                     literal_index,
+                                     is_simple,
+                                     fast_elements,
+                                     depth,
+                                     has_function);
 }
 
 
@@ -3655,15 +4244,14 @@
     return NULL;
   }
 
-  int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
+  int literal_index = current_function_state_->NextMaterializedLiteralIndex();
 
   Handle<String> js_pattern = NextLiteralString(TENURED);
   scanner().ScanRegExpFlags();
   Handle<String> js_flags = NextLiteralString(TENURED);
   Next();
 
-  return new(zone()) RegExpLiteral(
-      isolate(), js_pattern, js_flags, literal_index);
+  return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index);
 }
 
 
@@ -3691,6 +4279,98 @@
 }
 
 
+class SingletonLogger : public ParserRecorder {
+ public:
+  SingletonLogger() : has_error_(false), start_(-1), end_(-1) { }
+  virtual ~SingletonLogger() { }
+
+  void Reset() { has_error_ = false; }
+
+  virtual void LogFunction(int start,
+                           int end,
+                           int literals,
+                           int properties,
+                           LanguageMode mode) {
+    ASSERT(!has_error_);
+    start_ = start;
+    end_ = end;
+    literals_ = literals;
+    properties_ = properties;
+    mode_ = mode;
+  }
+
+  // Logs the creation of a symbol for a literal or identifier.
+  virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
+  virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
+
+  // Logs an error message and marks the log as containing an error.
+  // Further logging will be ignored, and ExtractData will return a vector
+  // representing the error only.
+  virtual void LogMessage(int start,
+                          int end,
+                          const char* message,
+                          const char* argument_opt) {
+    has_error_ = true;
+    start_ = start;
+    end_ = end;
+    message_ = message;
+    argument_opt_ = argument_opt;
+  }
+
+  virtual int function_position() { return 0; }
+
+  virtual int symbol_position() { return 0; }
+
+  virtual int symbol_ids() { return -1; }
+
+  virtual Vector<unsigned> ExtractData() {
+    UNREACHABLE();
+    return Vector<unsigned>();
+  }
+
+  virtual void PauseRecording() { }
+
+  virtual void ResumeRecording() { }
+
+  bool has_error() { return has_error_; }
+
+  int start() { return start_; }
+  int end() { return end_; }
+  int literals() {
+    ASSERT(!has_error_);
+    return literals_;
+  }
+  int properties() {
+    ASSERT(!has_error_);
+    return properties_;
+  }
+  LanguageMode language_mode() {
+    ASSERT(!has_error_);
+    return mode_;
+  }
+  const char* message() {
+    ASSERT(has_error_);
+    return message_;
+  }
+  const char* argument_opt() {
+    ASSERT(has_error_);
+    return argument_opt_;
+  }
+
+ private:
+  bool has_error_;
+  int start_;
+  int end_;
+  // For function entries.
+  int literals_;
+  int properties_;
+  LanguageMode mode_;
+  // For error messages.
+  const char* message_;
+  const char* argument_opt_;
+};
+
+
 FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
                                               bool name_is_strict_reserved,
                                               int function_token_position,
@@ -3713,26 +4393,26 @@
   // Function declarations are function scoped in normal mode, so they are
   // hoisted. In harmony block scoping mode they are block scoped, so they
   // are not hoisted.
-  Scope* scope = (type == FunctionLiteral::DECLARATION &&
-                  !harmony_block_scoping_)
-      ? NewScope(top_scope_->DeclarationScope(), Scope::FUNCTION_SCOPE, false)
-      : NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
-  ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8);
-  int materialized_literal_count;
-  int expected_property_count;
-  int start_pos;
-  int end_pos;
+  Scope* scope = (type == FunctionLiteral::DECLARATION && !is_extended_mode())
+      ? NewScope(top_scope_->DeclarationScope(), FUNCTION_SCOPE)
+      : NewScope(top_scope_, FUNCTION_SCOPE);
+  ZoneList<Statement*>* body = NULL;
+  int materialized_literal_count = -1;
+  int expected_property_count = -1;
+  int handler_count = 0;
   bool only_simple_this_property_assignments;
   Handle<FixedArray> this_property_assignments;
-  bool has_duplicate_parameters = false;
+  FunctionLiteral::ParameterFlag duplicate_parameters =
+      FunctionLiteral::kNoDuplicateParameters;
+  AstProperties ast_properties;
   // Parse function body.
-  { LexicalScope lexical_scope(this, scope, isolate());
+  { FunctionState function_state(this, scope, isolate());
     top_scope_->SetScopeName(function_name);
 
     //  FormalParameterList ::
     //    '(' (Identifier)*[','] ')'
     Expect(Token::LPAREN, CHECK_OK);
-    start_pos = scanner().location().beg_pos;
+    scope->set_start_position(scanner().location().beg_pos);
     Scanner::Location name_loc = Scanner::Location::invalid();
     Scanner::Location dupe_loc = Scanner::Location::invalid();
     Scanner::Location reserved_loc = Scanner::Location::invalid();
@@ -3749,17 +4429,14 @@
         name_loc = scanner().location();
       }
       if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
-        has_duplicate_parameters = true;
+        duplicate_parameters = FunctionLiteral::kHasDuplicateParameters;
         dupe_loc = scanner().location();
       }
       if (!reserved_loc.IsValid() && is_strict_reserved) {
         reserved_loc = scanner().location();
       }
 
-      top_scope_->DeclareParameter(param_name,
-                                   harmony_block_scoping_
-                                   ? Variable::LET
-                                   : Variable::VAR);
+      top_scope_->DeclareParameter(param_name, VAR);
       num_parameters++;
       if (num_parameters > kMaxNumFunctionParameters) {
         ReportMessageAt(scanner().location(), "too_many_parameters",
@@ -3780,71 +4457,130 @@
     // NOTE: We create a proxy and resolve it here so that in the
     // future we can change the AST to only refer to VariableProxies
     // instead of Variables and Proxies as is the case now.
+    Variable* fvar = NULL;
+    Token::Value fvar_init_op = Token::INIT_CONST;
     if (type == FunctionLiteral::NAMED_EXPRESSION) {
-      Variable* fvar = top_scope_->DeclareFunctionVar(function_name);
-      VariableProxy* fproxy =
-          top_scope_->NewUnresolved(function_name, inside_with());
-      fproxy->BindTo(fvar);
-      body->Add(new(zone()) ExpressionStatement(
-          new(zone()) Assignment(isolate(),
-                                 Token::INIT_CONST,
-                                 fproxy,
-                                 new(zone()) ThisFunction(isolate()),
-                                 RelocInfo::kNoPosition)));
+      VariableMode fvar_mode;
+      if (is_extended_mode()) {
+        fvar_mode = CONST_HARMONY;
+        fvar_init_op = Token::INIT_CONST_HARMONY;
+      } else {
+        fvar_mode = CONST;
+      }
+      fvar =
+          top_scope_->DeclareFunctionVar(function_name, fvar_mode, factory());
     }
 
-    // Determine if the function will be lazily compiled. The mode can only
-    // be PARSE_LAZILY if the --lazy flag is true.  We will not lazily
-    // compile if we do not have preparser data for the function.
+    // Determine whether the function will be lazily compiled.
+    // The heuristics are:
+    // - The caller of Parse must not have prohibited lazy compilation (some
+    //   callers need a full AST).
+    // - The outer scope must be trivial (only global variables in scope).
+    // - The function mustn't be a function expression with an open parenthesis
+    //   before; we consider that a hint that the function will be called
+    //   immediately, and it would be a waste of time to make it lazily
+    //   compiled.
+    // These are all things we can know at this point, without looking at the
+    // function itself.
     bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
                                top_scope_->outer_scope()->is_global_scope() &&
                                top_scope_->HasTrivialOuterContext() &&
-                               !parenthesized_function_ &&
-                               pre_data() != NULL);
+                               !parenthesized_function_);
     parenthesized_function_ = false;  // The bit was set for this function only.
 
     if (is_lazily_compiled) {
       int function_block_pos = scanner().location().beg_pos;
-      FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
-      if (!entry.is_valid()) {
-        // There is no preparser data for the function, we will not lazily
-        // compile after all.
-        is_lazily_compiled = false;
-      } else {
-        end_pos = entry.end_pos();
-        if (end_pos <= function_block_pos) {
-          // End position greater than end of stream is safe, and hard to check.
-          ReportInvalidPreparseData(function_name, CHECK_OK);
+      FunctionEntry entry;
+      if (pre_data_ != NULL) {
+        // If we have pre_data_, we use it to skip parsing the function body.
+        // The preparser data contains the information we need to construct
+        // the lazy function.
+        entry = pre_data()->GetFunctionEntry(function_block_pos);
+        if (entry.is_valid()) {
+          if (entry.end_pos() <= function_block_pos) {
+            // End position greater than end of stream is safe, and hard
+            // to check.
+            ReportInvalidPreparseData(function_name, CHECK_OK);
+          }
+          scanner().SeekForward(entry.end_pos() - 1);
+
+          scope->set_end_position(entry.end_pos());
+          Expect(Token::RBRACE, CHECK_OK);
+          isolate()->counters()->total_preparse_skipped()->Increment(
+              scope->end_position() - function_block_pos);
+          materialized_literal_count = entry.literal_count();
+          expected_property_count = entry.property_count();
+          top_scope_->SetLanguageMode(entry.language_mode());
+          only_simple_this_property_assignments = false;
+          this_property_assignments = isolate()->factory()->empty_fixed_array();
+        } else {
+          is_lazily_compiled = false;
         }
+      } else {
+        // With no preparser data, we partially parse the function, without
+        // building an AST. This gathers the data needed to build a lazy
+        // function.
+        SingletonLogger logger;
+        preparser::PreParser::PreParseResult result =
+            LazyParseFunctionLiteral(&logger);
+        if (result == preparser::PreParser::kPreParseStackOverflow) {
+          // Propagate stack overflow.
+          stack_overflow_ = true;
+          *ok = false;
+          return NULL;
+        }
+        if (logger.has_error()) {
+          const char* arg = logger.argument_opt();
+          Vector<const char*> args;
+          if (arg != NULL) {
+            args = Vector<const char*>(&arg, 1);
+          }
+          ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
+                          logger.message(), args);
+          *ok = false;
+          return NULL;
+        }
+        scope->set_end_position(logger.end());
+        Expect(Token::RBRACE, CHECK_OK);
         isolate()->counters()->total_preparse_skipped()->Increment(
-            end_pos - function_block_pos);
-        // Seek to position just before terminal '}'.
-        scanner().SeekForward(end_pos - 1);
-        materialized_literal_count = entry.literal_count();
-        expected_property_count = entry.property_count();
-        if (entry.strict_mode()) top_scope_->EnableStrictMode();
+            scope->end_position() - function_block_pos);
+        materialized_literal_count = logger.literals();
+        expected_property_count = logger.properties();
+        top_scope_->SetLanguageMode(logger.language_mode());
         only_simple_this_property_assignments = false;
         this_property_assignments = isolate()->factory()->empty_fixed_array();
-        Expect(Token::RBRACE, CHECK_OK);
       }
     }
 
     if (!is_lazily_compiled) {
-      ParseSourceElements(body, Token::RBRACE, CHECK_OK);
+      body = new(zone()) ZoneList<Statement*>(8);
+      if (fvar != NULL) {
+        VariableProxy* fproxy =
+            top_scope_->NewUnresolved(factory(), function_name);
+        fproxy->BindTo(fvar);
+        body->Add(factory()->NewExpressionStatement(
+            factory()->NewAssignment(fvar_init_op,
+                                     fproxy,
+                                     factory()->NewThisFunction(),
+                                     RelocInfo::kNoPosition)));
+      }
+      ParseSourceElements(body, Token::RBRACE, false, CHECK_OK);
 
-      materialized_literal_count = lexical_scope.materialized_literal_count();
-      expected_property_count = lexical_scope.expected_property_count();
+      materialized_literal_count = function_state.materialized_literal_count();
+      expected_property_count = function_state.expected_property_count();
+      handler_count = function_state.handler_count();
       only_simple_this_property_assignments =
-          lexical_scope.only_simple_this_property_assignments();
-      this_property_assignments = lexical_scope.this_property_assignments();
+          function_state.only_simple_this_property_assignments();
+      this_property_assignments = function_state.this_property_assignments();
 
       Expect(Token::RBRACE, CHECK_OK);
-      end_pos = scanner().location().end_pos;
+      scope->set_end_position(scanner().location().end_pos);
     }
 
     // Validate strict mode.
-    if (top_scope_->is_strict_mode()) {
+    if (!top_scope_->is_classic_mode()) {
       if (IsEvalOrArguments(function_name)) {
+        int start_pos = scope->start_position();
         int position = function_token_position != RelocInfo::kNoPosition
             ? function_token_position
             : (start_pos > 0 ? start_pos - 1 : start_pos);
@@ -3867,6 +4603,7 @@
         return NULL;
       }
       if (name_is_strict_reserved) {
+        int start_pos = scope->start_position();
         int position = function_token_position != RelocInfo::kNoPosition
             ? function_token_position
             : (start_pos > 0 ? start_pos - 1 : start_pos);
@@ -3882,35 +4619,60 @@
         *ok = false;
         return NULL;
       }
-      CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
+      CheckOctalLiteral(scope->start_position(),
+                        scope->end_position(),
+                        CHECK_OK);
     }
+    ast_properties = *factory()->visitor()->ast_properties();
   }
 
-  if (harmony_block_scoping_) {
+  if (is_extended_mode()) {
     CheckConflictingVarDeclarations(scope, CHECK_OK);
   }
 
   FunctionLiteral* function_literal =
-      new(zone()) FunctionLiteral(isolate(),
-                                  function_name,
-                                  scope,
-                                  body,
-                                  materialized_literal_count,
-                                  expected_property_count,
-                                  only_simple_this_property_assignments,
-                                  this_property_assignments,
-                                  num_parameters,
-                                  start_pos,
-                                  end_pos,
-                                  type,
-                                  has_duplicate_parameters);
+      factory()->NewFunctionLiteral(function_name,
+                                    scope,
+                                    body,
+                                    materialized_literal_count,
+                                    expected_property_count,
+                                    handler_count,
+                                    only_simple_this_property_assignments,
+                                    this_property_assignments,
+                                    num_parameters,
+                                    duplicate_parameters,
+                                    type,
+                                    FunctionLiteral::kIsFunction);
   function_literal->set_function_token_position(function_token_position);
+  function_literal->set_ast_properties(&ast_properties);
 
   if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
   return function_literal;
 }
 
 
+preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
+    SingletonLogger* logger) {
+  HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
+  ASSERT_EQ(Token::LBRACE, scanner().current_token());
+
+  if (reusable_preparser_ == NULL) {
+    intptr_t stack_limit = isolate()->stack_guard()->real_climit();
+    bool do_allow_lazy = true;
+    reusable_preparser_ = new preparser::PreParser(&scanner_,
+                                                   NULL,
+                                                   stack_limit,
+                                                   do_allow_lazy,
+                                                   allow_natives_syntax_,
+                                                   allow_modules_);
+  }
+  preparser::PreParser::PreParseResult result =
+      reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
+                                                logger);
+  return result;
+}
+
+
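[The LazyParseFunctionLiteral helper above builds the preparser on first use and
caches it in reusable_preparser_; the Parser destructor in the parser.h hunk
further down deletes it. A minimal sketch of this create-once-and-reuse
pattern, with illustrative names rather than V8's:

    #include <cstddef>
    #include <cstdio>

    struct Helper {
      Helper() : uses(0) {}
      int uses;
    };

    class Owner {
     public:
      Owner() : helper_(NULL) {}
      ~Owner() { delete helper_; }  // owner frees the cached instance
      Helper* GetHelper() {
        if (helper_ == NULL) helper_ = new Helper();  // built on first request
        return helper_;  // the same instance on every later call
      }
     private:
      Helper* helper_;
    };

    int main() {
      Owner owner;
      owner.GetHelper()->uses++;
      owner.GetHelper()->uses++;
      std::printf("%d\n", owner.GetHelper()->uses);  // prints 2
    }
]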
 Expression* Parser::ParseV8Intrinsic(bool* ok) {
   // CallRuntime ::
   //   '%' Identifier Arguments
@@ -3953,7 +4715,7 @@
   }
 
   // We have a valid intrinsics call or a call to a builtin.
-  return new(zone()) CallRuntime(isolate(), name, function, args);
+  return factory()->NewCallRuntime(name, function, args);
 }
 
 
@@ -4008,25 +4770,32 @@
 }
 
 
+void Parser::ExpectContextualKeyword(const char* keyword, bool* ok) {
+  Expect(Token::IDENTIFIER, ok);
+  if (!*ok) return;
+  Handle<String> symbol = GetSymbol(ok);
+  if (!*ok) return;
+  if (!symbol->IsEqualTo(CStrVector(keyword))) {
+    *ok = false;
+    ReportUnexpectedToken(scanner().current_token());
+  }
+}
+
+
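[ExpectContextualKeyword above matches a plain IDENTIFIER token against an
expected spelling, which is how soft keywords can be recognized without
reserving new tokens in the scanner. A self-contained sketch of the check
itself, assuming nothing about V8's scanner:

    #include <cstdio>
    #include <cstring>

    // A contextual keyword is just an identifier with a fixed spelling; a
    // mismatch leaves the word usable as an ordinary identifier elsewhere.
    static bool MatchesContextualKeyword(const char* identifier,
                                         const char* keyword) {
      return std::strcmp(identifier, keyword) == 0;
    }

    int main() {
      std::printf("%d\n", MatchesContextualKeyword("module", "module"));  // 1
      std::printf("%d\n", MatchesContextualKeyword("import", "module"));  // 0
    }
]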
 Literal* Parser::GetLiteralUndefined() {
-  return NewLiteral(isolate()->factory()->undefined_value());
+  return factory()->NewLiteral(isolate()->factory()->undefined_value());
 }
 
 
 Literal* Parser::GetLiteralTheHole() {
-  return NewLiteral(isolate()->factory()->the_hole_value());
-}
-
-
-Literal* Parser::GetLiteralNumber(double value) {
-  return NewNumberLiteral(value);
+  return factory()->NewLiteral(isolate()->factory()->the_hole_value());
 }
 
 
 // Parses an identifier that is valid for the current scope; in particular it
 // fails on strict-mode future reserved keywords in a strict scope.
 Handle<String> Parser::ParseIdentifier(bool* ok) {
-  if (top_scope_->is_strict_mode()) {
+  if (!top_scope_->is_classic_mode()) {
     Expect(Token::IDENTIFIER, ok);
   } else if (!Check(Token::IDENTIFIER)) {
     Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
@@ -4064,12 +4833,21 @@
 }
 
 
+void Parser::MarkAsLValue(Expression* expression) {
+  VariableProxy* proxy = expression != NULL
+      ? expression->AsVariableProxy()
+      : NULL;
+
+  if (proxy != NULL) proxy->MarkAsLValue();
+}
+
+
 // Checks LHS expression for assignment and prefix/postfix increment/decrement
 // in strict mode.
 void Parser::CheckStrictModeLValue(Expression* expression,
                                    const char* error,
                                    bool* ok) {
-  ASSERT(top_scope_->is_strict_mode());
+  ASSERT(!top_scope_->is_classic_mode());
   VariableProxy* lhs = expression != NULL
       ? expression->AsVariableProxy()
       : NULL;
@@ -4188,11 +4966,6 @@
 }
 
 
-Literal* Parser::NewNumberLiteral(double number) {
-  return NewLiteral(isolate()->factory()->NewNumber(number, TENURED));
-}
-
-
 Expression* Parser::NewThrowReferenceError(Handle<String> type) {
   return NewThrowError(isolate()->factory()->MakeReferenceError_symbol(),
                        type, HandleVector<Object>(NULL, 0));
@@ -4232,19 +5005,15 @@
       elements->set(i, *element);
     }
   }
-  Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(elements,
-                                                                       TENURED);
+  Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(
+      elements, FAST_ELEMENTS, TENURED);
 
   ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2);
-  args->Add(NewLiteral(type));
-  args->Add(NewLiteral(array));
-  CallRuntime* call_constructor = new(zone()) CallRuntime(isolate(),
-                                                          constructor,
-                                                          NULL,
-                                                          args);
-  return new(zone()) Throw(isolate(),
-                           call_constructor,
-                           scanner().location().beg_pos);
+  args->Add(factory()->NewLiteral(type));
+  args->Add(factory()->NewLiteral(array));
+  CallRuntime* call_constructor =
+      factory()->NewCallRuntime(constructor, NULL, args);
+  return factory()->NewThrow(call_constructor, scanner().location().beg_pos);
 }
 
 // ----------------------------------------------------------------------------
@@ -5121,19 +5890,21 @@
 
 
 // Create a Scanner for the preparser to use as input, and preparse the source.
-static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
-                                  bool allow_lazy,
-                                  ParserRecorder* recorder,
-                                  bool harmony_block_scoping) {
+static ScriptDataImpl* DoPreParse(Utf16CharacterStream* source,
+                                  int flags,
+                                  ParserRecorder* recorder) {
   Isolate* isolate = Isolate::Current();
-  JavaScriptScanner scanner(isolate->unicode_cache());
-  scanner.SetHarmonyBlockScoping(harmony_block_scoping);
+  HistogramTimerScope timer(isolate->counters()->pre_parse());
+  Scanner scanner(isolate->unicode_cache());
+  scanner.SetHarmonyScoping(FLAG_harmony_scoping);
   scanner.Initialize(source);
   intptr_t stack_limit = isolate->stack_guard()->real_climit();
-  if (!preparser::PreParser::PreParseProgram(&scanner,
-                                             recorder,
-                                             allow_lazy,
-                                             stack_limit)) {
+  preparser::PreParser::PreParseResult result =
+      preparser::PreParser::PreParseProgram(&scanner,
+                                            recorder,
+                                            flags,
+                                            stack_limit);
+  if (result == preparser::PreParser::kPreParseStackOverflow) {
     isolate->StackOverflow();
     return NULL;
   }
@@ -5147,27 +5918,38 @@
 
 // Preparse, but only collect data that is immediately useful,
 // even if the preparser data is only used once.
-ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source,
+ScriptDataImpl* ParserApi::PartialPreParse(Handle<String> source,
                                            v8::Extension* extension,
-                                           bool harmony_block_scoping) {
+                                           int flags) {
   bool allow_lazy = FLAG_lazy && (extension == NULL);
   if (!allow_lazy) {
     // Partial preparsing is only about lazily compiled functions.
     // If we don't allow lazy compilation, the log data will be empty.
     return NULL;
   }
+  flags |= kAllowLazy;
   PartialParserRecorder recorder;
-  return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping);
+  int source_length = source->length();
+  if (source->IsExternalTwoByteString()) {
+    ExternalTwoByteStringUtf16CharacterStream stream(
+        Handle<ExternalTwoByteString>::cast(source), 0, source_length);
+    return DoPreParse(&stream, flags, &recorder);
+  } else {
+    GenericStringUtf16CharacterStream stream(source, 0, source_length);
+    return DoPreParse(&stream, flags, &recorder);
+  }
 }
 
 
-ScriptDataImpl* ParserApi::PreParse(UC16CharacterStream* source,
+ScriptDataImpl* ParserApi::PreParse(Utf16CharacterStream* source,
                                     v8::Extension* extension,
-                                    bool harmony_block_scoping) {
+                                    int flags) {
   Handle<Script> no_script;
-  bool allow_lazy = FLAG_lazy && (extension == NULL);
+  if (FLAG_lazy && (extension == NULL)) {
+    flags |= kAllowLazy;
+  }
   CompleteParserRecorder recorder;
-  return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping);
+  return DoPreParse(source, flags, &recorder);
 }
 
 
@@ -5193,29 +5975,33 @@
 }
 
 
-bool ParserApi::Parse(CompilationInfo* info) {
+bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
   ASSERT(info->function() == NULL);
   FunctionLiteral* result = NULL;
   Handle<Script> script = info->script();
-  bool harmony_block_scoping = !info->is_native() &&
-                               FLAG_harmony_block_scoping;
+  ASSERT((parsing_flags & kLanguageModeMask) == CLASSIC_MODE);
+  if (!info->is_native() && FLAG_harmony_scoping) {
+    // Harmony scoping is requested.
+    parsing_flags |= EXTENDED_MODE;
+  }
+  if (!info->is_native() && FLAG_harmony_modules) {
+    parsing_flags |= kAllowModules;
+  }
+  if (FLAG_allow_natives_syntax || info->is_native()) {
+    // We require %identifier(..) syntax.
+    parsing_flags |= kAllowNativesSyntax;
+  }
   if (info->is_lazy()) {
-    bool allow_natives_syntax =
-        FLAG_allow_natives_syntax ||
-        info->is_native();
-    Parser parser(script, allow_natives_syntax, NULL, NULL);
-    parser.SetHarmonyBlockScoping(harmony_block_scoping);
-    result = parser.ParseLazy(info);
+    ASSERT(!info->is_eval());
+    Parser parser(script, parsing_flags, NULL, NULL);
+    if (info->shared_info()->is_function()) {
+      result = parser.ParseLazy(info);
+    } else {
+      result = parser.ParseProgram(info);
+    }
   } else {
-    // Whether we allow %identifier(..) syntax.
-    bool allow_natives_syntax =
-        info->is_native() || FLAG_allow_natives_syntax;
     ScriptDataImpl* pre_data = info->pre_parse_data();
-    Parser parser(script,
-                  allow_natives_syntax,
-                  info->extension(),
-                  pre_data);
-    parser.SetHarmonyBlockScoping(harmony_block_scoping);
+    Parser parser(script, parsing_flags, info->extension(), pre_data);
     if (pre_data != NULL && pre_data->has_error()) {
       Scanner::Location loc = pre_data->MessageLocation();
       const char* message = pre_data->BuildMessage();
@@ -5228,10 +6014,7 @@
       DeleteArray(args.start());
       ASSERT(info->isolate()->has_pending_exception());
     } else {
-      Handle<String> source = Handle<String>(String::cast(script->source()));
-      result = parser.ParseProgram(source,
-                                   info->is_global(),
-                                   info->StrictMode());
+      result = parser.ParseProgram(info);
     }
   }
   info->SetFunction(result);
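[ParserApi::Parse now ORs independent options into one int instead of threading
separate booleans through every constructor. A sketch of composing and testing
such flags; the bit values below are illustrative, since V8's real ParsingFlags
enum reserves the low bits for the language mode (see kLanguageModeMask above):

    #include <cstdio>

    enum Flags {
      kNone         = 0,
      kAllowLazy    = 1 << 2,  // illustrative values; not V8's
      kAllowNatives = 1 << 3,
      kAllowModules = 1 << 4
    };

    int main() {
      int flags = kNone;
      flags |= kAllowLazy;                    // set an option
      bool lazy = (flags & kAllowLazy) != 0;  // test an option
      std::printf("lazy=%d modules=%d\n",
                  lazy ? 1 : 0, (flags & kAllowModules) != 0 ? 1 : 0);
    }
]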
diff --git a/src/parser.h b/src/parser.h
index 3312f2f..b4d8825 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,6 +33,7 @@
 #include "preparse-data-format.h"
 #include "preparse-data.h"
 #include "scopes.h"
+#include "preparser.h"
 
 namespace v8 {
 namespace internal {
@@ -42,7 +43,6 @@
 class ParserLog;
 class PositionStack;
 class Target;
-class LexicalScope;
 
 template <typename T> class ZoneListWrapper;
 
@@ -67,26 +67,36 @@
 
 class FunctionEntry BASE_EMBEDDED {
  public:
-  explicit FunctionEntry(Vector<unsigned> backing) : backing_(backing) { }
-  FunctionEntry() : backing_(Vector<unsigned>::empty()) { }
+  enum {
+    kStartPositionIndex,
+    kEndPositionIndex,
+    kLiteralCountIndex,
+    kPropertyCountIndex,
+    kLanguageModeIndex,
+    kSize
+  };
 
-  int start_pos() { return backing_[kStartPosOffset]; }
-  int end_pos() { return backing_[kEndPosOffset]; }
-  int literal_count() { return backing_[kLiteralCountOffset]; }
-  int property_count() { return backing_[kPropertyCountOffset]; }
-  bool strict_mode() { return backing_[kStrictModeOffset] != 0; }
+  explicit FunctionEntry(Vector<unsigned> backing)
+    : backing_(backing) { }
 
-  bool is_valid() { return backing_.length() > 0; }
+  FunctionEntry() : backing_() { }
 
-  static const int kSize = 5;
+  int start_pos() { return backing_[kStartPositionIndex]; }
+  int end_pos() { return backing_[kEndPositionIndex]; }
+  int literal_count() { return backing_[kLiteralCountIndex]; }
+  int property_count() { return backing_[kPropertyCountIndex]; }
+  LanguageMode language_mode() {
+    ASSERT(backing_[kLanguageModeIndex] == CLASSIC_MODE ||
+           backing_[kLanguageModeIndex] == STRICT_MODE ||
+           backing_[kLanguageModeIndex] == EXTENDED_MODE);
+    return static_cast<LanguageMode>(backing_[kLanguageModeIndex]);
+  }
+
+  bool is_valid() { return !backing_.is_empty(); }
 
  private:
   Vector<unsigned> backing_;
-  static const int kStartPosOffset = 0;
-  static const int kEndPosOffset = 1;
-  static const int kLiteralCountOffset = 2;
-  static const int kPropertyCountOffset = 3;
-  static const int kStrictModeOffset = 4;
+  bool owns_data_;
 };
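[FunctionEntry now uses an enum whose members double as indices into the flat
Vector<unsigned> backing store; because the trailing kSize member is
auto-numbered, inserting a field before it grows the record without touching
other code. The idiom in miniature:

    #include <cstdio>

    // Auto-numbered: kStart = 0, kEnd = 1, kLiterals = 2, kSize = 3.
    enum { kStart, kEnd, kLiterals, kSize };

    int main() {
      unsigned record[kSize] = { 10, 42, 7 };  // one serialized entry
      std::printf("end=%u literals=%u size=%d\n",
                  record[kEnd], record[kLiterals], static_cast<int>(kSize));
    }
]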
 
 
@@ -98,7 +108,7 @@
 
   // Create an empty ScriptDataImpl that is guaranteed to not satisfy
   // a SanityCheck.
-  ScriptDataImpl() : store_(Vector<unsigned>()), owns_store_(false) { }
+  ScriptDataImpl() : owns_store_(false) { }
 
   virtual ~ScriptDataImpl();
   virtual int Length();
@@ -159,24 +169,24 @@
   // Parses the source code represented by the compilation info and sets its
   // function literal.  Returns false (and deallocates any allocated AST
   // nodes) if parsing failed.
-  static bool Parse(CompilationInfo* info);
+  static bool Parse(CompilationInfo* info, int flags);
 
   // Generic preparser generating full preparse data.
-  static ScriptDataImpl* PreParse(UC16CharacterStream* source,
+  static ScriptDataImpl* PreParse(Utf16CharacterStream* source,
                                   v8::Extension* extension,
-                                  bool harmony_block_scoping);
+                                  int flags);
 
   // Preparser that only does the preprocessing that makes sense if the
   // result is used immediately after.
-  static ScriptDataImpl* PartialPreParse(UC16CharacterStream* source,
+  static ScriptDataImpl* PartialPreParse(Handle<String> source,
                                          v8::Extension* extension,
-                                         bool harmony_block_scoping);
+                                         int flags);
 };
 
 // ----------------------------------------------------------------------------
 // REGEXP PARSING
 
-// A BuffferedZoneList is an automatically growing list, just like (and backed
+// A BufferedZoneList is an automatically growing list, just like (and backed
 // by) a ZoneList, that is optimized for the case of adding and removing
 // a single element. The last element added is stored outside the backing list,
 // and if no more than one element is ever added, the ZoneList isn't even
@@ -415,19 +425,22 @@
 // ----------------------------------------------------------------------------
 // JAVASCRIPT PARSING
 
+// Forward declaration.
+class SingletonLogger;
+
 class Parser {
  public:
   Parser(Handle<Script> script,
-         bool allow_natives_syntax,
+         int parsing_flags,  // Combination of ParsingFlags
          v8::Extension* extension,
          ScriptDataImpl* pre_data);
-  virtual ~Parser() { }
+  virtual ~Parser() {
+    delete reusable_preparser_;
+    reusable_preparser_ = NULL;
+  }
 
   // Returns NULL if parsing failed.
-  FunctionLiteral* ParseProgram(Handle<String> source,
-                                bool in_global_context,
-                                StrictModeFlag strict_mode);
-
+  FunctionLiteral* ParseProgram(CompilationInfo* info);
   FunctionLiteral* ParseLazy(CompilationInfo* info);
 
   void ReportMessageAt(Scanner::Location loc,
@@ -436,7 +449,6 @@
   void ReportMessageAt(Scanner::Location loc,
                        const char* message,
                        Vector<Handle<String> > args);
-  void SetHarmonyBlockScoping(bool block_scoping);
 
  private:
   // Limit on number of function parameters is chosen arbitrarily.
@@ -445,38 +457,120 @@
   // should be checked.
   static const int kMaxNumFunctionParameters = 32766;
   static const int kMaxNumFunctionLocals = 32767;
-  FunctionLiteral* ParseLazy(CompilationInfo* info,
-                             UC16CharacterStream* source,
-                             ZoneScope* zone_scope);
+
   enum Mode {
     PARSE_LAZILY,
     PARSE_EAGERLY
   };
 
   enum VariableDeclarationContext {
-    kSourceElement,
+    kModuleElement,
+    kBlockElement,
     kStatement,
     kForStatement
   };
 
+  // Whether a list of variable declarations includes any initializers.
+  enum VariableDeclarationProperties {
+    kHasInitializers,
+    kHasNoInitializers
+  };
+
+  class BlockState;
+
+  class FunctionState BASE_EMBEDDED {
+   public:
+    FunctionState(Parser* parser,
+                  Scope* scope,
+                  Isolate* isolate);
+    ~FunctionState();
+
+    int NextMaterializedLiteralIndex() {
+      return next_materialized_literal_index_++;
+    }
+    int materialized_literal_count() {
+      return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
+    }
+
+    int NextHandlerIndex() { return next_handler_index_++; }
+    int handler_count() { return next_handler_index_; }
+
+    void SetThisPropertyAssignmentInfo(
+        bool only_simple_this_property_assignments,
+        Handle<FixedArray> this_property_assignments) {
+      only_simple_this_property_assignments_ =
+          only_simple_this_property_assignments;
+      this_property_assignments_ = this_property_assignments;
+    }
+    bool only_simple_this_property_assignments() {
+      return only_simple_this_property_assignments_;
+    }
+    Handle<FixedArray> this_property_assignments() {
+      return this_property_assignments_;
+    }
+
+    void AddProperty() { expected_property_count_++; }
+    int expected_property_count() { return expected_property_count_; }
+
+    AstNodeFactory<AstConstructionVisitor>* factory() { return &factory_; }
+
+   private:
+    // Used to assign an index to each literal that needs materialization in
+    // the function.  Includes regexp literals, and boilerplate for object and
+    // array literals.
+    int next_materialized_literal_index_;
+
+    // Used to assign a per-function index to try/catch handlers.
+    int next_handler_index_;
+
+    // Properties count estimation.
+    int expected_property_count_;
+
+    // Keeps track of assignments to properties of this. Used for
+    // optimizing constructors.
+    bool only_simple_this_property_assignments_;
+    Handle<FixedArray> this_property_assignments_;
+
+    Parser* parser_;
+    FunctionState* outer_function_state_;
+    Scope* outer_scope_;
+    int saved_ast_node_id_;
+    AstNodeFactory<AstConstructionVisitor> factory_;
+  };
+
+
+
+
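[The FunctionState constructor and destructor bodies are not part of this hunk,
but the saved outer_function_state_ and outer_scope_ members suggest the usual
stack discipline: the constructor installs the new state on the parser and the
destructor restores the previous one. A minimal sketch of that save/restore
pattern, with hypothetical names:

    #include <cstddef>
    #include <cstdio>

    struct ParserLike;

    struct State {
      explicit State(ParserLike* parser);
      ~State();
      State* outer_;
      ParserLike* parser_;
    };

    struct ParserLike {
      ParserLike() : current_(NULL) {}
      State* current_;
    };

    State::State(ParserLike* parser)
        : outer_(parser->current_), parser_(parser) {
      parser->current_ = this;  // install on construction
    }

    State::~State() {
      parser_->current_ = outer_;  // restore on scope exit
    }

    int main() {
      ParserLike p;
      {
        State outer_fn(&p);
        {
          State inner_fn(&p);  // a nested function literal being parsed
          std::printf("%d\n", p.current_ == &inner_fn);  // 1
        }
        std::printf("%d\n", p.current_ == &outer_fn);    // 1
      }
      std::printf("%d\n", p.current_ == NULL);           // 1
    }
]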
+  FunctionLiteral* ParseLazy(CompilationInfo* info,
+                             Utf16CharacterStream* source,
+                             ZoneScope* zone_scope);
+
   Isolate* isolate() { return isolate_; }
   Zone* zone() { return isolate_->zone(); }
 
   // Called by ParseProgram after setting up the scanner.
-  FunctionLiteral* DoParseProgram(Handle<String> source,
-                                  bool in_global_context,
-                                  StrictModeFlag strict_mode,
+  FunctionLiteral* DoParseProgram(CompilationInfo* info,
+                                  Handle<String> source,
                                   ZoneScope* zone_scope);
 
   // Report syntax error
   void ReportUnexpectedToken(Token::Value token);
   void ReportInvalidPreparseData(Handle<String> name, bool* ok);
   void ReportMessage(const char* message, Vector<const char*> args);
+  void ReportMessage(const char* message, Vector<Handle<String> > args);
 
-  bool inside_with() const { return with_nesting_level_ > 0; }
-  JavaScriptScanner& scanner()  { return scanner_; }
+  bool inside_with() const { return top_scope_->inside_with(); }
+  Scanner& scanner()  { return scanner_; }
   Mode mode() const { return mode_; }
   ScriptDataImpl* pre_data() const { return pre_data_; }
+  bool is_extended_mode() {
+    ASSERT(top_scope_ != NULL);
+    return top_scope_->is_extended_mode();
+  }
+  Scope* DeclarationScope(VariableMode mode) {
+    return (mode == LET || mode == CONST_HARMONY)
+        ? top_scope_ : top_scope_->DeclarationScope();
+  }
 
   // Check if the given string is 'eval' or 'arguments'.
   bool IsEvalOrArguments(Handle<String> string);
@@ -486,16 +580,28 @@
   // By making the 'exception handling' explicit, we are forced to check
   // for failure at the call sites.
   void* ParseSourceElements(ZoneList<Statement*>* processor,
-                            int end_token, bool* ok);
-  Statement* ParseSourceElement(ZoneStringList* labels, bool* ok);
+                            int end_token, bool is_eval, bool* ok);
+  Statement* ParseModuleElement(ZoneStringList* labels, bool* ok);
+  Block* ParseModuleDeclaration(ZoneStringList* names, bool* ok);
+  Module* ParseModule(bool* ok);
+  Module* ParseModuleLiteral(bool* ok);
+  Module* ParseModulePath(bool* ok);
+  Module* ParseModuleVariable(bool* ok);
+  Module* ParseModuleUrl(bool* ok);
+  Module* ParseModuleSpecifier(bool* ok);
+  Block* ParseImportDeclaration(bool* ok);
+  Statement* ParseExportDeclaration(bool* ok);
+  Statement* ParseBlockElement(ZoneStringList* labels, bool* ok);
   Statement* ParseStatement(ZoneStringList* labels, bool* ok);
-  Statement* ParseFunctionDeclaration(bool* ok);
+  Statement* ParseFunctionDeclaration(ZoneStringList* names, bool* ok);
   Statement* ParseNativeDeclaration(bool* ok);
   Block* ParseBlock(ZoneStringList* labels, bool* ok);
-  Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
   Block* ParseVariableStatement(VariableDeclarationContext var_context,
+                                ZoneStringList* names,
                                 bool* ok);
   Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
+                                   VariableDeclarationProperties* decl_props,
+                                   ZoneStringList* names,
                                    Handle<String>* out,
                                    bool* ok);
   Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
@@ -515,6 +621,9 @@
   TryStatement* ParseTryStatement(bool* ok);
   DebuggerStatement* ParseDebuggerStatement(bool* ok);
 
+  // Support for harmony block-scoped bindings.
+  Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
+
   Expression* ParseExpression(bool accept_IN, bool* ok);
   Expression* ParseAssignmentExpression(bool accept_IN, bool* ok);
   Expression* ParseConditionalExpression(bool accept_IN, bool* ok);
@@ -533,11 +642,6 @@
   ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
   Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
 
-  Expression* NewCompareNode(Token::Value op,
-                             Expression* x,
-                             Expression* y,
-                             int position);
-
   // Populate the constant properties fixed array for a materialized object
   // literal.
   void BuildObjectLiteralConstantProperties(
@@ -600,6 +704,7 @@
   void Expect(Token::Value token, bool* ok);
   bool Check(Token::Value token);
   void ExpectSemicolon(bool* ok);
+  void ExpectContextualKeyword(const char* keyword, bool* ok);
 
   Handle<String> LiteralString(PretenureFlag tenured) {
     if (scanner().is_literal_ascii()) {
@@ -607,7 +712,7 @@
           scanner().literal_ascii_string(), tenured);
     } else {
       return isolate_->factory()->NewStringFromTwoByte(
-            scanner().literal_uc16_string(), tenured);
+            scanner().literal_utf16_string(), tenured);
     }
   }
 
@@ -617,7 +722,7 @@
           scanner().next_literal_ascii_string(), tenured);
     } else {
       return isolate_->factory()->NewStringFromTwoByte(
-          scanner().next_literal_uc16_string(), tenured);
+          scanner().next_literal_utf16_string(), tenured);
     }
   }
 
@@ -626,7 +731,6 @@
   // Get odd-ball literals.
   Literal* GetLiteralUndefined();
   Literal* GetLiteralTheHole();
-  Literal* GetLiteralNumber(double value);
 
   Handle<String> ParseIdentifier(bool* ok);
   Handle<String> ParseIdentifierOrStrictReservedWord(
@@ -636,6 +740,11 @@
                                                bool* is_set,
                                                bool* ok);
 
+  // Determine if the expression is a variable proxy and mark it as being used
+  // in an assignment or with an increment/decrement operator. This is
+  // currently used for statically checking assignments to harmony const
+  // bindings.
+  void MarkAsLValue(Expression* expression);
+
   // Strict mode validation of LValue expressions
   void CheckStrictModeLValue(Expression* expression,
                              const char* error,
@@ -656,10 +765,10 @@
   void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
 
   // Parser support
-  VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
-                         FunctionLiteral* fun,
-                         bool resolve,
-                         bool* ok);
+  VariableProxy* NewUnresolved(Handle<String> name,
+                               VariableMode mode,
+                               Interface* interface = Interface::NewValue());
+  void Declare(Declaration* declaration, bool resolve, bool* ok);
 
   bool TargetStackContainsLabel(Handle<String> label);
   BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
@@ -669,30 +778,12 @@
 
   // Factory methods.
 
-  Statement* EmptyStatement() {
-    static v8::internal::EmptyStatement empty;
-    return &empty;
-  }
-
-  Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with);
+  Scope* NewScope(Scope* parent, ScopeType type);
 
   Handle<String> LookupSymbol(int symbol_id);
 
   Handle<String> LookupCachedSymbol(int symbol_id);
 
-  Expression* NewCall(Expression* expression,
-                      ZoneList<Expression*>* arguments,
-                      int pos) {
-    return new(zone()) Call(isolate(), expression, arguments, pos);
-  }
-
-  inline Literal* NewLiteral(Handle<Object> handle) {
-    return new(zone()) Literal(isolate(), handle);
-  }
-
-  // Create a number literal.
-  Literal* NewNumberLiteral(double value);
-
   // Generate AST node that throw a ReferenceError with the given type.
   Expression* NewThrowReferenceError(Handle<String> type);
 
@@ -712,33 +803,39 @@
                             Handle<String> type,
                             Vector< Handle<Object> > arguments);
 
+  preparser::PreParser::PreParseResult LazyParseFunctionLiteral(
+       SingletonLogger* logger);
+
+  AstNodeFactory<AstConstructionVisitor>* factory() {
+    return current_function_state_->factory();
+  }
+
   Isolate* isolate_;
   ZoneList<Handle<String> > symbol_cache_;
 
   Handle<Script> script_;
-  JavaScriptScanner scanner_;
-
+  Scanner scanner_;
+  preparser::PreParser* reusable_preparser_;
   Scope* top_scope_;
-  int with_nesting_level_;
-
-  LexicalScope* lexical_scope_;
-  Mode mode_;
-
+  FunctionState* current_function_state_;
   Target* target_stack_;  // for break, continue statements
-  bool allow_natives_syntax_;
   v8::Extension* extension_;
-  bool is_pre_parsing_;
   ScriptDataImpl* pre_data_;
   FuncNameInferrer* fni_;
+
+  Mode mode_;
+  bool allow_natives_syntax_;
+  bool allow_lazy_;
+  bool allow_modules_;
   bool stack_overflow_;
   // If true, the next (and immediately following) function literal is
   // preceded by a parenthesis.
   // Heuristically that means that the function will be called immediately,
   // so never lazily compile it.
   bool parenthesized_function_;
-  bool harmony_block_scoping_;
 
-  friend class LexicalScope;
+  friend class BlockState;
+  friend class FunctionState;
 };
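[Node allocation now funnels through factory(), an AstNodeFactory templated on
AstConstructionVisitor; that is plausibly how the ast_properties recorded in
ParseFunctionLiteral above get collected as a side effect of building the tree.
A sketch of the count-while-constructing idea, with illustrative names:

    #include <cstdio>

    struct CountingVisitor {
      CountingVisitor() : node_count(0) {}
      int node_count;
    };

    template <typename Visitor>
    class NodeFactory {
     public:
      explicit NodeFactory(Visitor* visitor) : visitor_(visitor) {}
      int* NewLeaf(int value) {  // stand-in for NewLiteral, NewThrow, ...
        visitor_->node_count++;  // properties recorded during construction
        return new int(value);
      }
     private:
      Visitor* visitor_;
    };

    int main() {
      CountingVisitor visitor;
      NodeFactory<CountingVisitor> factory(&visitor);
      delete factory.NewLeaf(1);
      delete factory.NewLeaf(2);
      std::printf("%d\n", visitor.node_count);  // 2
    }
]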
 
 
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index a72f5da..dd7253b 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,6 +41,7 @@
 
 #include "v8.h"
 
+#include "platform-posix.h"
 #include "platform.h"
 #include "v8threads.h"
 #include "vm-state-inl.h"
@@ -61,7 +62,7 @@
 static Mutex* limit_mutex = NULL;
 
 
-void OS::Setup() {
+void OS::SetUp() {
   // Seed the random number generator.
   // Convert the current time to a 64-bit integer first, before converting it
   // to an unsigned. Going directly can cause an overflow and the seed to be
@@ -73,6 +74,12 @@
 }
 
 
+void OS::PostSetUp() {
+  // Math functions depend on CPU features, so they are initialized after
+  // the CPU has been set up.
+  MathSetup();
+}
+
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
   return 0;  // Nothing special about Cygwin.
 }
@@ -114,7 +121,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low end and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -290,7 +297,7 @@
       }
       LOG(isolate, SharedLibraryEvent(lib_name, start, end));
     } else {
-      // Entry not describing executable data. Skip to end of line to setup
+      // Entry not describing executable data. Skip to end of line to set up
       // reading the next entry.
       do {
         c = getc(fp);
@@ -355,6 +362,17 @@
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_READONLY | PAGE_GUARD)) {
+    return false;
+  }
+  return true;
+}
+
+
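[On Cygwin, Guard commits the page as PAGE_READONLY | PAGE_GUARD, so the first
touch raises a one-shot guard-page exception; the POSIX ports below route
through OS::Guard, which plausibly revokes access with mprotect so any touch
faults. A self-contained POSIX sketch of guarding one page (mprotect and mmap
are standard; the setup around them is illustrative):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      // Reserve two pages, then revoke all access to the first one.
      void* base = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) return 1;
      int rc = mprotect(base, page, PROT_NONE);  // touching it now faults
      std::printf("guarded=%d\n", rc == 0 ? 1 : 0);
      munmap(base, 2 * page);
      return 0;
    }
]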
 class Thread::PlatformData : public Malloced {
  public:
   PlatformData() : thread_(kNoThread) {}
@@ -365,16 +383,9 @@
 
 
 Thread::Thread(const Options& options)
-    : data_(new PlatformData),
-      stack_size_(options.stack_size) {
-  set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
-    : data_(new PlatformData),
-      stack_size_(0) {
-  set_name(name);
+    : data_(new PlatformData()),
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
 }
 
 
@@ -617,12 +628,14 @@
 
 class SamplerThread : public Thread {
  public:
+  static const int kSamplerThreadStackSize = 64 * KB;
+
   explicit SamplerThread(int interval)
-      : Thread("SamplerThread"),
+      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       instance_ = new SamplerThread(sampler->interval());
@@ -633,7 +646,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -719,14 +732,15 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SamplerThread* instance_;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(SamplerThread);
 };
 
 
-Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
 SamplerThread* SamplerThread::instance_ = NULL;
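[The sampler mutex changes from a Mutex* filled in by a dynamic initializer
(OS::CreateMutex() running before main) to a LazyMutex initialized with the POD
LAZY_MUTEX_INITIALIZER, creating the real mutex only on the first Pointer()
call. A sketch of the idea with a plain int payload; real code must make the
first call thread-safe (for example with an atomic compare-and-swap), which is
elided here:

    #include <cstdio>

    template <typename T>
    struct LazyInstance {
      T* Pointer() {
        if (instance_ == 0) instance_ = new T();  // created on first use
        return instance_;
      }
      T* instance_;  // zero until first use; the holder is static POD
    };

    // Aggregate initialization: no code runs before main for this object.
    static LazyInstance<int> lazy_value = { 0 };

    int main() {
      *lazy_value.Pointer() = 7;
      std::printf("%d\n", *lazy_value.Pointer());  // 7
    }
]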
 
 
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 685ec3c..6a004ea 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -54,6 +54,7 @@
 #include "v8.h"
 #include "v8threads.h"
 
+#include "platform-posix.h"
 #include "platform.h"
 #include "vm-state-inl.h"
 
@@ -79,7 +80,7 @@
 static Mutex* limit_mutex = NULL;
 
 
-void OS::Setup() {
+void OS::SetUp() {
   // Seed the random number generator.
   // Convert the current time to a 64-bit integer first, before converting it
   // to an unsigned. Going directly can cause an overflow and the seed to be
@@ -91,6 +92,13 @@
 }
 
 
+void OS::PostSetUp() {
+  // Math functions depend on CPU features, so they are initialized after
+  // the CPU has been set up.
+  MathSetup();
+}
+
+
 void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
   __asm__ __volatile__("" : : : "memory");
   *ptr = value;
@@ -128,7 +136,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low end and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -333,44 +341,132 @@
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
+  address_ = ReserveRegion(size);
   size_ = size;
 }
 
 
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
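[The new aligned constructor over-reserves size + alignment bytes, rounds the
base up, and keeps only the trimmed middle, unmapping the prefix and suffix.
The key step is the power-of-two round-up; a worked sketch with a hypothetical
mmap result:

    #include <cstdio>
    #include <stdint.h>

    // Round p up to the next multiple of a (a must be a power of two).
    static uintptr_t RoundUpAddr(uintptr_t p, uintptr_t a) {
      return (p + a - 1) & ~(a - 1);
    }

    int main() {
      uintptr_t base = 0x7f3a1234;  // as if returned by mmap
      uintptr_t aligned = RoundUpAddr(base, 0x10000);  // 64 KB alignment
      // Prints 0x7f3b0000 and a prefix of 0xedcc bytes to unmap.
      std::printf("aligned=%#lx prefix=%#lx\n",
                  static_cast<unsigned long>(aligned),
                  static_cast<unsigned long>(aligned - base));
    }
]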
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
 }
 
 
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
-    return false;
-  }
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
 
-  UpdateAllocatedSpaceLimits(address, size);
-  return true;
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, size);
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
 }
 
 
@@ -382,15 +478,8 @@
 
 Thread::Thread(const Options& options)
     : data_(new PlatformData),
-      stack_size_(options.stack_size) {
-  set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
-    : data_(new PlatformData),
-      stack_size_(0) {
-  set_name(name);
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
 }
 
 
@@ -635,12 +724,14 @@
     FULL_INTERVAL
   };
 
+  static const int kSignalSenderStackSize = 64 * KB;
+
   explicit SignalSender(int interval)
-      : Thread("SignalSender"),
+      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       // Install a signal handler.
@@ -660,7 +751,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -753,15 +844,16 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SignalSender* instance_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
+LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
 bool SignalSender::signal_handler_installed_ = false;
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 032cdaa..9ca2479 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -57,6 +57,7 @@
 
 #include "v8.h"
 
+#include "platform-posix.h"
 #include "platform.h"
 #include "v8threads.h"
 #include "vm-state-inl.h"
@@ -78,31 +79,7 @@
 static Mutex* limit_mutex = NULL;
 
 
-static void* GetRandomMmapAddr() {
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  // Note that the current isolate isn't set up in a call path via
-  // CpuFeatures::Probe. We don't care about randomization in this case because
-  // the code page is immediately freed.
-  if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
-    uint64_t rnd1 = V8::RandomPrivate(isolate);
-    uint64_t rnd2 = V8::RandomPrivate(isolate);
-    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
-    raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
-    uint32_t raw_addr = V8::RandomPrivate(isolate);
-    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
-    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
-    raw_addr &= 0x3ffff000;
-    raw_addr += 0x20000000;
-#endif
-    return reinterpret_cast<void*>(raw_addr);
-  }
-  return NULL;
-}
-
-
-void OS::Setup() {
+void OS::SetUp() {
   // Seed the random number generator. We preserve microsecond resolution.
   uint64_t seed = Ticks() ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
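[The GetRandomMmapAddr helper removed above is evidently shared via the new
platform-posix.h include rather than gone. Its 32-bit arithmetic is worth a
quick check: masking with 0x3ffff000 keeps page-aligned values in
[0, 0x40000000), and adding 0x20000000 shifts the window to
[0x20000000, 0x60000000), the range the old comment calls relatively
unpopulated under common ASLR modes:

    #include <cstdio>
    #include <stdint.h>

    int main() {
      uint32_t raw = 0xdeadbeef;  // any random 32-bit value
      raw &= 0x3ffff000;          // 0x1eadb000: page-aligned, below 1 GB
      raw += 0x20000000;          // 0x3eadb000: inside the target window
      std::printf("%#x\n", raw);
    }
]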
@@ -129,6 +106,13 @@
 }
 
 
+void OS::PostSetUp() {
+  // Math functions depend on CPU features, so they are initialized after
+  // the CPU has been set up.
+  MathSetup();
+}
+
+
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
   return 0;  // Linux runs on anything.
 }
@@ -211,15 +195,15 @@
 // pair r0, r1 is loaded with 0.0. If -mfloat-abi=hard is passed to GCC then
 // calling this will return 1.0 and otherwise 0.0.
 static void ArmUsingHardFloatHelper() {
-  asm("mov r0, #0");
+  asm("mov r0, #0":::"r0");
 #if defined(__VFP_FP__) && !defined(__SOFTFP__)
   // Load 0x3ff00000 into r1 using instructions available in both ARM
   // and Thumb mode.
-  asm("mov r1, #3");
-  asm("mov r2, #255");
-  asm("lsl r1, r1, #8");
-  asm("orr r1, r1, r2");
-  asm("lsl r1, r1, #20");
+  asm("mov r1, #3":::"r1");
+  asm("mov r2, #255":::"r2");
+  asm("lsl r1, r1, #8":::"r1");
+  asm("orr r1, r1, r2":::"r1");
+  asm("lsl r1, r1, #20":::"r1");
   // For vmov d0, r0, r1 use ARM mode.
 #ifdef __thumb__
   asm volatile(
@@ -233,12 +217,12 @@
     "    adr r3, 2f+1    \n\t"
     "    bx  r3          \n\t"
     "    .THUMB          \n"
-    "2:                  \n\t");
+    "2:                  \n\t":::"r3");
 #else
   asm("vmov d0, r0, r1");
 #endif  // __thumb__
 #endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
-  asm("mov r1, #0");
+  asm("mov r1, #0":::"r1");
 }
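[Each asm statement above gains a clobber list, the fourth field of GCC
extended asm (template : outputs : inputs : clobbers), telling the compiler
which registers the instruction overwrites so it does not keep live values
there. A minimal portable example using only the "memory" clobber:

    #include <cstdio>

    int main() {
      int ready = 0;
      // An empty template with a "memory" clobber is a pure compiler
      // barrier: loads and stores are not reordered or cached across it.
      asm volatile("" : : : "memory");
      ready = 1;
      std::printf("%d\n", ready);  // 1
    }
]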
 
 
@@ -350,7 +334,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low end and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -381,9 +365,9 @@
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
-  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
+  const size_t msize = RoundUp(requested, AllocateAlignment());
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* addr = GetRandomMmapAddr();
+  void* addr = OS::GetRandomMmapAddr();
   void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   if (mbase == MAP_FAILED) {
     LOG(i::Isolate::Current(),
@@ -412,6 +396,9 @@
 
 void OS::Abort() {
   // Redirect to std abort to signal abnormal program termination.
+  if (FLAG_break_on_abort) {
+    DebugBreak();
+  }
   abort();
 }
 
@@ -453,7 +440,12 @@
   int size = ftell(file);
 
   void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
@@ -468,13 +460,18 @@
     return NULL;
   }
   void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }
 
@@ -526,7 +523,7 @@
       }
       LOG(isolate, SharedLibraryEvent(lib_name, start, end));
     } else {
-      // Entry not describing executable data. Skip to end of line to setup
+      // Entry not describing executable data. Skip to end of line to set up
       // reading the next entry.
       do {
         c = getc(fp);
@@ -553,10 +550,14 @@
   // kernel log.
   int size = sysconf(_SC_PAGESIZE);
   FILE* f = fopen(kGCFakeMmap, "w+");
-  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
-                    fileno(f), 0);
+  void* addr = mmap(OS::GetRandomMmapAddr(),
+                    size,
+                    PROT_READ | PROT_EXEC,
+                    MAP_PRIVATE,
+                    fileno(f),
+                    0);
   ASSERT(addr != MAP_FAILED);
-  munmap(addr, size);
+  OS::Free(addr, size);
   fclose(f);
 }
 
@@ -598,44 +599,132 @@
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(GetRandomMmapAddr(), size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
+  address_ = ReserveRegion(size);
   size_ = size;
 }
 
 
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
 }
 
 
 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
-    return false;
-  }
-
-  UpdateAllocatedSpaceLimits(address, size);
-  return true;
+  return CommitRegion(address, size, is_executable);
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, size);
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
               MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
 }
 
 
@@ -648,15 +737,8 @@
 
 Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      stack_size_(options.stack_size) {
-  set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
-    : data_(new PlatformData()),
-      stack_size_(0) {
-  set_name(name);
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
 }
 
 
@@ -696,7 +778,8 @@
     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
     attr_ptr = &attr;
   }
-  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+  int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+  CHECK_EQ(0, result);
   ASSERT(data_->thread_ != kNoThread);
 }
 
@@ -855,7 +938,7 @@
 }
 
 
-#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__) || defined(__i386__))
+#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
 // Android runs a fairly new Linux kernel, so signal info is there,
 // but the C library doesn't have the structs defined.
 
@@ -877,8 +960,79 @@
   __sigset_t uc_sigmask;
 } ucontext_t;
 enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
-enum X86Registers {REG_EIP = 14, REG_ESP = 7, REG_EBP = 6};
 
+#elif !defined(__GLIBC__) && defined(__mips__)
+// MIPS version of sigcontext, for Android bionic.
+struct sigcontext {
+  uint32_t regmask;
+  uint32_t status;
+  uint64_t pc;
+  uint64_t gregs[32];
+  uint64_t fpregs[32];
+  uint32_t acx;
+  uint32_t fpc_csr;
+  uint32_t fpc_eir;
+  uint32_t used_math;
+  uint32_t dsp;
+  uint64_t mdhi;
+  uint64_t mdlo;
+  uint32_t hi1;
+  uint32_t lo1;
+  uint32_t hi2;
+  uint32_t lo2;
+  uint32_t hi3;
+  uint32_t lo3;
+};
+typedef uint32_t __sigset_t;
+typedef struct sigcontext mcontext_t;
+typedef struct ucontext {
+  uint32_t uc_flags;
+  struct ucontext* uc_link;
+  stack_t uc_stack;
+  mcontext_t uc_mcontext;
+  __sigset_t uc_sigmask;
+} ucontext_t;
+
+#elif !defined(__GLIBC__) && defined(__i386__)
+// x86 version for Android.
+struct _libc_fpreg {
+  uint16_t significand[4];
+  uint16_t exponent;
+};
+
+struct _libc_fpstate {
+  uint64_t cw;
+  uint64_t sw;
+  uint64_t tag;
+  uint64_t ipoff;
+  uint64_t cssel;
+  uint64_t dataoff;
+  uint64_t datasel;
+  struct _libc_fpreg _st[8];
+  uint64_t status;
+};
+
+typedef struct _libc_fpstate *fpregset_t;
+
+typedef struct mcontext {
+  int32_t gregs[19];
+  fpregset_t fpregs;
+  int64_t oldmask;
+  int64_t cr2;
+} mcontext_t;
+
+typedef uint64_t __sigset_t;
+
+typedef struct ucontext {
+  uint64_t uc_flags;
+  struct ucontext *uc_link;
+  stack_t uc_stack;
+  mcontext_t uc_mcontext;
+  __sigset_t uc_sigmask;
+  struct _libc_fpstate __fpregs_mem;
+} ucontext_t;
+
+enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
 #endif
 
 
@@ -893,7 +1047,6 @@
 
 
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
-#ifndef V8_HOST_ARCH_MIPS
   USE(info);
   if (signal != SIGPROF) return;
   Isolate* isolate = Isolate::UncheckedCurrent();
@@ -935,15 +1088,14 @@
   sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
   sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
   sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif
+#endif  // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
 #elif V8_HOST_ARCH_MIPS
-  sample.pc = reinterpret_cast<Address>(mcontext.pc);
-  sample.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
-  sample.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#endif
+  sample->pc = reinterpret_cast<Address>(mcontext.pc);
+  sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
+  sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+#endif  // V8_HOST_ARCH_*
   sampler->SampleStack(sample);
   sampler->Tick(sample);
-#endif
 }
 
 
@@ -965,8 +1117,10 @@
     FULL_INTERVAL
   };
 
+  static const int kSignalSenderStackSize = 64 * KB;
+
   explicit SignalSender(int interval)
-      : Thread("SignalSender"),
+      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
         vm_tgid_(getpid()),
         interval_(interval) {}
 
@@ -987,7 +1141,7 @@
   }
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       // Start a thread that will send SIGPROF signal to VM threads,
@@ -1000,7 +1154,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -1081,6 +1235,9 @@
    // occurring during signal delivery.
     useconds_t interval = interval_ * 1000 - 100;
     if (full_or_half == HALF_INTERVAL) interval /= 2;
+#if defined(ANDROID)
+    usleep(interval);
+#else
     int result = usleep(interval);
 #ifdef DEBUG
     if (result != 0 && errno != EINTR) {
@@ -1090,8 +1247,9 @@
               errno);
       ASSERT(result == 0 || errno == EINTR);
     }
-#endif
+#endif  // DEBUG
     USE(result);
+#endif  // ANDROID
   }
 
   const int vm_tgid_;
@@ -1099,16 +1257,17 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SignalSender* instance_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
 
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
+LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
 bool SignalSender::signal_handler_installed_ = false;
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 6be941a..dbcd80e 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -58,6 +58,7 @@
 
 #include "v8.h"
 
+#include "platform-posix.h"
 #include "platform.h"
 #include "vm-state-inl.h"
 
@@ -75,7 +76,7 @@
 namespace v8 {
 namespace internal {
 
-// 0 is never a valid thread id on MacOSX since a ptread_t is
+// 0 is never a valid thread id on MacOSX since a pthread_t is
 // a pointer.
 static const pthread_t kNoThread = (pthread_t) 0;
 
@@ -93,21 +94,24 @@
 static Mutex* limit_mutex = NULL;
 
 
-void OS::Setup() {
-  // Seed the random number generator.
-  // Convert the current time to a 64-bit integer first, before converting it
-  // to an unsigned. Going directly will cause an overflow and the seed to be
-  // set to all ones. The seed will be identical for different instances that
-  // call this setup code within the same millisecond.
-  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+void OS::SetUp() {
+  // Seed the random number generator. We preserve microsecond resolution.
+  uint64_t seed = Ticks() ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
   limit_mutex = CreateMutex();
 }
 
 
+void OS::PostSetUp() {
+  // Math functions depend on CPU features, so they are initialized after
+  // the CPU has been probed.
+  MathSetup();
+}
+
+
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low end and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -148,9 +152,12 @@
                    bool is_executable) {
   const size_t msize = RoundUp(requested, getpagesize());
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* mbase = mmap(NULL, msize, prot,
+  void* mbase = mmap(OS::GetRandomMmapAddr(),
+                     msize,
+                     prot,
                      MAP_PRIVATE | MAP_ANON,
-                     kMmapFd, kMmapFdOffset);
+                     kMmapFd,
+                     kMmapFdOffset);
   if (mbase == MAP_FAILED) {
     LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
     return NULL;
@@ -207,7 +214,12 @@
   int size = ftell(file);
 
   void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
@@ -222,13 +234,18 @@
     return NULL;
   }
   void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+      mmap(OS::GetRandomMmapAddr(),
+          size,
+          PROT_READ | PROT_WRITE,
+          MAP_SHARED,
+          fileno(file),
+          0);
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }
 
@@ -334,33 +351,108 @@
 }
 
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 
-VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
-  size_ = size;
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
 }
 
 
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
 }
 
 
 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+bool VirtualMemory::CommitRegion(void* address,
+                                 size_t size,
+                                 bool is_executable) {
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
+  if (MAP_FAILED == mmap(address,
+                         size,
+                         prot,
                          MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
+                         kMmapFd,
+                         kMmapFdOffset)) {
     return false;
   }
 
@@ -370,9 +462,22 @@
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::UncommitRegion(void* address, size_t size) {
+  return mmap(address,
+              size,
+              PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
+  return munmap(address, size) == 0;
 }
 
 
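The two-argument VirtualMemory constructor added above (and repeated for each POSIX platform in this patch) implements aligned reservation by over-reserving size + alignment bytes with PROT_NONE and then unmapping the unaligned prefix and the surplus suffix. A self-contained sketch of the same trick, assuming size and alignment are page-size multiples and alignment is a power of two:

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    void* ReserveAligned(size_t size, size_t alignment) {
      size_t request = size + alignment;
      void* raw = mmap(NULL, request, PROT_NONE,
                       MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
      if (raw == MAP_FAILED) return NULL;
      uintptr_t base = reinterpret_cast<uintptr_t>(raw);
      uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
      if (aligned != base)  // Trim the unaligned prefix.
        munmap(raw, aligned - base);
      size_t suffix = request - (aligned - base) - size;
      if (suffix != 0)      // Trim the surplus suffix.
        munmap(reinterpret_cast<void*>(aligned + size), suffix);
      return reinterpret_cast<void*>(aligned);
    }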
@@ -382,17 +487,11 @@
   pthread_t thread_;  // Thread handle for pthread.
 };
 
+
 Thread::Thread(const Options& options)
     : data_(new PlatformData),
-      stack_size_(options.stack_size) {
-  set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
-    : data_(new PlatformData),
-      stack_size_(0) {
-  set_name(name);
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
 }
 
 
@@ -645,14 +744,17 @@
   thread_act_t profiled_thread_;
 };
 
+
 class SamplerThread : public Thread {
  public:
+  static const int kSamplerThreadStackSize = 64 * KB;
+
   explicit SamplerThread(int interval)
-      : Thread("SamplerThread"),
+      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       instance_ = new SamplerThread(sampler->interval());
@@ -663,7 +765,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -760,16 +862,17 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SamplerThread* instance_;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(SamplerThread);
 };
 
 #undef REGISTER_FIELD
 
 
-Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
 SamplerThread* SamplerThread::instance_ = NULL;
 
 
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index 8c2a863..42799db 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -55,13 +55,42 @@
 }
 
 
+double fast_sin(double x) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+double fast_cos(double x) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+double fast_tan(double x) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
+double fast_log(double x) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
 // Initialize OS class early in the V8 startup.
-void OS::Setup() {
+void OS::SetUp() {
   // Seed the random number generator.
   UNIMPLEMENTED();
 }
 
 
+void OS::PostSetUp() {
+  UNIMPLEMENTED();
+}
+
+
 // Returns the accumulated user time for thread.
 int OS::GetUserTime(uint32_t* secs,  uint32_t* usecs) {
   UNIMPLEMENTED();
@@ -295,6 +324,12 @@
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+
 class Thread::PlatformData : public Malloced {
  public:
   PlatformData() {
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 973329b..6a06e3e 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,87 +25,115 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Platform specific code for OpenBSD goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
+// Platform-specific code for OpenBSD and NetBSD goes here. For the
+// POSIX-compatible parts, the implementation is in platform-posix.cc.
 
 #include <pthread.h>
 #include <semaphore.h>
 #include <signal.h>
 #include <sys/time.h>
 #include <sys/resource.h>
+#include <sys/syscall.h>
 #include <sys/types.h>
 #include <stdlib.h>
 
 #include <sys/types.h>  // mmap & munmap
 #include <sys/mman.h>   // mmap & munmap
 #include <sys/stat.h>   // open
-#include <sys/fcntl.h>  // open
-#include <unistd.h>     // getpagesize
+#include <fcntl.h>      // open
+#include <unistd.h>     // sysconf
 #include <execinfo.h>   // backtrace, backtrace_symbols
 #include <strings.h>    // index
 #include <errno.h>
 #include <stdarg.h>
-#include <limits.h>
 
 #undef MAP_TYPE
 
 #include "v8.h"
-#include "v8threads.h"
 
+#include "platform-posix.h"
 #include "platform.h"
+#include "v8threads.h"
 #include "vm-state-inl.h"
 
 
 namespace v8 {
 namespace internal {
 
-// 0 is never a valid thread id on OpenBSD since tids and pids share a
-// name space and pid 0 is used to kill the group (see man 2 kill).
+// 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a
+// name space and pid 0 is reserved (see man 2 kill).
 static const pthread_t kNoThread = (pthread_t) 0;
 
 
 double ceiling(double x) {
-    // Correct as on OS X
-    if (-1.0 < x && x < 0.0) {
-        return -0.0;
-    } else {
-        return ceil(x);
-    }
+  return ceil(x);
 }
 
 
 static Mutex* limit_mutex = NULL;
 
 
-void OS::Setup() {
-  // Seed the random number generator.
-  // Convert the current time to a 64-bit integer first, before converting it
-  // to an unsigned. Going directly can cause an overflow and the seed to be
-  // set to all ones. The seed will be identical for different instances that
-  // call this setup code within the same millisecond.
-  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+static void* GetRandomMmapAddr() {
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  // Note that the current isolate isn't set up in a call path via
+  // CpuFeatures::Probe. We don't care about randomization in this case because
+  // the code page is immediately freed.
+  if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+    uint64_t rnd1 = V8::RandomPrivate(isolate);
+    uint64_t rnd2 = V8::RandomPrivate(isolate);
+    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+    // Currently available CPUs have 48 bits of virtual addressing.  Truncate
+    // the hint address to 46 bits to give the kernel a fighting chance of
+    // fulfilling our placement request.
+    raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+    uint32_t raw_addr = V8::RandomPrivate(isolate);
+    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
+    raw_addr &= 0x3ffff000;
+    raw_addr += 0x20000000;
+#endif
+    return reinterpret_cast<void*>(raw_addr);
+  }
+  return NULL;
+}
+
+
+void OS::SetUp() {
+  // Seed the random number generator. We preserve microsecond resolution.
+  uint64_t seed = Ticks() ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
   limit_mutex = CreateMutex();
 }
 
 
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-  __asm__ __volatile__("" : : : "memory");
-  *ptr = value;
+void OS::PostSetUp() {
+  // Math functions depend on CPU features, so they are initialized after
+  // the CPU has been probed.
+  MathSetup();
 }
 
 
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
-  return 0;  // OpenBSD runs on anything.
+  return 0;
 }
 
 
 int OS::ActivationFrameAlignment() {
-  // 16 byte alignment on OpenBSD
+  // With gcc 4.4 the tree vectorization optimizer can generate code
+  // that requires 16 byte alignment such as movdqa on x86.
   return 16;
 }
 
 
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+  __asm__ __volatile__("" : : : "memory");
+  // An x86 store acts as a release barrier.
+  *ptr = value;
+}
+
+
 const char* OS::LocalTimezone(double time) {
   if (isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -126,7 +154,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low end and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -150,19 +178,20 @@
 
 
 size_t OS::AllocateAlignment() {
-  return getpagesize();
+  return sysconf(_SC_PAGESIZE);
 }
 
 
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
-                   bool executable) {
-  const size_t msize = RoundUp(requested, getpagesize());
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
-  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, AllocateAlignment());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* addr = GetRandomMmapAddr();
+  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
   if (mbase == MAP_FAILED) {
-    LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+    LOG(i::Isolate::Current(),
+        StringEvent("OS::Allocate", "mmap failed"));
     return NULL;
   }
   *allocated = msize;
@@ -171,9 +200,9 @@
 }
 
 
-void OS::Free(void* buf, const size_t length) {
+void OS::Free(void* address, const size_t size) {
   // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(buf, length);
+  int result = munmap(address, size);
   USE(result);
   ASSERT(result == 0);
 }
@@ -192,13 +221,7 @@
 
 
 void OS::DebugBreak() {
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
-  asm("bkpt 0");
-# endif
-#else
   asm("int $3");
-#endif
 }
 
 
@@ -245,61 +268,95 @@
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }
 
 
-static unsigned StringToLong(char* buffer) {
-  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
-}
-
-
 void OS::LogSharedLibraryAddresses() {
-  static const int MAP_LENGTH = 1024;
-  int fd = open("/proc/self/maps", O_RDONLY);
-  if (fd < 0) return;
+  // This function assumes that the layout of the file is as follows:
+  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+  // If we encounter an unexpected situation, we abort scanning further entries.
+  FILE* fp = fopen("/proc/self/maps", "r");
+  if (fp == NULL) return;
+
+  // Allocate enough room to be able to store a full file name.
+  const int kLibNameLen = FILENAME_MAX + 1;
+  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+  i::Isolate* isolate = ISOLATE;
+  // This loop will terminate once the scanning hits an EOF.
   while (true) {
-    char addr_buffer[11];
-    addr_buffer[0] = '0';
-    addr_buffer[1] = 'x';
-    addr_buffer[10] = 0;
-    int result = read(fd, addr_buffer + 2, 8);
-    if (result < 8) break;
-    unsigned start = StringToLong(addr_buffer);
-    result = read(fd, addr_buffer + 2, 1);
-    if (result < 1) break;
-    if (addr_buffer[2] != '-') break;
-    result = read(fd, addr_buffer + 2, 8);
-    if (result < 8) break;
-    unsigned end = StringToLong(addr_buffer);
-    char buffer[MAP_LENGTH];
-    int bytes_read = -1;
-    do {
-      bytes_read++;
-      if (bytes_read >= MAP_LENGTH - 1)
-        break;
-      result = read(fd, buffer + bytes_read, 1);
-      if (result < 1) break;
-    } while (buffer[bytes_read] != '\n');
-    buffer[bytes_read] = 0;
-    // Ignore mappings that are not executable.
-    if (buffer[3] != 'x') continue;
-    char* start_of_path = index(buffer, '/');
-    // There may be no filename in this line.  Skip to next.
-    if (start_of_path == NULL) continue;
-    buffer[bytes_read] = 0;
-    LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
+    uintptr_t start, end;
+    char attr_r, attr_w, attr_x, attr_p;
+    // Parse the addresses and permission bits at the beginning of the line.
+    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+    int c;
+    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+      // Found a read-only executable entry. Skip characters until we reach
+      // the beginning of the filename or the end of the line.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n') && (c != '/'));
+      if (c == EOF) break;  // EOF: Was unexpected, just exit.
+
+      // Process the filename if found.
+      if (c == '/') {
+        ungetc(c, fp);  // Push the '/' back into the stream to be read below.
+
+        // Read to the end of the line. Exit if the read fails.
+        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+        // Drop the newline character read by fgets. We do not need to check
+        // for a zero-length string because we know that we at least read the
+        // '/' character.
+        lib_name[strlen(lib_name) - 1] = '\0';
+      } else {
+        // No library name found, just record the raw address range.
+        snprintf(lib_name, kLibNameLen,
+                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+      }
+      LOG(isolate, SharedLibraryEvent(lib_name, start, end));
+    } else {
+      // Entry not describing executable data. Skip to end of line to set up
+      // reading the next entry.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n'));
+      if (c == EOF) break;
+    }
   }
-  close(fd);
+  free(lib_name);
+  fclose(fp);
 }
 
 
+static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
+
+
 void OS::SignalCodeMovingGC() {
+  // Support for ll_prof.py.
+  //
+  // The Linux profiler built into the kernel logs all mmap's with
+  // PROT_EXEC so that analysis tools can properly attribute ticks. We
+  // do a mmap with a name known by ll_prof.py and immediately munmap
+  // it. This injects a GC marker into the stream of events generated
+  // by the kernel and allows us to synchronize V8 code log and the
+  // kernel log.
+  int size = sysconf(_SC_PAGESIZE);
+  FILE* f = fopen(kGCFakeMmap, "w+");
+  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
+                    fileno(f), 0);
+  ASSERT(addr != MAP_FAILED);
+  OS::Free(addr, size);
+  fclose(f);
 }
 
 
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  // backtrace is a glibc extension.
   int frames_size = frames.length();
   ScopedVector<void*> addresses(frames_size);
 
@@ -331,64 +388,146 @@
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
+  address_ = ReserveRegion(size);
   size_ = size;
 }
 
 
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
 }
 
 
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
-    return false;
-  }
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
 
-  UpdateAllocatedSpaceLimits(address, size);
-  return true;
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, size);
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
 }
 
 
 class Thread::PlatformData : public Malloced {
  public:
+  PlatformData() : thread_(kNoThread) {}
+
   pthread_t thread_;  // Thread handle for pthread.
 };
 
-
 Thread::Thread(const Options& options)
-    : data_(new PlatformData),
-      stack_size_(options.stack_size) {
-  set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
-    : data_(new PlatformData),
-      stack_size_(0) {
-  set_name(name);
+    : data_(new PlatformData()),
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
 }
 
 
@@ -402,6 +541,11 @@
   // This is also initialized by the first argument to pthread_create() but we
   // don't know which thread will run first (the original thread or the new
   // one) so we initialize it here too.
+#ifdef PR_SET_NAME
+  prctl(PR_SET_NAME,
+        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
+        0, 0, 0);
+#endif
   thread->data()->thread_ = pthread_self();
   ASSERT(thread->data()->thread_ != kNoThread);
   thread->Run();
@@ -477,6 +621,7 @@
     ASSERT(result == 0);
     result = pthread_mutex_init(&mutex_, &attrs);
     ASSERT(result == 0);
+    USE(result);
   }
 
   virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
@@ -533,6 +678,14 @@
 }
 
 
+#ifndef TIMEVAL_TO_TIMESPEC
+#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
+    (ts)->tv_sec = (tv)->tv_sec;                                    \
+    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
+} while (false)
+#endif
+
+
 bool OpenBSDSemaphore::Wait(int timeout) {
   const long kOneSecondMicros = 1000000;  // NOLINT
 
@@ -566,29 +719,15 @@
   }
 }
 
-
 Semaphore* OS::CreateSemaphore(int count) {
   return new OpenBSDSemaphore(count);
 }
 
 
 static pthread_t GetThreadID() {
-  pthread_t thread_id = pthread_self();
-  return thread_id;
+  return pthread_self();
 }
 
-
-class Sampler::PlatformData : public Malloced {
- public:
-  PlatformData() : vm_tid_(GetThreadID()) {}
-
-  pthread_t vm_tid() const { return vm_tid_; }
-
- private:
-  pthread_t vm_tid_;
-};
-
-
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
   USE(info);
   if (signal != SIGPROF) return;
@@ -610,8 +749,20 @@
   if (sample == NULL) sample = &sample_obj;
 
   // Extracting the sample from the context is extremely machine dependent.
-  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
   sample->state = isolate->current_vm_state();
+  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+#ifdef __NetBSD__
+  mcontext_t& mcontext = ucontext->uc_mcontext;
+#if V8_HOST_ARCH_IA32
+  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
+  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
+  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
+#elif V8_HOST_ARCH_X64
+  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
+  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
+  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
+#endif  // V8_HOST_ARCH
+#else  // OpenBSD
 #if V8_HOST_ARCH_IA32
   sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
   sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
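Extracting pc/sp/fp from the ucontext handed to an SA_SIGINFO handler is, as the code above shows, thoroughly machine dependent: NetBSD goes through mcontext.__gregs[_REG_*] while OpenBSD exposes sc_* fields directly. For comparison, a minimal sketch of the same extraction on Linux/x86-64 with glibc (REG_RIP and friends come from <sys/ucontext.h>; g++ defines the required _GNU_SOURCE by default):

    #include <signal.h>
    #include <ucontext.h>

    static void ProfTick(int sig, siginfo_t* info, void* context) {
      if (sig != SIGPROF) return;
      (void) info;
      ucontext_t* uc = static_cast<ucontext_t*>(context);
    #if defined(__linux__) && defined(__x86_64__)
      void* pc = reinterpret_cast<void*>(uc->uc_mcontext.gregs[REG_RIP]);
      void* sp = reinterpret_cast<void*>(uc->uc_mcontext.gregs[REG_RSP]);
      void* fp = reinterpret_cast<void*>(uc->uc_mcontext.gregs[REG_RBP]);
      (void) pc; (void) sp; (void) fp;  // A real profiler records these.
    #else
      (void) uc;
    #endif
    }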
@@ -620,16 +771,24 @@
   sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
   sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
   sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
-#elif V8_HOST_ARCH_ARM
-  sample->pc = reinterpret_cast<Address>(ucontext->sc_r15);
-  sample->sp = reinterpret_cast<Address>(ucontext->sc_r13);
-  sample->fp = reinterpret_cast<Address>(ucontext->sc_r11);
-#endif
+#endif  // V8_HOST_ARCH
+#endif  // __NetBSD__
   sampler->SampleStack(sample);
   sampler->Tick(sample);
 }
 
 
+class Sampler::PlatformData : public Malloced {
+ public:
+  PlatformData() : vm_tid_(GetThreadID()) {}
+
+  pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+  pthread_t vm_tid_;
+};
+
+
 class SignalSender : public Thread {
  public:
   enum SleepInterval {
@@ -637,23 +796,35 @@
     FULL_INTERVAL
   };
 
+  static const int kSignalSenderStackSize = 64 * KB;
+
   explicit SignalSender(int interval)
-      : Thread("SignalSender"),
+      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
+        vm_tgid_(getpid()),
         interval_(interval) {}
 
+  static void InstallSignalHandler() {
+    struct sigaction sa;
+    sa.sa_sigaction = ProfilerSignalHandler;
+    sigemptyset(&sa.sa_mask);
+    sa.sa_flags = SA_RESTART | SA_SIGINFO;
+    signal_handler_installed_ =
+        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+  }
+
+  static void RestoreSignalHandler() {
+    if (signal_handler_installed_) {
+      sigaction(SIGPROF, &old_signal_handler_, 0);
+      signal_handler_installed_ = false;
+    }
+  }
+
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
-      // Install a signal handler.
-      struct sigaction sa;
-      sa.sa_sigaction = ProfilerSignalHandler;
-      sigemptyset(&sa.sa_mask);
-      sa.sa_flags = SA_RESTART | SA_SIGINFO;
-      signal_handler_installed_ =
-          (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-
-      // Start a thread that sends SIGPROF signal to VM threads.
+      // Start a thread that will send SIGPROF signals to VM threads
+      // once CPU profiling is enabled.
       instance_ = new SignalSender(sampler->interval());
       instance_->Start();
     } else {
@@ -662,18 +833,13 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
       delete instance_;
       instance_ = NULL;
-
-      // Restore the old signal handler.
-      if (signal_handler_installed_) {
-        sigaction(SIGPROF, &old_signal_handler_, 0);
-        signal_handler_installed_ = false;
-      }
+      RestoreSignalHandler();
     }
   }
 
@@ -685,6 +851,11 @@
       bool cpu_profiling_enabled =
           (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
       bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+      if (cpu_profiling_enabled && !signal_handler_installed_) {
+        InstallSignalHandler();
+      } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+        RestoreSignalHandler();
+      }
       // When CPU profiling is enabled both JavaScript and C++ code is
       // profiled. We must not suspend.
       if (!cpu_profiling_enabled) {
@@ -751,19 +922,22 @@
     USE(result);
   }
 
+  const int vm_tgid_;
   const int interval_;
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SignalSender* instance_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
+
+LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
 bool SignalSender::signal_handler_installed_ = false;
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 52cf029..59066ea 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -29,6 +29,8 @@
 // own but contains the parts which are the same across POSIX platforms Linux,
 // Mac OS, FreeBSD and OpenBSD.
 
+#include "platform-posix.h"
+
 #include <unistd.h>
 #include <errno.h>
 #include <time.h>
@@ -46,13 +48,14 @@
 
 #undef MAP_TYPE
 
-#if defined(ANDROID)
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
 #define LOG_TAG "v8"
-#include <utils/Log.h>  // LOG_PRI_VA
+#include <android/log.h>
 #endif
 
 #include "v8.h"
 
+#include "codegen.h"
 #include "platform.h"
 
 namespace v8 {
@@ -70,6 +73,12 @@
 }
 
 
+intptr_t OS::CommitPageSize() {
+  static intptr_t page_size = getpagesize();
+  return page_size;
+}
+
+
 #ifndef __CYGWIN__
 // Get rid of writable permission on code allocations.
 void OS::ProtectCode(void* address, const size_t size) {
@@ -84,6 +93,34 @@
 #endif  // __CYGWIN__
 
 
+void* OS::GetRandomMmapAddr() {
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  // Note that the current isolate isn't set up in a call path via
+  // CpuFeatures::Probe. We don't care about randomization in this case because
+  // the code page is immediately freed.
+  if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+    uint64_t rnd1 = V8::RandomPrivate(isolate);
+    uint64_t rnd2 = V8::RandomPrivate(isolate);
+    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+    // Currently available CPUs have 48 bits of virtual addressing.  Truncate
+    // the hint address to 46 bits to give the kernel a fighting chance of
+    // fulfilling our placement request.
+    raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+    uint32_t raw_addr = V8::RandomPrivate(isolate);
+    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+    // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
+    // 10.6 and 10.7.
+    raw_addr &= 0x3ffff000;
+    raw_addr += 0x20000000;
+#endif
+    return reinterpret_cast<void*>(raw_addr);
+  }
+  return NULL;
+}
+
+
 // ----------------------------------------------------------------------------
 // Math functions
 
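OS::GetRandomMmapAddr above only manufactures a placement hint: without MAP_FIXED, mmap is free to ignore it, which is why truncating the x64 hint to 46 bits merely gives the kernel "a fighting chance". A minimal sketch of consuming such a hint on a 64-bit POSIX system (random() stands in for V8::RandomPrivate):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    void* AllocateRandomized(size_t size) {
      // Build a 46-bit, page-aligned hint the way the code above does.
      uintptr_t raw = (static_cast<uintptr_t>(random()) << 32) ^
                      static_cast<uintptr_t>(random());
      raw &= 0x3ffffffff000ull;
      void* hint = reinterpret_cast<void*>(raw);
      // No MAP_FIXED, so the kernel may place the mapping elsewhere.
      void* result = mmap(hint, size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANON, -1, 0);
      return result == MAP_FAILED ? NULL : result;
    }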
@@ -92,6 +129,33 @@
 }
 
 
+#define UNARY_MATH_FUNCTION(name, generator)             \
+static UnaryMathFunction fast_##name##_function = NULL;  \
+void init_fast_##name##_function() {                     \
+  fast_##name##_function = generator;                    \
+}                                                        \
+double fast_##name(double x) {                           \
+  return (*fast_##name##_function)(x);                   \
+}
+
+UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
+UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
+UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
+UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
+UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
+
+#undef UNARY_MATH_FUNCTION
+
+
+void MathSetup() {
+  init_fast_sin_function();
+  init_fast_cos_function();
+  init_fast_tan_function();
+  init_fast_log_function();
+  init_fast_sqrt_function();
+}
+
+
 double OS::nan_value() {
   // NAN from math.h is defined in C99 and not in POSIX.
   return NAN;
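Each UNARY_MATH_FUNCTION line above stamps out a function pointer, an init routine, and a trampoline; OS::PostSetUp later runs the init routines once CPU features are known. One instantiation expands to roughly the following (UnaryMathFunction is double(*)(double) in the V8 sources):

    static UnaryMathFunction fast_sin_function = NULL;
    void init_fast_sin_function() {
      fast_sin_function =
          CreateTranscendentalFunction(TranscendentalCache::SIN);
    }
    double fast_sin(double x) {
      return (*fast_sin_function)(x);
    }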
@@ -182,7 +246,7 @@
 
 void OS::VPrint(const char* format, va_list args) {
 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-  LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
+  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
 #else
   vprintf(format, args);
 #endif
@@ -199,7 +263,7 @@
 
 void OS::VFPrint(FILE* out, const char* format, va_list args) {
 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-  LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
+  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
 #else
   vfprintf(out, format, args);
 #endif
@@ -216,7 +280,7 @@
 
 void OS::VPrintError(const char* format, va_list args) {
 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-  LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args);
+  __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
 #else
   vfprintf(stderr, format, args);
 #endif
@@ -249,14 +313,14 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 static OS::MemCopyFunction memcopy_function = NULL;
-static Mutex* memcopy_function_mutex = OS::CreateMutex();
+static LazyMutex memcopy_function_mutex = LAZY_MUTEX_INITIALIZER;
 // Defined in codegen-ia32.cc.
 OS::MemCopyFunction CreateMemCopyFunction();
 
 // Copy memory area to disjoint memory area.
 void OS::MemCopy(void* dest, const void* src, size_t size) {
   if (memcopy_function == NULL) {
-    ScopedLock lock(memcopy_function_mutex);
+    ScopedLock lock(memcopy_function_mutex.Pointer());
     if (memcopy_function == NULL) {
       OS::MemCopyFunction temp = CreateMemCopyFunction();
       MemoryBarrier();
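The OS::MemCopy path above is classic double-checked locking: an unlocked fast-path read, a lock, a re-check, then publication behind MemoryBarrier so the function pointer cannot become visible before the generated stub it points to. LazyMutex replaces OS::CreateMutex() so that no static initializer has to run first. For reference, a sketch of the same publication pattern in C++11 terms (not something this 2012 codebase could use):

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    typedef void (*MemCopyFn)(void* dest, const void* src, size_t size);

    static std::atomic<MemCopyFn> g_memcopy(nullptr);
    static std::mutex g_memcopy_mutex;

    MemCopyFn GetMemCopy(MemCopyFn (*create)()) {
      MemCopyFn fn = g_memcopy.load(std::memory_order_acquire);
      if (fn == nullptr) {
        std::lock_guard<std::mutex> lock(g_memcopy_mutex);
        fn = g_memcopy.load(std::memory_order_relaxed);
        if (fn == nullptr) {
          fn = create();  // One-time creation of the generated stub.
          g_memcopy.store(fn, std::memory_order_release);  // Publish.
        }
      }
      return fn;
    }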
@@ -427,7 +491,7 @@
 }
 
 
-bool Socket::Setup() {
+bool Socket::SetUp() {
   // Nothing to do on POSIX.
   return true;
 }
diff --git a/test/mjsunit/regress/regress-221.js b/src/platform-posix.h
similarity index 81%
copy from test/mjsunit/regress/regress-221.js
copy to src/platform-posix.h
index d3f2e35..4ae0e52 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/src/platform-posix.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+#ifndef V8_PLATFORM_POSIX_H_
+#define V8_PLATFORM_POSIX_H_
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+namespace v8 {
+namespace internal {
 
-assertThrows('eval(delete eval)');
+// Used by platform implementation files during OS::PostSetUp() to initialize
+// the math functions.
+void MathSetup();
 
+} }  // namespace v8::internal
+
+#endif  // V8_PLATFORM_POSIX_H_
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 035d394..e044dbc 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,7 +52,9 @@
 
 #include "v8.h"
 
+#include "platform-posix.h"
 #include "platform.h"
+#include "v8threads.h"
 #include "vm-state-inl.h"
 
 
@@ -89,7 +91,7 @@
 
 
 static Mutex* limit_mutex = NULL;
-void OS::Setup() {
+void OS::SetUp() {
   // Seed the random number generator.
   // Convert the current time to a 64-bit integer first, before converting it
   // to an unsigned. Going directly will cause an overflow and the seed to be
@@ -101,6 +103,13 @@
 }
 
 
+void OS::PostSetUp() {
+  // Math functions depend on CPU features, so they are initialized after
+  // the CPU has been probed.
+  MathSetup();
+}
+
+
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
   return 0;  // Solaris runs on a lot of things.
 }
@@ -139,7 +148,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low end and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -322,43 +331,132 @@
 static const int kMmapFdOffset = 0;
 
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
+  address_ = ReserveRegion(size);
   size_ = size;
 }
 
 
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
 }
 
 
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
-    return false;
-  }
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
 
-  UpdateAllocatedSpaceLimits(address, size);
-  return true;
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
-              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, size);
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
 }
 
 
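VirtualMemory::Guard above delegates to OS::Guard over one commit page: a page whose permissions are revoked entirely, so any stray access faults immediately. A POSIX sketch of the same effect (mprotect assumed; addr must be page-aligned):

    #include <sys/mman.h>
    #include <unistd.h>

    bool GuardPage(void* addr) {
      size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      return mprotect(addr, page, PROT_NONE) == 0;
    }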
@@ -369,17 +467,11 @@
   pthread_t thread_;  // Thread handle for pthread.
 };
 
+
 Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      stack_size_(options.stack_size) {
-  set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
-    : data_(new PlatformData()),
-      stack_size_(0) {
-  set_name(name);
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
 }
 
 
@@ -626,8 +718,10 @@
     FULL_INTERVAL
   };
 
+  static const int kSignalSenderStackSize = 64 * KB;
+
   explicit SignalSender(int interval)
-      : Thread("SignalSender"),
+      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
         interval_(interval) {}
 
   static void InstallSignalHandler() {
@@ -647,7 +741,7 @@
   }
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       // Start a thread that will send SIGPROF signal to VM threads,
@@ -660,7 +754,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -754,15 +848,16 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SignalSender* instance_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
+LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
 bool SignalSender::signal_handler_installed_ = false;
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 97788e2..c79f442 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,6 +32,7 @@
 
 #include "v8.h"
 
+#include "codegen.h"
 #include "platform.h"
 #include "vm-state-inl.h"
 
@@ -58,21 +59,26 @@
 }
 
 
-// Not sure this the correct interpretation of _mkgmtime
-time_t _mkgmtime(tm* timeptr) {
-  return mktime(timeptr);
-}
-
-
 int fopen_s(FILE** pFile, const char* filename, const char* mode) {
   *pFile = fopen(filename, mode);
   return *pFile != NULL ? 0 : 1;
 }
 
 
+#ifndef __MINGW64_VERSION_MAJOR
+
+// Not sure this is the correct interpretation of _mkgmtime
+time_t _mkgmtime(tm* timeptr) {
+  return mktime(timeptr);
+}
+
+
 #define _TRUNCATE 0
 #define STRUNCATE 80
 
+#endif  // __MINGW64_VERSION_MAJOR
+
+
 int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
                  const char* format, va_list argptr) {
   ASSERT(count == _TRUNCATE);
@@ -107,11 +113,16 @@
 }
 
 
+#ifndef __MINGW64_VERSION_MAJOR
+
 inline void MemoryBarrier() {
   int barrier = 0;
   __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
 }
 
+#endif  // __MINGW64_VERSION_MAJOR
+
+
 #endif  // __MINGW32__
 
 // Generate a pseudo-random number in the range 0-2^31-1. Usually
@@ -138,14 +149,14 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 static OS::MemCopyFunction memcopy_function = NULL;
-static Mutex* memcopy_function_mutex = OS::CreateMutex();
+static LazyMutex memcopy_function_mutex = LAZY_MUTEX_INITIALIZER;
 // Defined in codegen-ia32.cc.
 OS::MemCopyFunction CreateMemCopyFunction();
 
 // Copy memory area to disjoint memory area.
 void OS::MemCopy(void* dest, const void* src, size_t size) {
   if (memcopy_function == NULL) {
-    ScopedLock lock(memcopy_function_mutex);
+    ScopedLock lock(memcopy_function_mutex.Pointer());
     if (memcopy_function == NULL) {
       OS::MemCopyFunction temp = CreateMemCopyFunction();
       MemoryBarrier();
@@ -164,19 +175,14 @@
 #ifdef _WIN64
 typedef double (*ModuloFunction)(double, double);
 static ModuloFunction modulo_function = NULL;
-static Mutex* modulo_function_mutex = OS::CreateMutex();
 // Defined in codegen-x64.cc.
 ModuloFunction CreateModuloFunction();
 
+void init_modulo_function() {
+  modulo_function = CreateModuloFunction();
+}
+
 double modulo(double x, double y) {
-  if (modulo_function == NULL) {
-    ScopedLock lock(modulo_function_mutex);
-    if (modulo_function == NULL) {
-      ModuloFunction temp = CreateModuloFunction();
-      MemoryBarrier();
-      modulo_function = temp;
-    }
-  }
   // Note: here we rely on dependent reads being ordered. This is true
   // on all architectures we currently support.
   return (*modulo_function)(x, y);
@@ -196,9 +202,40 @@
 
 #endif  // _WIN64
 
+
+#define UNARY_MATH_FUNCTION(name, generator)             \
+static UnaryMathFunction fast_##name##_function = NULL;  \
+void init_fast_##name##_function() {                     \
+  fast_##name##_function = generator;                    \
+}                                                        \
+double fast_##name(double x) {                           \
+  return (*fast_##name##_function)(x);                   \
+}
+
+UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
+UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
+UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
+UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
+UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
+
+#undef UNARY_MATH_FUNCTION
+
+
+void MathSetup() {
+#ifdef _WIN64
+  init_modulo_function();
+#endif
+  init_fast_sin_function();
+  init_fast_cos_function();
+  init_fast_tan_function();
+  init_fast_log_function();
+  init_fast_sqrt_function();
+}
+
+
 // ----------------------------------------------------------------------------
 // The Time class represents time on win32. A timestamp is represented as
-// a 64-bit integer in 100 nano-seconds since January 1, 1601 (UTC). JavaScript
+// a 64-bit integer in 100-nanosecond intervals since January 1, 1601 (UTC).
 // JavaScript timestamps are represented as doubles in milliseconds since
 // 00:00:00 UTC, January 1, 1970.
 
@@ -528,7 +565,7 @@
 }
 
 
-void OS::Setup() {
+void OS::SetUp() {
   // Seed the random number generator.
   // Convert the current time to a 64-bit integer first, before converting it
   // to an unsigned. Going directly can cause an overflow and the seed to be
@@ -540,6 +577,13 @@
 }
 
 
+void OS::PostSetUp() {
+  // Math functions depend on CPU features, so they are initialized after
+  // the CPU has been probed.
+  MathSetup();
+}
+
+
 // Returns the accumulated user time for thread.
 int OS::GetUserTime(uint32_t* secs,  uint32_t* usecs) {
   FILETIME dummy;
@@ -776,7 +820,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low end and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -831,43 +875,62 @@
 }
 
 
+static void* GetRandomAddr() {
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  // Note that the current isolate isn't set up in a call path via
+  // CpuFeatures::Probe. We don't care about randomization in this case because
+  // the code page is immediately freed.
+  if (isolate != NULL) {
+    // The address range used to randomize RWX allocations in OS::Allocate.
+    // Try not to map pages into the default range into which Windows loads
+    // DLLs. Use a multiple of 64k to prevent committing unused memory.
+    // Note: this does not guarantee RWX regions will be within the range
+    // kAllocationRandomAddressMin to kAllocationRandomAddressMax.
+#ifdef V8_HOST_ARCH_64_BIT
+    static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+    static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+    static const intptr_t kAllocationRandomAddressMin = 0x04000000;
+    static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+    uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits)
+        | kAllocationRandomAddressMin;
+    address &= kAllocationRandomAddressMax;
+    return reinterpret_cast<void *>(address);
+  }
+  return NULL;
+}
+
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+  LPVOID base = NULL;
+
+  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For executable pages, try to randomize the allocation address.
+    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+      base = VirtualAlloc(GetRandomAddr(), size, action, protection);
+    }
+  }
+
+  // After three attempts give up and let the OS find an address to use.
+  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+  return base;
+}
+
+
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
-  // The address range used to randomize RWX allocations in OS::Allocate
-  // Try not to map pages into the default range that windows loads DLLs
-  // Use a multiple of 64k to prevent committing unused memory.
-  // Note: This does not guarantee RWX regions will be within the
-  // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
-#ifdef V8_HOST_ARCH_64_BIT
-  static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
-  static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
-#else
-  static const intptr_t kAllocationRandomAddressMin = 0x04000000;
-  static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
-#endif
-
   // VirtualAlloc rounds allocated size to page size automatically.
   size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
-  intptr_t address = 0;
 
   // Windows XP SP2 allows Data Execution Prevention (DEP).
   int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
 
-  // For exectutable pages try and randomize the allocation address
-  if (prot == PAGE_EXECUTE_READWRITE &&
-      msize >= static_cast<size_t>(Page::kPageSize)) {
-    address = (V8::RandomPrivate(Isolate::Current()) << kPageSizeBits)
-      | kAllocationRandomAddressMin;
-    address &= kAllocationRandomAddressMax;
-  }
-
-  LPVOID mbase = VirtualAlloc(reinterpret_cast<void *>(address),
-                              msize,
-                              MEM_COMMIT | MEM_RESERVE,
-                              prot);
-  if (mbase == NULL && address != 0)
-    mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot);
+  LPVOID mbase = RandomizedVirtualAlloc(msize,
+                                        MEM_COMMIT | MEM_RESERVE,
+                                        prot);
 
   if (mbase == NULL) {
     LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed"));
@@ -889,6 +952,11 @@
 }
 
 
+intptr_t OS::CommitPageSize() {
+  return 4096;
+}
+
+
 void OS::ProtectCode(void* address, const size_t size) {
   DWORD old_protect;
   VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
@@ -907,15 +975,11 @@
 
 
 void OS::Abort() {
-  if (!IsDebuggerPresent()) {
-#ifdef _MSC_VER
-    // Make the MSVCRT do a silent abort.
-    _set_abort_behavior(0, _WRITE_ABORT_MSG);
-    _set_abort_behavior(0, _CALL_REPORTFAULT);
-#endif  // _MSC_VER
-    abort();
-  } else {
+  if (IsDebuggerPresent() || FLAG_break_on_abort) {
     DebugBreak();
+  } else {
+    // Make the MSVCRT do a silent abort.
+    raise(SIGABRT);
   }
 }
 
@@ -1397,38 +1461,108 @@
 }
 
 
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 
-VirtualMemory::VirtualMemory(size_t size) {
-  address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
-  size_ = size;
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* address = ReserveRegion(request_size);
+  if (address == NULL) return;
+  Address base = RoundUp(static_cast<Address>(address), alignment);
+  // Try reducing the size by freeing and then reallocating a specific area.
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  ASSERT(result);
+  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+  if (address != NULL) {
+    request_size = size;
+    ASSERT(base == static_cast<Address>(address));
+  } else {
+    // Resizing failed, just go with a bigger area.
+    address = ReserveRegion(request_size);
+    if (address == NULL) return;
+  }
+  address_ = address;
+  size_ = request_size;
 }
 
 
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
+    bool result = ReleaseRegion(address_, size_);
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
-    return false;
-  }
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
 
-  UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
-  return true;
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  if (CommitRegion(address, size, is_executable)) {
+    UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
+    return true;
+  }
+  return false;
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
   ASSERT(IsReserved());
-  return VirtualFree(address, size, MEM_DECOMMIT) != false;
+  return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
+  return true;
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_READONLY | PAGE_GUARD)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return VirtualFree(base, 0, MEM_RELEASE) != 0;
 }
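Splitting reservation from commitment lets callers back a large reserved range one page at a time. A minimal sketch of the calling pattern these static helpers support (error handling elided; MB is V8's usual size constant):

    void* base = VirtualMemory::ReserveRegion(1 * MB);  // MEM_RESERVE only
    if (base != NULL) {
      size_t page = static_cast<size_t>(OS::CommitPageSize());
      if (VirtualMemory::CommitRegion(base, page, false)) {
        // Use the committed page; the rest stays reserved but unbacked.
        VirtualMemory::UncommitRegion(base, page);
      }
      // ReleaseRegion must be given the exact size passed to ReserveRegion.
      VirtualMemory::ReleaseRegion(base, 1 * MB);
    }
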
 
 
@@ -1453,6 +1587,7 @@
  public:
   explicit PlatformData(HANDLE thread) : thread_(thread) {}
   HANDLE thread_;
+  unsigned thread_id_;
 };
 
 
@@ -1460,16 +1595,9 @@
 // handle until it is started.
 
 Thread::Thread(const Options& options)
-    : stack_size_(options.stack_size) {
+    : stack_size_(options.stack_size()) {
   data_ = new PlatformData(kNoThread);
-  set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
-    : stack_size_(0) {
-  data_ = new PlatformData(kNoThread);
-  set_name(name);
+  set_name(options.name());
 }
 
 
@@ -1496,13 +1624,15 @@
                      ThreadEntry,
                      this,
                      0,
-                     NULL));
+                     &data_->thread_id_));
 }
 
 
 // Wait for thread to terminate.
 void Thread::Join() {
-  WaitForSingleObject(data_->thread_, INFINITE);
+  if (data_->thread_id_ != GetCurrentThreadId()) {
+    WaitForSingleObject(data_->thread_, INFINITE);
+  }
 }
 
 
@@ -1757,7 +1887,7 @@
 }
 
 
-bool Socket::Setup() {
+bool Socket::SetUp() {
   // Initialize Winsock32
   int err;
   WSADATA winsock_data;
@@ -1833,12 +1963,14 @@
 
 class SamplerThread : public Thread {
  public:
+  static const int kSamplerThreadStackSize = 64 * KB;
+
   explicit SamplerThread(int interval)
-      : Thread("SamplerThread"),
+      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       instance_ = new SamplerThread(sampler->interval());
@@ -1849,7 +1981,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -1935,14 +2067,15 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SamplerThread* instance_;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(SamplerThread);
 };
 
 
-Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
 SamplerThread* SamplerThread::instance_ = NULL;
 
 
diff --git a/src/platform.h b/src/platform.h
index 034fe34..3b2aa3c 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -79,6 +79,7 @@
 #endif  // WIN32
 
 #include "atomicops.h"
+#include "lazy-instance.h"
 #include "platform-tls.h"
 #include "utils.h"
 #include "v8globals.h"
@@ -96,6 +97,13 @@
 double ceiling(double x);
 double modulo(double x, double y);
 
+// Custom implementations of sin, cos, tan, log and sqrt.
+double fast_sin(double input);
+double fast_cos(double input);
+double fast_tan(double input);
+double fast_log(double input);
+double fast_sqrt(double input);
+
 // Forward declarations.
 class Socket;
 
@@ -109,7 +117,11 @@
 class OS {
  public:
   // Initializes the platform OS support. Called once at VM startup.
-  static void Setup();
+  static void SetUp();
+
+  // Initializes the platform OS support that depends on CPU features. This is
+  // called after CPU initialization.
+  static void PostSetUp();
 
   // Returns the accumulated user time for thread. This routine
   // can be used for profiling. The implementation should
@@ -172,12 +184,19 @@
                         bool is_executable);
   static void Free(void* address, const size_t size);
 
+  // This is the granularity at which the ProtectCode(...) call can set page
+  // permissions.
+  static intptr_t CommitPageSize();
+
   // Mark code segments non-writable.
   static void ProtectCode(void* address, const size_t size);
 
   // Assign memory as a guard page so that access will cause an exception.
   static void Guard(void* address, const size_t size);
 
+  // Generate a random address to be used for hinting mmap().
+  static void* GetRandomMmapAddr();
+
   // Get the Alignment guaranteed by Allocate().
   static size_t AllocateAlignment();
 
@@ -301,23 +320,46 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
 };
 
-
+// Represents and controls an area of reserved memory.
+// Control of the reserved memory can be assigned to another VirtualMemory
+// object by assignment or copy-constructing. This removes the reserved memory
+// from the original object.
 class VirtualMemory {
  public:
+  // Empty VirtualMemory object, controlling no reserved memory.
+  VirtualMemory();
+
   // Reserves virtual memory with size.
   explicit VirtualMemory(size_t size);
+
+  // Reserves virtual memory containing an area of the given size that
+  // is aligned to the given alignment. The aligned area may not start at
+  // the position returned by address().
+  VirtualMemory(size_t size, size_t alignment);
+
+  // Releases the reserved memory, if any, controlled by this VirtualMemory
+  // object.
   ~VirtualMemory();
 
   // Returns whether the memory has been reserved.
   bool IsReserved();
 
+  // Initializes or resets an embedded VirtualMemory object.
+  void Reset();
+
   // Returns the start address of the reserved memory.
+  // If the memory was reserved with an alignment, this address is not
+  // necessarily aligned. The user might need to round it up to a multiple of
+  // the alignment to get the start of the aligned block.
   void* address() {
     ASSERT(IsReserved());
     return address_;
   }
 
-  // Returns the size of the reserved memory.
+  // Returns the size of the reserved memory. The returned value is only
+  // meaningful when IsReserved() returns true.
+  // If the memory was reserved with an alignment, this size may be larger
+  // than the requested size.
   size_t size() { return size_; }
 
   // Commits real memory. Returns whether the operation succeeded.
@@ -326,11 +368,46 @@
   // Uncommit real memory.  Returns whether the operation succeeded.
   bool Uncommit(void* address, size_t size);
 
+  // Creates a single guard page at the given address.
+  bool Guard(void* address);
+
+  void Release() {
+    ASSERT(IsReserved());
+    // Notice: Order is important here. The VirtualMemory object might live
+    // inside the allocated region.
+    void* address = address_;
+    size_t size = size_;
+    Reset();
+    bool result = ReleaseRegion(address, size);
+    USE(result);
+    ASSERT(result);
+  }
+
+  // Assign control of the reserved region to a different VirtualMemory object.
+  // The old object is no longer functional (IsReserved() returns false).
+  void TakeControl(VirtualMemory* from) {
+    ASSERT(!IsReserved());
+    address_ = from->address_;
+    size_ = from->size_;
+    from->Reset();
+  }
+
+  static void* ReserveRegion(size_t size);
+
+  static bool CommitRegion(void* base, size_t size, bool is_executable);
+
+  static bool UncommitRegion(void* base, size_t size);
+
+  // Must be called with a base pointer that has been returned by ReserveRegion
+  // and the same size it was reserved with.
+  static bool ReleaseRegion(void* base, size_t size);
+
  private:
   void* address_;  // Start address of the virtual memory.
   size_t size_;  // Size of the virtual memory.
 };
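A sketch of how the aligned constructor and TakeControl compose, based only on the declarations above (sizes are arbitrary; Address, RoundUp, KB and MB are V8's usual helpers):

    VirtualMemory reservation(2 * MB, 1 * MB);  // may reserve more than 2MB
    if (reservation.IsReserved()) {
      Address aligned =
          RoundUp(static_cast<Address>(reservation.address()), 1 * MB);
      reservation.Commit(aligned, 4 * KB, false);  // back the aligned start
      VirtualMemory owner;
      owner.TakeControl(&reservation);  // 'reservation' is now empty
    }  // 'owner' releases the region in its destructor
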
 
+
 // ----------------------------------------------------------------------------
 // Thread
 //
@@ -350,16 +427,22 @@
     LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
   };
 
-  struct Options {
-    Options() : name("v8:<unknown>"), stack_size(0) {}
+  class Options {
+   public:
+    Options() : name_("v8:<unknown>"), stack_size_(0) {}
+    Options(const char* name, int stack_size = 0)
+        : name_(name), stack_size_(stack_size) {}
 
-    const char* name;
-    int stack_size;
+    const char* name() const { return name_; }
+    int stack_size() const { return stack_size_; }
+
+   private:
+    const char* name_;
+    int stack_size_;
   };
 
   // Create new thread.
   explicit Thread(const Options& options);
-  explicit Thread(const char* name);
   virtual ~Thread();
 
   // Start new thread by calling the Run() method in the new thread.
@@ -415,7 +498,7 @@
   PlatformData* data() { return data_; }
 
  private:
-  void set_name(const char *name);
+  void set_name(const char* name);
 
   PlatformData* data_;
 
@@ -451,6 +534,25 @@
   virtual bool TryLock() = 0;
 };
 
+struct CreateMutexTrait {
+  static Mutex* Create() {
+    return OS::CreateMutex();
+  }
+};
+
+// POD Mutex initialized lazily (i.e., the first time Pointer() is called).
+// Usage:
+//   static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
+//
+//   void my_function() {
+//     ScopedLock my_lock(my_mutex.Pointer());
+//     // Do something.
+//   }
+//
+typedef LazyDynamicInstance<
+    Mutex, CreateMutexTrait, ThreadSafeInitOnceTrait>::type LazyMutex;
+
+#define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
 
 // ----------------------------------------------------------------------------
 // ScopedLock
@@ -491,7 +593,7 @@
   virtual void Wait() = 0;
 
   // Suspends the calling thread until the counter is non zero or the timeout
-  // time has passsed. If timeout happens the return value is false and the
+  // time has passed. If timeout happens the return value is false and the
   // counter is unchanged. Otherwise the semaphore counter is decremented and
   // true is returned. The timeout value is specified in microseconds.
   virtual bool Wait(int timeout) = 0;
@@ -500,6 +602,31 @@
   virtual void Signal() = 0;
 };
 
+template <int InitialValue>
+struct CreateSemaphoreTrait {
+  static Semaphore* Create() {
+    return OS::CreateSemaphore(InitialValue);
+  }
+};
+
+// POD Semaphore initialized lazily (i.e., the first time Pointer() is called).
+// Usage:
+//   // The following semaphore starts at 0.
+//   static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
+//
+//   void my_function() {
+//     // Do something with my_semaphore.Pointer().
+//   }
+//
+template <int InitialValue>
+struct LazySemaphore {
+  typedef typename LazyDynamicInstance<
+      Semaphore, CreateSemaphoreTrait<InitialValue>,
+      ThreadSafeInitOnceTrait>::type type;
+};
+
+#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
+
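Both wrappers are PODs, so they are safe as static initializers in any translation unit, and the underlying OS object is only created the first time Pointer() is called. A sketch combining the two (function names are hypothetical):

    static LazyMutex entry_mutex = LAZY_MUTEX_INITIALIZER;
    static LazySemaphore<0>::type work_ready = LAZY_SEMAPHORE_INITIALIZER;

    void Produce() {
      ScopedLock lock(entry_mutex.Pointer());  // mutex created on first use
      work_ready.Pointer()->Signal();
    }

    void Consume() {
      work_ready.Pointer()->Wait();  // blocks until a Produce() has run
    }
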
 
 // ----------------------------------------------------------------------------
 // Socket
@@ -531,7 +658,7 @@
 
   virtual bool IsValid() const = 0;
 
-  static bool Setup();
+  static bool SetUp();
   static int LastError();
   static uint16_t HToN(uint16_t value);
   static uint16_t NToH(uint16_t value);
diff --git a/src/preparse-data.h b/src/preparse-data.h
index c6503c4..f347430 100644
--- a/src/preparse-data.h
+++ b/src/preparse-data.h
@@ -49,11 +49,11 @@
                            int end,
                            int literals,
                            int properties,
-                           int strict_mode) = 0;
+                           LanguageMode language_mode) = 0;
 
   // Logs the creation of a symbol for a literal or identifier.
   virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
-  virtual void LogUC16Symbol(int start, Vector<const uc16> literal) { }
+  virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
 
   // Logs an error message and marks the log as containing an error.
   // Further logging will be ignored, and ExtractData will return a vector
@@ -89,12 +89,12 @@
                            int end,
                            int literals,
                            int properties,
-                           int strict_mode) {
+                           LanguageMode language_mode) {
     function_store_.Add(start);
     function_store_.Add(end);
     function_store_.Add(literals);
     function_store_.Add(properties);
-    function_store_.Add(strict_mode);
+    function_store_.Add(language_mode);
   }
 
   // Logs an error message and marks the log as containing an error.
@@ -149,7 +149,7 @@
  public:
   PartialParserRecorder() : FunctionLoggingParserRecorder() { }
   virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
-  virtual void LogUC16Symbol(int start, Vector<const uc16> literal) { }
+  virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
   virtual ~PartialParserRecorder() { }
   virtual Vector<unsigned> ExtractData();
   virtual int symbol_position() { return 0; }
@@ -171,7 +171,7 @@
     LogSymbol(start, hash, true, Vector<const byte>::cast(literal));
   }
 
-  virtual void LogUC16Symbol(int start, Vector<const uc16> literal) {
+  virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) {
     if (!is_recording_) return;
     int hash = vector_hash(literal);
     LogSymbol(start, hash, false, Vector<const byte>::cast(literal));
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index 899489e..6e8556a 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -46,10 +46,10 @@
 namespace internal {
 
 // UTF16Buffer based on a v8::UnicodeInputStream.
-class InputStreamUTF16Buffer : public UC16CharacterStream {
+class InputStreamUtf16Buffer : public Utf16CharacterStream {
  public:
-  /* The InputStreamUTF16Buffer maintains an internal buffer
-   * that is filled in chunks from the UC16CharacterStream.
+  /* The InputStreamUtf16Buffer maintains an internal buffer
+   * that is filled in chunks from the Utf16CharacterStream.
    * It also maintains unlimited pushback capability, but optimized
    * for small pushbacks.
    * The pushback_buffer_ pointer points to the limit of pushbacks
@@ -60,8 +60,8 @@
    * new buffer. When this buffer is read to the end again, the cursor is
    * switched back to the internal buffer
    */
-  explicit InputStreamUTF16Buffer(v8::UnicodeInputStream* stream)
-      : UC16CharacterStream(),
+  explicit InputStreamUtf16Buffer(v8::UnicodeInputStream* stream)
+      : Utf16CharacterStream(),
         stream_(stream),
         pushback_buffer_(buffer_),
         pushback_buffer_end_cache_(NULL),
@@ -70,7 +70,7 @@
     buffer_cursor_ = buffer_end_ = buffer_ + kPushBackSize;
   }
 
-  virtual ~InputStreamUTF16Buffer() {
+  virtual ~InputStreamUtf16Buffer() {
     if (pushback_buffer_backing_ != NULL) {
       DeleteArray(pushback_buffer_backing_);
     }
@@ -127,12 +127,18 @@
     uc16* buffer_start = buffer_ + kPushBackSize;
     buffer_cursor_ = buffer_end_ = buffer_start;
     while ((value = stream_->Next()) >= 0) {
-      if (value > static_cast<int32_t>(unibrow::Utf8::kMaxThreeByteChar)) {
-        value = unibrow::Utf8::kBadChar;
+      if (value >
+          static_cast<int32_t>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
+        buffer_start[buffer_end_++ - buffer_start] =
+            unibrow::Utf16::LeadSurrogate(value);
+        buffer_start[buffer_end_++ - buffer_start] =
+            unibrow::Utf16::TrailSurrogate(value);
+      } else {
+        // buffer_end_ is a const pointer, but buffer_ is writable.
+        buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
       }
-      // buffer_end_ is a const pointer, but buffer_ is writable.
-      buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
-      if (buffer_end_ == buffer_ + kPushBackSize + kBufferSize) break;
+      // Stop one before the end of the buffer in case we get a surrogate pair.
+      if (buffer_end_ <= buffer_ + 1 + kPushBackSize + kBufferSize) break;
     }
     return buffer_end_ > buffer_start;
   }
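Code points above 0xFFFF no longer collapse to kBadChar; they are split into a UTF-16 surrogate pair. The standard arithmetic behind LeadSurrogate/TrailSurrogate, shown standalone:

    #include <cstdint>

    uint16_t Lead(uint32_t code_point) {  // requires code_point > 0xFFFF
      return static_cast<uint16_t>(
          0xD800 + (((code_point - 0x10000) >> 10) & 0x3FF));
    }

    uint16_t Trail(uint32_t code_point) {
      return static_cast<uint16_t>(0xDC00 + ((code_point - 0x10000) & 0x3FF));
    }

    // Example: U+1D11E (musical symbol G clef) encodes as 0xD834, 0xDD1E.
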
@@ -179,16 +185,16 @@
 
 
 PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
-  internal::InputStreamUTF16Buffer buffer(input);
+  internal::InputStreamUtf16Buffer buffer(input);
   uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
   internal::UnicodeCache unicode_cache;
-  internal::JavaScriptScanner scanner(&unicode_cache);
+  internal::Scanner scanner(&unicode_cache);
   scanner.Initialize(&buffer);
   internal::CompleteParserRecorder recorder;
   preparser::PreParser::PreParseResult result =
       preparser::PreParser::PreParseProgram(&scanner,
                                             &recorder,
-                                            true,
+                                            internal::kAllowLazy,
                                             stack_limit);
   if (result == preparser::PreParser::kPreParseStackOverflow) {
     return PreParserData::StackOverflow();
diff --git a/src/preparser.cc b/src/preparser.cc
index 6021ebd..20d3b9c 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -52,6 +52,34 @@
 
 namespace preparser {
 
+PreParser::PreParseResult PreParser::PreParseLazyFunction(
+    i::LanguageMode mode, i::ParserRecorder* log) {
+  log_ = log;
+  // Lazy functions always have trivial outer scopes (no with/catch scopes).
+  Scope top_scope(&scope_, kTopLevelScope);
+  set_language_mode(mode);
+  Scope function_scope(&scope_, kFunctionScope);
+  ASSERT_EQ(i::Token::LBRACE, scanner_->current_token());
+  bool ok = true;
+  int start_position = scanner_->peek_location().beg_pos;
+  ParseLazyFunctionLiteralBody(&ok);
+  if (stack_overflow_) return kPreParseStackOverflow;
+  if (!ok) {
+    ReportUnexpectedToken(scanner_->current_token());
+  } else {
+    ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
+    if (!is_classic_mode()) {
+      int end_pos = scanner_->location().end_pos;
+      CheckOctalLiteral(start_position, end_pos, &ok);
+      if (ok) {
+        CheckDelayedStrictModeViolation(start_position, end_pos, &ok);
+      }
+    }
+  }
+  return kPreParseSuccess;
+}
+
+
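A sketch of the expected calling convention, assuming the public constructor added in preparser.h below and a scanner, recorder and stack_limit already in scope; the scanner must have consumed everything up to and including the opening '{':

    preparser::PreParser preparser(&scanner, &recorder, stack_limit,
                                   true,    // allow_lazy
                                   false,   // allow_natives_syntax
                                   false);  // allow_modules
    if (preparser.PreParseLazyFunction(i::CLASSIC_MODE, &recorder) ==
        preparser::PreParser::kPreParseSuccess) {
      // 'recorder' now holds a function entry usable for lazy compilation.
    }
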
 // Preparsing checks a JavaScript program and emits preparse-data that helps
 // a later parsing to be faster.
 // See preparser-data.h for the data.
@@ -72,7 +100,7 @@
   if (token == i::Token::ILLEGAL && stack_overflow_) {
     return;
   }
-  i::JavaScriptScanner::Location source_location = scanner_->location();
+  i::Scanner::Location source_location = scanner_->location();
 
   // Four of the tokens are treated specially
   switch (token) {
@@ -117,8 +145,21 @@
 
 
 PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
+  // (ECMA-262 5th Edition, clause 14):
+  // SourceElement:
+  //    Statement
+  //    FunctionDeclaration
+  //
+  // In Harmony mode we additionally allow the following productions:
+  // SourceElement:
+  //    LetDeclaration
+  //    ConstDeclaration
+
   switch (peek()) {
+    case i::Token::FUNCTION:
+      return ParseFunctionDeclaration(ok);
     case i::Token::LET:
+    case i::Token::CONST:
       return ParseVariableStatement(kSourceElement, ok);
     default:
       return ParseStatement(ok);
@@ -136,7 +177,8 @@
     Statement statement = ParseSourceElement(CHECK_OK);
     if (allow_directive_prologue) {
       if (statement.IsUseStrictLiteral()) {
-        set_strict_mode();
+        set_language_mode(harmony_scoping_ ?
+                          i::EXTENDED_MODE : i::STRICT_MODE);
       } else if (!statement.IsStringLiteral()) {
         allow_directive_prologue = false;
       }
@@ -185,6 +227,7 @@
       return ParseBlock(ok);
 
     case i::Token::CONST:
+    case i::Token::LET:
     case i::Token::VAR:
       return ParseVariableStatement(kStatement, ok);
 
@@ -225,8 +268,19 @@
     case i::Token::TRY:
       return ParseTryStatement(ok);
 
-    case i::Token::FUNCTION:
-      return ParseFunctionDeclaration(ok);
+    case i::Token::FUNCTION: {
+      i::Scanner::Location start_location = scanner_->peek_location();
+      Statement statement = ParseFunctionDeclaration(CHECK_OK);
+      i::Scanner::Location end_location = scanner_->location();
+      if (!is_classic_mode()) {
+        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+                        "strict_function", NULL);
+        *ok = false;
+        return Statement::Default();
+      } else {
+        return statement;
+      }
+    }
 
     case i::Token::DEBUGGER:
       return ParseDebuggerStatement(ok);
@@ -271,14 +325,10 @@
   //
   Expect(i::Token::LBRACE, CHECK_OK);
   while (peek() != i::Token::RBRACE) {
-    i::Scanner::Location start_location = scanner_->peek_location();
-    Statement statement = ParseSourceElement(CHECK_OK);
-    i::Scanner::Location end_location = scanner_->location();
-    if (strict_mode() && statement.IsFunctionDeclaration()) {
-      ReportMessageAt(start_location.beg_pos, end_location.end_pos,
-                      "strict_function", NULL);
-      *ok = false;
-      return Statement::Default();
+    if (is_extended_mode()) {
+      ParseSourceElement(CHECK_OK);
+    } else {
+      ParseStatement(CHECK_OK);
     }
   }
   Expect(i::Token::RBRACE, ok);
@@ -294,6 +344,7 @@
 
   Statement result = ParseVariableDeclarations(var_context,
                                                NULL,
+                                               NULL,
                                                CHECK_OK);
   ExpectSemicolon(CHECK_OK);
   return result;
@@ -307,22 +358,73 @@
 // of 'for-in' loops.
 PreParser::Statement PreParser::ParseVariableDeclarations(
     VariableDeclarationContext var_context,
+    VariableDeclarationProperties* decl_props,
     int* num_decl,
     bool* ok) {
   // VariableDeclarations ::
   //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
-
+  //
+  // The ES6 Draft Rev3 specifies the following grammar for const declarations
+  //
+  // ConstDeclaration ::
+  //   const ConstBinding (',' ConstBinding)* ';'
+  // ConstBinding ::
+  //   Identifier '=' AssignmentExpression
+  //
+  // TODO(ES6):
+  // ConstBinding ::
+  //   BindingPattern '=' AssignmentExpression
+  bool require_initializer = false;
   if (peek() == i::Token::VAR) {
     Consume(i::Token::VAR);
   } else if (peek() == i::Token::CONST) {
-    if (strict_mode()) {
+    // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
+    //
+    // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
+    //
+    // * It is a Syntax Error if the code that matches this production is not
+    //   contained in extended code.
+    //
+    // However, disallowing const in classic mode will break compatibility with
+    // existing pages. Therefore we keep allowing const with the old
+    // non-harmony semantics in classic mode.
+    Consume(i::Token::CONST);
+    switch (language_mode()) {
+      case i::CLASSIC_MODE:
+        break;
+      case i::STRICT_MODE: {
+        i::Scanner::Location location = scanner_->peek_location();
+        ReportMessageAt(location, "strict_const", NULL);
+        *ok = false;
+        return Statement::Default();
+      }
+      case i::EXTENDED_MODE:
+        if (var_context != kSourceElement &&
+            var_context != kForStatement) {
+          i::Scanner::Location location = scanner_->peek_location();
+          ReportMessageAt(location.beg_pos, location.end_pos,
+                          "unprotected_const", NULL);
+          *ok = false;
+          return Statement::Default();
+        }
+        require_initializer = true;
+        break;
+    }
+  } else if (peek() == i::Token::LET) {
+    // ES6 Draft Rev4 section 12.2.1:
+    //
+    // LetDeclaration : let LetBindingList ;
+    //
+    // * It is a Syntax Error if the code that matches this production is not
+    //   contained in extended code.
+    if (!is_extended_mode()) {
       i::Scanner::Location location = scanner_->peek_location();
-      ReportMessageAt(location, "strict_const", NULL);
+      ReportMessageAt(location.beg_pos, location.end_pos,
+                      "illegal_let", NULL);
       *ok = false;
       return Statement::Default();
     }
-    Consume(i::Token::CONST);
-  } else if (peek() == i::Token::LET) {
+    Consume(i::Token::LET);
     if (var_context != kSourceElement &&
         var_context != kForStatement) {
       i::Scanner::Location location = scanner_->peek_location();
@@ -331,7 +433,6 @@
       *ok = false;
       return Statement::Default();
     }
-    Consume(i::Token::LET);
   } else {
     *ok = false;
     return Statement::Default();
@@ -346,7 +447,7 @@
     // Parse variable name.
     if (nvars > 0) Consume(i::Token::COMMA);
     Identifier identifier  = ParseIdentifier(CHECK_OK);
-    if (strict_mode() && !identifier.IsValidStrictVariable()) {
+    if (!is_classic_mode() && !identifier.IsValidStrictVariable()) {
       StrictModeIdentifierViolation(scanner_->location(),
                                     "strict_var_name",
                                     identifier,
@@ -354,9 +455,10 @@
       return Statement::Default();
     }
     nvars++;
-    if (peek() == i::Token::ASSIGN) {
+    if (peek() == i::Token::ASSIGN || require_initializer) {
       Expect(i::Token::ASSIGN, CHECK_OK);
       ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
+      if (decl_props != NULL) *decl_props = kHasInitializers;
     }
   } while (peek() == i::Token::COMMA);
 
@@ -372,18 +474,11 @@
 
   Expression expr = ParseExpression(true, CHECK_OK);
   if (expr.IsRawIdentifier()) {
-    if (peek() == i::Token::COLON &&
-        (!strict_mode() || !expr.AsIdentifier().IsFutureReserved())) {
+    ASSERT(!expr.AsIdentifier().IsFutureReserved());
+    ASSERT(is_classic_mode() || !expr.AsIdentifier().IsFutureStrictReserved());
+    if (peek() == i::Token::COLON) {
       Consume(i::Token::COLON);
-      i::Scanner::Location start_location = scanner_->peek_location();
-      Statement statement = ParseStatement(CHECK_OK);
-      if (strict_mode() && statement.IsFunctionDeclaration()) {
-        i::Scanner::Location end_location = scanner_->location();
-        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
-                        "strict_function", NULL);
-        *ok = false;
-      }
-      return Statement::Default();
+      return ParseStatement(ok);
     }
     // Preparsing is disabled for extensions (because the extension details
     // aren't passed to lazily compiled functions), so we don't
@@ -476,7 +571,7 @@
   // WithStatement ::
   //   'with' '(' Expression ')' Statement
   Expect(i::Token::WITH, CHECK_OK);
-  if (strict_mode()) {
+  if (!is_classic_mode()) {
     i::Scanner::Location location = scanner_->location();
     ReportMessageAt(location, "strict_mode_with", NULL);
     *ok = false;
@@ -513,15 +608,7 @@
       Expect(i::Token::DEFAULT, CHECK_OK);
       Expect(i::Token::COLON, CHECK_OK);
     } else {
-      i::Scanner::Location start_location = scanner_->peek_location();
-      Statement statement = ParseStatement(CHECK_OK);
-      if (strict_mode() && statement.IsFunctionDeclaration()) {
-        i::Scanner::Location end_location = scanner_->location();
-        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
-                        "strict_function", NULL);
-        *ok = false;
-        return Statement::Default();
-      }
+      ParseStatement(CHECK_OK);
     }
     token = peek();
   }
@@ -567,9 +654,14 @@
   if (peek() != i::Token::SEMICOLON) {
     if (peek() == i::Token::VAR || peek() == i::Token::CONST ||
         peek() == i::Token::LET) {
+      bool is_let = peek() == i::Token::LET;
       int decl_count;
-      ParseVariableDeclarations(kForStatement, &decl_count, CHECK_OK);
-      if (peek() == i::Token::IN && decl_count == 1) {
+      VariableDeclarationProperties decl_props = kHasNoInitializers;
+      ParseVariableDeclarations(
+          kForStatement, &decl_props, &decl_count, CHECK_OK);
+      bool accept_IN = decl_count == 1 &&
+          !(is_let && decl_props == kHasInitializers);
+      if (peek() == i::Token::IN && accept_IN) {
         Expect(i::Token::IN, CHECK_OK);
         ParseExpression(true, CHECK_OK);
         Expect(i::Token::RPAREN, CHECK_OK);
@@ -614,7 +706,7 @@
 
   Expect(i::Token::THROW, CHECK_OK);
   if (scanner_->HasAnyLineTerminatorBeforeNext()) {
-    i::JavaScriptScanner::Location pos = scanner_->location();
+    i::Scanner::Location pos = scanner_->location();
     ReportMessageAt(pos, "newline_after_throw", NULL);
     *ok = false;
     return Statement::Default();
@@ -649,7 +741,7 @@
     Consume(i::Token::CATCH);
     Expect(i::Token::LPAREN, CHECK_OK);
     Identifier id = ParseIdentifier(CHECK_OK);
-    if (strict_mode() && !id.IsValidStrictVariable()) {
+    if (!is_classic_mode() && !id.IsValidStrictVariable()) {
       StrictModeIdentifierViolation(scanner_->location(),
                                     "strict_catch_variable",
                                     id,
@@ -727,7 +819,8 @@
     return expression;
   }
 
-  if (strict_mode() && expression.IsIdentifier() &&
+  if (!is_classic_mode() &&
+      expression.IsIdentifier() &&
       expression.AsIdentifier().IsEvalOrArguments()) {
     i::Scanner::Location after = scanner_->location();
     ReportMessageAt(before.beg_pos, after.end_pos,
@@ -815,7 +908,8 @@
     op = Next();
     i::Scanner::Location before = scanner_->peek_location();
     Expression expression = ParseUnaryExpression(CHECK_OK);
-    if (strict_mode() && expression.IsIdentifier() &&
+    if (!is_classic_mode() &&
+        expression.IsIdentifier() &&
         expression.AsIdentifier().IsEvalOrArguments()) {
       i::Scanner::Location after = scanner_->location();
       ReportMessageAt(before.beg_pos, after.end_pos,
@@ -837,7 +931,8 @@
   Expression expression = ParseLeftHandSideExpression(CHECK_OK);
   if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
       i::Token::IsCountOp(peek())) {
-    if (strict_mode() && expression.IsIdentifier() &&
+    if (!is_classic_mode() &&
+        expression.IsIdentifier() &&
         expression.AsIdentifier().IsEvalOrArguments()) {
       i::Scanner::Location after = scanner_->location();
       ReportMessageAt(before.beg_pos, after.end_pos,
@@ -1024,7 +1119,7 @@
     }
 
     case i::Token::FUTURE_STRICT_RESERVED_WORD:
-      if (strict_mode()) {
+      if (!is_classic_mode()) {
         Next();
         i::Scanner::Location location = scanner_->location();
         ReportMessageAt(location, "strict_reserved_word", NULL);
@@ -1119,12 +1214,12 @@
     old_type = finder->AddAsciiSymbol(scanner_->literal_ascii_string(),
                                       type);
   } else {
-    old_type = finder->AddUC16Symbol(scanner_->literal_uc16_string(), type);
+    old_type = finder->AddUtf16Symbol(scanner_->literal_utf16_string(), type);
   }
   if (HasConflict(old_type, type)) {
     if (IsDataDataConflict(old_type, type)) {
       // Both are data properties.
-      if (!strict_mode()) return;
+      if (is_classic_mode()) return;
       ReportMessageAt(scanner_->location(),
                       "strict_duplicate_property", NULL);
     } else if (IsDataAccessorConflict(old_type, type)) {
@@ -1292,7 +1387,7 @@
           duplicate_finder.AddAsciiSymbol(scanner_->literal_ascii_string(), 1);
     } else {
       prev_value =
-          duplicate_finder.AddUC16Symbol(scanner_->literal_uc16_string(), 1);
+          duplicate_finder.AddUtf16Symbol(scanner_->literal_utf16_string(), 1);
     }
 
     if (prev_value != 0) {
@@ -1307,9 +1402,6 @@
   }
   Expect(i::Token::RPAREN, CHECK_OK);
 
-  Expect(i::Token::LBRACE, CHECK_OK);
-  int function_block_pos = scanner_->location().beg_pos;
-
   // Determine if the function will be lazily compiled.
   // Currently only happens to top-level functions.
   // Optimistically assume that all top-level functions are lazily compiled.
@@ -1318,26 +1410,15 @@
                              !parenthesized_function_);
   parenthesized_function_ = false;
 
+  Expect(i::Token::LBRACE, CHECK_OK);
   if (is_lazily_compiled) {
-    log_->PauseRecording();
-    ParseSourceElements(i::Token::RBRACE, ok);
-    log_->ResumeRecording();
-    if (!*ok) Expression::Default();
-
-    Expect(i::Token::RBRACE, CHECK_OK);
-
-    // Position right after terminal '}'.
-    int end_pos = scanner_->location().end_pos;
-    log_->LogFunction(function_block_pos, end_pos,
-                      function_scope.materialized_literal_count(),
-                      function_scope.expected_properties(),
-                      strict_mode() ? 1 : 0);
+    ParseLazyFunctionLiteralBody(CHECK_OK);
   } else {
-    ParseSourceElements(i::Token::RBRACE, CHECK_OK);
-    Expect(i::Token::RBRACE, CHECK_OK);
+    ParseSourceElements(i::Token::RBRACE, ok);
   }
+  Expect(i::Token::RBRACE, CHECK_OK);
 
-  if (strict_mode()) {
+  if (!is_classic_mode()) {
     int end_position = scanner_->location().end_pos;
     CheckOctalLiteral(start_position, end_position, CHECK_OK);
     CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
@@ -1348,11 +1429,31 @@
 }
 
 
+void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
+  int body_start = scanner_->location().beg_pos;
+  log_->PauseRecording();
+  ParseSourceElements(i::Token::RBRACE, ok);
+  log_->ResumeRecording();
+  if (!*ok) return;
+
+  // Position right after terminal '}'.
+  ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
+  int body_end = scanner_->peek_location().end_pos;
+  log_->LogFunction(body_start, body_end,
+                    scope_->materialized_literal_count(),
+                    scope_->expected_properties(),
+                    language_mode());
+}
+
+
 PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
   // CallRuntime ::
   //   '%' Identifier Arguments
-
   Expect(i::Token::MOD, CHECK_OK);
+  if (!allow_natives_syntax_) {
+    *ok = false;
+    return Expression::Default();
+  }
   ParseIdentifier(CHECK_OK);
   ParseArguments(ok);
 
@@ -1384,7 +1485,7 @@
   if (scanner_->is_literal_ascii()) {
     log_->LogAsciiSymbol(identifier_pos, scanner_->literal_ascii_string());
   } else {
-    log_->LogUC16Symbol(identifier_pos, scanner_->literal_uc16_string());
+    log_->LogUtf16Symbol(identifier_pos, scanner_->literal_utf16_string());
   }
 }
 
@@ -1435,9 +1536,16 @@
       ReportMessageAt(location.beg_pos, location.end_pos,
                       "reserved_word", NULL);
       *ok = false;
+      return GetIdentifierSymbol();
     }
-      // FALLTHROUGH
     case i::Token::FUTURE_STRICT_RESERVED_WORD:
+      if (!is_classic_mode()) {
+        i::Scanner::Location location = scanner_->location();
+        ReportMessageAt(location.beg_pos, location.end_pos,
+                        "strict_reserved_word", NULL);
+        *ok = false;
+      }
+      // FALLTHROUGH
     case i::Token::IDENTIFIER:
       return GetIdentifierSymbol();
     default:
@@ -1450,7 +1558,7 @@
 void PreParser::SetStrictModeViolation(i::Scanner::Location location,
                                        const char* type,
                                        bool* ok) {
-  if (strict_mode()) {
+  if (!is_classic_mode()) {
     ReportMessageAt(location, type, NULL);
     *ok = false;
     return;
@@ -1490,7 +1598,7 @@
   } else if (identifier.IsFutureStrictReserved()) {
     type = "strict_reserved_word";
   }
-  if (strict_mode()) {
+  if (!is_classic_mode()) {
     ReportMessageAt(location, type, NULL);
     *ok = false;
     return;
@@ -1549,7 +1657,7 @@
   return AddSymbol(i::Vector<const byte>::cast(key), true, value);
 }
 
-int DuplicateFinder::AddUC16Symbol(i::Vector<const uint16_t> key, int value) {
+int DuplicateFinder::AddUtf16Symbol(i::Vector<const uint16_t> key, int value) {
   return AddSymbol(i::Vector<const byte>::cast(key), false, value);
 }
 
diff --git a/src/preparser.h b/src/preparser.h
index b97b7cf..f3a4347 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_PREPARSER_H
 #define V8_PREPARSER_H
 
+#include "hashmap.h"
 #include "token.h"
 #include "scanner.h"
 
@@ -64,7 +65,7 @@
         map_(&Match) { }
 
   int AddAsciiSymbol(i::Vector<const char> key, int value);
-  int AddUC16Symbol(i::Vector<const uint16_t> key, int value);
+  int AddUtf16Symbol(i::Vector<const uint16_t> key, int value);
   // Add a number literal by converting it (if necessary)
   // to the string that ToString(ToNumber(literal)) would generate,
   // and then adding that string with AddAsciiSymbol.
@@ -110,19 +111,54 @@
     kPreParseSuccess
   };
 
+
+  PreParser(i::Scanner* scanner,
+            i::ParserRecorder* log,
+            uintptr_t stack_limit,
+            bool allow_lazy,
+            bool allow_natives_syntax,
+            bool allow_modules)
+      : scanner_(scanner),
+        log_(log),
+        scope_(NULL),
+        stack_limit_(stack_limit),
+        strict_mode_violation_location_(i::Scanner::Location::invalid()),
+        strict_mode_violation_type_(NULL),
+        stack_overflow_(false),
+        allow_lazy_(allow_lazy),
+        allow_modules_(allow_modules),
+        allow_natives_syntax_(allow_natives_syntax),
+        parenthesized_function_(false),
+        harmony_scoping_(scanner->HarmonyScoping()) { }
+
   ~PreParser() {}
 
   // Pre-parse the program from the character stream; returns true on
   // success (even if parsing failed, the pre-parse data successfully
   // captured the syntax error), and false if a stack-overflow happened
   // during parsing.
-  static PreParseResult PreParseProgram(i::JavaScriptScanner* scanner,
+  static PreParseResult PreParseProgram(i::Scanner* scanner,
                                         i::ParserRecorder* log,
-                                        bool allow_lazy,
+                                        int flags,
                                         uintptr_t stack_limit) {
-    return PreParser(scanner, log, stack_limit, allow_lazy).PreParse();
+    bool allow_lazy = (flags & i::kAllowLazy) != 0;
+    bool allow_natives_syntax = (flags & i::kAllowNativesSyntax) != 0;
+    bool allow_modules = (flags & i::kAllowModules) != 0;
+    return PreParser(scanner, log, stack_limit, allow_lazy,
+                     allow_natives_syntax, allow_modules).PreParse();
   }
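The former bool parameter is now a bit mask, so call sites combine the i::kAllow* flags. For example (scanner, log and stack_limit as in the old call):

    int flags = i::kAllowLazy | i::kAllowNativesSyntax;  // modules left off
    preparser::PreParser::PreParseResult result =
        preparser::PreParser::PreParseProgram(scanner, log, flags, stack_limit);
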
 
+  // Parses a single function literal, from the opening parentheses before
+  // parameters to the closing brace after the body.
+  // Returns a FunctionEntry describing the body of the function in enough
+  // detail that it can be lazily compiled.
+  // The scanner is expected to have matched the "function" keyword and
+  // parameters, and have consumed the initial '{'.
+  // At return, unless an error occurred, the scanner is positioned before
+  // the final '}'.
+  PreParseResult PreParseLazyFunction(i::LanguageMode mode,
+                                      i::ParserRecorder* log);
+
  private:
   // Used to detect duplicates in object literals. Each of the values
   // kGetterProperty, kSetterProperty and kValueProperty represents
@@ -179,6 +215,12 @@
     kForStatement
   };
 
+  // Whether a list of variable declarations includes any initializers.
+  enum VariableDeclarationProperties {
+    kHasInitializers,
+    kHasNoInitializers
+  };
+
   class Expression;
 
   class Identifier {
@@ -408,7 +450,8 @@
           materialized_literal_count_(0),
           expected_properties_(0),
           with_nesting_count_(0),
-          strict_((prev_ != NULL) && prev_->is_strict()) {
+          language_mode_(
+              (prev_ != NULL) ? prev_->language_mode() : i::CLASSIC_MODE) {
       *variable = this;
     }
     ~Scope() { *variable_ = prev_; }
@@ -418,8 +461,15 @@
     int expected_properties() { return expected_properties_; }
     int materialized_literal_count() { return materialized_literal_count_; }
     bool IsInsideWith() { return with_nesting_count_ != 0; }
-    bool is_strict() { return strict_; }
-    void set_strict() { strict_ = true; }
+    bool is_classic_mode() {
+      return language_mode_ == i::CLASSIC_MODE;
+    }
+    i::LanguageMode language_mode() {
+      return language_mode_;
+    }
+    void set_language_mode(i::LanguageMode language_mode) {
+      language_mode_ = language_mode;
+    }
     void EnterWith() { with_nesting_count_++; }
     void LeaveWith() { with_nesting_count_--; }
 
@@ -430,25 +480,9 @@
     int materialized_literal_count_;
     int expected_properties_;
     int with_nesting_count_;
-    bool strict_;
+    i::LanguageMode language_mode_;
   };
 
-  // Private constructor only used in PreParseProgram.
-  PreParser(i::JavaScriptScanner* scanner,
-            i::ParserRecorder* log,
-            uintptr_t stack_limit,
-            bool allow_lazy)
-      : scanner_(scanner),
-        log_(log),
-        scope_(NULL),
-        stack_limit_(stack_limit),
-        strict_mode_violation_location_(i::Scanner::Location::invalid()),
-        strict_mode_violation_type_(NULL),
-        stack_overflow_(false),
-        allow_lazy_(true),
-        parenthesized_function_(false),
-        harmony_block_scoping_(scanner->HarmonyBlockScoping()) { }
-
   // Preparse the program. Only called in PreParseProgram after creating
   // the instance.
   PreParseResult PreParse() {
@@ -459,7 +493,7 @@
     if (stack_overflow_) return kPreParseStackOverflow;
     if (!ok) {
       ReportUnexpectedToken(scanner_->current_token());
-    } else if (scope_->is_strict()) {
+    } else if (!scope_->is_classic_mode()) {
       CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
     }
     return kPreParseSuccess;
@@ -493,6 +527,7 @@
   Statement ParseVariableStatement(VariableDeclarationContext var_context,
                                    bool* ok);
   Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
+                                      VariableDeclarationProperties* decl_props,
                                       int* num_decl,
                                       bool* ok);
   Statement ParseExpressionOrLabelledStatement(bool* ok);
@@ -527,6 +562,7 @@
 
   Arguments ParseArguments(bool* ok);
   Expression ParseFunctionLiteral(bool* ok);
+  void ParseLazyFunctionLiteralBody(bool* ok);
 
   Identifier ParseIdentifier(bool* ok);
   Identifier ParseIdentifierName(bool* ok);
@@ -562,11 +598,19 @@
 
   bool peek_any_identifier();
 
-  void set_strict_mode() {
-    scope_->set_strict();
+  void set_language_mode(i::LanguageMode language_mode) {
+    scope_->set_language_mode(language_mode);
   }
 
-  bool strict_mode() { return scope_->is_strict(); }
+  bool is_classic_mode() {
+    return scope_->language_mode() == i::CLASSIC_MODE;
+  }
+
+  bool is_extended_mode() {
+    return scope_->language_mode() == i::EXTENDED_MODE;
+  }
+
+  i::LanguageMode language_mode() { return scope_->language_mode(); }
 
   void Consume(i::Token::Value token) { Next(); }
 
@@ -590,7 +634,7 @@
 
   void SetStrictModeViolation(i::Scanner::Location,
                               const char* type,
-                              bool *ok);
+                              bool* ok);
 
   void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
 
@@ -599,7 +643,7 @@
                                      Identifier identifier,
                                      bool* ok);
 
-  i::JavaScriptScanner* scanner_;
+  i::Scanner* scanner_;
   i::ParserRecorder* log_;
   Scope* scope_;
   uintptr_t stack_limit_;
@@ -607,8 +651,10 @@
   const char* strict_mode_violation_type_;
   bool stack_overflow_;
   bool allow_lazy_;
+  bool allow_modules_;
+  bool allow_natives_syntax_;
   bool parenthesized_function_;
-  bool harmony_block_scoping_;
+  bool harmony_scoping_;
 };
 } }  // v8::preparser
 
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 663af28..0d8dadc 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -58,17 +58,70 @@
 }
 
 
-void PrettyPrinter::VisitDeclaration(Declaration* node) {
+void PrettyPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
   Print("var ");
   PrintLiteral(node->proxy()->name(), false);
-  if (node->fun() != NULL) {
-    Print(" = ");
-    PrintFunctionLiteral(node->fun());
-  }
   Print(";");
 }
 
 
+void PrettyPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
+  Print("function ");
+  PrintLiteral(node->proxy()->name(), false);
+  Print(" = ");
+  PrintFunctionLiteral(node->fun());
+  Print(";");
+}
+
+
+void PrettyPrinter::VisitModuleDeclaration(ModuleDeclaration* node) {
+  Print("module ");
+  PrintLiteral(node->proxy()->name(), false);
+  Print(" = ");
+  Visit(node->module());
+  Print(";");
+}
+
+
+void PrettyPrinter::VisitImportDeclaration(ImportDeclaration* node) {
+  Print("import ");
+  PrintLiteral(node->proxy()->name(), false);
+  Print(" from ");
+  Visit(node->module());
+  Print(";");
+}
+
+
+void PrettyPrinter::VisitExportDeclaration(ExportDeclaration* node) {
+  Print("export ");
+  PrintLiteral(node->proxy()->name(), false);
+  Print(";");
+}
+
+
+void PrettyPrinter::VisitModuleLiteral(ModuleLiteral* node) {
+  VisitBlock(node->body());
+}
+
+
+void PrettyPrinter::VisitModuleVariable(ModuleVariable* node) {
+  Visit(node->proxy());
+}
+
+
+void PrettyPrinter::VisitModulePath(ModulePath* node) {
+  Visit(node->module());
+  Print(".");
+  PrintLiteral(node->name(), false);
+}
+
+
+void PrettyPrinter::VisitModuleUrl(ModuleUrl* node) {
+  Print("at ");
+  PrintLiteral(node->url(), true);
+}
+
+
 void PrettyPrinter::VisitExpressionStatement(ExpressionStatement* node) {
   Visit(node->expression());
   Print(";");
@@ -372,13 +425,6 @@
 }
 
 
-void PrettyPrinter::VisitCompareToNull(CompareToNull* node) {
-  Print("(");
-  Visit(node->expression());
-  Print("%s null)", Token::String(node->op()));
-}
-
-
 void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
   Print("<this-function>");
 }
@@ -454,6 +500,7 @@
 
 
 void PrettyPrinter::PrintStatements(ZoneList<Statement*>* statements) {
+  if (statements == NULL) return;
   for (int i = 0; i < statements->length(); i++) {
     if (i != 0) Print(" ");
     Visit(statements->at(i));
@@ -717,20 +764,61 @@
 }
 
 
-void AstPrinter::VisitDeclaration(Declaration* node) {
-  if (node->fun() == NULL) {
-    // var or const declarations
-    PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
-                                 node->proxy()->var(),
-                                 node->proxy()->name());
-  } else {
-    // function declarations
-    PrintIndented("FUNCTION ");
-    PrintLiteral(node->proxy()->name(), true);
-    Print(" = function ");
-    PrintLiteral(node->fun()->name(), false);
-    Print("\n");
-  }
+void AstPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
+  PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
+                               node->proxy()->var(),
+                               node->proxy()->name());
+}
+
+
+void AstPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
+  PrintIndented("FUNCTION ");
+  PrintLiteral(node->proxy()->name(), true);
+  Print(" = function ");
+  PrintLiteral(node->fun()->name(), false);
+  Print("\n");
+}
+
+
+void AstPrinter::VisitModuleDeclaration(ModuleDeclaration* node) {
+  IndentedScope indent(this, "MODULE");
+  PrintLiteralIndented("NAME", node->proxy()->name(), true);
+  Visit(node->module());
+}
+
+
+void AstPrinter::VisitImportDeclaration(ImportDeclaration* node) {
+  IndentedScope indent(this, "IMPORT");
+  PrintLiteralIndented("NAME", node->proxy()->name(), true);
+  Visit(node->module());
+}
+
+
+void AstPrinter::VisitExportDeclaration(ExportDeclaration* node) {
+  IndentedScope indent(this, "EXPORT ");
+  PrintLiteral(node->proxy()->name(), true);
+}
+
+
+void AstPrinter::VisitModuleLiteral(ModuleLiteral* node) {
+  VisitBlock(node->body());
+}
+
+
+void AstPrinter::VisitModuleVariable(ModuleVariable* node) {
+  Visit(node->proxy());
+}
+
+
+void AstPrinter::VisitModulePath(ModulePath* node) {
+  IndentedScope indent(this, "PATH");
+  PrintIndentedVisit("MODULE", node->module());
+  PrintLiteralIndented("NAME", node->name(), false);
+}
+
+
+void AstPrinter::VisitModuleUrl(ModuleUrl* node) {
+  PrintLiteralIndented("URL", node->url(), true);
 }
 
 
@@ -1020,416 +1108,10 @@
 }
 
 
-void AstPrinter::VisitCompareToNull(CompareToNull* node) {
-  const char* name = node->is_strict()
-      ? "COMPARE-TO-NULL-STRICT"
-      : "COMPARE-TO-NULL";
-  IndentedScope indent(this, name, node);
-  Visit(node->expression());
-}
-
-
 void AstPrinter::VisitThisFunction(ThisFunction* node) {
   IndentedScope indent(this, "THIS-FUNCTION");
 }
 
-
-TagScope::TagScope(JsonAstBuilder* builder, const char* name)
-    : builder_(builder), next_(builder->tag()), has_body_(false) {
-  if (next_ != NULL) {
-    next_->use();
-    builder->Print(",\n");
-  }
-  builder->set_tag(this);
-  builder->PrintIndented("[");
-  builder->Print("\"%s\"", name);
-  builder->increase_indent(JsonAstBuilder::kTagIndentSize);
-}
-
-
-TagScope::~TagScope() {
-  builder_->decrease_indent(JsonAstBuilder::kTagIndentSize);
-  if (has_body_) {
-    builder_->Print("\n");
-    builder_->PrintIndented("]");
-  } else {
-    builder_->Print("]");
-  }
-  builder_->set_tag(next_);
-}
-
-
-AttributesScope::AttributesScope(JsonAstBuilder* builder)
-    : builder_(builder), attribute_count_(0) {
-  builder->set_attributes(this);
-  builder->tag()->use();
-  builder->Print(",\n");
-  builder->PrintIndented("{");
-  builder->increase_indent(JsonAstBuilder::kAttributesIndentSize);
-}
-
-
-AttributesScope::~AttributesScope() {
-  builder_->decrease_indent(JsonAstBuilder::kAttributesIndentSize);
-  if (attribute_count_ > 1) {
-    builder_->Print("\n");
-    builder_->PrintIndented("}");
-  } else {
-    builder_->Print("}");
-  }
-  builder_->set_attributes(NULL);
-}
-
-
-const char* JsonAstBuilder::BuildProgram(FunctionLiteral* program) {
-  Init();
-  Visit(program);
-  Print("\n");
-  return Output();
-}
-
-
-void JsonAstBuilder::AddAttributePrefix(const char* name) {
-  if (attributes()->is_used()) {
-    Print(",\n");
-    PrintIndented("\"");
-  } else {
-    Print("\"");
-  }
-  Print("%s\":", name);
-  attributes()->use();
-}
-
-
-void JsonAstBuilder::AddAttribute(const char* name, Handle<String> value) {
-  SmartArrayPointer<char> value_string = value->ToCString();
-  AddAttributePrefix(name);
-  Print("\"%s\"", *value_string);
-}
-
-
-void JsonAstBuilder::AddAttribute(const char* name, const char* value) {
-  AddAttributePrefix(name);
-  Print("\"%s\"", value);
-}
-
-
-void JsonAstBuilder::AddAttribute(const char* name, int value) {
-  AddAttributePrefix(name);
-  Print("%d", value);
-}
-
-
-void JsonAstBuilder::AddAttribute(const char* name, bool value) {
-  AddAttributePrefix(name);
-  Print(value ? "true" : "false");
-}
-
-
-void JsonAstBuilder::VisitBlock(Block* stmt) {
-  TagScope tag(this, "Block");
-  VisitStatements(stmt->statements());
-}
-
-
-void JsonAstBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
-  TagScope tag(this, "ExpressionStatement");
-  Visit(stmt->expression());
-}
-
-
-void JsonAstBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
-  TagScope tag(this, "EmptyStatement");
-}
-
-
-void JsonAstBuilder::VisitIfStatement(IfStatement* stmt) {
-  TagScope tag(this, "IfStatement");
-  Visit(stmt->condition());
-  Visit(stmt->then_statement());
-  Visit(stmt->else_statement());
-}
-
-
-void JsonAstBuilder::VisitContinueStatement(ContinueStatement* stmt) {
-  TagScope tag(this, "ContinueStatement");
-}
-
-
-void JsonAstBuilder::VisitBreakStatement(BreakStatement* stmt) {
-  TagScope tag(this, "BreakStatement");
-}
-
-
-void JsonAstBuilder::VisitReturnStatement(ReturnStatement* stmt) {
-  TagScope tag(this, "ReturnStatement");
-  Visit(stmt->expression());
-}
-
-
-void JsonAstBuilder::VisitWithStatement(WithStatement* stmt) {
-  TagScope tag(this, "WithStatement");
-  Visit(stmt->expression());
-  Visit(stmt->statement());
-}
-
-
-void JsonAstBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
-  TagScope tag(this, "SwitchStatement");
-}
-
-
-void JsonAstBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  TagScope tag(this, "DoWhileStatement");
-  Visit(stmt->body());
-  Visit(stmt->cond());
-}
-
-
-void JsonAstBuilder::VisitWhileStatement(WhileStatement* stmt) {
-  TagScope tag(this, "WhileStatement");
-  Visit(stmt->cond());
-  Visit(stmt->body());
-}
-
-
-void JsonAstBuilder::VisitForStatement(ForStatement* stmt) {
-  TagScope tag(this, "ForStatement");
-  if (stmt->init() != NULL) Visit(stmt->init());
-  if (stmt->cond() != NULL) Visit(stmt->cond());
-  Visit(stmt->body());
-  if (stmt->next() != NULL) Visit(stmt->next());
-}
-
-
-void JsonAstBuilder::VisitForInStatement(ForInStatement* stmt) {
-  TagScope tag(this, "ForInStatement");
-  Visit(stmt->each());
-  Visit(stmt->enumerable());
-  Visit(stmt->body());
-}
-
-
-void JsonAstBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  TagScope tag(this, "TryCatchStatement");
-  { AttributesScope attributes(this);
-    AddAttribute("variable", stmt->variable()->name());
-  }
-  Visit(stmt->try_block());
-  Visit(stmt->catch_block());
-}
-
-
-void JsonAstBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  TagScope tag(this, "TryFinallyStatement");
-  Visit(stmt->try_block());
-  Visit(stmt->finally_block());
-}
-
-
-void JsonAstBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
-  TagScope tag(this, "DebuggerStatement");
-}
-
-
-void JsonAstBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
-  TagScope tag(this, "FunctionLiteral");
-  {
-    AttributesScope attributes(this);
-    AddAttribute("name", expr->name());
-  }
-  VisitDeclarations(expr->scope()->declarations());
-  VisitStatements(expr->body());
-}
-
-
-void JsonAstBuilder::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* expr) {
-  TagScope tag(this, "SharedFunctionInfoLiteral");
-}
-
-
-void JsonAstBuilder::VisitConditional(Conditional* expr) {
-  TagScope tag(this, "Conditional");
-}
-
-
-void JsonAstBuilder::VisitVariableProxy(VariableProxy* expr) {
-  TagScope tag(this, "Variable");
-  {
-    AttributesScope attributes(this);
-    Variable* var = expr->var();
-    AddAttribute("name", var->name());
-    switch (var->location()) {
-      case Variable::UNALLOCATED:
-        AddAttribute("location", "UNALLOCATED");
-        break;
-      case Variable::PARAMETER:
-        AddAttribute("location", "PARAMETER");
-        AddAttribute("index", var->index());
-        break;
-      case Variable::LOCAL:
-        AddAttribute("location", "LOCAL");
-        AddAttribute("index", var->index());
-        break;
-      case Variable::CONTEXT:
-        AddAttribute("location", "CONTEXT");
-        AddAttribute("index", var->index());
-        break;
-      case Variable::LOOKUP:
-        AddAttribute("location", "LOOKUP");
-        break;
-    }
-  }
-}
-
-
-void JsonAstBuilder::VisitLiteral(Literal* expr) {
-  TagScope tag(this, "Literal");
-  {
-    AttributesScope attributes(this);
-    Handle<Object> handle = expr->handle();
-    if (handle->IsString()) {
-      AddAttribute("handle", Handle<String>(String::cast(*handle)));
-    } else if (handle->IsSmi()) {
-      AddAttribute("handle", Smi::cast(*handle)->value());
-    }
-  }
-}
-
-
-void JsonAstBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
-  TagScope tag(this, "RegExpLiteral");
-}
-
-
-void JsonAstBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
-  TagScope tag(this, "ObjectLiteral");
-}
-
-
-void JsonAstBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
-  TagScope tag(this, "ArrayLiteral");
-}
-
-
-void JsonAstBuilder::VisitAssignment(Assignment* expr) {
-  TagScope tag(this, "Assignment");
-  {
-    AttributesScope attributes(this);
-    AddAttribute("op", Token::Name(expr->op()));
-  }
-  Visit(expr->target());
-  Visit(expr->value());
-}
-
-
-void JsonAstBuilder::VisitThrow(Throw* expr) {
-  TagScope tag(this, "Throw");
-  Visit(expr->exception());
-}
-
-
-void JsonAstBuilder::VisitProperty(Property* expr) {
-  TagScope tag(this, "Property");
-  Visit(expr->obj());
-  Visit(expr->key());
-}
-
-
-void JsonAstBuilder::VisitCall(Call* expr) {
-  TagScope tag(this, "Call");
-  Visit(expr->expression());
-  VisitExpressions(expr->arguments());
-}
-
-
-void JsonAstBuilder::VisitCallNew(CallNew* expr) {
-  TagScope tag(this, "CallNew");
-  Visit(expr->expression());
-  VisitExpressions(expr->arguments());
-}
-
-
-void JsonAstBuilder::VisitCallRuntime(CallRuntime* expr) {
-  TagScope tag(this, "CallRuntime");
-  {
-    AttributesScope attributes(this);
-    AddAttribute("name", expr->name());
-  }
-  VisitExpressions(expr->arguments());
-}
-
-
-void JsonAstBuilder::VisitUnaryOperation(UnaryOperation* expr) {
-  TagScope tag(this, "UnaryOperation");
-  {
-    AttributesScope attributes(this);
-    AddAttribute("op", Token::Name(expr->op()));
-  }
-  Visit(expr->expression());
-}
-
-
-void JsonAstBuilder::VisitCountOperation(CountOperation* expr) {
-  TagScope tag(this, "CountOperation");
-  {
-    AttributesScope attributes(this);
-    AddAttribute("is_prefix", expr->is_prefix());
-    AddAttribute("op", Token::Name(expr->op()));
-  }
-  Visit(expr->expression());
-}
-
-
-void JsonAstBuilder::VisitBinaryOperation(BinaryOperation* expr) {
-  TagScope tag(this, "BinaryOperation");
-  {
-    AttributesScope attributes(this);
-    AddAttribute("op", Token::Name(expr->op()));
-  }
-  Visit(expr->left());
-  Visit(expr->right());
-}
-
-
-void JsonAstBuilder::VisitCompareOperation(CompareOperation* expr) {
-  TagScope tag(this, "CompareOperation");
-  {
-    AttributesScope attributes(this);
-    AddAttribute("op", Token::Name(expr->op()));
-  }
-  Visit(expr->left());
-  Visit(expr->right());
-}
-
-
-void JsonAstBuilder::VisitCompareToNull(CompareToNull* expr) {
-  TagScope tag(this, "CompareToNull");
-  {
-    AttributesScope attributes(this);
-    AddAttribute("is_strict", expr->is_strict());
-  }
-  Visit(expr->expression());
-}
-
-
-void JsonAstBuilder::VisitThisFunction(ThisFunction* expr) {
-  TagScope tag(this, "ThisFunction");
-}
-
-
-void JsonAstBuilder::VisitDeclaration(Declaration* decl) {
-  TagScope tag(this, "Declaration");
-  {
-    AttributesScope attributes(this);
-    AddAttribute("mode", Variable::Mode2String(decl->mode()));
-  }
-  Visit(decl->proxy());
-  if (decl->fun() != NULL) Visit(decl->fun());
-}
-
-
 #endif  // DEBUG
 
 } }  // namespace v8::internal
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index a26c48e..9ac7257 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -112,107 +112,6 @@
   int indent_;
 };
 
-
-// Forward declaration of helper classes.
-class TagScope;
-class AttributesScope;
-
-// Build a C string containing a JSON representation of a function's
-// AST. The representation is based on JsonML (www.jsonml.org).
-class JsonAstBuilder: public PrettyPrinter {
- public:
-  JsonAstBuilder()
-      : indent_(0), top_tag_scope_(NULL), attributes_scope_(NULL) {
-  }
-  virtual ~JsonAstBuilder() {}
-
-  // Controls the indentation of subsequent lines of a tag body after
-  // the first line.
-  static const int kTagIndentSize = 2;
-
-  // Controls the indentation of subsequent lines of an attributes
-  // blocks's body after the first line.
-  static const int kAttributesIndentSize = 1;
-
-  // Construct a JSON representation of a function literal.
-  const char* BuildProgram(FunctionLiteral* program);
-
-  // Print text indented by the current indentation level.
-  void PrintIndented(const char* text) { Print("%*s%s", indent_, "", text); }
-
-  // Change the indentation level.
-  void increase_indent(int amount) { indent_ += amount; }
-  void decrease_indent(int amount) { indent_ -= amount; }
-
-  // The builder maintains a stack of opened AST node constructors.
-  // Each node constructor corresponds to a JsonML tag.
-  TagScope* tag() { return top_tag_scope_; }
-  void set_tag(TagScope* scope) { top_tag_scope_ = scope; }
-
-  // The builder maintains a pointer to the currently opened attributes
-  // of current AST node or NULL if the attributes are not opened.
-  AttributesScope* attributes() { return attributes_scope_; }
-  void set_attributes(AttributesScope* scope) { attributes_scope_ = scope; }
-
-  // Add an attribute to the currently opened attributes.
-  void AddAttribute(const char* name, Handle<String> value);
-  void AddAttribute(const char* name, const char* value);
-  void AddAttribute(const char* name, int value);
-  void AddAttribute(const char* name, bool value);
-
-  // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
-  AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
-  int indent_;
-  TagScope* top_tag_scope_;
-  AttributesScope* attributes_scope_;
-
-  // Utility function used by AddAttribute implementations.
-  void AddAttributePrefix(const char* name);
-};
-
-
-// The JSON AST builder keeps a stack of open element tags (AST node
-// constructors from the current iteration point to the root of the
-// AST).  TagScope is a helper class to manage the opening and closing
-// of tags, the indentation of their bodies, and comma separating their
-// contents.
-class TagScope BASE_EMBEDDED {
- public:
-  TagScope(JsonAstBuilder* builder, const char* name);
-  ~TagScope();
-
-  void use() { has_body_ = true; }
-
- private:
-  JsonAstBuilder* builder_;
-  TagScope* next_;
-  bool has_body_;
-};
-
-
-// AttributesScope is a helper class to manage the opening and closing
-// of attribute blocks, the indentation of their bodies, and comma
-// separating their contents. JsonAstBuilder::AddAttribute adds an
-// attribute to the currently open AttributesScope. They cannot be
-// nested so the builder keeps an optional single scope rather than a
-// stack.
-class AttributesScope BASE_EMBEDDED {
- public:
-  explicit AttributesScope(JsonAstBuilder* builder);
-  ~AttributesScope();
-
-  bool is_used() { return attribute_count_ > 0; }
-  void use() { ++attribute_count_; }
-
- private:
-  JsonAstBuilder* builder_;
-  int attribute_count_;
-};
-
 #endif  // DEBUG
 
 } }  // namespace v8::internal
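
The JsonAstBuilder machinery deleted above (implementation in prettyprinter.cc, declaration here) serialized an AST as JsonML: every node became a ["Tag", {attributes}, child, ...] array, with TagScope managing the brackets and commas and AttributesScope the attribute objects. As a hedged illustration reconstructed from the visitor code (not captured output), a function f(x) { return x; } would have printed roughly:

  ["FunctionLiteral", {"name": "f"},
    ["ReturnStatement",
      ["Variable", {"name": "x", "location": "PARAMETER", "index": 0}]]]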
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 88d6e87..65369be 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -95,12 +95,23 @@
 }
 
 
-uint64_t HeapEntry::id() {
-  union {
-    Id stored_id;
-    uint64_t returned_id;
-  } id_adaptor = {id_};
-  return id_adaptor.returned_id;
+SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) {
+  return kGcRootsFirstSubrootId + delta * kObjectIdStep;
+}
+
+
+HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
+  return reinterpret_cast<HeapObject*>(
+      reinterpret_cast<char*>(kFirstGcSubrootObject) +
+      delta * HeapObjectsMap::kObjectIdStep);
+}
+
+
+int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
+  return static_cast<int>(
+      (reinterpret_cast<char*>(subroot) -
+       reinterpret_cast<char*>(kFirstGcSubrootObject)) /
+      HeapObjectsMap::kObjectIdStep);
 }
 
 } }  // namespace v8::internal
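
The three inlined helpers above map GC subroots between three representations: an ordinal (the visitor sync tag), a synthetic snapshot object ID, and a fake HeapObject pointer carved out of the same reserved ID range. A minimal self-contained sketch of that arithmetic; the constant values are assumptions (kObjectIdStep is presumably 2, matching the even/odd ID split described in profile-generator.cc below):

  #include <cassert>
  #include <cstdint>

  const uint32_t kObjectIdStep = 2;           // assumed
  const uint32_t kGcRootsFirstSubrootId = 5;  // 1 (root) + step (gc roots) + step

  uint32_t GetNthGcSubrootId(int delta) {
    return kGcRootsFirstSubrootId + delta * kObjectIdStep;
  }

  // Fake "object" pointers reuse the reserved IDs; they are never
  // dereferenced, only compared and subtracted, as in the explorer.
  char* GetNthGcSubrootObject(int delta) {
    return reinterpret_cast<char*>(static_cast<uintptr_t>(kGcRootsFirstSubrootId))
        + delta * kObjectIdStep;
  }

  int GetGcSubrootOrder(char* subroot) {
    return static_cast<int>(
        (subroot -
         reinterpret_cast<char*>(static_cast<uintptr_t>(kGcRootsFirstSubrootId)))
        / kObjectIdStep);
  }

  int main() {
    for (int tag = 0; tag < 8; ++tag)
      assert(GetGcSubrootOrder(GetNthGcSubrootObject(tag)) == tag);
  }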
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index e319efb..2d0984e 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -152,9 +152,12 @@
 
 const char* StringsStorage::GetName(String* name) {
   if (name->IsString()) {
-    return AddOrDisposeString(
-        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach(),
-        name->Hash());
+    int length = Min(kMaxNameSize, name->length());
+    SmartArrayPointer<char> data =
+        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
+    uint32_t hash =
+        HashSequentialString(*data, length, name->GetHeap()->HashSeed());
+    return AddOrDisposeString(data.Detach(), hash);
   }
   return "";
 }
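
The rewritten GetName above truncates the copied name to kMaxNameSize characters and, importantly, hashes the truncated buffer rather than calling name->Hash() on the full string, so two long names sharing the stored prefix deduplicate to a single copy. The same idea outside V8, as a minimal sketch (kMaxNameSize value assumed, storage simplified to standard containers):

  #include <string>
  #include <unordered_map>

  const size_t kMaxNameSize = 1024;  // assumed; the real constant is private

  // Hands out one stable pointer per distinct truncated name, mimicking
  // AddOrDisposeString keyed on the truncated buffer's hash.
  const char* GetName(std::unordered_map<std::string, std::string>& storage,
                      const std::string& name) {
    std::string truncated = name.substr(0, kMaxNameSize);
    return storage.emplace(truncated, truncated).first->second.c_str();
  }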
@@ -493,8 +496,6 @@
 
 CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
 const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
-const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue =
-    CodeMap::CodeEntryInfo(NULL, 0);
 
 
 void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
@@ -903,7 +904,7 @@
       entry++;
     }
 
-    for (const Address *stack_pos = sample.stack,
+    for (const Address* stack_pos = sample.stack,
            *stack_end = stack_pos + sample.frames_count;
          stack_pos != stack_end;
          ++stack_pos) {
@@ -943,7 +944,7 @@
 
 
 void HeapGraphEdge::Init(int child_index, Type type, int index, HeapEntry* to) {
-  ASSERT(type == kElement || type == kHidden);
+  ASSERT(type == kElement || type == kHidden || type == kWeak);
   child_index_ = child_index;
   type_ = type;
   index_ = index;
@@ -964,25 +965,20 @@
 void HeapEntry::Init(HeapSnapshot* snapshot,
                      Type type,
                      const char* name,
-                     uint64_t id,
+                     SnapshotObjectId id,
                      int self_size,
                      int children_count,
                      int retainers_count) {
   snapshot_ = snapshot;
   type_ = type;
-  painted_ = kUnpainted;
+  painted_ = false;
   name_ = name;
   self_size_ = self_size;
   retained_size_ = 0;
   children_count_ = children_count;
   retainers_count_ = retainers_count;
   dominator_ = NULL;
-
-  union {
-    uint64_t set_id;
-    Id stored_id;
-  } id_adaptor = {id};
-  id_ = id_adaptor.stored_id;
+  id_ = id;
 }
 
 
@@ -991,8 +987,8 @@
                                   const char* name,
                                   HeapEntry* entry,
                                   int retainer_index) {
-  children_arr()[child_index].Init(child_index, type, name, entry);
-  entry->retainers_arr()[retainer_index] = children_arr() + child_index;
+  children()[child_index].Init(child_index, type, name, entry);
+  entry->retainers()[retainer_index] = children_arr() + child_index;
 }
 
 
@@ -1001,22 +997,14 @@
                                     int index,
                                     HeapEntry* entry,
                                     int retainer_index) {
-  children_arr()[child_index].Init(child_index, type, index, entry);
-  entry->retainers_arr()[retainer_index] = children_arr() + child_index;
+  children()[child_index].Init(child_index, type, index, entry);
+  entry->retainers()[retainer_index] = children_arr() + child_index;
 }
 
 
 void HeapEntry::SetUnidirElementReference(
     int child_index, int index, HeapEntry* entry) {
-  children_arr()[child_index].Init(child_index, index, entry);
-}
-
-
-int HeapEntry::RetainedSize(bool exact) {
-  if (exact && (retained_size_ & kExactRetainedSizeTag) == 0) {
-    CalculateExactRetainedSize();
-  }
-  return retained_size_ & (~kExactRetainedSizeTag);
+  children()[child_index].Init(child_index, index, entry);
 }
 
 
@@ -1025,41 +1013,11 @@
 }
 
 
-template<class Visitor>
-void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) {
-  List<HeapEntry*> list(10);
-  list.Add(this);
-  this->paint_reachable();
-  visitor->Apply(this);
-  while (!list.is_empty()) {
-    HeapEntry* entry = list.RemoveLast();
-    Vector<HeapGraphEdge> children = entry->children();
-    for (int i = 0; i < children.length(); ++i) {
-      if (children[i].type() == HeapGraphEdge::kShortcut) continue;
-      HeapEntry* child = children[i].to();
-      if (!child->painted_reachable()) {
-        list.Add(child);
-        child->paint_reachable();
-        visitor->Apply(child);
-      }
-    }
-  }
-}
-
-
-class NullClass {
- public:
-  void Apply(HeapEntry* entry) { }
-};
-
-void HeapEntry::PaintAllReachable() {
-  NullClass null;
-  ApplyAndPaintAllReachable(&null);
-}
-
-
-void HeapEntry::Print(int max_depth, int indent) {
-  OS::Print("%6d %6d [%llu] ", self_size(), RetainedSize(false), id());
+void HeapEntry::Print(
+    const char* prefix, const char* edge_name, int max_depth, int indent) {
+  OS::Print("%6d %7d @%6llu %*c %s%s: ",
+            self_size(), retained_size(), id(),
+            indent, ' ', prefix, edge_name);
   if (type() != kString) {
     OS::Print("%s %.40s\n", TypeAsString(), name_);
   } else {
@@ -1078,29 +1036,40 @@
   Vector<HeapGraphEdge> ch = children();
   for (int i = 0; i < ch.length(); ++i) {
     HeapGraphEdge& edge = ch[i];
+    const char* edge_prefix = "";
+    EmbeddedVector<char, 64> index;
+    const char* edge_name = index.start();
     switch (edge.type()) {
       case HeapGraphEdge::kContextVariable:
-        OS::Print("  %*c #%s: ", indent, ' ', edge.name());
+        edge_prefix = "#";
+        edge_name = edge.name();
         break;
       case HeapGraphEdge::kElement:
-        OS::Print("  %*c %d: ", indent, ' ', edge.index());
+        OS::SNPrintF(index, "%d", edge.index());
         break;
       case HeapGraphEdge::kInternal:
-        OS::Print("  %*c $%s: ", indent, ' ', edge.name());
+        edge_prefix = "$";
+        edge_name = edge.name();
         break;
       case HeapGraphEdge::kProperty:
-        OS::Print("  %*c %s: ", indent, ' ', edge.name());
+        edge_name = edge.name();
         break;
       case HeapGraphEdge::kHidden:
-        OS::Print("  %*c $%d: ", indent, ' ', edge.index());
+        edge_prefix = "$";
+        OS::SNPrintF(index, "%d", edge.index());
         break;
       case HeapGraphEdge::kShortcut:
-        OS::Print("  %*c ^%s: ", indent, ' ', edge.name());
+        edge_prefix = "^";
+        edge_name = edge.name();
+        break;
+      case HeapGraphEdge::kWeak:
+        edge_prefix = "w";
+        OS::SNPrintF(index, "%d", edge.index());
         break;
       default:
-        OS::Print("!!! unknown edge type: %d ", edge.type());
+        OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
     }
-    edge.to()->Print(max_depth, indent + 2);
+    edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2);
   }
 }
 
@@ -1116,73 +1085,21 @@
     case kRegExp: return "/regexp/";
     case kHeapNumber: return "/number/";
     case kNative: return "/native/";
+    case kSynthetic: return "/synthetic/";
     default: return "???";
   }
 }
 
 
-int HeapEntry::EntriesSize(int entries_count,
-                           int children_count,
-                           int retainers_count) {
+size_t HeapEntry::EntriesSize(int entries_count,
+                              int children_count,
+                              int retainers_count) {
   return sizeof(HeapEntry) * entries_count         // NOLINT
       + sizeof(HeapGraphEdge) * children_count     // NOLINT
       + sizeof(HeapGraphEdge*) * retainers_count;  // NOLINT
 }
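
EntriesSize above sizes a single flat allocation holding every entry, every child edge, and every retainer back-pointer, which is why the surrounding comment insists on keeping these structs small. A back-of-the-envelope sketch using the 64-bit expected sizes checked further down (48-byte entries, 24-byte edges, 8-byte retainer pointers):

  #include <cstddef>
  #include <cstdio>

  int main() {
    const size_t kEntry = 48, kEdge = 24, kRetainerPtr = 8;  // 64-bit values
    size_t entries = 1000000, children = 3000000, retainers = 3000000;
    size_t raw = kEntry * entries + kEdge * children + kRetainerPtr * retainers;
    printf("%zu MB\n", raw / (1024 * 1024));  // ~137 MB for a million-node graph
  }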
 
 
-class RetainedSizeCalculator {
- public:
-  RetainedSizeCalculator()
-      : retained_size_(0) {
-  }
-
-  int retained_size() const { return retained_size_; }
-
-  void Apply(HeapEntry** entry_ptr) {
-    if ((*entry_ptr)->painted_reachable()) {
-      retained_size_ += (*entry_ptr)->self_size();
-    }
-  }
-
- private:
-  int retained_size_;
-};
-
-void HeapEntry::CalculateExactRetainedSize() {
-  // To calculate retained size, first we paint all reachable nodes in
-  // one color, then we paint (or re-paint) all nodes reachable from
-  // other nodes with a different color. Then we sum up self sizes of
-  // nodes painted with the first color.
-  snapshot()->ClearPaint();
-  PaintAllReachable();
-
-  List<HeapEntry*> list(10);
-  HeapEntry* root = snapshot()->root();
-  if (this != root) {
-    list.Add(root);
-    root->paint_reachable_from_others();
-  }
-  while (!list.is_empty()) {
-    HeapEntry* curr = list.RemoveLast();
-    Vector<HeapGraphEdge> children = curr->children();
-    for (int i = 0; i < children.length(); ++i) {
-      if (children[i].type() == HeapGraphEdge::kShortcut) continue;
-      HeapEntry* child = children[i].to();
-      if (child != this && child->not_painted_reachable_from_others()) {
-        list.Add(child);
-        child->paint_reachable_from_others();
-      }
-    }
-  }
-
-  RetainedSizeCalculator ret_size_calc;
-  snapshot()->IterateEntries(&ret_size_calc);
-  retained_size_ = ret_size_calc.retained_size();
-  ASSERT((retained_size_ & kExactRetainedSizeTag) == 0);
-  retained_size_ |= kExactRetainedSizeTag;
-}
-
-
 // It is very important to keep objects that form a heap snapshot
 // as small as possible.
 namespace {  // Avoid littering the global namespace.
@@ -1191,12 +1108,15 @@
 
 template <> struct SnapshotSizeConstants<4> {
   static const int kExpectedHeapGraphEdgeSize = 12;
-  static const int kExpectedHeapEntrySize = 36;
+  static const int kExpectedHeapEntrySize = 32;
+  static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
 };
 
 template <> struct SnapshotSizeConstants<8> {
   static const int kExpectedHeapGraphEdgeSize = 24;
   static const int kExpectedHeapEntrySize = 48;
+  static const uint64_t kMaxSerializableSnapshotRawSize =
+      static_cast<uint64_t>(6000) * MB;
 };
 
 }  // namespace
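
This template pins the expected sizes of HeapGraphEdge and HeapEntry per pointer width, and the HeapSnapshot constructor below verifies them with STATIC_CHECK, so an accidentally added field fails the build instead of silently bloating snapshots. The idiom in freestanding form, a sketch with an illustrative layout (not HeapEntry's actual fields) and C++11's static_assert standing in for STATIC_CHECK:

  #include <cstddef>

  template <size_t ptr_size> struct Expected;
  template <> struct Expected<4> { static const size_t kEntrySize = 32; };
  template <> struct Expected<8> { static const size_t kEntrySize = 48; };

  struct Entry {           // illustrative stand-in for HeapEntry
    void* dominator_;
    const char* name_;
    unsigned id_;
    int self_size_, retained_size_, children_count_, retainers_count_;
    void* snapshot_;
  };

  static_assert(sizeof(Entry) == Expected<sizeof(void*)>::kEntrySize,
                "Entry grew; update Expected<> to match");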
@@ -1214,14 +1134,18 @@
       natives_root_entry_(NULL),
       raw_entries_(NULL),
       entries_sorted_(false) {
-  STATIC_ASSERT(
+  STATIC_CHECK(
       sizeof(HeapGraphEdge) ==
       SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
-  STATIC_ASSERT(
+  STATIC_CHECK(
       sizeof(HeapEntry) ==
-      SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize);  // NOLINT
+      SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
+  for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
+    gc_subroot_entries_[i] = NULL;
+  }
 }
 
+
 HeapSnapshot::~HeapSnapshot() {
   DeleteArray(raw_entries_);
 }
@@ -1247,6 +1171,7 @@
   (*entry_ptr)->clear_paint();
 }
 
+
 void HeapSnapshot::ClearPaint() {
   entries_.Iterate(HeapEntryClearPaint);
 }
@@ -1275,13 +1200,15 @@
 }
 
 
-HeapEntry* HeapSnapshot::AddNativesRootEntry(int children_count,
-                                                 int retainers_count) {
-  ASSERT(natives_root_entry_ == NULL);
-  return (natives_root_entry_ = AddEntry(
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag,
+                                           int children_count,
+                                           int retainers_count) {
+  ASSERT(gc_subroot_entries_[tag] == NULL);
+  ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
+  return (gc_subroot_entries_[tag] = AddEntry(
       HeapEntry::kObject,
-      "(Native objects)",
-      HeapObjectsMap::kNativesRootObjectId,
+      VisitorSynchronization::kTagNames[tag],
+      HeapObjectsMap::GetNthGcSubrootId(tag),
       0,
       children_count,
       retainers_count));
@@ -1290,7 +1217,7 @@
 
 HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
                                   const char* name,
-                                  uint64_t id,
+                                  SnapshotObjectId id,
                                   int size,
                                   int children_count,
                                   int retainers_count) {
@@ -1322,7 +1249,7 @@
 }
 
 
-HeapEntry* HeapSnapshot::GetEntryById(uint64_t id) {
+HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) {
   List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
 
   // Perform a binary search by id.
@@ -1331,7 +1258,7 @@
   while (low <= high) {
     int mid =
         (static_cast<unsigned int>(low) + static_cast<unsigned int>(high)) >> 1;
-    uint64_t mid_id = entries_by_id->at(mid)->id();
+    SnapshotObjectId mid_id = entries_by_id->at(mid)->id();
     if (mid_id > id)
       high = mid - 1;
     else if (mid_id < id)
@@ -1350,6 +1277,7 @@
   return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1;
 }
 
+
 List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
   if (!entries_sorted_) {
     entries_.Sort(SortByIds);
@@ -1360,17 +1288,20 @@
 
 
 void HeapSnapshot::Print(int max_depth) {
-  root()->Print(max_depth, 0);
+  root()->Print("", "", max_depth, 0);
 }
 
 
 // We split IDs on evens for embedder objects (see
 // HeapObjectsMap::GenerateId) and odds for native objects.
-const uint64_t HeapObjectsMap::kInternalRootObjectId = 1;
-const uint64_t HeapObjectsMap::kGcRootsObjectId = 3;
-const uint64_t HeapObjectsMap::kNativesRootObjectId = 5;
-// Increase kFirstAvailableObjectId if new 'special' objects appear.
-const uint64_t HeapObjectsMap::kFirstAvailableObjectId = 7;
+const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1;
+const SnapshotObjectId HeapObjectsMap::kGcRootsObjectId =
+    HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
+const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId =
+    HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
+const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
+    HeapObjectsMap::kGcRootsFirstSubrootId +
+    VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
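
With kObjectIdStep assumed to be 2 (real heap objects take the even IDs and native objects the odd ones, per the comment above), the ladder of reserved IDs works out to: internal root = 1, GC roots = 3, first GC subroot = 5, with the first ordinary object ID landing just past the last subroot. A quick check of the arithmetic:

  #include <cstdio>

  int main() {
    const unsigned kObjectIdStep = 2;       // assumed
    const unsigned kNumberOfSyncTags = 10;  // illustrative; the real count is
                                            // VisitorSynchronization's tag count
    unsigned internal_root = 1;
    unsigned gc_roots = internal_root + kObjectIdStep;  // 3
    unsigned first_subroot = gc_roots + kObjectIdStep;  // 5
    unsigned first_available =
        first_subroot + kNumberOfSyncTags * kObjectIdStep;  // 25 here
    printf("%u %u %u %u\n",
           internal_root, gc_roots, first_subroot, first_available);
  }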
 
 HeapObjectsMap::HeapObjectsMap()
     : initial_fill_mode_(true),
@@ -1390,13 +1321,13 @@
 }
 
 
-uint64_t HeapObjectsMap::FindObject(Address addr) {
+SnapshotObjectId HeapObjectsMap::FindObject(Address addr) {
   if (!initial_fill_mode_) {
-    uint64_t existing = FindEntry(addr);
+    SnapshotObjectId existing = FindEntry(addr);
     if (existing != 0) return existing;
   }
-  uint64_t id = next_id_;
-  next_id_ += 2;
+  SnapshotObjectId id = next_id_;
+  next_id_ += kObjectIdStep;
   AddEntry(addr, id);
   return id;
 }
@@ -1408,15 +1339,17 @@
   if (entry != NULL) {
     void* value = entry->value;
     entries_map_.Remove(from, AddressHash(from));
-    entry = entries_map_.Lookup(to, AddressHash(to), true);
-    // We can have an entry at the new location, it is OK, as GC can overwrite
-    // dead objects with alive objects being moved.
-    entry->value = value;
+    if (to != NULL) {
+      entry = entries_map_.Lookup(to, AddressHash(to), true);
+      // An entry can already exist at the new location: the GC can overwrite
+      // dead objects with live objects that are being moved.
+      entry->value = value;
+    }
   }
 }
 
 
-void HeapObjectsMap::AddEntry(Address addr, uint64_t id) {
+void HeapObjectsMap::AddEntry(Address addr, SnapshotObjectId id) {
   HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
   ASSERT(entry->value == NULL);
   entry->value = reinterpret_cast<void*>(entries_->length());
@@ -1424,7 +1357,7 @@
 }
 
 
-uint64_t HeapObjectsMap::FindEntry(Address addr) {
+SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
   HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
   if (entry != NULL) {
     int entry_index =
@@ -1464,8 +1397,8 @@
 }
 
 
-uint64_t HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
-  uint64_t id = static_cast<uint64_t>(info->GetHash());
+SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
+  SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
   const char* label = info->GetLabel();
   id ^= HashSequentialString(label,
                              static_cast<int>(strlen(label)),
@@ -1535,7 +1468,11 @@
 }
 
 
-Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) {
+Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
+    SnapshotObjectId id) {
+  // First perform a full GC in order to avoid dead objects.
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                          "HeapSnapshotsCollection::FindHeapObjectById");
   AssertNoAllocation no_allocation;
   HeapObject* object = NULL;
   HeapIterator iterator(HeapIterator::kFilterUnreachable);
@@ -1553,7 +1490,7 @@
 }
 
 
-HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
+HeapEntry* const HeapEntriesMap::kHeapEntryPlaceholder =
     reinterpret_cast<HeapEntry*>(1);
 
 HeapEntriesMap::HeapEntriesMap()
@@ -1682,12 +1619,18 @@
 }
 
 
-HeapObject *const V8HeapExplorer::kInternalRootObject =
+HeapObject* const V8HeapExplorer::kInternalRootObject =
     reinterpret_cast<HeapObject*>(
         static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
-HeapObject *const V8HeapExplorer::kGcRootsObject =
+HeapObject* const V8HeapExplorer::kGcRootsObject =
     reinterpret_cast<HeapObject*>(
         static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
+HeapObject* const V8HeapExplorer::kFirstGcSubrootObject =
+    reinterpret_cast<HeapObject*>(
+        static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
+HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
+    reinterpret_cast<HeapObject*>(
+        static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
 
 
 V8HeapExplorer::V8HeapExplorer(
@@ -1720,24 +1663,19 @@
     return snapshot_->AddRootEntry(children_count);
   } else if (object == kGcRootsObject) {
     return snapshot_->AddGcRootsEntry(children_count, retainers_count);
-  } else if (object->IsJSGlobalObject()) {
-    const char* tag = objects_tags_.GetTag(object);
-    const char* name = collection_->names()->GetName(
-        GetConstructorName(JSObject::cast(object)));
-    if (tag != NULL) {
-      name = collection_->names()->GetFormatted("%s / %s", name, tag);
-    }
-    return AddEntry(object,
-                    HeapEntry::kObject,
-                    name,
-                    children_count,
-                    retainers_count);
+  } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
+    return snapshot_->AddGcSubrootEntry(
+        GetGcSubrootOrder(object),
+        children_count,
+        retainers_count);
   } else if (object->IsJSFunction()) {
     JSFunction* func = JSFunction::cast(object);
     SharedFunctionInfo* shared = func->shared();
+    const char* name = shared->bound() ? "native_bind" :
+        collection_->names()->GetName(String::cast(shared->name()));
     return AddEntry(object,
                     HeapEntry::kClosure,
-                    collection_->names()->GetName(String::cast(shared->name())),
+                    name,
                     children_count,
                     retainers_count);
   } else if (object->IsJSRegExp()) {
@@ -1750,8 +1688,7 @@
   } else if (object->IsJSObject()) {
     return AddEntry(object,
                     HeapEntry::kObject,
-                    collection_->names()->GetName(
-                        GetConstructorName(JSObject::cast(object))),
+                    "",
                     children_count,
                     retainers_count);
   } else if (object->IsString()) {
@@ -1783,6 +1720,18 @@
                         : "",
                     children_count,
                     retainers_count);
+  } else if (object->IsGlobalContext()) {
+    return AddEntry(object,
+                    HeapEntry::kHidden,
+                    "system / GlobalContext",
+                    children_count,
+                    retainers_count);
+  } else if (object->IsContext()) {
+    return AddEntry(object,
+                    HeapEntry::kHidden,
+                    "system / Context",
+                    children_count,
+                    retainers_count);
   } else if (object->IsFixedArray() ||
              object->IsFixedDoubleArray() ||
              object->IsByteArray() ||
@@ -1822,9 +1771,38 @@
 }
 
 
+class GcSubrootsEnumerator : public ObjectVisitor {
+ public:
+  GcSubrootsEnumerator(
+      SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
+      : filler_(filler),
+        explorer_(explorer),
+        previous_object_count_(0),
+        object_count_(0) {
+  }
+  void VisitPointers(Object** start, Object** end) {
+    object_count_ += end - start;
+  }
+  void Synchronize(VisitorSynchronization::SyncTag tag) {
+    // Skip empty subroots.
+    if (previous_object_count_ != object_count_) {
+      previous_object_count_ = object_count_;
+      filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
+    }
+  }
+ private:
+  SnapshotFillerInterface* filler_;
+  V8HeapExplorer* explorer_;
+  intptr_t previous_object_count_;
+  intptr_t object_count_;
+};
+
+
 void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
   filler->AddEntry(kInternalRootObject, this);
   filler->AddEntry(kGcRootsObject, this);
+  GcSubrootsEnumerator enumerator(filler, this);
+  heap_->IterateRoots(&enumerator, VISIT_ALL);
 }
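
GcSubrootsEnumerator relies on the ObjectVisitor::Synchronize protocol: IterateRoots visits one category of roots at a time and then announces the category's tag, so a visitor that counts pointers can tell from the count delta whether the category that just ended was empty. A stripped-down sketch of that protocol with the V8 types replaced by stand-ins:

  #include <cstdio>
  #include <vector>

  struct Visitor {
    virtual void VisitPointers(void** start, void** end) = 0;
    virtual void Synchronize(int tag) = 0;
    virtual ~Visitor() {}
  };

  // Plays the role of Heap::IterateRoots: visit each root list, then
  // announce its tag.
  void IterateRoots(Visitor* v, std::vector<std::vector<void*> >& roots) {
    for (size_t tag = 0; tag < roots.size(); ++tag) {
      if (!roots[tag].empty())
        v->VisitPointers(&roots[tag][0], &roots[tag][0] + roots[tag].size());
      v->Synchronize(static_cast<int>(tag));
    }
  }

  struct SubrootCounter : Visitor {
    long previous_count, count;
    SubrootCounter() : previous_count(0), count(0) {}
    virtual void VisitPointers(void** start, void** end) { count += end - start; }
    virtual void Synchronize(int tag) {
      if (previous_count != count) {  // skip empty subroots, as above
        previous_count = count;
        printf("tag %d has roots\n", tag);
      }
    }
  };

  int main() {
    std::vector<std::vector<void*> > roots(3);
    roots[1].push_back(reinterpret_cast<void*>(0x1000));
    SubrootCounter counter;
    IterateRoots(&counter, roots);  // prints "tag 1 has roots"
  }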
 
 
@@ -1843,12 +1821,13 @@
 }
 
 
-int V8HeapExplorer::EstimateObjectsCount() {
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
   int objects_count = 0;
-  for (HeapObject* obj = iterator.next();
+  for (HeapObject* obj = iterator->next();
        obj != NULL;
-       obj = iterator.next(), ++objects_count) {}
+       obj = iterator->next()) {
+    objects_count++;
+  }
   return objects_count;
 }
 
@@ -1921,6 +1900,7 @@
           SetPropertyReference(
               obj, entry,
               heap_->prototype_symbol(), proto_or_map,
+              NULL,
               JSFunction::kPrototypeOrInitialMapOffset);
         } else {
           SetPropertyReference(
@@ -1928,17 +1908,27 @@
               heap_->prototype_symbol(), js_fun->prototype());
         }
       }
+      SharedFunctionInfo* shared_info = js_fun->shared();
+      // JSFunction has either bindings or literals and never both.
+      bool bound = shared_info->bound();
+      TagObject(js_fun->literals_or_bindings(),
+                bound ? "(function bindings)" : "(function literals)");
       SetInternalReference(js_fun, entry,
-                           "shared", js_fun->shared(),
+                           bound ? "bindings" : "literals",
+                           js_fun->literals_or_bindings(),
+                           JSFunction::kLiteralsOffset);
+      SetInternalReference(js_fun, entry,
+                           "shared", shared_info,
                            JSFunction::kSharedFunctionInfoOffset);
       TagObject(js_fun->unchecked_context(), "(context)");
       SetInternalReference(js_fun, entry,
                            "context", js_fun->unchecked_context(),
                            JSFunction::kContextOffset);
-      TagObject(js_fun->literals(), "(function literals)");
-      SetInternalReference(js_fun, entry,
-                           "literals", js_fun->literals(),
-                           JSFunction::kLiteralsOffset);
+      for (int i = JSFunction::kNonWeakFieldsEndOffset;
+           i < JSFunction::kSize;
+           i += kPointerSize) {
+        SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
+      }
     }
     TagObject(js_obj->properties(), "(object properties)");
     SetInternalReference(obj, entry,
@@ -1954,6 +1944,10 @@
       SetInternalReference(obj, entry, 1, cs->first());
       SetInternalReference(obj, entry, 2, cs->second());
     }
+    if (obj->IsSlicedString()) {
+      SlicedString* ss = SlicedString::cast(obj);
+      SetInternalReference(obj, entry, "parent", ss->parent());
+    }
     extract_indexed_refs = false;
   } else if (obj->IsGlobalContext()) {
     Context* context = Context::cast(obj);
@@ -1961,8 +1955,14 @@
               "(context func. result caches)");
     TagObject(context->normalized_map_cache(), "(context norm. map cache)");
     TagObject(context->runtime_context(), "(runtime context)");
-    TagObject(context->map_cache(), "(context map cache)");
     TagObject(context->data(), "(context data)");
+    for (int i = Context::FIRST_WEAK_SLOT;
+         i < Context::GLOBAL_CONTEXT_SLOTS;
+         ++i) {
+      SetWeakReference(obj, entry,
+                       i, context->get(i),
+                       FixedArray::OffsetOfElementAt(i));
+    }
   } else if (obj->IsMap()) {
     Map* map = Map::cast(obj);
     SetInternalReference(obj, entry,
@@ -1976,6 +1976,14 @@
                            "descriptors", map->instance_descriptors(),
                            Map::kInstanceDescriptorsOrBitField3Offset);
     }
+    if (map->prototype_transitions() != heap_->empty_fixed_array()) {
+      TagObject(map->prototype_transitions(), "(prototype transitions)");
+      SetInternalReference(obj,
+                           entry,
+                           "prototype_transitions",
+                           map->prototype_transitions(),
+                           Map::kPrototypeTransitionsOffset);
+    }
     SetInternalReference(obj, entry,
                          "code_cache", map->code_cache(),
                          Map::kCodeCacheOffset);
@@ -1997,6 +2005,9 @@
     SetInternalReference(obj, entry,
                          "script", shared->script(),
                          SharedFunctionInfo::kScriptOffset);
+    SetWeakReference(obj, entry,
+                     1, shared->initial_map(),
+                     SharedFunctionInfo::kInitialMapOffset);
   } else if (obj->IsScript()) {
     Script* script = Script::cast(obj);
     SetInternalReference(obj, entry,
@@ -2015,17 +2026,6 @@
     SetInternalReference(obj, entry,
                          "line_ends", script->line_ends(),
                          Script::kLineEndsOffset);
-  } else if (obj->IsDescriptorArray()) {
-    DescriptorArray* desc_array = DescriptorArray::cast(obj);
-    if (desc_array->length() > DescriptorArray::kContentArrayIndex) {
-      Object* content_array =
-          desc_array->get(DescriptorArray::kContentArrayIndex);
-      TagObject(content_array, "(map descriptor content)");
-      SetInternalReference(obj, entry,
-                           "content", content_array,
-                           FixedArray::OffsetOfElementAt(
-                               DescriptorArray::kContentArrayIndex));
-    }
   } else if (obj->IsCodeCache()) {
     CodeCache* code_cache = CodeCache::cast(obj);
     TagObject(code_cache->default_cache(), "(default code cache)");
@@ -2051,21 +2051,44 @@
 
 void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
                                               HeapEntry* entry) {
-  if (js_obj->IsJSFunction()) {
-    HandleScope hs;
-    JSFunction* func = JSFunction::cast(js_obj);
-    Context* context = func->context();
-    ZoneScope zscope(Isolate::Current(), DELETE_ON_EXIT);
-    SerializedScopeInfo* serialized_scope_info =
-        context->closure()->shared()->scope_info();
-    ScopeInfo<ZoneListAllocationPolicy> zone_scope_info(serialized_scope_info);
-    int locals_number = zone_scope_info.NumberOfLocals();
-    for (int i = 0; i < locals_number; ++i) {
-      String* local_name = *zone_scope_info.LocalName(i);
-      int idx = serialized_scope_info->ContextSlotIndex(local_name, NULL);
-      if (idx >= 0 && idx < context->length()) {
-        SetClosureReference(js_obj, entry, local_name, context->get(idx));
-      }
+  if (!js_obj->IsJSFunction()) return;
+
+  JSFunction* func = JSFunction::cast(js_obj);
+  Context* context = func->context();
+  ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+
+  if (func->shared()->bound()) {
+    FixedArray* bindings = func->function_bindings();
+    SetNativeBindReference(js_obj, entry, "bound_this",
+                           bindings->get(JSFunction::kBoundThisIndex));
+    SetNativeBindReference(js_obj, entry, "bound_function",
+                           bindings->get(JSFunction::kBoundFunctionIndex));
+    for (int i = JSFunction::kBoundArgumentsStartIndex;
+         i < bindings->length(); i++) {
+      const char* reference_name = collection_->names()->GetFormatted(
+          "bound_argument_%d",
+          i - JSFunction::kBoundArgumentsStartIndex);
+      SetNativeBindReference(js_obj, entry, reference_name,
+                             bindings->get(i));
+    }
+  } else {
+    // Add context allocated locals.
+    int context_locals = scope_info->ContextLocalCount();
+    for (int i = 0; i < context_locals; ++i) {
+      String* local_name = scope_info->ContextLocalName(i);
+      int idx = Context::MIN_CONTEXT_SLOTS + i;
+      SetClosureReference(js_obj, entry, local_name, context->get(idx));
+    }
+
+    // Add function variable.
+    if (scope_info->HasFunctionName()) {
+      String* name = scope_info->FunctionName();
+      int idx = Context::MIN_CONTEXT_SLOTS + context_locals;
+#ifdef DEBUG
+      VariableMode mode;
+      ASSERT(idx == scope_info->FunctionContextSlotIndex(name, &mode));
+#endif
+      SetClosureReference(js_obj, entry, name, context->get(idx));
     }
   }
 }
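
For a function created by Function.prototype.bind, shared()->bound() is true and literals_or_bindings() holds a FixedArray that the kBound*Index constants index into: the bound target function, the bound receiver, then the bound arguments. A small sketch of how the loop above derives edge names from that layout (the constant values are assumptions mirroring the loop's arithmetic):

  #include <cstdio>

  int main() {
    const int kBoundFunctionIndex = 0;        // assumed
    const int kBoundThisIndex = 1;            // assumed
    const int kBoundArgumentsStartIndex = 2;  // assumed
    int bindings_length = 5;  // e.g. f.bind(obj, a, b, c)
    printf("bound_function -> bindings[%d]\n", kBoundFunctionIndex);
    printf("bound_this     -> bindings[%d]\n", kBoundThisIndex);
    for (int i = kBoundArgumentsStartIndex; i < bindings_length; i++)
      printf("bound_argument_%d -> bindings[%d]\n",
             i - kBoundArgumentsStartIndex, i);
  }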
@@ -2083,6 +2106,7 @@
             SetPropertyReference(
                 js_obj, entry,
                 descs->GetKey(i), js_obj->InObjectPropertyAt(index),
+                NULL,
                 js_obj->GetInObjectPropertyOffset(index));
           } else {
             SetPropertyReference(
@@ -2096,7 +2120,29 @@
               js_obj, entry,
               descs->GetKey(i), descs->GetConstantFunction(i));
           break;
-        default: ;
+        case CALLBACKS: {
+          Object* callback_obj = descs->GetValue(i);
+          if (callback_obj->IsAccessorPair()) {
+            AccessorPair* accessors = AccessorPair::cast(callback_obj);
+            if (Object* getter = accessors->getter()) {
+              SetPropertyReference(js_obj, entry, descs->GetKey(i),
+                                   getter, "get-%s");
+            }
+            if (Object* setter = accessors->setter()) {
+              SetPropertyReference(js_obj, entry, descs->GetKey(i),
+                                   setter, "set-%s");
+            }
+          }
+          break;
+        }
+        case NORMAL:  // only in slow mode
+        case HANDLER:  // only in lookup results, not in descriptors
+        case INTERCEPTOR:  // only in lookup results, not in descriptors
+        case MAP_TRANSITION:  // we do not care about transitions here...
+        case ELEMENTS_TRANSITION:
+        case CONSTANT_TRANSITION:
+        case NULL_DESCRIPTOR:  // ... and not about "holes"
+          break;
       }
     }
   } else {
@@ -2161,15 +2207,16 @@
 
 
 String* V8HeapExplorer::GetConstructorName(JSObject* object) {
-  if (object->IsJSFunction()) return HEAP->closure_symbol();
+  Heap* heap = object->GetHeap();
+  if (object->IsJSFunction()) return heap->closure_symbol();
   String* constructor_name = object->constructor_name();
-  if (constructor_name == HEAP->Object_symbol()) {
+  if (constructor_name == heap->Object_symbol()) {
     // Look up an immediate "constructor" property, if it is a function,
     // return its name. This is for instances of binding objects, which
     // have prototype constructor type "Object".
     Object* constructor_prop = NULL;
-    LookupResult result;
-    object->LocalLookupRealNamedProperty(HEAP->constructor_symbol(), &result);
+    LookupResult result(heap->isolate());
+    object->LocalLookupRealNamedProperty(heap->constructor_symbol(), &result);
     if (result.IsProperty()) {
       constructor_prop = result.GetLazyValue();
     }
@@ -2192,23 +2239,76 @@
 
 
 class RootsReferencesExtractor : public ObjectVisitor {
- public:
-  explicit RootsReferencesExtractor(V8HeapExplorer* explorer)
-      : explorer_(explorer) {
-  }
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) explorer_->SetGcRootsReference(*p);
-  }
  private:
-  V8HeapExplorer* explorer_;
+  struct IndexTag {
+    IndexTag(int index, VisitorSynchronization::SyncTag tag)
+        : index(index), tag(tag) { }
+    int index;
+    VisitorSynchronization::SyncTag tag;
+  };
+
+ public:
+  RootsReferencesExtractor()
+      : collecting_all_references_(false),
+        previous_reference_count_(0) {
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    if (collecting_all_references_) {
+      for (Object** p = start; p < end; p++) all_references_.Add(*p);
+    } else {
+      for (Object** p = start; p < end; p++) strong_references_.Add(*p);
+    }
+  }
+
+  void SetCollectingAllReferences() { collecting_all_references_ = true; }
+
+  void FillReferences(V8HeapExplorer* explorer) {
+    ASSERT(strong_references_.length() <= all_references_.length());
+    for (int i = 0; i < reference_tags_.length(); ++i) {
+      explorer->SetGcRootsReference(reference_tags_[i].tag);
+    }
+    int strong_index = 0, all_index = 0, tags_index = 0;
+    while (all_index < all_references_.length()) {
+      if (strong_index < strong_references_.length() &&
+          strong_references_[strong_index] == all_references_[all_index]) {
+        explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+                                        false,
+                                        all_references_[all_index++]);
+        ++strong_index;
+      } else {
+        explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+                                        true,
+                                        all_references_[all_index++]);
+      }
+      if (reference_tags_[tags_index].index == all_index) ++tags_index;
+    }
+  }
+
+  void Synchronize(VisitorSynchronization::SyncTag tag) {
+    if (collecting_all_references_ &&
+        previous_reference_count_ != all_references_.length()) {
+      previous_reference_count_ = all_references_.length();
+      reference_tags_.Add(IndexTag(previous_reference_count_, tag));
+    }
+  }
+
+ private:
+  bool collecting_all_references_;
+  List<Object*> strong_references_;
+  List<Object*> all_references_;
+  int previous_reference_count_;
+  List<IndexTag> reference_tags_;
 };
 
 
 bool V8HeapExplorer::IterateAndExtractReferences(
     SnapshotFillerInterface* filler) {
-  filler_ = filler;
   HeapIterator iterator(HeapIterator::kFilterUnreachable);
+
+  filler_ = filler;
   bool interrupted = false;
+
   // Heap iteration with filtering must be finished in any case.
   for (HeapObject* obj = iterator.next();
        obj != NULL;
@@ -2223,13 +2323,42 @@
     return false;
   }
   SetRootGcRootsReference();
-  RootsReferencesExtractor extractor(this);
+  RootsReferencesExtractor extractor;
+  heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
+  extractor.SetCollectingAllReferences();
   heap_->IterateRoots(&extractor, VISIT_ALL);
+  extractor.FillReferences(this);
   filler_ = NULL;
   return progress_->ProgressReport(false);
 }
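
RootsReferencesExtractor discovers weak roots by subtraction: the first IterateRoots pass (VISIT_ONLY_STRONG) records the strong references, the second (VISIT_ALL) records everything, and FillReferences walks the two lists in lockstep, classifying any reference absent from the strong list as weak. This hinges on both passes enumerating the shared roots in the same order. The core merge, reduced to standard containers (tag bookkeeping elided):

  #include <cstdio>
  #include <vector>

  // Precondition: strong is an in-order subsequence of all, exactly as the
  // extractor assumes of the two IterateRoots passes.
  void ClassifyRoots(const std::vector<void*>& strong,
                     const std::vector<void*>& all) {
    size_t strong_index = 0;
    for (size_t all_index = 0; all_index < all.size(); ++all_index) {
      bool is_weak = true;
      if (strong_index < strong.size() &&
          strong[strong_index] == all[all_index]) {
        is_weak = false;
        ++strong_index;
      }
      printf("root %p is %s\n", all[all_index], is_weak ? "weak" : "strong");
    }
  }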
 
 
+bool V8HeapExplorer::IterateAndSetObjectNames(SnapshotFillerInterface* filler) {
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+  filler_ = filler;
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+    SetObjectName(obj);
+  }
+  return true;
+}
+
+
+void V8HeapExplorer::SetObjectName(HeapObject* object) {
+  if (!object->IsJSObject() || object->IsJSRegExp() || object->IsJSFunction()) {
+    return;
+  }
+  const char* name = collection_->names()->GetName(
+      GetConstructorName(JSObject::cast(object)));
+  if (object->IsJSGlobalObject()) {
+    const char* tag = objects_tags_.GetTag(object);
+    if (tag != NULL) {
+      name = collection_->names()->GetFormatted("%s / %s", name, tag);
+    }
+  }
+  GetEntry(object)->set_name(name);
+}
+
+
 void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
                                          HeapEntry* parent_entry,
                                          String* reference_name,
@@ -2246,6 +2375,22 @@
 }
 
 
+void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
+                                            HeapEntry* parent_entry,
+                                            const char* reference_name,
+                                            Object* child_obj) {
+  HeapEntry* child_entry = GetEntry(child_obj);
+  if (child_entry != NULL) {
+    filler_->SetNamedReference(HeapGraphEdge::kShortcut,
+                               parent_obj,
+                               parent_entry,
+                               reference_name,
+                               child_obj,
+                               child_entry);
+  }
+}
+
+
 void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
                                          HeapEntry* parent_entry,
                                          int index,
@@ -2314,19 +2459,45 @@
 }
 
 
+void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
+                                      HeapEntry* parent_entry,
+                                      int index,
+                                      Object* child_obj,
+                                      int field_offset) {
+  HeapEntry* child_entry = GetEntry(child_obj);
+  if (child_entry != NULL) {
+    filler_->SetIndexedReference(HeapGraphEdge::kWeak,
+                                 parent_obj,
+                                 parent_entry,
+                                 index,
+                                 child_obj,
+                                 child_entry);
+    IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+  }
+}
+
+
 void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
                                           HeapEntry* parent_entry,
                                           String* reference_name,
                                           Object* child_obj,
+                                          const char* name_format_string,
                                           int field_offset) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     HeapGraphEdge::Type type = reference_name->length() > 0 ?
         HeapGraphEdge::kProperty : HeapGraphEdge::kInternal;
+    const char* name = name_format_string != NULL ?
+        collection_->names()->GetFormatted(
+            name_format_string,
+            *reference_name->ToCString(DISALLOW_NULLS,
+                                       ROBUST_STRING_TRAVERSAL)) :
+        collection_->names()->GetName(reference_name);
+
     filler_->SetNamedReference(type,
                                parent_obj,
                                parent_entry,
-                               collection_->names()->GetName(reference_name),
+                               name,
                                child_obj,
                                child_entry);
     IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
@@ -2368,12 +2539,21 @@
 }
 
 
-void V8HeapExplorer::SetGcRootsReference(Object* child_obj) {
+void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
+  filler_->SetIndexedAutoIndexReference(
+      HeapGraphEdge::kElement,
+      kGcRootsObject, snapshot_->gc_roots(),
+      GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag));
+}
+
+
+void V8HeapExplorer::SetGcSubrootReference(
+    VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetIndexedAutoIndexReference(
-        HeapGraphEdge::kElement,
-        kGcRootsObject, snapshot_->gc_roots(),
+        is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
+        GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag),
         child_obj, child_entry);
   }
 }
@@ -2384,7 +2564,6 @@
       !obj->IsOddball() &&
       obj != heap_->raw_unchecked_empty_byte_array() &&
       obj != heap_->raw_unchecked_empty_fixed_array() &&
-      obj != heap_->raw_unchecked_empty_fixed_double_array() &&
       obj != heap_->raw_unchecked_empty_descriptor_array()) {
     objects_tags_.SetTag(obj, tag);
   }
@@ -2417,6 +2596,7 @@
 
 // Modifies heap. Must not be run during heap traversal.
 void V8HeapExplorer::TagGlobalObjects() {
+  HandleScope scope;
   Isolate* isolate = Isolate::Current();
   GlobalObjectsEnumerator enumerator;
   isolate->global_handles()->IterateAllRoots(&enumerator);
@@ -2427,6 +2607,7 @@
   const char** urls = NewArray<const char*>(enumerator.count());
   for (int i = 0, l = enumerator.count(); i < l; ++i) {
     urls[i] = NULL;
+    HandleScope scope;
     Handle<JSGlobalObject> global_obj = enumerator.at(i);
     Object* obj_document;
     if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
@@ -2464,9 +2645,43 @@
   NativeObjectsExplorer* explorer_;
 };
 
-HeapThing const NativeObjectsExplorer::kNativesRootObject =
-    reinterpret_cast<HeapThing>(
-        static_cast<intptr_t>(HeapObjectsMap::kNativesRootObjectId));
+
+class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
+ public:
+  BasicHeapEntriesAllocator(
+      HeapSnapshot* snapshot,
+      HeapEntry::Type entries_type)
+    : snapshot_(snapshot),
+      collection_(snapshot_->collection()),
+      entries_type_(entries_type) {
+  }
+  virtual HeapEntry* AllocateEntry(
+      HeapThing ptr, int children_count, int retainers_count);
+ private:
+  HeapSnapshot* snapshot_;
+  HeapSnapshotsCollection* collection_;
+  HeapEntry::Type entries_type_;
+};
+
+
+HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(
+    HeapThing ptr, int children_count, int retainers_count) {
+  v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
+  intptr_t elements = info->GetElementCount();
+  intptr_t size = info->GetSizeInBytes();
+  return snapshot_->AddEntry(
+      entries_type_,
+      elements != -1 ?
+          collection_->names()->GetFormatted(
+              "%s / %" V8_PTR_PREFIX "d entries",
+              info->GetLabel(),
+              info->GetElementCount()) :
+          collection_->names()->GetCopy(info->GetLabel()),
+      HeapObjectsMap::GenerateId(info),
+      size != -1 ? static_cast<int>(size) : 0,
+      children_count,
+      retainers_count);
+}
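
Entry creation for native and synthetic nodes is now delegated to two BasicHeapEntriesAllocator instances that differ only in the HeapEntry type they stamp (kNative for wrapped objects, kSynthetic for group nodes), replacing the old single AllocateEntry that branched on a sentinel. The pattern in miniature, with the V8 types reduced to stand-ins:

  #include <cstdio>

  enum EntryType { kSynthetic, kNative };

  struct Allocator {
    virtual void AllocateEntry(const char* label) = 0;
    virtual ~Allocator() {}
  };

  // One allocator class parameterized by the entry type it produces,
  // rather than one allocator that inspects what it is given.
  struct BasicAllocator : Allocator {
    explicit BasicAllocator(EntryType type) : type_(type) {}
    virtual void AllocateEntry(const char* label) {
      printf("type %d entry: %s\n", type_, label);
    }
    EntryType type_;
  };

  int main() {
    BasicAllocator synthetic(kSynthetic), native(kNative);
    synthetic.AllocateEntry("group");    // a native group node
    native.AllocateEntry("wrapped");     // a wrapped native object
  }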
 
 
 NativeObjectsExplorer::NativeObjectsExplorer(
@@ -2476,7 +2691,12 @@
       progress_(progress),
       embedder_queried_(false),
       objects_by_info_(RetainedInfosMatch),
+      native_groups_(StringsMatch),
       filler_(NULL) {
+  synthetic_entries_allocator_ =
+      new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic);
+  native_entries_allocator_ =
+      new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative);
 }
 
 
@@ -2491,37 +2711,15 @@
         reinterpret_cast<List<HeapObject*>* >(p->value);
     delete objects;
   }
-}
-
-
-HeapEntry* NativeObjectsExplorer::AllocateEntry(
-    HeapThing ptr, int children_count, int retainers_count) {
-  if (ptr == kNativesRootObject) {
-    return snapshot_->AddNativesRootEntry(children_count, retainers_count);
-  } else {
+  for (HashMap::Entry* p = native_groups_.Start();
+       p != NULL;
+       p = native_groups_.Next(p)) {
     v8::RetainedObjectInfo* info =
-        reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
-    intptr_t elements = info->GetElementCount();
-    intptr_t size = info->GetSizeInBytes();
-    return snapshot_->AddEntry(
-        HeapEntry::kNative,
-        elements != -1 ?
-            collection_->names()->GetFormatted(
-                "%s / %" V8_PTR_PREFIX "d entries",
-                info->GetLabel(),
-                info->GetElementCount()) :
-                collection_->names()->GetCopy(info->GetLabel()),
-        HeapObjectsMap::GenerateId(info),
-        size != -1 ? static_cast<int>(size) : 0,
-        children_count,
-        retainers_count);
+        reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
+    info->Dispose();
   }
-}
-
-
-void NativeObjectsExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
-  if (EstimateObjectsCount() <= 0) return;
-  filler->AddEntry(kNativesRootObject, this);
+  delete synthetic_entries_allocator_;
+  delete native_entries_allocator_;
 }
 
 
@@ -2556,6 +2754,29 @@
   embedder_queried_ = true;
 }
 
+void NativeObjectsExplorer::FillImplicitReferences() {
+  Isolate* isolate = Isolate::Current();
+  List<ImplicitRefGroup*>* groups =
+      isolate->global_handles()->implicit_ref_groups();
+  for (int i = 0; i < groups->length(); ++i) {
+    ImplicitRefGroup* group = groups->at(i);
+    HeapObject* parent = *group->parent_;
+    HeapEntry* parent_entry =
+        filler_->FindOrAddEntry(parent, native_entries_allocator_);
+    ASSERT(parent_entry != NULL);
+    Object*** children = group->children_;
+    for (size_t j = 0; j < group->length_; ++j) {
+      Object* child = *children[j];
+      HeapEntry* child_entry =
+          filler_->FindOrAddEntry(child, native_entries_allocator_);
+      filler_->SetNamedReference(
+          HeapGraphEdge::kInternal,
+          parent, parent_entry,
+          "native",
+          child, child_entry);
+    }
+  }
+}
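
A minimal embedder-side sketch of where such groups come from. Under the
public API of this V8 generation the entry point is, to the best of my
knowledge, V8::AddImplicitReferences (treat the exact signature as an
assumption); it records that |parent| keeps the listed children alive
through native code, and FillImplicitReferences above then surfaces each
pair as an internal "native" edge in the snapshot:

    #include <v8.h>

    // Called from a GC prologue callback: |wrapper| retains |child1| and
    // |child2| on the native side, invisibly to the JS heap graph.
    void RegisterImplicitGroup(v8::Persistent<v8::Object> wrapper,
                               v8::Persistent<v8::Value> child1,
                               v8::Persistent<v8::Value> child2) {
      v8::Persistent<v8::Value> children[] = { child1, child2 };
      v8::V8::AddImplicitReferences(wrapper, children, 2);
    }
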
 
 List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
     v8::RetainedObjectInfo* info) {
@@ -2572,34 +2793,82 @@
 
 bool NativeObjectsExplorer::IterateAndExtractReferences(
     SnapshotFillerInterface* filler) {
-  if (EstimateObjectsCount() <= 0) return true;
   filler_ = filler;
   FillRetainedObjects();
-  for (HashMap::Entry* p = objects_by_info_.Start();
-       p != NULL;
-       p = objects_by_info_.Next(p)) {
-    v8::RetainedObjectInfo* info =
-        reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
-    SetNativeRootReference(info);
-    List<HeapObject*>* objects =
-        reinterpret_cast<List<HeapObject*>* >(p->value);
-    for (int i = 0; i < objects->length(); ++i) {
-      SetWrapperNativeReferences(objects->at(i), info);
+  FillImplicitReferences();
+  if (EstimateObjectsCount() > 0) {
+    for (HashMap::Entry* p = objects_by_info_.Start();
+         p != NULL;
+         p = objects_by_info_.Next(p)) {
+      v8::RetainedObjectInfo* info =
+          reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
+      SetNativeRootReference(info);
+      List<HeapObject*>* objects =
+          reinterpret_cast<List<HeapObject*>* >(p->value);
+      for (int i = 0; i < objects->length(); ++i) {
+        SetWrapperNativeReferences(objects->at(i), info);
+      }
     }
+    SetRootNativeRootsReference();
   }
-  SetRootNativesRootReference();
   filler_ = NULL;
   return true;
 }
 
 
+class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo {
+ public:
+  explicit NativeGroupRetainedObjectInfo(const char* label)
+      : disposed_(false),
+        hash_(reinterpret_cast<intptr_t>(label)),
+        label_(label) {
+  }
+
+  virtual ~NativeGroupRetainedObjectInfo() {}
+  virtual void Dispose() {
+    CHECK(!disposed_);
+    disposed_ = true;
+    delete this;
+  }
+  virtual bool IsEquivalent(RetainedObjectInfo* other) {
+    return hash_ == other->GetHash() && !strcmp(label_, other->GetLabel());
+  }
+  virtual intptr_t GetHash() { return hash_; }
+  virtual const char* GetLabel() { return label_; }
+
+ private:
+  bool disposed_;
+  intptr_t hash_;
+  const char* label_;
+};
+
+
+NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
+    const char* label) {
+  const char* label_copy = collection_->names()->GetCopy(label);
+  uint32_t hash = HashSequentialString(label_copy,
+                                       static_cast<int>(strlen(label_copy)),
+                                       HEAP->HashSeed());
+  HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
+                                                hash, true);
+  if (entry->value == NULL)
+    entry->value = new NativeGroupRetainedObjectInfo(label);
+  return static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
+}
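
The group label comes from v8::RetainedObjectInfo::GetGroupLabel(), which
embedders may override (the v8-profiler.h default falls back to
GetLabel()). A rough sketch of an embedder-side info class, assuming the
RetainedObjectInfo interface of this era:

    #include <string.h>
    #include <v8-profiler.h>

    class DOMNodeInfo : public v8::RetainedObjectInfo {
     public:
      explicit DOMNodeInfo(const char* label) : label_(label) {}
      virtual void Dispose() { delete this; }
      virtual bool IsEquivalent(RetainedObjectInfo* other) {
        return strcmp(GetLabel(), other->GetLabel()) == 0;
      }
      virtual intptr_t GetHash() { return reinterpret_cast<intptr_t>(label_); }
      virtual const char* GetLabel() { return label_; }
      // Every info object reporting the same group label gets attached to
      // one synthetic group entry created by FindOrAddGroupInfo above.
      virtual const char* GetGroupLabel() { return "DOM nodes"; }
     private:
      const char* label_;
    };
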
+
+
 void NativeObjectsExplorer::SetNativeRootReference(
     v8::RetainedObjectInfo* info) {
-  HeapEntry* child_entry = filler_->FindOrAddEntry(info, this);
+  HeapEntry* child_entry =
+      filler_->FindOrAddEntry(info, native_entries_allocator_);
   ASSERT(child_entry != NULL);
-  filler_->SetIndexedAutoIndexReference(
-      HeapGraphEdge::kElement,
-      kNativesRootObject, snapshot_->natives_root(),
+  NativeGroupRetainedObjectInfo* group_info =
+      FindOrAddGroupInfo(info->GetGroupLabel());
+  HeapEntry* group_entry =
+      filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_);
+  filler_->SetNamedAutoIndexReference(
+      HeapGraphEdge::kInternal,
+      group_info, group_entry,
       info, child_entry);
 }
 
@@ -2608,7 +2877,8 @@
     HeapObject* wrapper, v8::RetainedObjectInfo* info) {
   HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
   ASSERT(wrapper_entry != NULL);
-  HeapEntry* info_entry = filler_->FindOrAddEntry(info, this);
+  HeapEntry* info_entry =
+      filler_->FindOrAddEntry(info, native_entries_allocator_);
   ASSERT(info_entry != NULL);
   filler_->SetNamedReference(HeapGraphEdge::kInternal,
                              wrapper, wrapper_entry,
@@ -2620,11 +2890,20 @@
 }
 
 
-void NativeObjectsExplorer::SetRootNativesRootReference() {
-  filler_->SetIndexedAutoIndexReference(
-      HeapGraphEdge::kElement,
-      V8HeapExplorer::kInternalRootObject, snapshot_->root(),
-      kNativesRootObject, snapshot_->natives_root());
+void NativeObjectsExplorer::SetRootNativeRootsReference() {
+  for (HashMap::Entry* entry = native_groups_.Start();
+       entry;
+       entry = native_groups_.Next(entry)) {
+    NativeGroupRetainedObjectInfo* group_info =
+        static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
+    HeapEntry* group_entry =
+        filler_->FindOrAddEntry(group_info, native_entries_allocator_);
+    ASSERT(group_entry != NULL);
+    filler_->SetIndexedAutoIndexReference(
+        HeapGraphEdge::kElement,
+        V8HeapExplorer::kInternalRootObject, snapshot_->root(),
+        group_info, group_entry);
+  }
 }
 
 
@@ -2638,15 +2917,6 @@
 }
 
 
-HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
-                                             v8::ActivityControl* control)
-    : snapshot_(snapshot),
-      control_(control),
-      v8_heap_explorer_(snapshot_, this),
-      dom_explorer_(snapshot_, this) {
-}
-
-
 class SnapshotCounter : public SnapshotFillerInterface {
  public:
   explicit SnapshotCounter(HeapEntriesMap* entries) : entries_(entries) { }
@@ -2771,27 +3041,72 @@
 };
 
 
+HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
+                                             v8::ActivityControl* control)
+    : snapshot_(snapshot),
+      control_(control),
+      v8_heap_explorer_(snapshot_, this),
+      dom_explorer_(snapshot_, this) {
+}
+
+
 bool HeapSnapshotGenerator::GenerateSnapshot() {
   v8_heap_explorer_.TagGlobalObjects();
 
+  // TODO(1562) Profiler assumes that any object that is in the heap after
+  // full GC is reachable from the root when computing dominators.
+  // This is not true for weakly reachable objects.
+  // As a temporary solution we call GC twice.
+  Isolate::Current()->heap()->CollectAllGarbage(
+      Heap::kMakeHeapIterableMask,
+      "HeapSnapshotGenerator::GenerateSnapshot");
+  Isolate::Current()->heap()->CollectAllGarbage(
+      Heap::kMakeHeapIterableMask,
+      "HeapSnapshotGenerator::GenerateSnapshot");
+
+#ifdef DEBUG
+  Heap* debug_heap = Isolate::Current()->heap();
+  ASSERT(!debug_heap->old_data_space()->was_swept_conservatively());
+  ASSERT(!debug_heap->old_pointer_space()->was_swept_conservatively());
+  ASSERT(!debug_heap->code_space()->was_swept_conservatively());
+  ASSERT(!debug_heap->cell_space()->was_swept_conservatively());
+  ASSERT(!debug_heap->map_space()->was_swept_conservatively());
+#endif
+
+  // The following code uses heap iterators, so we want the heap to be
+  // stable. It should follow TagGlobalObjects as that can allocate.
   AssertNoAllocation no_alloc;
 
-  SetProgressTotal(4);  // 2 passes + dominators + sizes.
+#ifdef DEBUG
+  debug_heap->Verify();
+#endif
+
+  SetProgressTotal(2);  // 2 passes.
+
+#ifdef DEBUG
+  debug_heap->Verify();
+#endif
 
   // Pass 1. Iterate heap contents to count entries and references.
   if (!CountEntriesAndReferences()) return false;
 
-  // Allocate and fill entries in the snapshot, allocate references.
+#ifdef DEBUG
+  debug_heap->Verify();
+#endif
+
+  // Allocate memory for entries and references.
   snapshot_->AllocateEntries(entries_.entries_count(),
                              entries_.total_children_count(),
                              entries_.total_retainers_count());
+
+  // Allocate the heap-objects-to-entries hash map.
   entries_.AllocateEntries();
 
   // Pass 2. Fill references.
   if (!FillReferences()) return false;
 
   if (!SetEntriesDominators()) return false;
-  if (!ApproximateRetainedSizes()) return false;
+  if (!CalculateRetainedSizes()) return false;
 
   progress_counter_ = progress_total_;
   if (!ProgressReport(true)) return false;
@@ -2818,8 +3133,9 @@
 
 void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
   if (control_ == NULL) return;
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
   progress_total_ = (
-      v8_heap_explorer_.EstimateObjectsCount() +
+      v8_heap_explorer_.EstimateObjectsCount(&iterator) +
       dom_explorer_.EstimateObjectsCount()) * iterations_count;
   progress_counter_ = 0;
 }
@@ -2828,18 +3144,22 @@
 bool HeapSnapshotGenerator::CountEntriesAndReferences() {
   SnapshotCounter counter(&entries_);
   v8_heap_explorer_.AddRootEntries(&counter);
-  dom_explorer_.AddRootEntries(&counter);
-  return
-      v8_heap_explorer_.IterateAndExtractReferences(&counter) &&
-      dom_explorer_.IterateAndExtractReferences(&counter);
+  return v8_heap_explorer_.IterateAndExtractReferences(&counter)
+      && dom_explorer_.IterateAndExtractReferences(&counter);
 }
 
 
 bool HeapSnapshotGenerator::FillReferences() {
   SnapshotFiller filler(snapshot_, &entries_);
-  return
-      v8_heap_explorer_.IterateAndExtractReferences(&filler) &&
-      dom_explorer_.IterateAndExtractReferences(&filler);
+  // IterateAndExtractReferences cannot set object names because
+  // it makes a call to JSObject::LocalLookupRealNamedProperty, which
+  // in turn may relocate objects in property maps, thus changing the heap
+  // layout and affecting retainer counts. This is not acceptable because
+  // the number of retainers must not change between the count and fill
+  // passes. To avoid this, there is a separate postpass that sets object
+  // names.
+  return v8_heap_explorer_.IterateAndExtractReferences(&filler)
+      && dom_explorer_.IterateAndExtractReferences(&filler)
+      && v8_heap_explorer_.IterateAndSetObjectNames(&filler);
 }
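
The count/fill discipline matters because pass 1 sizes the flat arrays
that pass 2 writes into; if the two traversals disagree on even one edge,
the fill pass overruns or underfills its slots. A self-contained sketch of
the pattern (names are illustrative, not V8's):

    #include <vector>

    struct Node { std::vector<int> edges; };

    // Pass 1: count edges so storage can be allocated up front.
    static size_t CountEdges(const std::vector<Node>& graph) {
      size_t total = 0;
      for (size_t i = 0; i < graph.size(); ++i)
        total += graph[i].edges.size();
      return total;
    }

    // Pass 2: fill a flat array whose size was fixed by pass 1. The
    // traversal must visit exactly the same edges, in the same order,
    // which is why name lookup (which can move objects) is postponed.
    static void FillEdges(const std::vector<Node>& graph, int* out) {
      for (size_t i = 0; i < graph.size(); ++i)
        for (size_t j = 0; j < graph[i].edges.size(); ++j)
          *out++ = graph[i].edges[j];
    }

    int main() {
      std::vector<Node> graph(2);
      graph[0].edges.push_back(1);
      std::vector<int> flat(CountEdges(graph));  // pass 1 fixes the size
      FillEdges(graph, &flat[0]);                // pass 2 must match it
      return 0;
    }
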
 
 
@@ -2849,7 +3169,7 @@
   int current_entry = 0;
   List<HeapEntry*> nodes_to_visit;
   nodes_to_visit.Add(snapshot_->root());
-  snapshot_->root()->paint_reachable();
+  snapshot_->root()->paint();
   while (!nodes_to_visit.is_empty()) {
     HeapEntry* entry = nodes_to_visit.last();
     Vector<HeapGraphEdge> children = entry->children();
@@ -2857,9 +3177,9 @@
     for (int i = 0; i < children.length(); ++i) {
       if (children[i].type() == HeapGraphEdge::kShortcut) continue;
       HeapEntry* child = children[i].to();
-      if (!child->painted_reachable()) {
+      if (!child->painted()) {
         nodes_to_visit.Add(child);
-        child->paint_reachable();
+        child->paint();
         has_new_edges = true;
       }
     }
@@ -2869,63 +3189,76 @@
       nodes_to_visit.RemoveLast();
     }
   }
-  entries->Truncate(current_entry);
+  ASSERT_EQ(current_entry, entries->length());
 }
 
 
-static int Intersect(int i1, int i2, const Vector<HeapEntry*>& dominators) {
+static int Intersect(int i1, int i2, const Vector<int>& dominators) {
   int finger1 = i1, finger2 = i2;
   while (finger1 != finger2) {
-    while (finger1 < finger2) finger1 = dominators[finger1]->ordered_index();
-    while (finger2 < finger1) finger2 = dominators[finger2]->ordered_index();
+    while (finger1 < finger2) finger1 = dominators[finger1];
+    while (finger2 < finger1) finger2 = dominators[finger2];
   }
   return finger1;
 }
 
+
 // The algorithm is based on the article:
 // K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm"
 // Softw. Pract. Exper. 4 (2001), pp. 1-10.
 bool HeapSnapshotGenerator::BuildDominatorTree(
     const Vector<HeapEntry*>& entries,
-    Vector<HeapEntry*>* dominators) {
+    Vector<int>* dominators) {
   if (entries.length() == 0) return true;
   const int entries_length = entries.length(), root_index = entries_length - 1;
-  for (int i = 0; i < root_index; ++i) (*dominators)[i] = NULL;
-  (*dominators)[root_index] = entries[root_index];
-  int changed = 1;
-  const int base_progress_counter = progress_counter_;
-  while (changed != 0) {
-    changed = 0;
+  static const int kNoDominator = -1;
+  for (int i = 0; i < root_index; ++i) (*dominators)[i] = kNoDominator;
+  (*dominators)[root_index] = root_index;
+
+  // The affected array is used to mark entries whose dominators have
+  // to be recalculated because of changes in their retainers.
+  ScopedVector<bool> affected(entries_length);
+  for (int i = 0; i < affected.length(); ++i) affected[i] = false;
+  // Mark the root direct children as affected.
+  Vector<HeapGraphEdge> children = entries[root_index]->children();
+  for (int i = 0; i < children.length(); ++i) {
+    affected[children[i].to()->ordered_index()] = true;
+  }
+
+  bool changed = true;
+  while (changed) {
+    changed = false;
+    if (!ProgressReport(true)) return false;
     for (int i = root_index - 1; i >= 0; --i) {
-      HeapEntry* new_idom = NULL;
+      if (!affected[i]) continue;
+      affected[i] = false;
+      // If the dominator of the entry has already been set to the root,
+      // then it can't change any further.
+      if ((*dominators)[i] == root_index) continue;
+      int new_idom_index = kNoDominator;
       Vector<HeapGraphEdge*> rets = entries[i]->retainers();
-      int j = 0;
-      for (; j < rets.length(); ++j) {
+      for (int j = 0; j < rets.length(); ++j) {
         if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
-        HeapEntry* ret = rets[j]->From();
-        if (dominators->at(ret->ordered_index()) != NULL) {
-          new_idom = ret;
-          break;
+        int ret_index = rets[j]->From()->ordered_index();
+        if (dominators->at(ret_index) != kNoDominator) {
+          new_idom_index = new_idom_index == kNoDominator
+              ? ret_index
+              : Intersect(ret_index, new_idom_index, *dominators);
+          // If the idom has already reached the root, there is no need
+          // to check the remaining retainers.
+          if (new_idom_index == root_index) break;
         }
       }
-      for (++j; j < rets.length(); ++j) {
-        if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
-        HeapEntry* ret = rets[j]->From();
-        if (dominators->at(ret->ordered_index()) != NULL) {
-          new_idom = entries[Intersect(ret->ordered_index(),
-                                       new_idom->ordered_index(),
-                                       *dominators)];
+      if (new_idom_index != kNoDominator
+          && dominators->at(i) != new_idom_index) {
+        (*dominators)[i] = new_idom_index;
+        changed = true;
+        Vector<HeapGraphEdge> children = entries[i]->children();
+        for (int j = 0; j < children.length(); ++j) {
+          affected[children[j].to()->ordered_index()] = true;
         }
       }
-      if (new_idom != NULL && dominators->at(i) != new_idom) {
-        (*dominators)[i] = new_idom;
-        ++changed;
-      }
     }
-    int remaining = entries_length - changed;
-    if (remaining < 0) remaining = 0;
-    progress_counter_ = base_progress_counter + remaining;
-    if (!ProgressReport(true)) return false;
   }
   return true;
 }
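
Intersect relies on the reverse postorder numbering: the root has the
highest index and following a dominator link can only increase the index,
so walking the two fingers upward is guaranteed to meet at the nearest
common dominator. A standalone miniature of the same loop (hypothetical
four-node graph, not V8 code):

    #include <assert.h>

    // idom[i] holds the current immediate-dominator index of node i.
    static int Intersect(int i1, int i2, const int* idom) {
      while (i1 != i2) {
        while (i1 < i2) i1 = idom[i1];
        while (i2 < i1) i2 = idom[i2];
      }
      return i1;
    }

    int main() {
      // Node 3 is the root (its own dominator); nodes 1 and 2 hang off
      // the root and both retain node 0.
      int idom[] = { -1 /* not yet known */, 3, 3, 3 };
      // Node 0's two retainers meet at the root, so idom(0) becomes 3.
      assert(Intersect(1, 2, idom) == 3);
      return 0;
    }
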
@@ -2935,40 +3268,49 @@
   // This array is used for maintaining reverse postorder of nodes.
   ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries()->length());
   FillReversePostorderIndexes(&ordered_entries);
-  ScopedVector<HeapEntry*> dominators(ordered_entries.length());
+  ScopedVector<int> dominators(ordered_entries.length());
   if (!BuildDominatorTree(ordered_entries, &dominators)) return false;
   for (int i = 0; i < ordered_entries.length(); ++i) {
-    ASSERT(dominators[i] != NULL);
-    ordered_entries[i]->set_dominator(dominators[i]);
+    ASSERT(dominators[i] >= 0);
+    ordered_entries[i]->set_dominator(ordered_entries[dominators[i]]);
   }
   return true;
 }
 
 
-bool HeapSnapshotGenerator::ApproximateRetainedSizes() {
+bool HeapSnapshotGenerator::CalculateRetainedSizes() {
   // As for the dominators tree we only know parent nodes, not
   // children, to sum up total sizes we "bubble" node's self size
   // adding it to all of its parents.
-  for (int i = 0; i < snapshot_->entries()->length(); ++i) {
-    HeapEntry* entry = snapshot_->entries()->at(i);
+  List<HeapEntry*>& entries = *snapshot_->entries();
+  for (int i = 0; i < entries.length(); ++i) {
+    HeapEntry* entry = entries[i];
     entry->set_retained_size(entry->self_size());
   }
-  for (int i = 0;
-       i < snapshot_->entries()->length();
-       ++i, ProgressStep()) {
-    HeapEntry* entry = snapshot_->entries()->at(i);
+  for (int i = 0; i < entries.length(); ++i) {
+    HeapEntry* entry = entries[i];
     int entry_size = entry->self_size();
     for (HeapEntry* dominator = entry->dominator();
          dominator != entry;
          entry = dominator, dominator = entry->dominator()) {
       dominator->add_retained_size(entry_size);
     }
-    if (!ProgressReport()) return false;
   }
   return true;
 }
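
A worked miniature of the bubbling (hypothetical sizes): with a dominator
chain root(16 bytes) <- a(8) <- b(4), b's self size is added to a and to
the root, and a's to the root, giving retained sizes 4, 12, and 28:

    #include <stdio.h>

    struct Entry { Entry* dominator; int self_size; int retained_size; };

    int main() {
      Entry root = { &root, 16, 0 };  // the root dominates itself
      Entry a    = { &root,  8, 0 };
      Entry b    = { &a,     4, 0 };
      Entry* all[] = { &root, &a, &b };
      for (int i = 0; i < 3; ++i) all[i]->retained_size = all[i]->self_size;
      for (int i = 0; i < 3; ++i) {
        Entry* entry = all[i];
        int entry_size = entry->self_size;
        // Same loop shape as above: walk the dominator chain until the
        // self-dominating root terminates it.
        for (Entry* dom = entry->dominator; dom != entry;
             entry = dom, dom = entry->dominator) {
          dom->retained_size += entry_size;
        }
      }
      printf("%d %d %d\n", root.retained_size, a.retained_size,
             b.retained_size);  // prints: 28 12 4
      return 0;
    }
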
 
 
+template<int bytes> struct MaxDecimalDigitsIn;
+template<> struct MaxDecimalDigitsIn<4> {
+  static const int kSigned = 11;
+  static const int kUnsigned = 10;
+};
+template<> struct MaxDecimalDigitsIn<8> {
+  static const int kSigned = 20;
+  static const int kUnsigned = 20;
+};
+
+
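
The digit bounds above fall out of the extreme values: a 4-byte unsigned
tops out at 4294967295 (10 digits) while INT32_MIN prints as "-2147483648"
(11 characters), and both UINT64_MAX (18446744073709551615) and INT64_MIN
("-9223372036854775808") take 20 characters. A quick sanity check with
plain snprintf, outside V8:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      char buf[32];
      printf("%d\n", snprintf(buf, sizeof(buf), "%u", UINT32_MAX));  // 10
      printf("%d\n", snprintf(buf, sizeof(buf), "%d", INT32_MIN));   // 11
      printf("%d\n", snprintf(buf, sizeof(buf), "%llu",
                              (unsigned long long)UINT64_MAX));      // 20
      printf("%d\n", snprintf(buf, sizeof(buf), "%lld",
                              (long long)INT64_MIN));                // 20
      return 0;
    }
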
 class OutputStreamWriter {
  public:
   explicit OutputStreamWriter(v8::OutputStream* stream)
@@ -3018,23 +3360,34 @@
  private:
   template<typename T>
   void AddNumberImpl(T n, const char* format) {
-    ScopedVector<char> buffer(32);
-    int result = OS::SNPrintF(buffer, format, n);
-    USE(result);
-    ASSERT(result != -1);
-    AddString(buffer.start());
+    // Buffer for the longest value plus trailing \0
+    static const int kMaxNumberSize =
+        MaxDecimalDigitsIn<sizeof(T)>::kUnsigned + 1;
+    if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) {
+      int result = OS::SNPrintF(
+          chunk_.SubVector(chunk_pos_, chunk_size_), format, n);
+      ASSERT(result != -1);
+      chunk_pos_ += result;
+      MaybeWriteChunk();
+    } else {
+      EmbeddedVector<char, kMaxNumberSize> buffer;
+      int result = OS::SNPrintF(buffer, format, n);
+      USE(result);
+      ASSERT(result != -1);
+      AddString(buffer.start());
+    }
   }
   void MaybeWriteChunk() {
     ASSERT(chunk_pos_ <= chunk_size_);
     if (chunk_pos_ == chunk_size_) {
       WriteChunk();
-      chunk_pos_ = 0;
     }
   }
   void WriteChunk() {
     if (aborted_) return;
     if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) ==
         v8::OutputStream::kAbort) aborted_ = true;
+    chunk_pos_ = 0;
   }
 
   v8::OutputStream* stream_;
@@ -3044,15 +3397,14 @@
   bool aborted_;
 };
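
On the consumer side, the writer's chunks arrive through the embedder's
v8::OutputStream implementation. A rough sketch against the v8-profiler.h
interface of this era (method names assumed from that header):

    #include <stdio.h>
    #include <v8-profiler.h>

    class FileOutputStream : public v8::OutputStream {
     public:
      explicit FileOutputStream(FILE* file) : file_(file) {}
      virtual void EndOfStream() { fflush(file_); }
      virtual int GetChunkSize() { return 16 * 1024; }
      virtual WriteResult WriteAsciiChunk(char* data, int size) {
        // Returning kAbort makes OutputStreamWriter stop serializing.
        return fwrite(data, 1, size, file_) == static_cast<size_t>(size)
            ? kContinue : kAbort;
      }
     private:
      FILE* file_;
    };
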
 
-const int HeapSnapshotJSONSerializer::kMaxSerializableSnapshotRawSize =
-    256 * MB;
 
 void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
   ASSERT(writer_ == NULL);
   writer_ = new OutputStreamWriter(stream);
 
   HeapSnapshot* original_snapshot = NULL;
-  if (snapshot_->raw_entries_size() >= kMaxSerializableSnapshotRawSize) {
+  if (snapshot_->raw_entries_size() >=
+      SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) {
     // The snapshot is too big. Serialize a fake snapshot.
     original_snapshot = snapshot_;
     snapshot_ = CreateFakeSnapshot();
@@ -3079,8 +3431,14 @@
                                           snapshot_->uid());
   result->AllocateEntries(2, 1, 0);
   HeapEntry* root = result->AddRootEntry(1);
+  const char* text = snapshot_->collection()->names()->GetFormatted(
+      "The snapshot is too big. "
+      "Maximum snapshot size is %"  V8_PTR_PREFIX "u MB. "
+      "Actual snapshot size is %"  V8_PTR_PREFIX "u MB.",
+      SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB,
+      (snapshot_->raw_entries_size() + MB - 1) / MB);
   HeapEntry* message = result->AddEntry(
-      HeapEntry::kString, "The snapshot is too big", 0, 4, 0, 0);
+      HeapEntry::kString, text, 0, 4, 0, 0);
   root->SetUnidirElementReference(0, 1, message);
   result->SetDominatorsToSelf();
   return result;
@@ -3145,37 +3503,51 @@
 
 
 void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge) {
-  writer_->AddCharacter(',');
-  writer_->AddNumber(edge->type());
-  writer_->AddCharacter(',');
-  if (edge->type() == HeapGraphEdge::kElement
-      || edge->type() == HeapGraphEdge::kHidden) {
-    writer_->AddNumber(edge->index());
-  } else {
-    writer_->AddNumber(GetStringId(edge->name()));
-  }
-  writer_->AddCharacter(',');
-  writer_->AddNumber(GetNodeId(edge->to()));
+  // The buffer needs space for 3 ints, 3 commas and \0
+  static const int kBufferSize =
+      MaxDecimalDigitsIn<sizeof(int)>::kSigned * 3 + 3 + 1;  // NOLINT
+  EmbeddedVector<char, kBufferSize> buffer;
+  int edge_name_or_index = edge->type() == HeapGraphEdge::kElement
+      || edge->type() == HeapGraphEdge::kHidden
+      || edge->type() == HeapGraphEdge::kWeak
+      ? edge->index() : GetStringId(edge->name());
+  STATIC_CHECK(sizeof(int) == sizeof(edge->type()));  // NOLINT
+  STATIC_CHECK(sizeof(int) == sizeof(edge_name_or_index));  // NOLINT
+  STATIC_CHECK(sizeof(int) == sizeof(GetNodeId(edge->to())));  // NOLINT
+  int result = OS::SNPrintF(buffer, ",%d,%d,%d",
+      edge->type(), edge_name_or_index, GetNodeId(edge->to()));
+  USE(result);
+  ASSERT(result != -1);
+  writer_->AddString(buffer.start());
 }
 
 
 void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
-  writer_->AddCharacter('\n');
-  writer_->AddCharacter(',');
-  writer_->AddNumber(entry->type());
-  writer_->AddCharacter(',');
-  writer_->AddNumber(GetStringId(entry->name()));
-  writer_->AddCharacter(',');
-  writer_->AddNumber(entry->id());
-  writer_->AddCharacter(',');
-  writer_->AddNumber(entry->self_size());
-  writer_->AddCharacter(',');
-  writer_->AddNumber(entry->RetainedSize(false));
-  writer_->AddCharacter(',');
-  writer_->AddNumber(GetNodeId(entry->dominator()));
+  // The buffer needs space for 6 ints, 1 uint32_t, 7 commas, \n and \0
+  static const int kBufferSize =
+      6 * MaxDecimalDigitsIn<sizeof(int)>::kSigned  // NOLINT
+      + MaxDecimalDigitsIn<sizeof(uint32_t)>::kUnsigned  // NOLINT
+      + 7 + 1 + 1;
+  EmbeddedVector<char, kBufferSize> buffer;
   Vector<HeapGraphEdge> children = entry->children();
-  writer_->AddCharacter(',');
-  writer_->AddNumber(children.length());
+  STATIC_CHECK(sizeof(int) == sizeof(entry->type()));  // NOLINT
+  STATIC_CHECK(sizeof(int) == sizeof(GetStringId(entry->name())));  // NOLINT
+  STATIC_CHECK(sizeof(unsigned) == sizeof(entry->id()));  // NOLINT
+  STATIC_CHECK(sizeof(int) == sizeof(entry->self_size()));  // NOLINT
+  STATIC_CHECK(sizeof(int) == sizeof(entry->retained_size()));  // NOLINT
+  STATIC_CHECK(sizeof(int) == sizeof(GetNodeId(entry->dominator())));  // NOLINT
+  STATIC_CHECK(sizeof(int) == sizeof(children.length()));  // NOLINT
+  int result = OS::SNPrintF(buffer, "\n,%d,%d,%u,%d,%d,%d,%d",
+      entry->type(),
+      GetStringId(entry->name()),
+      entry->id(),
+      entry->self_size(),
+      entry->retained_size(),
+      GetNodeId(entry->dominator()),
+      children.length());
+  USE(result);
+  ASSERT(result != -1);
+  writer_->AddString(buffer.start());
   for (int i = 0; i < children.length(); ++i) {
     SerializeEdge(&children[i]);
     if (writer_->aborted()) return;
@@ -3210,7 +3582,8 @@
             "," JSON_S("closure")
             "," JSON_S("regexp")
             "," JSON_S("number")
-            "," JSON_S("native"))
+            "," JSON_S("native")
+            "," JSON_S("synthetic"))
         "," JSON_S("string")
         "," JSON_S("number")
         "," JSON_S("number")
@@ -3229,7 +3602,8 @@
                     "," JSON_S("property")
                     "," JSON_S("internal")
                     "," JSON_S("hidden")
-                    "," JSON_S("shortcut"))
+                    "," JSON_S("shortcut")
+                    "," JSON_S("weak"))
                 "," JSON_S("string_or_number")
                 "," JSON_S("node"))))));
 #undef JSON_S
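
Putting SerializeNode and SerializeEdge together: each node row carries
seven numbers in the field order declared by the meta section, immediately
followed by one three-number row per outgoing edge. A hypothetical,
hand-annotated fragment (all ids and string indexes made up):

    "nodes":[ ...
    ,3,7,42,128,256,1,2   // object, name=string #7, id=42, self=128,
                          // retained=256, dominator=node 1, 2 children
    ,2,11,9               //   property edge, name=string #11, to node 9
    ,1,0,13               //   element edge [0], to node 13
    ... ]
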
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 0beb109..d9a1319 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,6 +35,8 @@
 namespace v8 {
 namespace internal {
 
+typedef uint32_t SnapshotObjectId;
+
 class TokenEnumerator {
  public:
   TokenEnumerator();
@@ -74,6 +76,8 @@
   inline const char* GetFunctionName(const char* name);
 
  private:
+  static const int kMaxNameSize = 1024;
+
   INLINE(static bool StringsMatch(void* key1, void* key2)) {
     return strcmp(reinterpret_cast<char*>(key1),
                   reinterpret_cast<char*>(key2)) == 0;
@@ -257,7 +261,7 @@
     typedef Address Key;
     typedef CodeEntryInfo Value;
     static const Key kNoKey;
-    static const Value kNoValue;
+    static const Value NoValue() { return CodeEntryInfo(NULL, 0); }
     static int Compare(const Key& a, const Key& b) {
       return a < b ? -1 : (a > b ? 1 : 0);
     }
@@ -453,7 +457,8 @@
     kProperty = v8::HeapGraphEdge::kProperty,
     kInternal = v8::HeapGraphEdge::kInternal,
     kHidden = v8::HeapGraphEdge::kHidden,
-    kShortcut = v8::HeapGraphEdge::kShortcut
+    kShortcut = v8::HeapGraphEdge::kShortcut,
+    kWeak = v8::HeapGraphEdge::kWeak
   };
 
   HeapGraphEdge() { }
@@ -463,7 +468,7 @@
 
   Type type() { return static_cast<Type>(type_); }
   int index() {
-    ASSERT(type_ == kElement || type_ == kHidden);
+    ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
     return index_;
   }
   const char* name() {
@@ -522,14 +527,15 @@
     kClosure = v8::HeapGraphNode::kClosure,
     kRegExp = v8::HeapGraphNode::kRegExp,
     kHeapNumber = v8::HeapGraphNode::kHeapNumber,
-    kNative = v8::HeapGraphNode::kNative
+    kNative = v8::HeapGraphNode::kNative,
+    kSynthetic = v8::HeapGraphNode::kSynthetic
   };
 
   HeapEntry() { }
   void Init(HeapSnapshot* snapshot,
             Type type,
             const char* name,
-            uint64_t id,
+            SnapshotObjectId id,
             int self_size,
             int children_count,
             int retainers_count);
@@ -537,7 +543,8 @@
   HeapSnapshot* snapshot() { return snapshot_; }
   Type type() { return static_cast<Type>(type_); }
   const char* name() { return name_; }
-  inline uint64_t id();
+  void set_name(const char* name) { name_ = name; }
+  inline SnapshotObjectId id() { return id_; }
   int self_size() { return self_size_; }
   int retained_size() { return retained_size_; }
   void add_retained_size(int size) { retained_size_ += size; }
@@ -550,23 +557,13 @@
   Vector<HeapGraphEdge*> retainers() {
     return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
   HeapEntry* dominator() { return dominator_; }
-  void set_dominator(HeapEntry* entry) { dominator_ = entry; }
-
-  void clear_paint() { painted_ = kUnpainted; }
-  bool painted_reachable() { return painted_ == kPainted; }
-  void paint_reachable() {
-    ASSERT(painted_ == kUnpainted);
-    painted_ = kPainted;
+  void set_dominator(HeapEntry* entry) {
+    ASSERT(entry != NULL);
+    dominator_ = entry;
   }
-  bool not_painted_reachable_from_others() {
-    return painted_ != kPaintedReachableFromOthers;
-  }
-  void paint_reachable_from_others() {
-    painted_ = kPaintedReachableFromOthers;
-  }
-  template<class Visitor>
-  void ApplyAndPaintAllReachable(Visitor* visitor);
-  void PaintAllReachable();
+  void clear_paint() { painted_ = false; }
+  bool painted() { return painted_; }
+  void paint() { painted_ = true; }
 
   void SetIndexedReference(HeapGraphEdge::Type type,
                            int child_index,
@@ -580,16 +577,18 @@
                          int retainer_index);
   void SetUnidirElementReference(int child_index, int index, HeapEntry* entry);
 
-  int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
-  int RetainedSize(bool exact);
+  size_t EntrySize() {
+    return EntriesSize(1, children_count_, retainers_count_);
+  }
 
-  void Print(int max_depth, int indent);
+  void Print(
+      const char* prefix, const char* edge_name, int max_depth, int indent);
 
   Handle<HeapObject> GetHeapObject();
 
-  static int EntriesSize(int entries_count,
-                         int children_count,
-                         int retainers_count);
+  static size_t EntriesSize(int entries_count,
+                            int children_count,
+                            int retainers_count);
 
  private:
   HeapGraphEdge* children_arr() {
@@ -598,33 +597,22 @@
   HeapGraphEdge** retainers_arr() {
     return reinterpret_cast<HeapGraphEdge**>(children_arr() + children_count_);
   }
-  void CalculateExactRetainedSize();
   const char* TypeAsString();
 
-  unsigned painted_: 2;
+  unsigned painted_: 1;
   unsigned type_: 4;
-  int children_count_: 26;
+  int children_count_: 27;
   int retainers_count_;
   int self_size_;
   union {
     int ordered_index_;  // Used during dominator tree building.
     int retained_size_;  // At that moment, there is no retained size yet.
   };
+  SnapshotObjectId id_;
   HeapEntry* dominator_;
   HeapSnapshot* snapshot_;
-  struct Id {
-    uint32_t id1_;
-    uint32_t id2_;
-  } id_;  // This is to avoid extra padding of 64-bit value.
   const char* name_;
 
-  // Paints used for exact retained sizes calculation.
-  static const unsigned kUnpainted = 0;
-  static const unsigned kPainted = 1;
-  static const unsigned kPaintedReachableFromOthers = 2;
-
-  static const int kExactRetainedSizeTag = 1;
-
   DISALLOW_COPY_AND_ASSIGN(HeapEntry);
 };
 
@@ -656,22 +644,26 @@
   HeapEntry* root() { return root_entry_; }
   HeapEntry* gc_roots() { return gc_roots_entry_; }
   HeapEntry* natives_root() { return natives_root_entry_; }
+  HeapEntry* gc_subroot(int index) { return gc_subroot_entries_[index]; }
   List<HeapEntry*>* entries() { return &entries_; }
-  int raw_entries_size() { return raw_entries_size_; }
+  size_t raw_entries_size() { return raw_entries_size_; }
 
   void AllocateEntries(
       int entries_count, int children_count, int retainers_count);
   HeapEntry* AddEntry(HeapEntry::Type type,
                       const char* name,
-                      uint64_t id,
+                      SnapshotObjectId id,
                       int size,
                       int children_count,
                       int retainers_count);
   HeapEntry* AddRootEntry(int children_count);
   HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
+  HeapEntry* AddGcSubrootEntry(int tag,
+                               int children_count,
+                               int retainers_count);
   HeapEntry* AddNativesRootEntry(int children_count, int retainers_count);
   void ClearPaint();
-  HeapEntry* GetEntryById(uint64_t id);
+  HeapEntry* GetEntryById(SnapshotObjectId id);
   List<HeapEntry*>* GetSortedEntriesList();
   template<class Visitor>
   void IterateEntries(Visitor* visitor) { entries_.Iterate(visitor); }
@@ -690,10 +682,11 @@
   HeapEntry* root_entry_;
   HeapEntry* gc_roots_entry_;
   HeapEntry* natives_root_entry_;
+  HeapEntry* gc_subroot_entries_[VisitorSynchronization::kNumberOfSyncTags];
   char* raw_entries_;
   List<HeapEntry*> entries_;
   bool entries_sorted_;
-  int raw_entries_size_;
+  size_t raw_entries_size_;
 
   friend class HeapSnapshotTester;
 
@@ -707,26 +700,31 @@
   ~HeapObjectsMap();
 
   void SnapshotGenerationFinished();
-  uint64_t FindObject(Address addr);
+  SnapshotObjectId FindObject(Address addr);
   void MoveObject(Address from, Address to);
 
-  static uint64_t GenerateId(v8::RetainedObjectInfo* info);
+  static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
+  static inline SnapshotObjectId GetNthGcSubrootId(int delta);
 
-  static const uint64_t kInternalRootObjectId;
-  static const uint64_t kGcRootsObjectId;
-  static const uint64_t kNativesRootObjectId;
-  static const uint64_t kFirstAvailableObjectId;
+  static const int kObjectIdStep = 2;
+  static const SnapshotObjectId kInternalRootObjectId;
+  static const SnapshotObjectId kGcRootsObjectId;
+  static const SnapshotObjectId kNativesRootObjectId;
+  static const SnapshotObjectId kGcRootsFirstSubrootId;
+  static const SnapshotObjectId kFirstAvailableObjectId;
 
  private:
   struct EntryInfo {
-    explicit EntryInfo(uint64_t id) : id(id), accessed(true) { }
-    EntryInfo(uint64_t id, bool accessed) : id(id), accessed(accessed) { }
-    uint64_t id;
+    explicit EntryInfo(SnapshotObjectId id) : id(id), accessed(true) { }
+    EntryInfo(SnapshotObjectId id, bool accessed)
+      : id(id),
+        accessed(accessed) { }
+    SnapshotObjectId id;
     bool accessed;
   };
 
-  void AddEntry(Address addr, uint64_t id);
-  uint64_t FindEntry(Address addr);
+  void AddEntry(Address addr, SnapshotObjectId id);
+  SnapshotObjectId FindEntry(Address addr);
   void RemoveDeadEntries();
 
   static bool AddressesMatch(void* key1, void* key2) {
@@ -740,7 +738,7 @@
   }
 
   bool initial_fill_mode_;
-  uint64_t next_id_;
+  SnapshotObjectId next_id_;
   HashMap entries_map_;
   List<EntryInfo>* entries_;
 
@@ -765,8 +763,8 @@
   StringsStorage* names() { return &names_; }
   TokenEnumerator* token_enumerator() { return token_enumerator_; }
 
-  uint64_t GetObjectId(Address addr) { return ids_.FindObject(addr); }
-  Handle<HeapObject> FindHeapObjectById(uint64_t id);
+  SnapshotObjectId GetObjectId(Address addr) { return ids_.FindObject(addr); }
+  Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
   void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
 
  private:
@@ -819,7 +817,7 @@
   int total_children_count() { return total_children_count_; }
   int total_retainers_count() { return total_retainers_count_; }
 
-  static HeapEntry *const kHeapEntryPlaceholder;
+  static HeapEntry* const kHeapEntryPlaceholder;
 
  private:
   struct EntryInfo {
@@ -922,8 +920,9 @@
   virtual HeapEntry* AllocateEntry(
       HeapThing ptr, int children_count, int retainers_count);
   void AddRootEntries(SnapshotFillerInterface* filler);
-  int EstimateObjectsCount();
+  int EstimateObjectsCount(HeapIterator* iterator);
   bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+  bool IterateAndSetObjectNames(SnapshotFillerInterface* filler);
   void TagGlobalObjects();
 
   static String* GetConstructorName(JSObject* object);
@@ -948,6 +947,10 @@
                            HeapEntry* parent,
                            String* reference_name,
                            Object* child);
+  void SetNativeBindReference(HeapObject* parent_obj,
+                              HeapEntry* parent,
+                              const char* reference_name,
+                              Object* child);
   void SetElementReference(HeapObject* parent_obj,
                            HeapEntry* parent,
                            int index,
@@ -966,10 +969,16 @@
                           HeapEntry* parent,
                           int index,
                           Object* child);
+  void SetWeakReference(HeapObject* parent_obj,
+                        HeapEntry* parent_entry,
+                        int index,
+                        Object* child_obj,
+                        int field_offset);
   void SetPropertyReference(HeapObject* parent_obj,
                             HeapEntry* parent,
                             String* reference_name,
                             Object* child,
+                            const char* name_format_string = NULL,
                             int field_offset = -1);
   void SetPropertyShortcutReference(HeapObject* parent_obj,
                                     HeapEntry* parent,
@@ -977,11 +986,17 @@
                                     Object* child);
   void SetRootShortcutReference(Object* child);
   void SetRootGcRootsReference();
-  void SetGcRootsReference(Object* child);
+  void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
+  void SetGcSubrootReference(
+      VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
+  void SetObjectName(HeapObject* object);
   void TagObject(Object* obj, const char* tag);
 
   HeapEntry* GetEntry(Object* obj);
 
+  static inline HeapObject* GetNthGcSubrootObject(int delta);
+  static inline int GetGcSubrootOrder(HeapObject* subroot);
+
   Heap* heap_;
   HeapSnapshot* snapshot_;
   HeapSnapshotsCollection* collection_;
@@ -990,31 +1005,36 @@
   HeapObjectsSet objects_tags_;
 
   static HeapObject* const kGcRootsObject;
+  static HeapObject* const kFirstGcSubrootObject;
+  static HeapObject* const kLastGcSubrootObject;
 
   friend class IndexedReferencesExtractor;
+  friend class GcSubrootsEnumerator;
   friend class RootsReferencesExtractor;
 
   DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
 };
 
 
+class NativeGroupRetainedObjectInfo;
+
+
 // An implementation of retained native objects extractor.
-class NativeObjectsExplorer : public HeapEntriesAllocator {
+class NativeObjectsExplorer {
  public:
   NativeObjectsExplorer(HeapSnapshot* snapshot,
                       SnapshottingProgressReportingInterface* progress);
   virtual ~NativeObjectsExplorer();
-  virtual HeapEntry* AllocateEntry(
-      HeapThing ptr, int children_count, int retainers_count);
   void AddRootEntries(SnapshotFillerInterface* filler);
   int EstimateObjectsCount();
   bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
 
  private:
   void FillRetainedObjects();
+  void FillImplicitReferences();
   List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
   void SetNativeRootReference(v8::RetainedObjectInfo* info);
-  void SetRootNativesRootReference();
+  void SetRootNativeRootsReference();
   void SetWrapperNativeReferences(HeapObject* wrapper,
                                       v8::RetainedObjectInfo* info);
   void VisitSubtreeWrapper(Object** p, uint16_t class_id);
@@ -1028,6 +1048,12 @@
         (reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent(
             reinterpret_cast<v8::RetainedObjectInfo*>(key2));
   }
+  INLINE(static bool StringsMatch(void* key1, void* key2)) {
+    return strcmp(reinterpret_cast<char*>(key1),
+                  reinterpret_cast<char*>(key2)) == 0;
+  }
+
+  NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
 
   HeapSnapshot* snapshot_;
   HeapSnapshotsCollection* collection_;
@@ -1036,6 +1062,9 @@
   HeapObjectsSet in_groups_;
   // RetainedObjectInfo* -> List<HeapObject*>*
   HashMap objects_by_info_;
+  HashMap native_groups_;
+  HeapEntriesAllocator* synthetic_entries_allocator_;
+  HeapEntriesAllocator* native_entries_allocator_;
   // Used during references extraction.
   SnapshotFillerInterface* filler_;
 
@@ -1054,9 +1083,9 @@
   bool GenerateSnapshot();
 
  private:
-  bool ApproximateRetainedSizes();
   bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
-                          Vector<HeapEntry*>* dominators);
+                          Vector<int>* dominators);
+  bool CalculateRetainedSizes();
   bool CountEntriesAndReferences();
   bool FillReferences();
   void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
diff --git a/src/property-details.h b/src/property-details.h
new file mode 100644
index 0000000..c79aa96
--- /dev/null
+++ b/src/property-details.h
@@ -0,0 +1,132 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PROPERTY_DETAILS_H_
+#define V8_PROPERTY_DETAILS_H_
+
+#include "../include/v8.h"
+#include "allocation.h"
+#include "utils.h"
+
+// Ecma-262 3rd 8.6.1
+enum PropertyAttributes {
+  NONE              = v8::None,
+  READ_ONLY         = v8::ReadOnly,
+  DONT_ENUM         = v8::DontEnum,
+  DONT_DELETE       = v8::DontDelete,
+  ABSENT            = 16  // Used in runtime to indicate a property is absent.
+  // ABSENT can never be stored in or returned from a descriptor's attributes
+  // bitfield.  It is only used as a return value meaning the attributes of
+  // a non-existent property.
+};
+
+
+namespace v8 {
+namespace internal {
+
+class Smi;
+
+// Type of properties.
+// Order of properties is significant.
+// Must fit in the BitField PropertyDetails::TypeField.
+// A copy of this is in mirror-debugger.js.
+enum PropertyType {
+  NORMAL                    = 0,  // only in slow mode
+  FIELD                     = 1,  // only in fast mode
+  CONSTANT_FUNCTION         = 2,  // only in fast mode
+  CALLBACKS                 = 3,
+  HANDLER                   = 4,  // only in lookup results, not in descriptors
+  INTERCEPTOR               = 5,  // only in lookup results, not in descriptors
+  // All properties before MAP_TRANSITION are real.
+  MAP_TRANSITION            = 6,  // only in fast mode
+  ELEMENTS_TRANSITION       = 7,
+  CONSTANT_TRANSITION       = 8,  // only in fast mode
+  NULL_DESCRIPTOR           = 9,  // only in fast mode
+  // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
+  // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
+  // nonexistent properties.
+  NONEXISTENT = NULL_DESCRIPTOR
+};
+
+
+// PropertyDetails captures type and attributes for a property.
+// They are used both in property dictionaries and instance descriptors.
+class PropertyDetails BASE_EMBEDDED {
+ public:
+  PropertyDetails(PropertyAttributes attributes,
+                  PropertyType type,
+                  int index = 0) {
+    ASSERT(TypeField::is_valid(type));
+    ASSERT(AttributesField::is_valid(attributes));
+    ASSERT(StorageField::is_valid(index));
+
+    value_ = TypeField::encode(type)
+        | AttributesField::encode(attributes)
+        | StorageField::encode(index);
+
+    ASSERT(type == this->type());
+    ASSERT(attributes == this->attributes());
+    ASSERT(index == this->index());
+  }
+
+  // Conversion for storing details as Object*.
+  explicit inline PropertyDetails(Smi* smi);
+  inline Smi* AsSmi();
+
+  PropertyType type() { return TypeField::decode(value_); }
+
+  PropertyAttributes attributes() { return AttributesField::decode(value_); }
+
+  int index() { return StorageField::decode(value_); }
+
+  inline PropertyDetails AsDeleted();
+
+  static bool IsValidIndex(int index) {
+    return StorageField::is_valid(index);
+  }
+
+  bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
+  bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
+  bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
+  bool IsDeleted() { return DeletedField::decode(value_) != 0;}
+
+  // Bit fields in value_ (type, shift, size). Must be public so the
+  // constants can be embedded in generated code.
+  class TypeField:       public BitField<PropertyType,       0, 4> {};
+  class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
+  class DeletedField:    public BitField<uint32_t,           7, 1> {};
+  class StorageField:    public BitField<uint32_t,           8, 32-8> {};
+
+  static const int kInitialIndex = 1;
+
+ private:
+  uint32_t value_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_PROPERTY_DETAILS_H_
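
The encoding packs type, attributes, and storage index into one uint32_t
via V8's BitField template (src/utils.h). A stripped-down, self-contained
equivalent showing the round trip, with the field widths copied from the
header above:

    #include <stdint.h>
    #include <stdio.h>

    template<class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    typedef BitField<unsigned, 0, 4> TypeField;        // PropertyType
    typedef BitField<unsigned, 4, 3> AttributesField;  // PropertyAttributes

    int main() {
      // CALLBACKS (3) with READ_ONLY | DONT_DELETE (1 | 4).
      uint32_t value = TypeField::encode(3) | AttributesField::encode(5);
      printf("type=%u attrs=%u\n",
             TypeField::decode(value), AttributesField::decode(value));
      return 0;  // prints: type=3 attrs=5
    }
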
diff --git a/src/property.cc b/src/property.cc
index 7cc2df5..78f237d 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,6 +31,15 @@
 namespace internal {
 
 
+void LookupResult::Iterate(ObjectVisitor* visitor) {
+  LookupResult* current = this;  // Could be NULL.
+  while (current != NULL) {
+    visitor->VisitPointer(BitCast<Object**>(&current->holder_));
+    current = current->next_;
+  }
+}
+
+
 #ifdef OBJECT_PRINT
 void LookupResult::Print(FILE* out) {
   if (!IsFound()) {
@@ -82,6 +91,9 @@
       break;
     case CONSTANT_TRANSITION:
       PrintF(out, " -type = constant property transition\n");
+      PrintF(out, " -map:\n");
+      GetTransitionMap()->Print(out);
+      PrintF(out, "\n");
       break;
     case NULL_DESCRIPTOR:
       PrintF(out, " =type = null descriptor\n");
@@ -102,4 +114,28 @@
 #endif
 
 
+bool Descriptor::ContainsTransition() {
+  switch (details_.type()) {
+    case MAP_TRANSITION:
+    case CONSTANT_TRANSITION:
+    case ELEMENTS_TRANSITION:
+      return true;
+    case CALLBACKS: {
+      if (!value_->IsAccessorPair()) return false;
+      AccessorPair* accessors = AccessorPair::cast(value_);
+      return accessors->getter()->IsMap() || accessors->setter()->IsMap();
+    }
+    case NORMAL:
+    case FIELD:
+    case CONSTANT_FUNCTION:
+    case HANDLER:
+    case INTERCEPTOR:
+    case NULL_DESCRIPTOR:
+      return false;
+  }
+  UNREACHABLE();  // Keep the compiler happy.
+  return false;
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/property.h b/src/property.h
index e7d9fc5..04f78b2 100644
--- a/src/property.h
+++ b/src/property.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,11 +49,8 @@
 
   MUST_USE_RESULT MaybeObject* KeyToSymbol() {
     if (!StringShape(key_).IsSymbol()) {
-      Object* result;
-      { MaybeObject* maybe_result = HEAP->LookupSymbol(key_);
-        if (!maybe_result->ToObject(&result)) return maybe_result;
-      }
-      key_ = String::cast(result);
+      MaybeObject* maybe_result = HEAP->LookupSymbol(key_);
+      if (!maybe_result->To(&key_)) return maybe_result;
     }
     return key_;
   }
@@ -71,6 +68,8 @@
     details_ = PropertyDetails(details_.attributes(), details_.type(), index);
   }
 
+  bool ContainsTransition();
+
  private:
   String* key_;
   Object* value_;
@@ -115,11 +114,9 @@
 class ElementsTransitionDescriptor: public Descriptor {
  public:
   ElementsTransitionDescriptor(String* key,
-                               Map* map,
-                               ElementsKind elements_kind)
-      : Descriptor(key, map, PropertyDetails(NONE,
-                                             ELEMENTS_TRANSITION,
-                                             elements_kind)) { }
+                               Object* map_or_array)
+      : Descriptor(key, map_or_array, PropertyDetails(NONE,
+                                                      ELEMENTS_TRANSITION)) { }
 };
 
 // Marks a field name in a map so that adding the field is guaranteed
@@ -164,12 +161,51 @@
 };
 
 
+template <class T>
+bool IsPropertyDescriptor(T* desc) {
+  switch (desc->type()) {
+    case NORMAL:
+    case FIELD:
+    case CONSTANT_FUNCTION:
+    case HANDLER:
+    case INTERCEPTOR:
+      return true;
+    case CALLBACKS: {
+      Object* callback_object = desc->GetCallbackObject();
+      // Non-JavaScript (i.e. native) accessors are always a property, otherwise
+      // either the getter or the setter must be an accessor. Put another way:
+      // If we only see map transitions and holes in a pair, this is not a
+      // property.
+      return (!callback_object->IsAccessorPair() ||
+              AccessorPair::cast(callback_object)->ContainsAccessor());
+    }
+    case MAP_TRANSITION:
+    case ELEMENTS_TRANSITION:
+    case CONSTANT_TRANSITION:
+    case NULL_DESCRIPTOR:
+      return false;
+  }
+  UNREACHABLE();  // keep the compiler happy
+  return false;
+}
+
+
 class LookupResult BASE_EMBEDDED {
  public:
-  LookupResult()
-      : lookup_type_(NOT_FOUND),
+  explicit LookupResult(Isolate* isolate)
+      : isolate_(isolate),
+        next_(isolate->top_lookup_result()),
+        lookup_type_(NOT_FOUND),
+        holder_(NULL),
         cacheable_(true),
-        details_(NONE, NORMAL) {}
+        details_(NONE, NORMAL) {
+    isolate->SetTopLookupResult(this);
+  }
+
+  ~LookupResult() {
+    ASSERT(isolate_->top_lookup_result() == this);
+    isolate_->SetTopLookupResult(next_);
+  }
 
   void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
     lookup_type_ = DESCRIPTOR_TYPE;
@@ -202,9 +238,9 @@
     number_ = entry;
   }
 
-  void HandlerResult() {
+  void HandlerResult(JSProxy* proxy) {
     lookup_type_ = HANDLER_TYPE;
-    holder_ = NULL;
+    holder_ = proxy;
     details_ = PropertyDetails(NONE, HANDLER);
     cacheable_ = false;
   }
@@ -217,11 +253,17 @@
 
   void NotFound() {
     lookup_type_ = NOT_FOUND;
+    holder_ = NULL;
   }
 
   JSObject* holder() {
     ASSERT(IsFound());
-    return holder_;
+    return JSObject::cast(holder_);
+  }
+
+  JSProxy* proxy() {
+    ASSERT(IsFound());
+    return JSProxy::cast(holder_);
   }
 
   PropertyType type() {
@@ -245,15 +287,9 @@
   bool IsFound() { return lookup_type_ != NOT_FOUND; }
   bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
 
-  // Is the result is a property excluding transitions and the null
-  // descriptor?
+  // Is the result a property, excluding transitions and the null descriptor?
   bool IsProperty() {
-    return IsFound() && (type() < FIRST_PHANTOM_PROPERTY_TYPE);
-  }
-
-  // Is the result a property or a transition?
-  bool IsPropertyOrTransition() {
-    return IsFound() && (type() != NULL_DESCRIPTOR);
+    return IsFound() && IsPropertyDescriptor(this);
   }
 
   bool IsCacheable() { return cacheable_; }
@@ -278,10 +314,12 @@
     }
   }
 
+
   Map* GetTransitionMap() {
     ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
-    ASSERT(type() == MAP_TRANSITION || type() == CONSTANT_TRANSITION ||
-           type() == ELEMENTS_TRANSITION);
+    ASSERT(type() == MAP_TRANSITION ||
+           type() == ELEMENTS_TRANSITION ||
+           type() == CONSTANT_TRANSITION);
     return Map::cast(GetValue());
   }
 
@@ -343,7 +381,12 @@
     return holder()->GetNormalizedProperty(this);
   }
 
+  void Iterate(ObjectVisitor* visitor);
+
  private:
+  Isolate* isolate_;
+  LookupResult* next_;
+
  // Where did we find the result?
   enum {
     NOT_FOUND,
@@ -354,7 +397,7 @@
     CONSTANT_TYPE
   } lookup_type_;
 
-  JSObject* holder_;
+  JSReceiver* holder_;
   int number_;
   bool cacheable_;
   PropertyDetails details_;
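
The new constructor/destructor pair above threads every stack-allocated
LookupResult onto a per-isolate chain so that LookupResult::Iterate (see
the property.cc hunk) can let the GC update holder_ when objects move. A
self-contained sketch of the RAII idiom, with placeholder names:

    #include <assert.h>
    #include <stddef.h>

    struct ScopedLookup;
    struct Isolate { ScopedLookup* top_lookup; };

    // Intrusive, strictly LIFO chain of live lookups; a GC visitor can
    // start at top_lookup and fix up pointers in every frame's lookup.
    struct ScopedLookup {
      explicit ScopedLookup(Isolate* isolate)
          : isolate_(isolate), next_(isolate->top_lookup) {
        isolate->top_lookup = this;
      }
      ~ScopedLookup() {
        assert(isolate_->top_lookup == this);  // must unwind in LIFO order
        isolate_->top_lookup = next_;
      }
      Isolate* isolate_;
      ScopedLookup* next_;
    };

    int main() {
      Isolate isolate = { NULL };
      {
        ScopedLookup outer(&isolate);
        ScopedLookup inner(&isolate);
        assert(isolate.top_lookup == &inner);
      }
      assert(isolate.top_lookup == NULL);
      return 0;
    }
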
diff --git a/src/proxy.js b/src/proxy.js
index 4e44cd4..4e86c88 100644
--- a/src/proxy.js
+++ b/src/proxy.js
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+"use strict";
+
 global.Proxy = new $Object();
 
 var $Proxy = global.Proxy
@@ -32,7 +34,10 @@
 $Proxy.create = function(handler, proto) {
   if (!IS_SPEC_OBJECT(handler))
     throw MakeTypeError("handler_non_object", ["create"])
-  if (!IS_SPEC_OBJECT(proto)) proto = null  // Mozilla does this...
+  if (IS_UNDEFINED(proto))
+    proto = null
+  else if (!(IS_SPEC_OBJECT(proto) || proto === null))
+    throw MakeTypeError("proto_non_object", ["create"])
   return %CreateJSProxy(handler, proto)
 }
 
@@ -42,8 +47,14 @@
   if (!IS_SPEC_FUNCTION(callTrap))
     throw MakeTypeError("trap_function_expected", ["createFunction", "call"])
   if (IS_UNDEFINED(constructTrap)) {
-    constructTrap = callTrap
-  } else if (!IS_SPEC_FUNCTION(constructTrap)) {
+    constructTrap = DerivedConstructTrap(callTrap)
+  } else if (IS_SPEC_FUNCTION(constructTrap)) {
+    // Make sure the trap receives 'undefined' as this.
+    var construct = constructTrap
+    constructTrap = function() {
+      return %Apply(construct, void 0, arguments, 0, %_ArgumentsLength());
+    }
+  } else {
     throw MakeTypeError("trap_function_expected",
                         ["createFunction", "construct"])
   }
@@ -57,6 +68,17 @@
 // Builtins
 ////////////////////////////////////////////////////////////////////////////////
 
+function DerivedConstructTrap(callTrap) {
+  return function() {
+    var proto = this.prototype
+    if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype
+    var obj = new $Object()
+    obj.__proto__ = proto
+    var result = %Apply(callTrap, obj, arguments, 0, %_ArgumentsLength());
+    return IS_SPEC_OBJECT(result) ? result : obj
+  }
+}
+
 function DelegateCallAndConstruct(callTrap, constructTrap) {
   return function() {
     return %Apply(%_IsConstructCall() ? constructTrap : callTrap,
@@ -136,9 +158,32 @@
   var enumerableNames = []
   for (var i = 0, count = 0; i < names.length; ++i) {
     var name = names[i]
-    if (this.getOwnPropertyDescriptor(TO_STRING_INLINE(name)).enumerable) {
+    var desc = this.getOwnPropertyDescriptor(TO_STRING_INLINE(name))
+    if (!IS_UNDEFINED(desc) && desc.enumerable) {
       enumerableNames[count++] = names[i]
     }
   }
   return enumerableNames
 }
+
+function DerivedEnumerateTrap() {
+  var names = this.getPropertyNames()
+  var enumerableNames = []
+  for (var i = 0, count = 0; i < names.length; ++i) {
+    var name = names[i]
+    var desc = this.getPropertyDescriptor(TO_STRING_INLINE(name))
+    if (!IS_UNDEFINED(desc) && desc.enumerable) {
+      enumerableNames[count++] = names[i]
+    }
+  }
+  return enumerableNames
+}
+
+function ProxyEnumerate(proxy) {
+  var handler = %GetHandler(proxy)
+  if (IS_UNDEFINED(handler.enumerate)) {
+    return %Apply(DerivedEnumerateTrap, handler, [], 0, 0)
+  } else {
+    return ToStringArray(handler.enumerate(), "enumerate")
+  }
+}
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index b32d71d..f843278 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -37,8 +37,8 @@
     RegExpMacroAssembler* assembler) :
   assembler_(assembler) {
   unsigned int type = assembler->Implementation();
-  ASSERT(type < 4);
-  const char* impl_names[4] = {"IA32", "ARM", "X64", "Bytecode"};
+  ASSERT(type < 5);
+  const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
   PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
 }
 
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index f91ea93..b6fb3c5 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -81,7 +81,7 @@
   if (subject->IsAsciiRepresentation()) {
     const byte* address;
     if (StringShape(subject).IsExternal()) {
-      const char* data = ExternalAsciiString::cast(subject)->resource()->data();
+      const char* data = ExternalAsciiString::cast(subject)->GetChars();
       address = reinterpret_cast<const byte*>(data);
     } else {
       ASSERT(subject->IsSeqAsciiString());
@@ -92,7 +92,7 @@
   }
   const uc16* data;
   if (StringShape(subject).IsExternal()) {
-    data = ExternalTwoByteString::cast(subject)->resource()->data();
+    data = ExternalTwoByteString::cast(subject)->GetChars();
   } else {
     ASSERT(subject->IsSeqTwoByteString());
     data = SeqTwoByteString::cast(subject)->GetChars();
@@ -133,7 +133,7 @@
     subject_ptr = slice->parent();
     slice_offset = slice->offset();
   }
-  // Ensure that an underlying string has the same ascii-ness.
+  // Ensure that an underlying string has the same ASCII-ness.
   bool is_ascii = subject_ptr->IsAsciiRepresentation();
   ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
   // String is now either Sequential or External
diff --git a/src/regexp.js b/src/regexp.js
index 38d4496..eb617ea 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,7 +28,7 @@
 // Expect $Object = global.Object;
 // Expect $Array = global.Array;
 
-const $RegExp = global.RegExp;
+var $RegExp = global.RegExp;
 
 // A recursive descent parser for Patterns according to the grammar of
 // ECMA-262 15.10.1, with deviations noted below.
@@ -95,12 +95,11 @@
   }
 }
 
-
 // Deprecated RegExp.prototype.compile method.  We behave as if the constructor
 // had been called again.  In SpiderMonkey, this method returns the regexp object.
 // In JSC, it returns undefined.  For compatibility with JSC, we match their
 // behavior.
-function CompileRegExp(pattern, flags) {
+function RegExpCompile(pattern, flags) {
   // Both JSC and SpiderMonkey treat a missing pattern argument as the
   // empty subject string, and an actual undefined value passed as the
   // pattern as the string 'undefined'.  Note that JSC is inconsistent
@@ -108,6 +107,11 @@
   // RegExp.prototype.compile and in the constructor, where they are
   // the empty string.  For compatibility with JSC, we match their
   // behavior.
+  if (this == $RegExp.prototype) {
+    // We don't allow recompiling RegExp.prototype.
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['RegExp.prototype.compile', this]);
+  }
   if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
     DoConstructRegExp(this, 'undefined', flags);
   } else {
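The new receiver check makes recompiling RegExp.prototype itself a TypeError while leaving ordinary instances untouched, for example:

    var re = /a/;
    re.compile('b', 'g');  // still allowed: re now behaves like /b/g
    try {
      RegExp.prototype.compile.call(RegExp.prototype, 'b');
    } catch (e) {
      // TypeError: 'incompatible_method_receiver'
    }
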
@@ -170,13 +174,6 @@
                         ['RegExp.prototype.exec', this]);
   }
 
-  if (%_ArgumentsLength() === 0) {
-    var regExpInput = LAST_INPUT(lastMatchInfo);
-    if (IS_UNDEFINED(regExpInput)) {
-      throw MakeError('no_input_to_regexp', [this]);
-    }
-    string = regExpInput;
-  }
   string = TO_STRING_INLINE(string);
   var lastIndex = this.lastIndex;
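The deleted branch made a zero-argument exec() fall back to the static RegExp.input, throwing 'no_input_to_regexp' when that was unset. With it gone, the missing argument simply flows through TO_STRING_INLINE (the same change is applied to RegExp.prototype.test below), so:

    /undefined/.exec();  // now matches, same as /undefined/.exec('undefined')
    /x/.exec();          // null; no implicit fallback to RegExp.input
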
 
@@ -225,14 +222,6 @@
     throw MakeTypeError('incompatible_method_receiver',
                         ['RegExp.prototype.test', this]);
   }
-  if (%_ArgumentsLength() == 0) {
-    var regExpInput = LAST_INPUT(lastMatchInfo);
-    if (IS_UNDEFINED(regExpInput)) {
-      throw MakeError('no_input_to_regexp', [this]);
-    }
-    string = regExpInput;
-  }
-
   string = TO_STRING_INLINE(string);
 
   var lastIndex = this.lastIndex;
@@ -261,29 +250,32 @@
     // Remove irrelevant preceding '.*' in a non-global test regexp.
     // The expression checks whether this.source starts with '.*' and
     // that the third char is not a '?'.
-    if (%_StringCharCodeAt(this.source, 0) == 46 &&  // '.'
-        %_StringCharCodeAt(this.source, 1) == 42 &&  // '*'
-        %_StringCharCodeAt(this.source, 2) != 63) {  // '?'
-      if (!%_ObjectEquals(regexp_key, this)) {
-        regexp_key = this;
-        regexp_val = new $RegExp(SubString(this.source, 2, this.source.length),
-                                 (!this.ignoreCase
-                                  ? !this.multiline ? "" : "m"
-                                  : !this.multiline ? "i" : "im"));
-      }
-      if (%_RegExpExec(regexp_val, string, 0, lastMatchInfo) === null) {
-        return false;
-      }
+    var regexp = this;
+    if (%_StringCharCodeAt(regexp.source, 0) == 46 &&  // '.'
+        %_StringCharCodeAt(regexp.source, 1) == 42 &&  // '*'
+        %_StringCharCodeAt(regexp.source, 2) != 63) {  // '?'
+      regexp = TrimRegExp(regexp);
     }
-    %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
+    %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]);
     // matchIndices is either null or the lastMatchInfo array.
-    var matchIndices = %_RegExpExec(this, string, 0, lastMatchInfo);
+    var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
     if (matchIndices === null) return false;
     lastMatchInfoOverride = null;
     return true;
   }
 }
 
+function TrimRegExp(regexp) {
+  if (!%_ObjectEquals(regexp_key, regexp)) {
+    regexp_key = regexp;
+    regexp_val =
+      new $RegExp(SubString(regexp.source, 2, regexp.source.length),
+                  (regexp.ignoreCase ? regexp.multiline ? "im" : "i"
+                                     : regexp.multiline ? "m" : ""));
+  }
+  return regexp_val;
+}
+
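TrimRegExp caches, in regexp_key/regexp_val, a copy of a non-global test regexp with its leading '.*' stripped (flags preserved). Since '.*' can match the empty string, removing it never changes whether a match exists, only where it starts, and test() ignores the position:

    var re = /.*foo/;  // starts '.' '*', third char is not '?'
    re.test('xxfoo');  // true: internally runs the trimmed /foo/
    re.test('bar');    // false, the same answer the untrimmed regexp gives
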
 
 function RegExpToString() {
   // If this.source is an empty string, output /(?:)/.
@@ -358,7 +350,8 @@
   } else {
     var override = lastMatchInfoOverride;
     subject = override[override.length - 1];
-    start_index = override[override.length - 2] + subject.length;
+    var pattern = override[override.length - 3];
+    start_index = override[override.length - 2] + pattern.length;
   }
   return SubString(subject, start_index, subject.length);
 }
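With lastMatchInfoOverride active (set, for instance, by a global replace with a function replacer), the right context has to begin just past the last match, so the offset is advanced by the matched string's length; the old arithmetic added the whole subject's length and pointed past the end of the string. For example:

    'abcd'.replace(/b/g, function(m) { return m; });
    RegExp.rightContext;  // 'cd', the text after the final match of 'b'
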
@@ -408,7 +401,6 @@
 function SetUpRegExp() {
   %CheckIsBootstrapping();
   %FunctionSetInstanceClassName($RegExp, 'RegExp');
-  %FunctionSetPrototype($RegExp, new $Object());
   %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
   %SetCode($RegExp, RegExpConstructor);
 
@@ -416,7 +408,7 @@
     "exec", RegExpExec,
     "test", RegExpTest,
     "toString", RegExpToString,
-    "compile", CompileRegExp
+    "compile", RegExpCompile
   ));
 
   // The length of compile is 1 in SpiderMonkey.
@@ -425,63 +417,67 @@
   // The properties input, $input, and $_ are aliases for each other.  When this
   // value is set the value it is set to is coerced to a string.
   // Getter and setter for the input.
-  function RegExpGetInput() {
+  var RegExpGetInput = function() {
     var regExpInput = LAST_INPUT(lastMatchInfo);
     return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
-  }
-  function RegExpSetInput(string) {
+  };
+  var RegExpSetInput = function(string) {
     LAST_INPUT(lastMatchInfo) = ToString(string);
   };
 
-  %DefineAccessor($RegExp, 'input', GETTER, RegExpGetInput, DONT_DELETE);
-  %DefineAccessor($RegExp, 'input', SETTER, RegExpSetInput, DONT_DELETE);
-  %DefineAccessor($RegExp, '$_', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, '$_', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, '$input', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, '$input', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, 'input', RegExpGetInput,
+                                    RegExpSetInput, DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, '$_', RegExpGetInput,
+                                    RegExpSetInput, DONT_ENUM | DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, '$input', RegExpGetInput,
+                                    RegExpSetInput, DONT_ENUM | DONT_DELETE);
 
   // The properties multiline and $* are aliases for each other.  When this
   // value is set in SpiderMonkey, the value it is set to is coerced to a
   // boolean.  We mimic that behavior with a slight difference: in SpiderMonkey
   // the value of the expression 'RegExp.multiline = null' (for instance) is the
-  // boolean false (ie, the value after coercion), while in V8 it is the value
-  // null (ie, the value before coercion).
+  // boolean false (i.e., the value after coercion), while in V8 it is the value
+  // null (i.e., the value before coercion).
 
   // Getter and setter for multiline.
   var multiline = false;
-  function RegExpGetMultiline() { return multiline; };
-  function RegExpSetMultiline(flag) { multiline = flag ? true : false; };
+  var RegExpGetMultiline = function() { return multiline; };
+  var RegExpSetMultiline = function(flag) { multiline = flag ? true : false; };
 
-  %DefineAccessor($RegExp, 'multiline', GETTER, RegExpGetMultiline, DONT_DELETE);
-  %DefineAccessor($RegExp, 'multiline', SETTER, RegExpSetMultiline, DONT_DELETE);
-  %DefineAccessor($RegExp, '$*', GETTER, RegExpGetMultiline, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, '$*', SETTER, RegExpSetMultiline, DONT_ENUM | DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, 'multiline', RegExpGetMultiline,
+                                    RegExpSetMultiline, DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, '$*', RegExpGetMultiline,
+                                    RegExpSetMultiline,
+                                    DONT_ENUM | DONT_DELETE);
 
 
-  function NoOpSetter(ignored) {}
+  var NoOpSetter = function(ignored) {};
 
 
   // Static properties set by a successful match.
-  %DefineAccessor($RegExp, 'lastMatch', GETTER, RegExpGetLastMatch, DONT_DELETE);
-  %DefineAccessor($RegExp, 'lastMatch', SETTER, NoOpSetter, DONT_DELETE);
-  %DefineAccessor($RegExp, '$&', GETTER, RegExpGetLastMatch, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, '$&', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, 'lastParen', GETTER, RegExpGetLastParen, DONT_DELETE);
-  %DefineAccessor($RegExp, 'lastParen', SETTER, NoOpSetter, DONT_DELETE);
-  %DefineAccessor($RegExp, '$+', GETTER, RegExpGetLastParen, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, '$+', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, 'leftContext', GETTER, RegExpGetLeftContext, DONT_DELETE);
-  %DefineAccessor($RegExp, 'leftContext', SETTER, NoOpSetter, DONT_DELETE);
-  %DefineAccessor($RegExp, '$`', GETTER, RegExpGetLeftContext, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, '$`', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, 'rightContext', GETTER, RegExpGetRightContext, DONT_DELETE);
-  %DefineAccessor($RegExp, 'rightContext', SETTER, NoOpSetter, DONT_DELETE);
-  %DefineAccessor($RegExp, "$'", GETTER, RegExpGetRightContext, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, "$'", SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, 'lastMatch', RegExpGetLastMatch,
+                                    NoOpSetter, DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, '$&', RegExpGetLastMatch,
+                                    NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, 'lastParen', RegExpGetLastParen,
+                                    NoOpSetter, DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, '$+', RegExpGetLastParen,
+                                    NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, 'leftContext',
+                                    RegExpGetLeftContext, NoOpSetter,
+                                    DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, '$`', RegExpGetLeftContext,
+                                    NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, 'rightContext',
+                                    RegExpGetRightContext, NoOpSetter,
+                                    DONT_DELETE);
+  %DefineOrRedefineAccessorProperty($RegExp, "$'", RegExpGetRightContext,
+                                    NoOpSetter, DONT_ENUM | DONT_DELETE);
 
   for (var i = 1; i < 10; ++i) {
-    %DefineAccessor($RegExp, '$' + i, GETTER, RegExpMakeCaptureGetter(i), DONT_DELETE);
-    %DefineAccessor($RegExp, '$' + i, SETTER, NoOpSetter, DONT_DELETE);
+    %DefineOrRedefineAccessorProperty($RegExp, '$' + i,
+                                      RegExpMakeCaptureGetter(i), NoOpSetter,
+                                      DONT_DELETE);
   }
 }
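The rewritten setup installs each legacy static as a getter/setter pair via a single %DefineOrRedefineAccessorProperty call; the observable aliasing and coercion behavior is unchanged:

    RegExp.input = 123;      // the setter coerces the value to a string
    RegExp.$_;               // '123': 'input', '$_' and '$input' alias
    RegExp.multiline = 1;    // coerced to a boolean by the setter
    RegExp['$*'];            // true: '$*' aliases 'multiline'
    RegExp.lastMatch = 'x';  // silently ignored by NoOpSetter
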
 
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 3d4c2dc..55f93ee 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,12 +42,18 @@
       : result_(result),
         result_assigned_(false),
         is_set_(false),
-        in_try_(false) {
-  }
+        in_try_(false),
+        factory_(isolate()) { }
+
+  virtual ~Processor() { }
 
   void Process(ZoneList<Statement*>* statements);
   bool result_assigned() const { return result_assigned_; }
 
+  AstNodeFactory<AstNullVisitor>* factory() {
+    return &factory_;
+  }
+
  private:
   Variable* result_;
 
@@ -64,15 +70,13 @@
   bool is_set_;
   bool in_try_;
 
+  AstNodeFactory<AstNullVisitor> factory_;
+
   Expression* SetResult(Expression* value) {
     result_assigned_ = true;
-    Zone* zone = isolate()->zone();
-    VariableProxy* result_proxy = new(zone) VariableProxy(isolate(), result_);
-    return new(zone) Assignment(isolate(),
-                                Token::ASSIGN,
-                                result_proxy,
-                                value,
-                                RelocInfo::kNoPosition);
+    VariableProxy* result_proxy = factory()->NewVariableProxy(result_);
+    return factory()->NewAssignment(
+        Token::ASSIGN, result_proxy, value, RelocInfo::kNoPosition);
   }
 
   // Node visitors.
@@ -205,7 +209,15 @@
 
 
 // Do nothing:
-void Processor::VisitDeclaration(Declaration* node) {}
+void Processor::VisitVariableDeclaration(VariableDeclaration* node) {}
+void Processor::VisitFunctionDeclaration(FunctionDeclaration* node) {}
+void Processor::VisitModuleDeclaration(ModuleDeclaration* node) {}
+void Processor::VisitImportDeclaration(ImportDeclaration* node) {}
+void Processor::VisitExportDeclaration(ExportDeclaration* node) {}
+void Processor::VisitModuleLiteral(ModuleLiteral* node) {}
+void Processor::VisitModuleVariable(ModuleVariable* node) {}
+void Processor::VisitModulePath(ModulePath* node) {}
+void Processor::VisitModuleUrl(ModuleUrl* node) {}
 void Processor::VisitEmptyStatement(EmptyStatement* node) {}
 void Processor::VisitReturnStatement(ReturnStatement* node) {}
 void Processor::VisitDebuggerStatement(DebuggerStatement* node) {}
@@ -236,10 +248,21 @@
     if (processor.HasStackOverflow()) return false;
 
     if (processor.result_assigned()) {
-      Isolate* isolate = info->isolate();
-      Zone* zone = isolate->zone();
-      VariableProxy* result_proxy = new(zone) VariableProxy(isolate, result);
-      body->Add(new(zone) ReturnStatement(result_proxy));
+      ASSERT(function->end_position() != RelocInfo::kNoPosition);
+      // Set the position of the assignment statement one character past the
+      // source code, such that it definitely is not in the source code range
+      // of an immediate inner scope. For example in
+      //   eval('with ({x:1}) x = 1');
+      // the end position of the function generated for executing the eval code
+      // coincides with the end of the with scope, which is the position of '1'.
+      int position = function->end_position();
+      VariableProxy* result_proxy = processor.factory()->NewVariableProxy(
+          result->name(), false, position);
+      result_proxy->BindTo(result);
+      Statement* result_statement =
+          processor.factory()->NewReturnStatement(result_proxy);
+      result_statement->set_statement_pos(position);
+      body->Add(result_statement);
     }
   }
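The Processor exists to rewrite the body of eval'd code so that the completion value of the last value-producing statement is captured and returned; the synthesized return is positioned one character past the source precisely so it cannot be attributed to an inner scope, as in the comment's example:

    eval('1; 2;');               // 2, the completion value
    eval('with ({x: 1}) x = 7'); // 7; the return statement created at
                                 // end_position lies outside the source
                                 // range of the with scope
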
 
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 26d8846..6ed4ff4 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,6 +35,7 @@
 #include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
+#include "isolate-inl.h"
 #include "mark-compact.h"
 #include "platform.h"
 #include "scopeinfo.h"
@@ -45,6 +46,8 @@
 
 // Optimization sampler constants.
 static const int kSamplerFrameCount = 2;
+
+// Constants for statistical profiler.
 static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
 
 static const int kSamplerTicksBetweenThresholdAdjustment = 32;
@@ -57,14 +60,24 @@
 
 static const int kSizeLimit = 1500;
 
+// Constants for counter based profiler.
+
+// Number of times a function has to be seen on the stack before it is
+// optimized.
+static const int kProfilerTicksBeforeOptimization = 2;
+
+// Maximum size in bytes of generated code for a function to be optimized
+// the very first time it is seen on the stack.
+static const int kMaxSizeEarlyOpt = 500;
+
 
 Atomic32 RuntimeProfiler::state_ = 0;
-// TODO(isolates): Create the semaphore lazily and clean it up when no
-// longer required.
-Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
+
+// TODO(isolates): Clean up the semaphore when it is no longer required.
+static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
 
 #ifdef DEBUG
-bool RuntimeProfiler::has_been_globally_setup_ = false;
+bool RuntimeProfiler::has_been_globally_set_up_ = false;
 #endif
 bool RuntimeProfiler::enabled_ = false;
 
@@ -81,21 +94,45 @@
 
 
 void RuntimeProfiler::GlobalSetup() {
-  ASSERT(!has_been_globally_setup_);
+  ASSERT(!has_been_globally_set_up_);
   enabled_ = V8::UseCrankshaft() && FLAG_opt;
 #ifdef DEBUG
-  has_been_globally_setup_ = true;
+  has_been_globally_set_up_ = true;
 #endif
 }
 
 
-void RuntimeProfiler::Optimize(JSFunction* function) {
+static void GetICCounts(JSFunction* function,
+                        int* ic_with_typeinfo_count,
+                        int* ic_total_count,
+                        int* percentage) {
+  *ic_total_count = 0;
+  *ic_with_typeinfo_count = 0;
+  Object* raw_info =
+      function->shared()->code()->type_feedback_info();
+  if (raw_info->IsTypeFeedbackInfo()) {
+    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
+    *ic_with_typeinfo_count = info->ic_with_typeinfo_count();
+    *ic_total_count = info->ic_total_count();
+  }
+  *percentage = *ic_total_count > 0
+      ? 100 * *ic_with_typeinfo_count / *ic_total_count
+      : 100;
+}
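A JS stand-in for the percentage GetICCounts reports (the function name is mine; C++ integer division truncates, hence the floor):

    function icPercentage(withTypeInfo, total) {
      return total > 0 ? Math.floor(100 * withTypeInfo / total) : 100;
    }
    icPercentage(30, 40);  // 75, compared against --type_info_threshold
    icPercentage(0, 0);    // 100: a function with no ICs counts as typed
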
+
+
+void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
   ASSERT(function->IsOptimizable());
   if (FLAG_trace_opt) {
     PrintF("[marking ");
     function->PrintName();
     PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
-    PrintF(" for recompilation");
+    PrintF(" for recompilation, reason: %s", reason);
+    if (FLAG_type_info_threshold > 0) {
+      int typeinfo, total, percentage;
+      GetICCounts(function, &typeinfo, &total, &percentage);
+      PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
+    }
     PrintF("]\n");
   }
 
@@ -134,15 +171,26 @@
 
   // Get the stack check stub code object to match against.  We aren't
   // prepared to generate it, but we don't expect to have to.
-  StackCheckStub check_stub;
-  Object* check_code;
-  MaybeObject* maybe_check_code = check_stub.TryGetCode();
-  if (maybe_check_code->ToObject(&check_code)) {
+  bool found_code = false;
+  Code* stack_check_code = NULL;
+#if defined(V8_TARGET_ARCH_IA32) || \
+    defined(V8_TARGET_ARCH_ARM) || \
+    defined(V8_TARGET_ARCH_MIPS)
+  if (FLAG_count_based_interrupts) {
+    InterruptStub interrupt_stub;
+    found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
+  } else  // NOLINT
+#endif
+  {  // NOLINT
+    StackCheckStub check_stub;
+    found_code = check_stub.FindCodeInCache(&stack_check_code);
+  }
+  if (found_code) {
     Code* replacement_code =
         isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
     Code* unoptimized_code = shared->code();
     Deoptimizer::PatchStackCheckCode(unoptimized_code,
-                                     Code::cast(check_code),
+                                     stack_check_code,
                                      replacement_code);
   }
 }
@@ -186,28 +234,33 @@
   JSFunction* samples[kSamplerFrameCount];
   int sample_count = 0;
   int frame_count = 0;
+  int frame_count_limit = FLAG_watch_ic_patching ? FLAG_frame_count
+                                                 : kSamplerFrameCount;
   for (JavaScriptFrameIterator it(isolate_);
-       frame_count++ < kSamplerFrameCount && !it.done();
+       frame_count++ < frame_count_limit && !it.done();
        it.Advance()) {
     JavaScriptFrame* frame = it.frame();
     JSFunction* function = JSFunction::cast(frame->function());
 
-    // Adjust threshold each time we have processed
-    // a certain number of ticks.
-    if (sampler_ticks_until_threshold_adjustment_ > 0) {
-      sampler_ticks_until_threshold_adjustment_--;
-      if (sampler_ticks_until_threshold_adjustment_ <= 0) {
-        // If the threshold is not already at the minimum
-        // modify and reset the ticks until next adjustment.
-        if (sampler_threshold_ > kSamplerThresholdMin) {
-          sampler_threshold_ -= kSamplerThresholdDelta;
-          sampler_ticks_until_threshold_adjustment_ =
-              kSamplerTicksBetweenThresholdAdjustment;
+    if (!FLAG_watch_ic_patching) {
+      // Adjust threshold each time we have processed
+      // a certain number of ticks.
+      if (sampler_ticks_until_threshold_adjustment_ > 0) {
+        sampler_ticks_until_threshold_adjustment_--;
+        if (sampler_ticks_until_threshold_adjustment_ <= 0) {
+          // If the threshold is not already at the minimum
+          // modify and reset the ticks until next adjustment.
+          if (sampler_threshold_ > kSamplerThresholdMin) {
+            sampler_threshold_ -= kSamplerThresholdDelta;
+            sampler_ticks_until_threshold_adjustment_ =
+                kSamplerTicksBetweenThresholdAdjustment;
+          }
         }
       }
     }
 
-    if (function->IsMarkedForLazyRecompilation()) {
+    if (function->IsMarkedForLazyRecompilation() &&
+        function->shared()->code()->kind() == Code::FUNCTION) {
       Code* unoptimized = function->shared()->code();
       int nesting = unoptimized->allow_osr_at_loop_nesting_level();
       if (nesting == 0) AttemptOnStackReplacement(function);
@@ -217,37 +270,101 @@
 
     // Do not record non-optimizable functions.
     if (!function->IsOptimizable()) continue;
-    samples[sample_count++] = function;
+    if (function->shared()->optimization_disabled()) continue;
 
-    int function_size = function->shared()->SourceSize();
-    int threshold_size_factor = (function_size > kSizeLimit)
-        ? sampler_threshold_size_factor_
-        : 1;
+    // Only record top-level code on top of the execution stack and
+    // avoid optimizing excessively large scripts since top-level code
+    // will be executed only once.
+    const int kMaxToplevelSourceSize = 10 * 1024;
+    if (function->shared()->is_toplevel()
+        && (frame_count > 1
+            || function->shared()->SourceSize() > kMaxToplevelSourceSize)) {
+      continue;
+    }
 
-    int threshold = sampler_threshold_ * threshold_size_factor;
+    if (FLAG_watch_ic_patching) {
+      int ticks = function->shared()->profiler_ticks();
 
-    if (LookupSample(function) >= threshold) {
-      Optimize(function);
+      if (ticks >= kProfilerTicksBeforeOptimization) {
+        int typeinfo, total, percentage;
+        GetICCounts(function, &typeinfo, &total, &percentage);
+        if (percentage >= FLAG_type_info_threshold) {
+          // If this particular function hasn't had any ICs patched for enough
+          // ticks, optimize it now.
+          Optimize(function, "hot and stable");
+        } else if (ticks >= 100) {
+          // If this function does not have enough type info, but has
+          // seen a huge number of ticks, optimize it as it is.
+          Optimize(function, "not much type info but very hot");
+        } else {
+          function->shared()->set_profiler_ticks(ticks + 1);
+          if (FLAG_trace_opt_verbose) {
+            PrintF("[not yet optimizing ");
+            function->PrintName();
+            PrintF(", not enough type info: %d/%d (%d%%)]\n",
+                   typeinfo, total, percentage);
+          }
+        }
+      } else if (!any_ic_changed_ &&
+          function->shared()->code()->instruction_size() < kMaxSizeEarlyOpt) {
+        // If no IC was patched since the last tick and this function is very
+        // small, optimistically optimize it now.
+        Optimize(function, "small function");
+      } else if (!code_generated_ &&
+          !any_ic_changed_ &&
+          total_code_generated_ > 0 &&
+          total_code_generated_ < 2000) {
+        // If no code was generated and no IC was patched since the last tick,
+        // but a little code has already been generated since last Reset(),
+        // then type info might already be stable and we can optimize now.
+        Optimize(function, "stable on startup");
+      } else {
+        function->shared()->set_profiler_ticks(ticks + 1);
+      }
+    } else {  // !FLAG_watch_ic_patching
+      samples[sample_count++] = function;
+
+      int function_size = function->shared()->SourceSize();
+      int threshold_size_factor = (function_size > kSizeLimit)
+          ? sampler_threshold_size_factor_
+          : 1;
+
+      int threshold = sampler_threshold_ * threshold_size_factor;
+
+      if (LookupSample(function) >= threshold) {
+        Optimize(function, "sampler window lookup");
+      }
     }
   }
-
-  // Add the collected functions as samples. It's important not to do
-  // this as part of collecting them because this will interfere with
-  // the sample lookup in case of recursive functions.
-  for (int i = 0; i < sample_count; i++) {
-    AddSample(samples[i], kSamplerFrameWeight[i]);
+  if (FLAG_watch_ic_patching) {
+    any_ic_changed_ = false;
+    code_generated_ = false;
+  } else {  // !FLAG_watch_ic_patching
+    // Add the collected functions as samples. It's important not to do
+    // this as part of collecting them because this will interfere with
+    // the sample lookup in case of recursive functions.
+    for (int i = 0; i < sample_count; i++) {
+      AddSample(samples[i], kSamplerFrameWeight[i]);
+    }
   }
 }
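A hedged sketch of the counter-based decision above (the constants are the ones introduced by this patch; the flag plumbing and the 'stable on startup' bookkeeping are simplified away):

    function decide(ticks, icPercent, threshold, anyICChanged, codeSize) {
      if (ticks >= 2) {                     // kProfilerTicksBeforeOptimization
        if (icPercent >= threshold) return 'hot and stable';
        if (ticks >= 100) return 'not much type info but very hot';
        return null;                        // keep ticking
      }
      if (!anyICChanged && codeSize < 500)  // kMaxSizeEarlyOpt
        return 'small function';
      return null;                          // bump profiler_ticks instead
    }
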
 
 
 void RuntimeProfiler::NotifyTick() {
+#if defined(V8_TARGET_ARCH_IA32) || \
+    defined(V8_TARGET_ARCH_ARM) || \
+    defined(V8_TARGET_ARCH_MIPS)
+  if (FLAG_count_based_interrupts) return;
+#endif
   isolate_->stack_guard()->RequestRuntimeProfilerTick();
 }
 
 
-void RuntimeProfiler::Setup() {
-  ASSERT(has_been_globally_setup_);
-  ClearSampleBuffer();
+void RuntimeProfiler::SetUp() {
+  ASSERT(has_been_globally_set_up_);
+  if (!FLAG_watch_ic_patching) {
+    ClearSampleBuffer();
+  }
   // If the ticker hasn't already started, make sure to do so to get
   // the ticks for the runtime profiler.
   if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
@@ -255,10 +372,14 @@
 
 
 void RuntimeProfiler::Reset() {
-  sampler_threshold_ = kSamplerThresholdInit;
-  sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
-  sampler_ticks_until_threshold_adjustment_ =
-      kSamplerTicksBetweenThresholdAdjustment;
+  if (FLAG_watch_ic_patching) {
+    total_code_generated_ = 0;
+  } else {  // !FLAG_watch_ic_patching
+    sampler_threshold_ = kSamplerThresholdInit;
+    sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
+    sampler_ticks_until_threshold_adjustment_ =
+        kSamplerTicksBetweenThresholdAdjustment;
+  }
 }
 
 
@@ -295,7 +416,7 @@
   // undid the decrement done by the profiler thread. Increment again
   // to get the right count of active isolates.
   NoBarrier_AtomicIncrement(&state_, 1);
-  semaphore_->Signal();
+  semaphore.Pointer()->Signal();
 }
 
 
@@ -308,7 +429,7 @@
   Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
   ASSERT(old_state >= -1);
   if (old_state != 0) return false;
-  semaphore_->Wait();
+  semaphore.Pointer()->Wait();
   return true;
 }
 
@@ -324,7 +445,7 @@
   if (new_state == 0) {
     // The profiler thread is waiting. Wake it up. It must check for
     // stop conditions before attempting to wait again.
-    semaphore_->Signal();
+    semaphore.Pointer()->Signal();
   }
   thread->Join();
   // The profiler thread is now stopped. Undo the increment in case it
@@ -338,7 +459,8 @@
 void RuntimeProfiler::RemoveDeadSamples() {
   for (int i = 0; i < kSamplerWindowSize; i++) {
     Object* function = sampler_window_[i];
-    if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
+    if (function != NULL &&
+        !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
       sampler_window_[i] = NULL;
     }
   }
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index 15c2097..e338849 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -46,7 +46,7 @@
   static void GlobalSetup();
 
   static inline bool IsEnabled() {
-    ASSERT(has_been_globally_setup_);
+    ASSERT(has_been_globally_set_up_);
     return enabled_;
   }
 
@@ -54,13 +54,22 @@
 
   void NotifyTick();
 
-  void Setup();
+  void SetUp();
   void Reset();
   void TearDown();
 
   Object** SamplerWindowAddress();
   int SamplerWindowSize();
 
+  void NotifyICChanged() { any_ic_changed_ = true; }
+
+  void NotifyCodeGenerated(int generated_code_size) {
+    if (FLAG_watch_ic_patching) {
+      code_generated_ = true;
+      total_code_generated_ += generated_code_size;
+    }
+  }
+
   // Rate limiting support.
 
   // VM thread interface.
@@ -92,14 +101,14 @@
   void RemoveDeadSamples();
   void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
 
+  void AttemptOnStackReplacement(JSFunction* function);
+
  private:
   static const int kSamplerWindowSize = 16;
 
   static void HandleWakeUp(Isolate* isolate);
 
-  void Optimize(JSFunction* function);
-
-  void AttemptOnStackReplacement(JSFunction* function);
+  void Optimize(JSFunction* function, const char* reason);
 
   void ClearSampleBuffer();
 
@@ -119,14 +128,17 @@
   int sampler_window_position_;
   int sampler_window_weight_[kSamplerWindowSize];
 
+  bool any_ic_changed_;
+  bool code_generated_;
+  int total_code_generated_;
+
   // Possible state values:
   //   -1            => the profiler thread is waiting on the semaphore
   //   0 or positive => the number of isolates running JavaScript code.
   static Atomic32 state_;
-  static Semaphore* semaphore_;
 
 #ifdef DEBUG
-  static bool has_been_globally_setup_;
+  static bool has_been_globally_set_up_;
 #endif
   static bool enabled_;
 };
diff --git a/src/runtime.cc b/src/runtime.cc
index b1c4c10..320ab59 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -40,8 +40,10 @@
 #include "dateparser-inl.h"
 #include "debug.h"
 #include "deoptimizer.h"
+#include "date.h"
 #include "execution.h"
 #include "global-handles.h"
+#include "isolate-inl.h"
 #include "jsregexp.h"
 #include "json-parser.h"
 #include "liveedit.h"
@@ -68,20 +70,20 @@
 // Cast the given object to a value of the specified type and store
 // it in a variable with the given name.  If the object is not of the
 // expected type call IllegalOperation and return.
-#define CONVERT_CHECKED(Type, name, obj)                             \
-  RUNTIME_ASSERT(obj->Is##Type());                                   \
-  Type* name = Type::cast(obj);
-
 #define CONVERT_ARG_CHECKED(Type, name, index)                       \
   RUNTIME_ASSERT(args[index]->Is##Type());                           \
+  Type* name = Type::cast(args[index]);
+
+#define CONVERT_ARG_HANDLE_CHECKED(Type, name, index)                \
+  RUNTIME_ASSERT(args[index]->Is##Type());                           \
   Handle<Type> name = args.at<Type>(index);
 
 // Cast the given object to a boolean and store it in a variable with
 // the given name.  If the object is not a boolean call IllegalOperation
 // and return.
-#define CONVERT_BOOLEAN_CHECKED(name, obj)                            \
-  RUNTIME_ASSERT(obj->IsBoolean());                                   \
-  bool name = (obj)->IsTrue();
+#define CONVERT_BOOLEAN_ARG_CHECKED(name, index)                     \
+  RUNTIME_ASSERT(args[index]->IsBoolean());                          \
+  bool name = args[index]->IsTrue();
 
 // Cast the given argument to a Smi and store its value in an int variable
 // with the given name.  If the argument is not a Smi call IllegalOperation
@@ -105,6 +107,35 @@
   type name = NumberTo##Type(obj);
 
 
+// Cast the given argument to PropertyDetails and store its value in a
+// variable with the given name.  If the argument is not a Smi call
+// IllegalOperation and return.
+#define CONVERT_PROPERTY_DETAILS_CHECKED(name, index)                \
+  RUNTIME_ASSERT(args[index]->IsSmi());                              \
+  PropertyDetails name = PropertyDetails(Smi::cast(args[index]));
+
+
+// Assert that the given argument has a valid value for a StrictModeFlag
+// and store it in a StrictModeFlag variable with the given name.
+#define CONVERT_STRICT_MODE_ARG_CHECKED(name, index)                 \
+  RUNTIME_ASSERT(args[index]->IsSmi());                              \
+  RUNTIME_ASSERT(args.smi_at(index) == kStrictMode ||                \
+                 args.smi_at(index) == kNonStrictMode);              \
+  StrictModeFlag name =                                              \
+      static_cast<StrictModeFlag>(args.smi_at(index));
+
+
+// Assert that the given argument has a valid value for a LanguageMode
+// and store it in a LanguageMode variable with the given name.
+#define CONVERT_LANGUAGE_MODE_ARG(name, index)                       \
+  ASSERT(args[index]->IsSmi());                                      \
+  ASSERT(args.smi_at(index) == CLASSIC_MODE ||                       \
+         args.smi_at(index) == STRICT_MODE ||                        \
+         args.smi_at(index) == EXTENDED_MODE);                       \
+  LanguageMode name =                                                \
+      static_cast<LanguageMode>(args.smi_at(index));
+
+
 MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
                                                    JSObject* boilerplate) {
   StackLimitCheck check(isolate);
@@ -143,7 +174,7 @@
     }
   } else {
     { MaybeObject* maybe_result =
-          heap->AllocateFixedArray(copy->NumberOfLocalProperties(NONE));
+          heap->AllocateFixedArray(copy->NumberOfLocalProperties());
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     FixedArray* names = FixedArray::cast(result);
@@ -177,6 +208,7 @@
   // Pixel elements cannot be created using an object literal.
   ASSERT(!copy->HasExternalArrayElements());
   switch (copy->GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       FixedArray* elements = FixedArray::cast(copy->elements());
       if (elements->map() == heap->fixed_cow_array_map()) {
@@ -189,6 +221,9 @@
       } else {
         for (int i = 0; i < elements->length(); i++) {
           Object* value = elements->get(i);
+          ASSERT(value->IsSmi() ||
+                 value->IsTheHole() ||
+                 (copy->GetElementsKind() == FAST_ELEMENTS));
           if (value->IsJSObject()) {
             JSObject* js_object = JSObject::cast(value);
             { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
@@ -240,18 +275,6 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneLiteralBoilerplate) {
-  CONVERT_CHECKED(JSObject, boilerplate, args[0]);
-  return DeepCopyBoilerplate(isolate, boilerplate);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneShallowLiteralBoilerplate) {
-  CONVERT_CHECKED(JSObject, boilerplate, args[0]);
-  return isolate->heap()->CopyJSObject(boilerplate);
-}
-
-
 static Handle<Map> ComputeObjectLiteralMap(
     Handle<Context> context,
     Handle<FixedArray> constant_properties,
@@ -259,45 +282,43 @@
   Isolate* isolate = context->GetIsolate();
   int properties_length = constant_properties->length();
   int number_of_properties = properties_length / 2;
-  if (FLAG_canonicalize_object_literal_maps) {
-    // Check that there are only symbols and array indices among keys.
-    int number_of_symbol_keys = 0;
-    for (int p = 0; p != properties_length; p += 2) {
-      Object* key = constant_properties->get(p);
-      uint32_t element_index = 0;
-      if (key->IsSymbol()) {
-        number_of_symbol_keys++;
-      } else if (key->ToArrayIndex(&element_index)) {
-        // An index key does not require space in the property backing store.
-        number_of_properties--;
-      } else {
-        // Bail out as a non-symbol non-index key makes caching impossible.
-        // ASSERT to make sure that the if condition after the loop is false.
-        ASSERT(number_of_symbol_keys != number_of_properties);
-        break;
-      }
+  // Check that there are only symbols and array indices among keys.
+  int number_of_symbol_keys = 0;
+  for (int p = 0; p != properties_length; p += 2) {
+    Object* key = constant_properties->get(p);
+    uint32_t element_index = 0;
+    if (key->IsSymbol()) {
+      number_of_symbol_keys++;
+    } else if (key->ToArrayIndex(&element_index)) {
+      // An index key does not require space in the property backing store.
+      number_of_properties--;
+    } else {
+      // Bail out as a non-symbol non-index key makes caching impossible.
+      // ASSERT to make sure that the if condition after the loop is false.
+      ASSERT(number_of_symbol_keys != number_of_properties);
+      break;
     }
-    // If we only have symbols and array indices among keys then we can
-    // use the map cache in the global context.
-    const int kMaxKeys = 10;
-    if ((number_of_symbol_keys == number_of_properties) &&
-        (number_of_symbol_keys < kMaxKeys)) {
-      // Create the fixed array with the key.
-      Handle<FixedArray> keys =
-          isolate->factory()->NewFixedArray(number_of_symbol_keys);
-      if (number_of_symbol_keys > 0) {
-        int index = 0;
-        for (int p = 0; p < properties_length; p += 2) {
-          Object* key = constant_properties->get(p);
-          if (key->IsSymbol()) {
-            keys->set(index++, key);
-          }
+  }
+  // If we only have symbols and array indices among keys then we can
+  // use the map cache in the global context.
+  const int kMaxKeys = 10;
+  if ((number_of_symbol_keys == number_of_properties) &&
+      (number_of_symbol_keys < kMaxKeys)) {
+    // Create the fixed array with the key.
+    Handle<FixedArray> keys =
+        isolate->factory()->NewFixedArray(number_of_symbol_keys);
+    if (number_of_symbol_keys > 0) {
+      int index = 0;
+      for (int p = 0; p < properties_length; p += 2) {
+        Object* key = constant_properties->get(p);
+        if (key->IsSymbol()) {
+          keys->set(index++, key);
         }
-        ASSERT(index == number_of_symbol_keys);
       }
-      *is_result_from_cache = true;
-      return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
+      ASSERT(index == number_of_symbol_keys);
     }
+    *is_result_from_cache = true;
+    return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
   }
   *is_result_from_cache = false;
   return isolate->factory()->CopyMap(
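In this code 'symbols' means interned strings, not ES Symbols. The practical effect of the cache is that small object literals whose keys are all names or array indices share one map (hidden class); a sketch of what does and does not qualify:

    var a = {x: 1, y: 2};     // cached map
    var b = {x: 3, y: 4};     // same cached map as a
    var c = {10: 'a', x: 1};  // still cached: the index key needs no slot
    // More than ten named keys, or a key such as 1.5 (neither a symbol
    // nor an array index), falls through to the CopyMap path below.
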
@@ -341,7 +362,7 @@
   Handle<JSObject> boilerplate = isolate->factory()->NewJSObjectFromMap(map);
 
   // Normalize the elements of the boilerplate to save space if needed.
-  if (!should_have_fast_elements) NormalizeElements(boilerplate);
+  if (!should_have_fast_elements) JSObject::NormalizeElements(boilerplate);
 
   // Add the constant properties to the boilerplate.
   int length = constant_properties->length();
@@ -351,7 +372,8 @@
     // Normalize the properties of the object to avoid n^2 behavior
     // when extending the object with multiple properties. Indicate the
     // number of properties to be added.
-    NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
+    JSObject::NormalizeProperties(
+        boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
   }
 
   for (int index = 0; index < length; index +=2) {
@@ -369,22 +391,18 @@
     if (key->IsSymbol()) {
       if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
         // Array index as string (uint32).
-        result = SetOwnElement(boilerplate,
-                               element_index,
-                               value,
-                               kNonStrictMode);
+        result = JSObject::SetOwnElement(
+            boilerplate, element_index, value, kNonStrictMode);
       } else {
         Handle<String> name(String::cast(*key));
         ASSERT(!name->AsArrayIndex(&element_index));
-        result = SetLocalPropertyIgnoreAttributes(boilerplate, name,
-                                                  value, NONE);
+        result = JSObject::SetLocalPropertyIgnoreAttributes(
+            boilerplate, name, value, NONE);
       }
     } else if (key->ToArrayIndex(&element_index)) {
       // Array index (uint32).
-      result = SetOwnElement(boilerplate,
-                             element_index,
-                             value,
-                             kNonStrictMode);
+      result = JSObject::SetOwnElement(
+          boilerplate, element_index, value, kNonStrictMode);
     } else {
       // Non-uint32 number.
       ASSERT(key->IsNumber());
@@ -394,8 +412,8 @@
       const char* str = DoubleToCString(num, buffer);
       Handle<String> name =
           isolate->factory()->NewStringFromAscii(CStrVector(str));
-      result = SetLocalPropertyIgnoreAttributes(boilerplate, name,
-                                                value, NONE);
+      result = JSObject::SetLocalPropertyIgnoreAttributes(
+          boilerplate, name, value, NONE);
     }
     // If setting the property on the boilerplate throws an
     // exception, the exception is converted to an empty handle in
@@ -409,52 +427,110 @@
   // computed properties have been assigned so that we can generate
   // constant function properties.
   if (should_transform && !has_function_literal) {
-    TransformToFastProperties(boilerplate,
-                              boilerplate->map()->unused_property_fields());
+    JSObject::TransformToFastProperties(
+        boilerplate, boilerplate->map()->unused_property_fields());
   }
 
   return boilerplate;
 }
 
 
-static Handle<Object> CreateArrayLiteralBoilerplate(
+MaybeObject* TransitionElements(Handle<Object> object,
+                                ElementsKind to_kind,
+                                Isolate* isolate) {
+  HandleScope scope(isolate);
+  if (!object->IsJSObject()) return isolate->ThrowIllegalOperation();
+  ElementsKind from_kind =
+      Handle<JSObject>::cast(object)->map()->elements_kind();
+  if (Map::IsValidElementsTransition(from_kind, to_kind)) {
+    Handle<Object> result = JSObject::TransitionElementsKind(
+        Handle<JSObject>::cast(object), to_kind);
+    if (result.is_null()) return isolate->ThrowIllegalOperation();
+    return *result;
+  }
+  return isolate->ThrowIllegalOperation();
+}
+
+
+static const int kSmiOnlyLiteralMinimumLength = 1024;
+
+
+Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
     Isolate* isolate,
     Handle<FixedArray> literals,
     Handle<FixedArray> elements) {
   // Create the JSArray.
   Handle<JSFunction> constructor(
       JSFunction::GlobalContextFromLiterals(*literals)->array_function());
-  Handle<Object> object = isolate->factory()->NewJSObject(constructor);
+  Handle<JSArray> object =
+      Handle<JSArray>::cast(isolate->factory()->NewJSObject(constructor));
 
-  const bool is_cow =
-      (elements->map() == isolate->heap()->fixed_cow_array_map());
-  Handle<FixedArray> copied_elements =
-      is_cow ? elements : isolate->factory()->CopyFixedArray(elements);
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(elements->get(0))->value());
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(elements->get(1)));
 
-  Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements);
-  if (is_cow) {
-#ifdef DEBUG
-    // Copy-on-write arrays must be shallow (and simple).
-    for (int i = 0; i < content->length(); i++) {
-      ASSERT(!content->get(i)->IsFixedArray());
-    }
-#endif
+  Context* global_context = isolate->context()->global_context();
+  if (constant_elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    object->set_map(Map::cast(global_context->smi_js_array_map()));
+  } else if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) {
+    object->set_map(Map::cast(global_context->double_js_array_map()));
   } else {
-    for (int i = 0; i < content->length(); i++) {
-      if (content->get(i)->IsFixedArray()) {
-        // The value contains the constant_properties of a
-        // simple object or array literal.
-        Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
-        Handle<Object> result =
-            CreateLiteralBoilerplate(isolate, literals, fa);
-        if (result.is_null()) return result;
-        content->set(i, *result);
+    object->set_map(Map::cast(global_context->object_js_array_map()));
+  }
+
+  Handle<FixedArrayBase> copied_elements_values;
+  if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) {
+    ASSERT(FLAG_smi_only_arrays);
+    copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
+        Handle<FixedDoubleArray>::cast(constant_elements_values));
+  } else {
+    ASSERT(constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           constant_elements_kind == FAST_ELEMENTS);
+    const bool is_cow =
+        (constant_elements_values->map() ==
+         isolate->heap()->fixed_cow_array_map());
+    if (is_cow) {
+      copied_elements_values = constant_elements_values;
+#if DEBUG
+      Handle<FixedArray> fixed_array_values =
+          Handle<FixedArray>::cast(copied_elements_values);
+      for (int i = 0; i < fixed_array_values->length(); i++) {
+        ASSERT(!fixed_array_values->get(i)->IsFixedArray());
+      }
+#endif
+    } else {
+      Handle<FixedArray> fixed_array_values =
+          Handle<FixedArray>::cast(constant_elements_values);
+      Handle<FixedArray> fixed_array_values_copy =
+          isolate->factory()->CopyFixedArray(fixed_array_values);
+      copied_elements_values = fixed_array_values_copy;
+      for (int i = 0; i < fixed_array_values->length(); i++) {
+        Object* current = fixed_array_values->get(i);
+        if (current->IsFixedArray()) {
+          // The value contains the constant_properties of a
+          // simple object or array literal.
+          Handle<FixedArray> fa(FixedArray::cast(fixed_array_values->get(i)));
+          Handle<Object> result =
+              CreateLiteralBoilerplate(isolate, literals, fa);
+          if (result.is_null()) return result;
+          fixed_array_values_copy->set(i, *result);
+        }
       }
     }
   }
+  object->set_elements(*copied_elements_values);
+  object->set_length(Smi::FromInt(copied_elements_values->length()));
 
-  // Set the elements.
-  Handle<JSArray>::cast(object)->SetContent(*content);
+  // Ensure that the boilerplate object has FAST_ELEMENTS, unless smi-only
+  // arrays are enabled or the literal meets the smi-only size threshold.
+  if (!FLAG_smi_only_arrays &&
+      constant_elements_values->length() < kSmiOnlyLiteralMinimumLength) {
+    if (object->GetElementsKind() != FAST_ELEMENTS) {
+      CHECK(!TransitionElements(object, FAST_ELEMENTS, isolate)->IsFailure());
+    }
+  }
+
   return object;
 }
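The boilerplate now records the literal's ElementsKind and picks the matching pre-transitioned array map; a sketch of the kinds involved, assuming --smi-only-arrays:

    var a = [1, 2, 3];   // FAST_SMI_ONLY_ELEMENTS boilerplate
    a[0] = 1.5;          // the array transitions to FAST_DOUBLE_ELEMENTS
    var b = [1.5, 2.5];  // starts out as FAST_DOUBLE_ELEMENTS
    var c = [{}, 'x'];   // FAST_ELEMENTS
    // Without the flag, literals shorter than kSmiOnlyLiteralMinimumLength
    // (1024) are transitioned back to FAST_ELEMENTS right here.
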
 
@@ -479,7 +555,8 @@
                                             false,
                                             kHasNoFunctionLiteral);
     case CompileTimeValue::ARRAY_LITERAL:
-      return CreateArrayLiteralBoilerplate(isolate, literals, elements);
+      return Runtime::CreateArrayLiteralBoilerplate(
+          isolate, literals, elements);
     default:
       UNREACHABLE();
       return Handle<Object>::null();
@@ -487,34 +564,12 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralBoilerplate) {
-  // Takes a FixedArray of elements containing the literal elements of
-  // the array literal and produces JSArray with those elements.
-  // Additionally takes the literals array of the surrounding function
-  // which contains the context from which to get the Array function
-  // to use for creating the array literal.
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
-  CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
-
-  Handle<Object> object =
-      CreateArrayLiteralBoilerplate(isolate, literals, elements);
-  if (object.is_null()) return Failure::Exception();
-
-  // Update the functions literal and return the boilerplate.
-  literals->set(literals_index, *object);
-  return *object;
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
-  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
   CONVERT_SMI_ARG_CHECKED(flags, 3);
   bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
   bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
@@ -538,9 +593,9 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
-  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
   CONVERT_SMI_ARG_CHECKED(flags, 3);
   bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
   bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
@@ -564,14 +619,15 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
 
   // Check if boilerplate exists. If not, create it first.
   Handle<Object> boilerplate(literals->get(literals_index), isolate);
   if (*boilerplate == isolate->heap()->undefined_value()) {
-    boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
+    boilerplate =
+        Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
     if (boilerplate.is_null()) return Failure::Exception();
     // Update the functions literal and return the boilerplate.
     literals->set(literals_index, *boilerplate);
@@ -583,14 +639,16 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
 
   // Check if boilerplate exists. If not, create it first.
   Handle<Object> boilerplate(literals->get(literals_index), isolate);
   if (*boilerplate == isolate->heap()->undefined_value()) {
-    boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
+    ASSERT(*elements != isolate->heap()->empty_fixed_array());
+    boilerplate =
+        Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
     if (boilerplate.is_null()) return Failure::Exception();
     // Update the functions literal and return the boilerplate.
     literals->set(literals_index, *boilerplate);
@@ -642,37 +700,113 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSProxy, proxy, args[0]);
+  CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
   return proxy->handler();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunctionProxy, proxy, args[0]);
+  CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
   return proxy->call_trap();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunctionProxy, proxy, args[0]);
+  CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
   return proxy->construct_trap();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSProxy, proxy, args[0]);
+  CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
   proxy->Fix();
   return isolate->heap()->undefined_value();
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+  Handle<ObjectHashSet> table = isolate->factory()->NewObjectHashSet(0);
+  holder->set_table(*table);
+  return *holder;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+  Handle<Object> key(args[1]);
+  Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
+  table = ObjectHashSetAdd(table, key);
+  holder->set_table(*table);
+  return isolate->heap()->undefined_symbol();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHas) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+  Handle<Object> key(args[1]);
+  Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
+  return isolate->heap()->ToBoolean(table->Contains(*key));
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+  Handle<Object> key(args[1]);
+  Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
+  table = ObjectHashSetRemove(table, key);
+  holder->set_table(*table);
+  return isolate->heap()->undefined_symbol();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapInitialize) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+  Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
+  holder->set_table(*table);
+  return *holder;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+  Handle<Object> key(args[1]);
+  return ObjectHashTable::cast(holder->table())->Lookup(*key);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+  Handle<Object> key(args[1]);
+  Handle<Object> value(args[2]);
+  Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+  Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
+  holder->set_table(*new_table);
+  return *value;
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
   ASSERT(weakmap->map()->inobject_properties() == 0);
   Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
   weakmap->set_table(*table);
@@ -684,22 +818,19 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
-  // TODO(mstarzinger): Currently we cannot use JSProxy objects as keys
-  // because they cannot be cast to JSObject to get an identity hash code.
-  CONVERT_ARG_CHECKED(JSObject, key, 1);
-  return weakmap->table()->Lookup(*key);
+  CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
+  return ObjectHashTable::cast(weakmap->table())->Lookup(*key);
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
-  // TODO(mstarzinger): See Runtime_WeakMapGet above.
-  CONVERT_ARG_CHECKED(JSObject, key, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
   Handle<Object> value(args[2]);
-  Handle<ObjectHashTable> table(weakmap->table());
+  Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
   Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
   weakmap->set_table(*new_table);
   return *value;
@@ -718,7 +849,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSReceiver, input_obj, args[0]);
+  CONVERT_ARG_CHECKED(JSReceiver, input_obj, 0);
   Object* obj = input_obj;
   // We don't expect access checks to be needed on JSProxy objects.
   ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
@@ -752,57 +883,6 @@
 }
 
 
-// Inserts an object as the hidden prototype of another object.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHiddenPrototype) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSObject, jsobject, args[0]);
-  CONVERT_CHECKED(JSObject, proto, args[1]);
-
-  // Sanity checks.  The old prototype (that we are replacing) could
-  // theoretically be null, but if it is not null then check that we
-  // didn't already install a hidden prototype here.
-  RUNTIME_ASSERT(!jsobject->GetPrototype()->IsHeapObject() ||
-    !HeapObject::cast(jsobject->GetPrototype())->map()->is_hidden_prototype());
-  RUNTIME_ASSERT(!proto->map()->is_hidden_prototype());
-
-  // Allocate up front before we start altering state in case we get a GC.
-  Object* map_or_failure;
-  { MaybeObject* maybe_map_or_failure = proto->map()->CopyDropTransitions();
-    if (!maybe_map_or_failure->ToObject(&map_or_failure)) {
-      return maybe_map_or_failure;
-    }
-  }
-  Map* new_proto_map = Map::cast(map_or_failure);
-
-  { MaybeObject* maybe_map_or_failure = jsobject->map()->CopyDropTransitions();
-    if (!maybe_map_or_failure->ToObject(&map_or_failure)) {
-      return maybe_map_or_failure;
-    }
-  }
-  Map* new_map = Map::cast(map_or_failure);
-
-  // Set proto's prototype to be the old prototype of the object.
-  new_proto_map->set_prototype(jsobject->GetPrototype());
-  proto->set_map(new_proto_map);
-  new_proto_map->set_is_hidden_prototype();
-
-  // Set the object's prototype to proto.
-  new_map->set_prototype(proto);
-  jsobject->set_map(new_map);
-
-  return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConstructCall) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 0);
-  JavaScriptFrameIterator it(isolate);
-  return isolate->heap()->ToBoolean(it.frame()->IsConstructor());
-}
-
-
 // Recursively traverses hidden prototypes if property is not found
 static void GetOwnPropertyImplementation(JSObject* obj,
                                          String* name,
@@ -916,23 +996,14 @@
   DESCRIPTOR_SIZE
 };
 
-// Returns an array with the property description:
-//  if args[1] is not a property on args[0]
-//          returns undefined
-//  if args[1] is a data property on args[0]
-//         [false, value, Writeable, Enumerable, Configurable]
-//  if args[1] is an accessor on args[0]
-//         [true, GetFunction, SetFunction, Enumerable, Configurable]
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
-  ASSERT(args.length() == 2);
+
+static MaybeObject* GetOwnProperty(Isolate* isolate,
+                                   Handle<JSObject> obj,
+                                   Handle<String> name) {
   Heap* heap = isolate->heap();
-  HandleScope scope(isolate);
   Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
   Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms);
-  LookupResult result;
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
-
+  LookupResult result(isolate);
   // This could be an element.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
@@ -952,7 +1023,7 @@
         elms->set(IS_ACCESSOR_INDEX, heap->false_value());
         elms->set(VALUE_INDEX, *substr);
         elms->set(WRITABLE_INDEX, heap->false_value());
-        elms->set(ENUMERABLE_INDEX,  heap->false_value());
+        elms->set(ENUMERABLE_INDEX,  heap->true_value());
         elms->set(CONFIGURABLE_INDEX, heap->false_value());
         return *desc;
       }
@@ -960,7 +1031,7 @@
       case JSObject::INTERCEPTED_ELEMENT:
       case JSObject::FAST_ELEMENT: {
         elms->set(IS_ACCESSOR_INDEX, heap->false_value());
-        Handle<Object> value = GetElement(obj, index);
+        Handle<Object> value = Object::GetElement(obj, index);
         RETURN_IF_EMPTY_HANDLE(isolate, value);
         elms->set(VALUE_INDEX, *value);
         elms->set(WRITABLE_INDEX, heap->true_value());
@@ -990,21 +1061,21 @@
         switch (details.type()) {
           case CALLBACKS: {
             // This is an accessor property with getter and/or setter.
-            FixedArray* callbacks =
-                FixedArray::cast(dictionary->ValueAt(entry));
+            AccessorPair* accessors =
+                AccessorPair::cast(dictionary->ValueAt(entry));
             elms->set(IS_ACCESSOR_INDEX, heap->true_value());
             if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
-              elms->set(GETTER_INDEX, callbacks->get(0));
+              elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
             }
             if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
-              elms->set(SETTER_INDEX, callbacks->get(1));
+              elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
             }
             break;
           }
           case NORMAL: {
             // This is a data property.
             elms->set(IS_ACCESSOR_INDEX, heap->false_value());
-            Handle<Object> value = GetElement(obj, index);
+            Handle<Object> value = Object::GetElement(obj, index);
             ASSERT(!value.is_null());
             elms->set(VALUE_INDEX, *value);
             elms->set(WRITABLE_INDEX, heap->ToBoolean(!details.IsReadOnly()));
@@ -1036,18 +1107,18 @@
   elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete()));
 
   bool is_js_accessor = (result.type() == CALLBACKS) &&
-                        (result.GetCallbackObject()->IsFixedArray());
+                        (result.GetCallbackObject()->IsAccessorPair());
 
   if (is_js_accessor) {
     // __defineGetter__/__defineSetter__ callback.
     elms->set(IS_ACCESSOR_INDEX, heap->true_value());
 
-    FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
+    AccessorPair* accessors = AccessorPair::cast(result.GetCallbackObject());
     if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
-      elms->set(GETTER_INDEX, structure->get(0));
+      elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
     }
     if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
-      elms->set(SETTER_INDEX, structure->get(1));
+      elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
     }
   } else {
     elms->set(IS_ACCESSOR_INDEX, heap->false_value());
@@ -1066,16 +1137,32 @@
 }
 
 
+// Returns an array with the property description:
+//  if args[1] is not a property on args[0]
+//          returns undefined
+//  if args[1] is a data property on args[0]
+//         [false, value, Writable, Enumerable, Configurable]
+//  if args[1] is an accessor on args[0]
+//         [true, GetFunction, SetFunction, Enumerable, Configurable]
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
+  ASSERT(args.length() == 2);
+  HandleScope scope(isolate);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+  return GetOwnProperty(isolate, obj, name);
+}
+
+
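Both GetOwnProperty paths above now pull getters and setters out of an AccessorPair instead of a bare two-element FixedArray. The accessor they call behaves roughly like this (paraphrased; the real definition lives in objects.cc):

    // GetComponent never leaks the hole marker for a missing accessor;
    // callers see undefined, which is what a property descriptor should
    // report for an absent getter or setter.
    Object* AccessorPair::GetComponent(AccessorComponent component) {
      Object* accessor = (component == ACCESSOR_GETTER) ? getter() : setter();
      return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
    }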
 RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSObject, obj, args[0]);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
   return obj->PreventExtensions();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSObject, obj, args[0]);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
   if (obj->IsJSGlobalProxy()) {
     Object* proto = obj->GetPrototype();
     if (proto->IsNull()) return isolate->heap()->false_value();
@@ -1089,9 +1176,9 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(JSRegExp, re, 0);
-  CONVERT_ARG_CHECKED(String, pattern, 1);
-  CONVERT_ARG_CHECKED(String, flags, 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
   Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
   if (result.is_null()) return Failure::Exception();
   return *result;
@@ -1101,7 +1188,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(FunctionTemplateInfo, data, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FunctionTemplateInfo, data, 0);
   return *isolate->factory()->CreateApiFunction(data);
 }
 
@@ -1116,9 +1203,8 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(HeapObject, templ, args[0]);
-  CONVERT_CHECKED(Smi, field, args[1]);
-  int index = field->value();
+  CONVERT_ARG_CHECKED(HeapObject, templ, 0);
+  CONVERT_SMI_ARG_CHECKED(index, 1);
   int offset = index * kPointerSize + HeapObject::kHeaderSize;
   InstanceType type = templ->map()->instance_type();
   RUNTIME_ASSERT(type ==  FUNCTION_TEMPLATE_INFO_TYPE ||
@@ -1135,7 +1221,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(HeapObject, object, args[0]);
+  CONVERT_ARG_CHECKED(HeapObject, object, 0);
   Map* old_map = object->map();
   bool needs_access_checks = old_map->is_access_check_needed();
   if (needs_access_checks) {
@@ -1154,7 +1240,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(HeapObject, object, args[0]);
+  CONVERT_ARG_CHECKED(HeapObject, object, 0);
   Map* old_map = object->map();
   if (!old_map->is_access_check_needed()) {
     // Copy map so it won't interfere constructor's initial map.
@@ -1190,7 +1276,7 @@
       isolate->context()->global());
 
   Handle<Context> context = args.at<Context>(0);
-  CONVERT_ARG_CHECKED(FixedArray, pairs, 1);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 1);
   CONVERT_SMI_ARG_CHECKED(flags, 2);
 
   // Traverse the name/value pairs and set the properties.
@@ -1208,49 +1294,20 @@
     if (value->IsUndefined() || is_const_property) {
       // Lookup the property in the global object, and don't set the
       // value of the variable if the property is already there.
-      LookupResult lookup;
+      LookupResult lookup(isolate);
       global->Lookup(*name, &lookup);
       if (lookup.IsProperty()) {
-        // Determine if the property is local by comparing the holder
-        // against the global object. The information will be used to
-        // avoid throwing re-declaration errors when declaring
-        // variables or constants that exist in the prototype chain.
-        bool is_local = (*global == lookup.holder());
-        // Get the property attributes and determine if the property is
-        // read-only.
-        PropertyAttributes attributes = global->GetPropertyAttribute(*name);
-        bool is_read_only = (attributes & READ_ONLY) != 0;
-        if (lookup.type() == INTERCEPTOR) {
-          // If the interceptor says the property is there, we
-          // just return undefined without overwriting the property.
-          // Otherwise, we continue to setting the property.
-          if (attributes != ABSENT) {
-            // Check if the existing property conflicts with regards to const.
-            if (is_local && (is_read_only || is_const_property)) {
-              const char* type = (is_read_only) ? "const" : "var";
-              return ThrowRedeclarationError(isolate, type, name);
-            };
-            // The property already exists without conflicting: Go to
-            // the next declaration.
-            continue;
-          }
-          // Fall-through and introduce the absent property by using
-          // SetProperty.
-        } else {
-          // For const properties, we treat a callback with this name
-          // even in the prototype as a conflicting declaration.
-          if (is_const_property && (lookup.type() == CALLBACKS)) {
-            return ThrowRedeclarationError(isolate, "const", name);
-          }
-          // Otherwise, we check for locally conflicting declarations.
-          if (is_local && (is_read_only || is_const_property)) {
-            const char* type = (is_read_only) ? "const" : "var";
-            return ThrowRedeclarationError(isolate, type, name);
-          }
-          // The property already exists without conflicting: Go to
-          // the next declaration.
+        // We found an existing property. Unless it was an interceptor
+        // that claims the property is absent, skip this declaration.
+        if (lookup.type() != INTERCEPTOR) {
           continue;
         }
+        PropertyAttributes attributes = global->GetPropertyAttribute(*name);
+        if (attributes != ABSENT) {
+          continue;
+        }
+        // Fall-through and introduce the absent property by using
+        // SetProperty.
       }
     } else {
       is_function_declaration = true;
@@ -1264,36 +1321,24 @@
       value = function;
     }
 
-    LookupResult lookup;
+    LookupResult lookup(isolate);
     global->LocalLookup(*name, &lookup);
 
-    // There's a local property that we need to overwrite because
-    // we're either declaring a function or there's an interceptor
-    // that claims the property is absent.
-    //
-    // Check for conflicting re-declarations. We cannot have
-    // conflicting types in case of intercepted properties because
-    // they are absent.
-    if (lookup.IsProperty() &&
-        (lookup.type() != INTERCEPTOR) &&
-        (lookup.IsReadOnly() || is_const_property)) {
-      const char* type = (lookup.IsReadOnly()) ? "const" : "var";
-      return ThrowRedeclarationError(isolate, type, name);
-    }
-
     // Compute the property attributes. According to ECMA-262, section
     // 13, page 71, the property must be read-only and
     // non-deletable. However, neither SpiderMonkey nor KJS creates the
     // property as read-only, so we don't either.
     int attr = NONE;
-    if ((flags & kDeclareGlobalsEvalFlag) == 0) {
+    if (!DeclareGlobalsEvalFlag::decode(flags)) {
       attr |= DONT_DELETE;
     }
-    bool is_native = (flags & kDeclareGlobalsNativeFlag) != 0;
+    bool is_native = DeclareGlobalsNativeFlag::decode(flags);
     if (is_const_property || (is_native && is_function_declaration)) {
       attr |= READ_ONLY;
     }
 
+    LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
+
     // Safari does not allow the invocation of callback setters for
     // function declarations. To mimic this behavior, we do not allow
     // the invocation of setters for function values. This makes a
@@ -1301,28 +1346,33 @@
     // handlers such as "function onload() {}". Firefox does call the
     // onload setter in those case and Safari does not. We follow
     // Safari for compatibility.
-    if (value->IsJSFunction()) {
-      // Do not change DONT_DELETE to false from true.
+    if (is_function_declaration) {
       if (lookup.IsProperty() && (lookup.type() != INTERCEPTOR)) {
+        // Do not overwrite READ_ONLY properties.
+        if (lookup.GetAttributes() & READ_ONLY) {
+          if (language_mode != CLASSIC_MODE) {
+            Handle<Object> args[] = { name };
+            return isolate->Throw(*isolate->factory()->NewTypeError(
+                "strict_cannot_assign", HandleVector(args, ARRAY_SIZE(args))));
+          }
+          continue;
+        }
+        // Do not change DONT_DELETE to false from true.
         attr |= lookup.GetAttributes() & DONT_DELETE;
       }
       PropertyAttributes attributes = static_cast<PropertyAttributes>(attr);
 
-      RETURN_IF_EMPTY_HANDLE(isolate,
-                             SetLocalPropertyIgnoreAttributes(global,
-                                                              name,
-                                                              value,
-                                                              attributes));
+      RETURN_IF_EMPTY_HANDLE(
+          isolate,
+          JSObject::SetLocalPropertyIgnoreAttributes(global, name, value,
+                                                     attributes));
     } else {
-      StrictModeFlag strict_mode =
-          ((flags & kDeclareGlobalsStrictModeFlag) != 0) ? kStrictMode
-                                                         : kNonStrictMode;
-      RETURN_IF_EMPTY_HANDLE(isolate,
-                             SetProperty(global,
-                                         name,
-                                         value,
-                                         static_cast<PropertyAttributes>(attr),
-                                         strict_mode));
+      RETURN_IF_EMPTY_HANDLE(
+          isolate,
+          JSReceiver::SetProperty(global, name, value,
+                                  static_cast<PropertyAttributes>(attr),
+                                  language_mode == CLASSIC_MODE
+                                      ? kNonStrictMode : kStrictMode));
     }
   }
 
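The raw bit tests on flags give way to BitField decoders that pack the eval flag, the native flag, and the two-bit language mode into a single Smi argument. A sketch of the machinery this relies on; the field layout shown is an assumption consistent with the decode calls above:

    template <class T, int shift, int size>
    class BitField {
     public:
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value >> shift) & ((1U << size) - 1));
      }
    };

    class DeclareGlobalsEvalFlag     : public BitField<bool, 0, 1> {};
    class DeclareGlobalsNativeFlag   : public BitField<bool, 1, 1> {};
    class DeclareGlobalsLanguageMode : public BitField<LanguageMode, 2, 2> {};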
@@ -1335,15 +1385,17 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
 
-  CONVERT_ARG_CHECKED(Context, context, 0);
+  // Declarations are always made in a function or global context.  In the
+  // case of eval code, the context passed is the context of the caller,
+  // which may be some nested context and not the declaration context.
+  RUNTIME_ASSERT(args[0]->IsContext());
+  Handle<Context> context(Context::cast(args[0])->declaration_context());
+
   Handle<String> name(String::cast(args[1]));
   PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2));
   RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
   Handle<Object> initial_value(args[3], isolate);
 
-  // Declarations are always done in a function or global context.
-  context = Handle<Context>(context->declaration_context());
-
   int index;
   PropertyAttributes attributes;
   ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
@@ -1352,9 +1404,7 @@
       context->Lookup(name, flags, &index, &attributes, &binding_flags);
 
   if (attributes != ABSENT) {
-    // The name was declared before; check for conflicting
-    // re-declarations: This is similar to the code in parser.cc in
-    // the AstBuildingParser::Declare function.
+    // The name was declared before; check for conflicting re-declarations.
     if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
       // Functions are not read-only.
       ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
@@ -1365,53 +1415,42 @@
     // Initialize it if necessary.
     if (*initial_value != NULL) {
       if (index >= 0) {
-        // The variable or constant context slot should always be in
-        // the function context or the arguments object.
-        if (holder->IsContext()) {
-          ASSERT(holder.is_identical_to(context));
-          if (((attributes & READ_ONLY) == 0) ||
-              context->get(index)->IsTheHole()) {
-            context->set(index, *initial_value);
-          }
-        } else {
-          // The holder is an arguments object.
-          Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
-          Handle<Object> result = SetElement(arguments, index, initial_value,
-                                             kNonStrictMode);
-          if (result.is_null()) return Failure::Exception();
+        ASSERT(holder.is_identical_to(context));
+        if (((attributes & READ_ONLY) == 0) ||
+            context->get(index)->IsTheHole()) {
+          context->set(index, *initial_value);
         }
       } else {
-        // Slow case: The property is not in the FixedArray part of the context.
-        Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
+        // Slow case: The property is in the context extension object of a
+        // function context or the global object of a global context.
+        Handle<JSObject> object = Handle<JSObject>::cast(holder);
         RETURN_IF_EMPTY_HANDLE(
             isolate,
-            SetProperty(context_ext, name, initial_value,
-                        mode, kNonStrictMode));
+            JSReceiver::SetProperty(object, name, initial_value, mode,
+                                    kNonStrictMode));
       }
     }
 
   } else {
     // The property is not in the function context. It needs to be
-    // "declared" in the function context's extension context, or in the
-    // global context.
-    Handle<JSObject> context_ext;
+    // "declared" in the function context's extension context or as a
+    // property of the global object.
+    Handle<JSObject> object;
     if (context->has_extension()) {
-      // The function context's extension context exists - use it.
-      context_ext = Handle<JSObject>(JSObject::cast(context->extension()));
+      object = Handle<JSObject>(JSObject::cast(context->extension()));
     } else {
-      // The function context's extension context does not exists - allocate
-      // it.
-      context_ext = isolate->factory()->NewJSObject(
+      // Context extension objects are allocated lazily.
+      ASSERT(context->IsFunctionContext());
+      object = isolate->factory()->NewJSObject(
           isolate->context_extension_function());
-      // And store it in the extension slot.
-      context->set_extension(*context_ext);
+      context->set_extension(*object);
     }
-    ASSERT(*context_ext != NULL);
+    ASSERT(*object != NULL);
 
     // Declare the property by setting it to the initial value if provided,
     // or undefined, and use the correct mode (e.g. READ_ONLY attribute for
     // constant declarations).
-    ASSERT(!context_ext->HasLocalProperty(*name));
+    ASSERT(!object->HasLocalProperty(*name));
     Handle<Object> value(isolate->heap()->undefined_value(), isolate);
     if (*initial_value != NULL) value = initial_value;
     // Declaring a const context slot is a conflicting declaration if
@@ -1421,16 +1460,16 @@
     // SetProperty and no setters are invoked for those since they are
     // not real JSObjects.
     if (initial_value->IsTheHole() &&
-        !context_ext->IsJSContextExtensionObject()) {
-      LookupResult lookup;
-      context_ext->Lookup(*name, &lookup);
-      if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) {
+        !object->IsJSContextExtensionObject()) {
+      LookupResult lookup(isolate);
+      object->Lookup(*name, &lookup);
+      if (lookup.IsFound() && (lookup.type() == CALLBACKS)) {
         return ThrowRedeclarationError(isolate, "const", name);
       }
     }
-    RETURN_IF_EMPTY_HANDLE(isolate,
-                           SetProperty(context_ext, name, value, mode,
-                                       kNonStrictMode));
+    RETURN_IF_EMPTY_HANDLE(
+        isolate,
+        JSReceiver::SetProperty(object, name, value, mode, kNonStrictMode));
   }
 
   return isolate->heap()->undefined_value();
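The RUNTIME_ASSERT plus declaration_context() pairing above replaces a plain Context conversion because the context handed to eval-introduced declarations can be arbitrarily nested, while the declaration itself must land in the enclosing function or global context. declaration_context() is roughly the following walk (paraphrased from contexts.cc):

    // Walk out of with/catch contexts until reaching a context that can
    // actually hold var/const declarations.
    Context* Context::declaration_context() {
      Context* current = this;
      while (!current->IsFunctionContext() && !current->IsGlobalContext()) {
        current = current->previous();
      }
      return current;
    }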
@@ -1440,7 +1479,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
   NoHandleAllocation nha;
   // args[0] == name
-  // args[1] == strict_mode
+  // args[1] == language_mode
   // args[2] == value (optional)
 
   // Determine if we need to assign to the variable if it already
@@ -1448,11 +1487,12 @@
   RUNTIME_ASSERT(args.length() == 2 || args.length() == 3);
   bool assign = args.length() == 3;
 
-  CONVERT_ARG_CHECKED(String, name, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   GlobalObject* global = isolate->context()->global();
   RUNTIME_ASSERT(args[1]->IsSmi());
-  StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(1));
-  ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
+  CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
+  StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
+      ? kNonStrictMode : kStrictMode;
 
   // According to ECMA-262, section 12.2, page 62, the property must
   // not be deletable.
@@ -1465,67 +1505,35 @@
   // to assign to the property.
   // Note that objects can have hidden prototypes, so we need to traverse
   // the whole chain of hidden prototypes to do a 'local' lookup.
-  JSObject* real_holder = global;
-  LookupResult lookup;
-  while (true) {
-    real_holder->LocalLookup(*name, &lookup);
-    if (lookup.IsProperty()) {
-      // Determine if this is a redeclaration of something read-only.
-      if (lookup.IsReadOnly()) {
-        // If we found readonly property on one of hidden prototypes,
-        // just shadow it.
-        if (real_holder != isolate->context()->global()) break;
-        return ThrowRedeclarationError(isolate, "const", name);
-      }
-
-      // Determine if this is a redeclaration of an intercepted read-only
-      // property and figure out if the property exists at all.
-      bool found = true;
-      PropertyType type = lookup.type();
-      if (type == INTERCEPTOR) {
-        HandleScope handle_scope(isolate);
-        Handle<JSObject> holder(real_holder);
-        PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
-        real_holder = *holder;
-        if (intercepted == ABSENT) {
-          // The interceptor claims the property isn't there. We need to
-          // make sure to introduce it.
-          found = false;
-        } else if ((intercepted & READ_ONLY) != 0) {
-          // The property is present, but read-only. Since we're trying to
-          // overwrite it with a variable declaration we must throw a
-          // re-declaration error.  However if we found readonly property
-          // on one of hidden prototypes, just shadow it.
-          if (real_holder != isolate->context()->global()) break;
-          return ThrowRedeclarationError(isolate, "const", name);
+  Object* object = global;
+  LookupResult lookup(isolate);
+  while (object->IsJSObject() &&
+         JSObject::cast(object)->map()->is_hidden_prototype()) {
+    JSObject* raw_holder = JSObject::cast(object);
+    raw_holder->LocalLookup(*name, &lookup);
+    if (lookup.IsFound() && lookup.type() == INTERCEPTOR) {
+      HandleScope handle_scope(isolate);
+      Handle<JSObject> holder(raw_holder);
+      PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
+      // Update the raw pointer in case it's changed due to GC.
+      raw_holder = *holder;
+      if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
+        // Found an interceptor that's not read only.
+        if (assign) {
+          return raw_holder->SetProperty(
+              &lookup, *name, args[2], attributes, strict_mode_flag);
+        } else {
+          return isolate->heap()->undefined_value();
         }
       }
-
-      if (found && !assign) {
-        // The global property is there and we're not assigning any value
-        // to it. Just return.
-        return isolate->heap()->undefined_value();
-      }
-
-      // Assign the value (or undefined) to the property.
-      Object* value = (assign) ? args[2] : isolate->heap()->undefined_value();
-      return real_holder->SetProperty(
-          &lookup, *name, value, attributes, strict_mode);
     }
-
-    Object* proto = real_holder->GetPrototype();
-    if (!proto->IsJSObject())
-      break;
-
-    if (!JSObject::cast(proto)->map()->is_hidden_prototype())
-      break;
-
-    real_holder = JSObject::cast(proto);
+    object = raw_holder->GetPrototype();
   }
 
+  // Reload global in case the loop above performed a GC.
   global = isolate->context()->global();
   if (assign) {
-    return global->SetProperty(*name, args[2], attributes, strict_mode);
+    return global->SetProperty(*name, args[2], attributes, strict_mode_flag);
   }
   return isolate->heap()->undefined_value();
 }
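The rewritten hidden-prototype loop is a compact example of the idiom this file uses whenever a raw pointer must survive a call that can allocate: protect it in a handle, make the call, then reload the raw pointer, since GC may have moved the object. In isolation:

    HandleScope handle_scope(isolate);
    Handle<JSObject> holder(raw_holder);      // GC-safe alias
    PropertyAttributes attributes =
        holder->GetPropertyAttribute(*name);  // may allocate and trigger GC
    raw_holder = *holder;                     // reload the possibly moved pointer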
@@ -1536,7 +1544,7 @@
   // of the constant is the first argument and the initial value
   // is the second.
   RUNTIME_ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(String, name, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   Handle<Object> value = args.at<Object>(1);
 
   // Get the current global object from top.
@@ -1552,7 +1560,7 @@
   // add it as a local property even in case of callbacks in the
   // prototype chain (this rules out using SetProperty).
   // We use SetLocalPropertyIgnoreAttributes instead
-  LookupResult lookup;
+  LookupResult lookup(isolate);
   global->LocalLookup(*name, &lookup);
   if (!lookup.IsProperty()) {
     return global->SetLocalPropertyIgnoreAttributes(*name,
@@ -1560,25 +1568,9 @@
                                                     attributes);
   }
 
-  // Determine if this is a redeclaration of something not
-  // read-only. In case the result is hidden behind an interceptor we
-  // need to ask it for the property attributes.
   if (!lookup.IsReadOnly()) {
-    if (lookup.type() != INTERCEPTOR) {
-      return ThrowRedeclarationError(isolate, "var", name);
-    }
-
-    PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
-
-    // Throw re-declaration error if the intercepted property is present
-    // but not read-only.
-    if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
-      return ThrowRedeclarationError(isolate, "var", name);
-    }
-
     // Restore global object from context (in case of GC) and continue
-    // with setting the value because the property is either absent or
-    // read-only. We also have to do redo the lookup.
+    // with setting the value.
     HandleScope handle_scope(isolate);
     Handle<GlobalObject> global(isolate->context()->global());
 
@@ -1586,28 +1578,27 @@
     // property through an interceptor and only do it if it's
     // uninitialized, e.g. the hole. Nirk...
     // Passing non-strict mode because the property is writable.
-    RETURN_IF_EMPTY_HANDLE(isolate,
-                           SetProperty(global,
-                                       name,
-                                       value,
-                                       attributes,
-                                       kNonStrictMode));
+    RETURN_IF_EMPTY_HANDLE(
+        isolate,
+        JSReceiver::SetProperty(global, name, value, attributes,
+                                kNonStrictMode));
     return *value;
   }
 
-  // Set the value, but only we're assigning the initial value to a
+  // Set the value, but only if we're assigning the initial value to a
   // constant. For now, we determine this by checking if the
   // current value is the hole.
-  // Strict mode handling not needed (const disallowed in strict mode).
+  // Strict mode handling not needed (const is disallowed in strict mode).
   PropertyType type = lookup.type();
   if (type == FIELD) {
     FixedArray* properties = global->properties();
     int index = lookup.GetFieldIndex();
-    if (properties->get(index)->IsTheHole()) {
+    if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
       properties->set(index, *value);
     }
   } else if (type == NORMAL) {
-    if (global->GetNormalizedProperty(&lookup)->IsTheHole()) {
+    if (global->GetNormalizedProperty(&lookup)->IsTheHole() ||
+        !lookup.IsReadOnly()) {
       global->SetNormalizedProperty(&lookup, *value);
     }
   } else {
@@ -1627,11 +1618,12 @@
 
   Handle<Object> value(args[0], isolate);
   ASSERT(!value->IsTheHole());
-  CONVERT_ARG_CHECKED(Context, context, 1);
-  Handle<String> name(String::cast(args[2]));
 
   // Initializations are always done in a function or global context.
-  context = Handle<Context>(context->declaration_context());
+  RUNTIME_ASSERT(args[1]->IsContext());
+  Handle<Context> context(Context::cast(args[1])->declaration_context());
+
+  Handle<String> name(String::cast(args[2]));
 
   int index;
   PropertyAttributes attributes;
@@ -1640,72 +1632,64 @@
   Handle<Object> holder =
       context->Lookup(name, flags, &index, &attributes, &binding_flags);
 
-  // In most situations, the property introduced by the const
-  // declaration should be present in the context extension object.
-  // However, because declaration and initialization are separate, the
-  // property might have been deleted (if it was introduced by eval)
-  // before we reach the initialization point.
-  //
-  // Example:
-  //
-  //    function f() { eval("delete x; const x;"); }
-  //
-  // In that case, the initialization behaves like a normal assignment
-  // to property 'x'.
   if (index >= 0) {
-    if (holder->IsContext()) {
-      // Property was found in a context.  Perform the assignment if we
-      // found some non-constant or an uninitialized constant.
-      Handle<Context> context = Handle<Context>::cast(holder);
-      if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
-        context->set(index, *value);
-      }
-    } else {
-      // The holder is an arguments object.
-      ASSERT((attributes & READ_ONLY) == 0);
-      Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
-      RETURN_IF_EMPTY_HANDLE(
-          isolate,
-          SetElement(arguments, index, value, kNonStrictMode));
+    ASSERT(holder->IsContext());
+    // Property was found in a context.  Perform the assignment if we
+    // found some non-constant or an uninitialized constant.
+    Handle<Context> context = Handle<Context>::cast(holder);
+    if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
+      context->set(index, *value);
     }
     return *value;
   }
 
-  // The property could not be found, we introduce it in the global
-  // context.
+  // The property could not be found; we introduce it as a property of the
+  // global object.
   if (attributes == ABSENT) {
     Handle<JSObject> global = Handle<JSObject>(
         isolate->context()->global());
     // Strict mode not needed (const disallowed in strict mode).
     RETURN_IF_EMPTY_HANDLE(
         isolate,
-        SetProperty(global, name, value, NONE, kNonStrictMode));
+        JSReceiver::SetProperty(global, name, value, NONE, kNonStrictMode));
     return *value;
   }
 
-  // The property was present in a context extension object.
-  Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
+  // The property was present in some function's context extension object,
+  // as a property on the subject of a with, or as a property of the global
+  // object.
+  //
+  // In most situations, eval-introduced consts should still be present in
+  // the context extension object.  However, because declaration and
+  // initialization are separate, the property might have been deleted
+  // before we reach the initialization point.
+  //
+  // Example:
+  //
+  //    function f() { eval("delete x; const x;"); }
+  //
+  // In that case, the initialization behaves like a normal assignment.
+  Handle<JSObject> object = Handle<JSObject>::cast(holder);
 
-  if (*context_ext == context->extension()) {
-    // This is the property that was introduced by the const
-    // declaration.  Set it if it hasn't been set before.  NOTE: We
-    // cannot use GetProperty() to get the current value as it
-    // 'unholes' the value.
-    LookupResult lookup;
-    context_ext->LocalLookupRealNamedProperty(*name, &lookup);
-    ASSERT(lookup.IsProperty());  // the property was declared
+  if (*object == context->extension()) {
+    // This is the property that was introduced by the const declaration.
+    // Set it if it hasn't been set before.  NOTE: We cannot use
+    // GetProperty() to get the current value as it 'unholes' the value.
+    LookupResult lookup(isolate);
+    object->LocalLookupRealNamedProperty(*name, &lookup);
+    ASSERT(lookup.IsFound());  // the property was declared
     ASSERT(lookup.IsReadOnly());  // and it was declared as read-only
 
     PropertyType type = lookup.type();
     if (type == FIELD) {
-      FixedArray* properties = context_ext->properties();
+      FixedArray* properties = object->properties();
       int index = lookup.GetFieldIndex();
       if (properties->get(index)->IsTheHole()) {
         properties->set(index, *value);
       }
     } else if (type == NORMAL) {
-      if (context_ext->GetNormalizedProperty(&lookup)->IsTheHole()) {
-        context_ext->SetNormalizedProperty(&lookup, *value);
+      if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
+        object->SetNormalizedProperty(&lookup, *value);
       }
     } else {
       // We should not reach here. Any real, named property should be
@@ -1713,13 +1697,14 @@
       UNREACHABLE();
     }
   } else {
-    // The property was found in a different context extension object.
-    // Set it if it is not a read-only property.
+    // The property was found on some other object.  Set it if it is not a
+    // read-only property.
     if ((attributes & READ_ONLY) == 0) {
       // Strict mode not needed (const disallowed in strict mode).
       RETURN_IF_EMPTY_HANDLE(
           isolate,
-          SetProperty(context_ext, name, value, attributes, kNonStrictMode));
+          JSReceiver::SetProperty(object, name, value, attributes,
+                                  kNonStrictMode));
     }
   }
 
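Both const-initialization paths rely on the same sentinel protocol: a declared but uninitialized const slot holds the hole, and only the first initialization may replace it. The code looks the slot up with LocalLookupRealNamedProperty precisely because an ordinary GetProperty() would 'unhole' the value and erase the distinction. In sketch form:

    // The hole means "declared, not yet initialized"; anything else means
    // the const has already been initialized and must not change.
    if (properties->get(index)->IsTheHole()) {
      properties->set(index, *value);  // first initialization wins
    }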
@@ -1731,10 +1716,10 @@
                  Runtime_OptimizeObjectForAddingMultipleProperties) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_SMI_ARG_CHECKED(properties, 1);
   if (object->HasFastProperties()) {
-    NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
+    JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
   }
   return *object;
 }
@@ -1743,12 +1728,12 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
-  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
-  CONVERT_ARG_CHECKED(String, subject, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
   // Due to the way the JS calls are constructed this must be less than the
   // length of a string, i.e. it is always a Smi.  We check anyway for security.
   CONVERT_SMI_ARG_CHECKED(index, 2);
-  CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
   RUNTIME_ASSERT(last_match_info->HasFastElements());
   RUNTIME_ASSERT(index >= 0);
   RUNTIME_ASSERT(index <= subject->length());
@@ -1800,8 +1785,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
   AssertNoAllocation no_alloc;
   ASSERT(args.length() == 5);
-  CONVERT_CHECKED(JSRegExp, regexp, args[0]);
-  CONVERT_CHECKED(String, source, args[1]);
+  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_ARG_CHECKED(String, source, 1);
 
   Object* global = args[2];
   if (!global->IsTrue()) global = isolate->heap()->false_value();
@@ -1818,14 +1803,17 @@
       JSFunction::cast(constructor)->initial_map() == map) {
     // If we still have the original map, set in-object properties directly.
     regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
-    // TODO(lrn): Consider skipping write barrier on booleans as well.
-    // Both true and false should be in oldspace at all times.
-    regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, global);
-    regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, ignoreCase);
-    regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, multiline);
+    // Both true and false are immortal, immovable objects, so no write
+    // barrier is needed.
+    regexp->InObjectPropertyAtPut(
+        JSRegExp::kGlobalFieldIndex, global, SKIP_WRITE_BARRIER);
+    regexp->InObjectPropertyAtPut(
+        JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER);
+    regexp->InObjectPropertyAtPut(
+        JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER);
     regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
                                   Smi::FromInt(0),
-                                  SKIP_WRITE_BARRIER);
+                                  SKIP_WRITE_BARRIER);  // It's a Smi.
     return regexp;
   }
 
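The SKIP_WRITE_BARRIER changes above rest on two facts: Smis are tagged immediates rather than heap pointers, and the true/false oddballs live in immortal, immovable space, so the store buffer never needs to record stores of either. A conservative predicate capturing that rule (a sketch, not an actual V8 helper):

    static bool CanSkipWriteBarrier(Object* value, Heap* heap) {
      return value->IsSmi() ||               // immediates are not pointers
             value == heap->true_value() ||  // immortal immovable roots
             value == heap->false_value();
    }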
@@ -1866,7 +1854,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FinishArrayPrototypeSetup) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSArray, prototype, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, prototype, 0);
   // This is necessary to enable fast checks for absence of elements
   // on Array.prototype and below.
   prototype->set_elements(isolate->heap()->empty_fixed_array());
@@ -1887,7 +1875,7 @@
                                       code,
                                       false);
   optimized->shared()->DontAdaptArguments();
-  SetProperty(holder, key, optimized, NONE, kStrictMode);
+  JSReceiver::SetProperty(holder, key, optimized, NONE, kStrictMode);
   return optimized;
 }
 
@@ -1895,7 +1883,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSObject, holder, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, holder, 0);
 
   InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
   InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush);
@@ -1910,11 +1898,21 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
-  NoHandleAllocation handle_free;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, function, args[0]);
+  CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
+
+  if (!callable->IsJSFunction()) {
+    HandleScope scope(isolate);
+    bool threw = false;
+    Handle<Object> delegate =
+        Execution::TryGetFunctionDelegate(Handle<JSReceiver>(callable), &threw);
+    if (threw) return Failure::Exception();
+    callable = JSFunction::cast(*delegate);
+  }
+  JSFunction* function = JSFunction::cast(callable);
+
   SharedFunctionInfo* shared = function->shared();
-  if (shared->native() || shared->strict_mode()) {
+  if (shared->native() || !shared->is_classic_mode()) {
     return isolate->heap()->undefined_value();
   }
   // Returns undefined for strict or native functions, or
@@ -1929,7 +1927,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
-  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
   int index = args.smi_at(1);
   Handle<String> pattern = args.at<String>(2);
   Handle<String> flags = args.at<String>(3);
@@ -1960,7 +1958,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return f->shared()->name();
 }
 
@@ -1969,8 +1967,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_CHECKED(JSFunction, f, args[0]);
-  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
   f->shared()->set_name(name);
   return isolate->heap()->undefined_value();
 }
@@ -1979,7 +1977,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return isolate->heap()->ToBoolean(
       f->shared()->name_should_print_as_anonymous());
 }
@@ -1988,26 +1986,17 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   f->shared()->set_name_should_print_as_anonymous(true);
   return isolate->heap()->undefined_value();
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetBound) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
-  fun->shared()->set_bound(true);
-  return isolate->heap()->undefined_value();
-}
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   Object* obj = f->RemovePrototype();
   if (obj->IsFailure()) return obj;
 
@@ -2019,7 +2008,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   Handle<Object> script = Handle<Object>(fun->shared()->script(), isolate);
   if (!script->IsScript()) return isolate->heap()->undefined_value();
 
@@ -2028,11 +2017,12 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
-  NoHandleAllocation ha;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSFunction, f, args[0]);
-  return f->shared()->GetSourceCode();
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0);
+  Handle<SharedFunctionInfo> shared(f->shared());
+  return *shared->GetSourceCode();
 }
 
 
@@ -2040,7 +2030,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   int pos = fun->shared()->start_position();
   return Smi::FromInt(pos);
 }
@@ -2049,7 +2039,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
   ASSERT(args.length() == 2);
 
-  CONVERT_CHECKED(Code, code, args[0]);
+  CONVERT_ARG_CHECKED(Code, code, 0);
   CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
 
   RUNTIME_ASSERT(0 <= offset && offset < code->Size());
@@ -2063,8 +2053,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
-  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
   fun->SetInstanceClassName(name);
   return isolate->heap()->undefined_value();
 }
@@ -2074,28 +2064,10 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
-  CONVERT_CHECKED(Smi, length, args[1]);
-  fun->shared()->set_length(length->value());
-  return length;
-}
-
-
-// Creates a local, readonly, property called length with the correct
-// length (when read by the user). This effectively overwrites the
-// interceptor used to normally provide the length.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionSetLength) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
-  CONVERT_CHECKED(Smi, length, args[1]);
-  MaybeObject* maybe_name =
-      isolate->heap()->AllocateStringFromAscii(CStrVector("length"));
-  String* name;
-  if (!maybe_name->To(&name)) return maybe_name;
-  PropertyAttributes attr =
-      static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
-  return fun->AddProperty(name, length, attr, kNonStrictMode);
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  CONVERT_SMI_ARG_CHECKED(length, 1);
+  fun->shared()->set_length(length);
+  return isolate->heap()->undefined_value();
 }
 
 
@@ -2103,7 +2075,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   ASSERT(fun->should_have_prototype());
   Object* obj;
   { MaybeObject* maybe_obj =
@@ -2117,7 +2089,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
   NoHandleAllocation ha;
   RUNTIME_ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, function, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
 
   MaybeObject* maybe_name =
       isolate->heap()->AllocateStringFromAscii(CStrVector("prototype"));
@@ -2173,7 +2145,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return isolate->heap()->ToBoolean(f->shared()->IsApiFunction());
 }
 
@@ -2182,7 +2154,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return isolate->heap()->ToBoolean(f->IsBuiltin());
 }
 
@@ -2191,7 +2163,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(JSFunction, target, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
   Handle<Object> code = args.at<Object>(1);
 
   Handle<Context> context(target->context());
@@ -2201,13 +2173,12 @@
     Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
     Handle<SharedFunctionInfo> shared(fun->shared());
 
-    if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
+    if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
       return Failure::Exception();
     }
     // Since we don't store the source for this we should never
     // optimize this.
     shared->code()->set_optimizable(false);
-
     // Set the code, scope info, formal parameter count,
     // and the length of the target function.
     target->shared()->set_code(shared->code());
@@ -2239,9 +2210,7 @@
       literals->set(JSFunction::kLiteralGlobalContextIndex,
                     context->global_context());
     }
-    // It's okay to skip the write barrier here because the literals
-    // are guaranteed to be in old space.
-    target->set_literals(*literals, SKIP_WRITE_BARRIER);
+    target->set_literals(*literals);
     target->set_next_function_link(isolate->heap()->undefined_value());
 
     if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) {
@@ -2258,7 +2227,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_SMI_ARG_CHECKED(num, 1);
   RUNTIME_ASSERT(num >= 0);
   SetExpectedNofProperties(function, num);
@@ -2282,7 +2251,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_CHECKED(String, subject, args[0]);
+  CONVERT_ARG_CHECKED(String, subject, 0);
   Object* index = args[1];
   RUNTIME_ASSERT(index->IsNumber());
 
@@ -2325,7 +2294,8 @@
  public:
   explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
       : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
-        length_(0) {
+        length_(0),
+        has_non_smi_elements_(false) {
     // Require a non-zero initial size. Ensures that doubling the size to
     // extend the array will work.
     ASSERT(initial_capacity > 0);
@@ -2333,7 +2303,8 @@
 
   explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
       : array_(backing_store),
-        length_(0) {
+        length_(0),
+        has_non_smi_elements_(false) {
     // Require a non-zero initial size. Ensures that doubling the size to
     // extend the array will work.
     ASSERT(backing_store->length() > 0);
@@ -2361,12 +2332,15 @@
   }
 
   void Add(Object* value) {
+    ASSERT(!value->IsSmi());
     ASSERT(length_ < capacity());
     array_->set(length_, value);
     length_++;
+    has_non_smi_elements_ = true;
   }
 
   void Add(Smi* value) {
+    ASSERT(value->IsSmi());
     ASSERT(length_ < capacity());
     array_->set(length_, value);
     length_++;
@@ -2391,7 +2365,7 @@
   }
 
   Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
-    target_array->set_elements(*array_);
+    FACTORY->SetContent(target_array, array_);
     target_array->set_length(Smi::FromInt(length_));
     return target_array;
   }
@@ -2399,6 +2373,7 @@
  private:
   Handle<FixedArray> array_;
   int length_;
+  bool has_non_smi_elements_;
 };
 
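The new has_non_smi_elements_ bit separates the two Add overloads: a builder fed only Smis can back a JSArray that keeps a Smi-only elements kind, while a single Object* add poisons that, which is plausibly also why ToJSArray now goes through Factory::SetContent, which knows how to pick an elements kind, instead of raw set_elements. Usage, under those assumptions:

    // Overload resolution decides which Add runs, and with it whether the
    // builder remains eligible for a Smi-only elements kind.
    builder.Add(Smi::FromInt(42));      // Smi overload: flag untouched
    builder.Add(heap->empty_string());  // Object* overload: marks non-Smi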
 
@@ -2893,7 +2868,7 @@
       }
     } else {
       Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
-      if (pattern->IsAsciiRepresentation()) {
+      if (pattern_content.IsAscii()) {
         FindStringIndices(isolate,
                           subject_vector,
                           pattern_content.ToAsciiVector(),
@@ -3019,7 +2994,7 @@
 
   // Shortcut for simple non-regexp global replacements
   if (is_global &&
-      regexp->TypeTag() == JSRegExp::ATOM &&
+      regexp_handle->TypeTag() == JSRegExp::ATOM &&
       compiled_replacement.simple_hint()) {
     if (subject_handle->HasOnlyAsciiChars() &&
         replacement_handle->HasOnlyAsciiChars()) {
@@ -3242,6 +3217,9 @@
 
   Address end_of_string = answer->address() + string_size;
   isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
+  if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
+    MemoryChunk::IncrementLiveBytesFromMutator(answer->address(), -delta);
+  }
 
   return *answer;
 }
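The new Marking check keeps incremental marking's per-page accounting honest when a string is truncated in place: if the object was already marked black, its full size has been counted as live, so the delta bytes just converted into filler must be subtracted again. The invariant, restated:

    // Live bytes on a page == total size of its black objects; trimming a
    // black object in the mutator must deduct the trimmed tail.
    if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
      MemoryChunk::IncrementLiveBytesFromMutator(answer->address(), -delta);
    }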
@@ -3250,7 +3228,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
   ASSERT(args.length() == 4);
 
-  CONVERT_CHECKED(String, subject, args[0]);
+  CONVERT_ARG_CHECKED(String, subject, 0);
   if (!subject->IsFlat()) {
     Object* flat_subject;
     { MaybeObject* maybe_flat_subject = subject->TryFlatten();
@@ -3261,7 +3239,7 @@
     subject = String::cast(flat_subject);
   }
 
-  CONVERT_CHECKED(String, replacement, args[2]);
+  CONVERT_ARG_CHECKED(String, replacement, 2);
   if (!replacement->IsFlat()) {
     Object* flat_replacement;
     { MaybeObject* maybe_flat_replacement = replacement->TryFlatten();
@@ -3272,8 +3250,8 @@
     replacement = String::cast(flat_replacement);
   }
 
-  CONVERT_CHECKED(JSRegExp, regexp, args[1]);
-  CONVERT_CHECKED(JSArray, last_match_info, args[3]);
+  CONVERT_ARG_CHECKED(JSRegExp, regexp, 1);
+  CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
 
   ASSERT(last_match_info->HasFastElements());
 
@@ -3295,6 +3273,79 @@
 }
 
 
+Handle<String> Runtime::StringReplaceOneCharWithString(Isolate* isolate,
+                                                       Handle<String> subject,
+                                                       Handle<String> search,
+                                                       Handle<String> replace,
+                                                       bool* found,
+                                                       int recursion_limit) {
+  if (recursion_limit == 0) return Handle<String>::null();
+  if (subject->IsConsString()) {
+    ConsString* cons = ConsString::cast(*subject);
+    Handle<String> first = Handle<String>(cons->first());
+    Handle<String> second = Handle<String>(cons->second());
+    Handle<String> new_first =
+        StringReplaceOneCharWithString(isolate,
+                                       first,
+                                       search,
+                                       replace,
+                                       found,
+                                       recursion_limit - 1);
+    if (*found) return isolate->factory()->NewConsString(new_first, second);
+    if (new_first.is_null()) return new_first;
+
+    Handle<String> new_second =
+        StringReplaceOneCharWithString(isolate,
+                                       second,
+                                       search,
+                                       replace,
+                                       found,
+                                       recursion_limit - 1);
+    if (*found) return isolate->factory()->NewConsString(first, new_second);
+    if (new_second.is_null()) return new_second;
+
+    return subject;
+  } else {
+    int index = StringMatch(isolate, subject, search, 0);
+    if (index == -1) return subject;
+    *found = true;
+    Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
+    Handle<String> cons1 = isolate->factory()->NewConsString(first, replace);
+    Handle<String> second =
+        isolate->factory()->NewSubString(subject, index + 1, subject->length());
+    return isolate->factory()->NewConsString(cons1, second);
+  }
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
+  ASSERT(args.length() == 3);
+  HandleScope scope(isolate);
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, search, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, replace, 2);
+
+  // If the cons string tree is too deep, we simply abort the recursion and
+  // retry with a flattened subject string.
+  const int kRecursionLimit = 0x1000;
+  bool found = false;
+  Handle<String> result =
+      Runtime::StringReplaceOneCharWithString(isolate,
+                                              subject,
+                                              search,
+                                              replace,
+                                              &found,
+                                              kRecursionLimit);
+  if (!result.is_null()) return *result;
+  return *Runtime::StringReplaceOneCharWithString(isolate,
+                                                  FlattenGetString(subject),
+                                                  search,
+                                                  replace,
+                                                  &found,
+                                                  kRecursionLimit);
+}
+
+
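The runtime function above, together with its recursive helper, implements a divide-and-conquer one-character replace over the cons-string tree, falling back to a flattened copy when the tree is too deep. A minimal standalone sketch of the same strategy, using an illustrative Rope type rather than V8's ConsString/Factory machinery:

    // Illustrative sketch only: Rope, Leaf, Cons and Flatten model the
    // recursion above but are not V8 types or APIs.
    #include <iostream>
    #include <memory>
    #include <string>

    struct Rope {
      std::string leaf;                     // payload when this is a leaf
      std::shared_ptr<Rope> first, second;  // children when this is a cons
      bool IsCons() const { return first != nullptr; }
    };

    std::shared_ptr<Rope> Leaf(const std::string& s) {
      return std::make_shared<Rope>(Rope{s, nullptr, nullptr});
    }

    std::shared_ptr<Rope> Cons(std::shared_ptr<Rope> a,
                               std::shared_ptr<Rope> b) {
      return std::make_shared<Rope>(Rope{"", a, b});
    }

    std::string Flatten(const Rope& r) {
      return r.IsCons() ? Flatten(*r.first) + Flatten(*r.second) : r.leaf;
    }

    // Replaces the first occurrence of the one-character string `search`;
    // returns nullptr when the depth limit is hit, in which case the caller
    // retries on a flattened copy, exactly as the runtime function does.
    std::shared_ptr<Rope> ReplaceOneChar(const std::shared_ptr<Rope>& subject,
                                         char search,
                                         const std::string& replace,
                                         bool* found,
                                         int recursion_limit) {
      if (recursion_limit == 0) return nullptr;
      if (subject->IsCons()) {
        auto new_first = ReplaceOneChar(subject->first, search, replace,
                                        found, recursion_limit - 1);
        if (*found) return Cons(new_first, subject->second);
        if (new_first == nullptr) return nullptr;
        auto new_second = ReplaceOneChar(subject->second, search, replace,
                                         found, recursion_limit - 1);
        if (*found) return Cons(subject->first, new_second);
        if (new_second == nullptr) return nullptr;
        return subject;  // no match below this node: share it unchanged
      }
      auto index = subject->leaf.find(search);
      if (index == std::string::npos) return subject;
      *found = true;
      const std::string& s = subject->leaf;
      return Leaf(s.substr(0, index) + replace + s.substr(index + 1));
    }

    int main() {
      auto subject = Cons(Leaf("hello "), Leaf("world"));
      bool found = false;
      auto result = ReplaceOneChar(subject, 'w', "W", &found, 0x1000);
      if (result == nullptr) {  // depth limit hit: retry on a flat copy
        result = ReplaceOneChar(Leaf(Flatten(*subject)), 'w', "W",
                                &found, 0x1000);
      }
      std::cout << Flatten(*result) << "\n";  // hello World
    }

The early `return subject` on a miss is what keeps the operation cheap: untouched subtrees are shared, and only the spine from the root down to the replaced leaf is rebuilt.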
 // Perform string match of pattern on subject, starting at start index.
 // Caller must ensure that 0 <= start_index <= sub->length(),
 // and should check that pat->length() + start_index <= sub->length().
@@ -3351,8 +3402,8 @@
   HandleScope scope(isolate);  // create a new handle scope
   ASSERT(args.length() == 3);
 
-  CONVERT_ARG_CHECKED(String, sub, 0);
-  CONVERT_ARG_CHECKED(String, pat, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
 
   Object* index = args[2];
   uint32_t start_index;
@@ -3403,8 +3454,8 @@
   HandleScope scope(isolate);  // create a new handle scope
   ASSERT(args.length() == 3);
 
-  CONVERT_ARG_CHECKED(String, sub, 0);
-  CONVERT_ARG_CHECKED(String, pat, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
 
   Object* index = args[2];
   uint32_t start_index;
@@ -3462,8 +3513,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_CHECKED(String, str1, args[0]);
-  CONVERT_CHECKED(String, str2, args[1]);
+  CONVERT_ARG_CHECKED(String, str1, 0);
+  CONVERT_ARG_CHECKED(String, str2, 1);
 
   if (str1 == str2) return Smi::FromInt(0);  // Equal.
   int str1_length = str1->length();
@@ -3510,7 +3561,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
-  CONVERT_CHECKED(String, value, args[0]);
+  CONVERT_ARG_CHECKED(String, value, 0);
   int start, end;
   // We have a fast integer-only case here to avoid a conversion to double in
   // the common case where from and to are Smis.
@@ -3536,9 +3587,9 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
   ASSERT_EQ(3, args.length());
 
-  CONVERT_ARG_CHECKED(String, subject, 0);
-  CONVERT_ARG_CHECKED(JSRegExp, regexp, 1);
-  CONVERT_ARG_CHECKED(JSArray, regexp_info, 2);
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
   HandleScope handles;
 
   Handle<Object> match = RegExpImpl::Exec(regexp, subject, 0, regexp_info);
@@ -3729,7 +3780,7 @@
   int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
   if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
 
-  OffsetsVector registers(required_registers);
+  OffsetsVector registers(required_registers, isolate);
   Vector<int32_t> register_vector(registers.vector(), registers.length());
   int subject_length = subject->length();
   bool first = true;
@@ -3802,7 +3853,7 @@
   int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
   if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
 
-  OffsetsVector registers(required_registers);
+  OffsetsVector registers(required_registers, isolate);
   Vector<int32_t> register_vector(registers.vector(), registers.length());
 
   RegExpImpl::IrregexpResult result =
@@ -3821,7 +3872,7 @@
   if (result == RegExpImpl::RE_SUCCESS) {
     // Need to keep a copy of the previous match for creating last_match_info
     // at the end, so we have two vectors that we swap between.
-    OffsetsVector registers2(required_registers);
+    OffsetsVector registers2(required_registers, isolate);
     Vector<int> prev_register_vector(registers2.vector(), registers2.length());
     bool first = true;
     do {
@@ -3929,11 +3980,11 @@
   ASSERT(args.length() == 4);
   HandleScope handles(isolate);
 
-  CONVERT_ARG_CHECKED(String, subject, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
   if (!subject->IsFlat()) FlattenString(subject);
-  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
-  CONVERT_ARG_CHECKED(JSArray, last_match_info, 2);
-  CONVERT_ARG_CHECKED(JSArray, result_array, 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
 
   ASSERT(last_match_info->HasFastElements());
   ASSERT(regexp->GetFlags().is_global());
@@ -4001,13 +4052,13 @@
   // Slow case.
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+    return *isolate->factory()->nan_symbol();
   }
   if (isinf(value)) {
     if (value < 0) {
-      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+      return *isolate->factory()->minus_infinity_symbol();
     }
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+    return *isolate->factory()->infinity_symbol();
   }
   char* str = DoubleToRadixCString(value, radix);
   MaybeObject* result =
@@ -4023,13 +4074,13 @@
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+    return *isolate->factory()->nan_symbol();
   }
   if (isinf(value)) {
     if (value < 0) {
-      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+      return *isolate->factory()->minus_infinity_symbol();
     }
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+    return *isolate->factory()->infinity_symbol();
   }
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
@@ -4048,13 +4099,13 @@
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+    return *isolate->factory()->nan_symbol();
   }
   if (isinf(value)) {
     if (value < 0) {
-      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+      return *isolate->factory()->minus_infinity_symbol();
     }
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+    return *isolate->factory()->infinity_symbol();
   }
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
@@ -4073,13 +4124,13 @@
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+    return *isolate->factory()->nan_symbol();
   }
   if (isinf(value)) {
     if (value < 0) {
-      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+      return *isolate->factory()->minus_infinity_symbol();
     }
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+    return *isolate->factory()->infinity_symbol();
   }
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
@@ -4122,15 +4173,9 @@
   }
 
   if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
-    Handle<Object> prototype = GetPrototype(object);
-    return prototype->GetElement(index);
+    return object->GetPrototype()->GetElement(index);
   }
 
-  return GetElement(object, index);
-}
-
-
-MaybeObject* Runtime::GetElement(Handle<Object> object, uint32_t index) {
   return object->GetElement(index);
 }
 
@@ -4203,40 +4248,63 @@
   //
   // Additionally, we need to make sure that we do not cache results
   // for objects that require access checks.
-  if (args[0]->IsJSObject() &&
-      !args[0]->IsJSGlobalProxy() &&
-      !args[0]->IsAccessCheckNeeded() &&
-      args[1]->IsString()) {
-    JSObject* receiver = JSObject::cast(args[0]);
-    String* key = String::cast(args[1]);
-    if (receiver->HasFastProperties()) {
-      // Attempt to use lookup cache.
-      Map* receiver_map = receiver->map();
-      KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
-      int offset = keyed_lookup_cache->Lookup(receiver_map, key);
-      if (offset != -1) {
-        Object* value = receiver->FastPropertyAt(offset);
-        return value->IsTheHole() ? isolate->heap()->undefined_value() : value;
+  if (args[0]->IsJSObject()) {
+    if (!args[0]->IsJSGlobalProxy() &&
+        !args[0]->IsAccessCheckNeeded() &&
+        args[1]->IsString()) {
+      JSObject* receiver = JSObject::cast(args[0]);
+      String* key = String::cast(args[1]);
+      if (receiver->HasFastProperties()) {
+        // Attempt to use lookup cache.
+        Map* receiver_map = receiver->map();
+        KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
+        int offset = keyed_lookup_cache->Lookup(receiver_map, key);
+        if (offset != -1) {
+          Object* value = receiver->FastPropertyAt(offset);
+          return value->IsTheHole()
+              ? isolate->heap()->undefined_value()
+              : value;
+        }
+        // Lookup cache miss.  Perform lookup and update the cache if
+        // appropriate.
+        LookupResult result(isolate);
+        receiver->LocalLookup(key, &result);
+        if (result.IsFound() && result.type() == FIELD) {
+          int offset = result.GetFieldIndex();
+          keyed_lookup_cache->Update(receiver_map, key, offset);
+          return receiver->FastPropertyAt(offset);
+        }
+      } else {
+        // Attempt dictionary lookup.
+        StringDictionary* dictionary = receiver->property_dictionary();
+        int entry = dictionary->FindEntry(key);
+        if ((entry != StringDictionary::kNotFound) &&
+            (dictionary->DetailsAt(entry).type() == NORMAL)) {
+          Object* value = dictionary->ValueAt(entry);
+          if (!receiver->IsGlobalObject()) return value;
+          value = JSGlobalPropertyCell::cast(value)->value();
+          if (!value->IsTheHole()) return value;
+          // If value is the hole do the general lookup.
+        }
       }
-      // Lookup cache miss.  Perform lookup and update the cache if appropriate.
-      LookupResult result;
-      receiver->LocalLookup(key, &result);
-      if (result.IsProperty() && result.type() == FIELD) {
-        int offset = result.GetFieldIndex();
-        keyed_lookup_cache->Update(receiver_map, key, offset);
-        return receiver->FastPropertyAt(offset);
-      }
-    } else {
-      // Attempt dictionary lookup.
-      StringDictionary* dictionary = receiver->property_dictionary();
-      int entry = dictionary->FindEntry(key);
-      if ((entry != StringDictionary::kNotFound) &&
-          (dictionary->DetailsAt(entry).type() == NORMAL)) {
-        Object* value = dictionary->ValueAt(entry);
-        if (!receiver->IsGlobalObject()) return value;
-        value = JSGlobalPropertyCell::cast(value)->value();
-        if (!value->IsTheHole()) return value;
-        // If value is the hole do the general lookup.
+    } else if (FLAG_smi_only_arrays && args.at<Object>(1)->IsSmi()) {
+      // JSObject without a string key. If the key is a Smi, check for a
+      // definite out-of-bounds access to elements, which is a strong indicator
+      // that subsequent accesses will also call the runtime. Proactively
+      // transition elements to FAST_ELEMENTS to avoid excessive boxing of
+      // doubles for those future calls in the case that the elements would
+      // become FAST_DOUBLE_ELEMENTS.
+      Handle<JSObject> js_object(args.at<JSObject>(0));
+      ElementsKind elements_kind = js_object->GetElementsKind();
+      if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+          elements_kind == FAST_DOUBLE_ELEMENTS) {
+        FixedArrayBase* elements = js_object->elements();
+        if (args.at<Smi>(1)->value() >= elements->length()) {
+          MaybeObject* maybe_object = TransitionElements(js_object,
+                                                         FAST_ELEMENTS,
+                                                         isolate);
+          if (maybe_object->IsFailure()) return maybe_object;
+        }
       }
     }
   } else if (args[0]->IsString() && args[1]->IsSmi()) {
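The fast path at the top of this hunk consults a keyed lookup cache mapping a (map, name) pair to a field offset, with -1 signalling a miss. A minimal sketch of such a direct-mapped cache; the table size and hash here are illustrative, not V8's:

    // Illustrative direct-mapped cache from (map, name) to a field offset;
    // -1 signals a miss, mirroring the Lookup/Update protocol used above.
    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <string>

    class KeyedLookupCache {
     public:
      int Lookup(const void* map, const std::string& name) const {
        const Entry& e = entries_[Index(map, name)];
        return (e.map == map && e.name == name) ? e.offset : -1;
      }

      void Update(const void* map, const std::string& name, int offset) {
        entries_[Index(map, name)] = Entry{map, name, offset};  // evict
      }

     private:
      struct Entry {
        const void* map = nullptr;
        std::string name;
        int offset = -1;
      };
      static const int kLength = 64;  // illustrative sizing

      static int Index(const void* map, const std::string& name) {
        std::size_t h = reinterpret_cast<std::uintptr_t>(map) ^
                        std::hash<std::string>()(name);
        return static_cast<int>(h % kLength);
      }

      Entry entries_[kLength];
    };

On a hit the property is loaded straight from the cached offset; on a miss the slow lookup runs and, when it finds a plain field, seeds the cache for subsequent accesses.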
@@ -4256,6 +4324,12 @@
                                     args.at<Object>(1));
 }
 
+
+static bool IsValidAccessor(Handle<Object> obj) {
+  return obj->IsUndefined() || obj->IsSpecFunction() || obj->IsNull();
+}
+
+
 // Implements part of 8.12.9 DefineOwnProperty.
 // There are 3 cases that lead here:
 // Step 4b - define a new accessor property.
@@ -4265,32 +4339,21 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
   ASSERT(args.length() == 5);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
-  CONVERT_CHECKED(String, name, args[1]);
-  CONVERT_CHECKED(Smi, flag_setter, args[2]);
-  Object* fun = args[3];
-  RUNTIME_ASSERT(fun->IsJSFunction() || fun->IsUndefined());
-  CONVERT_CHECKED(Smi, flag_attr, args[4]);
-  int unchecked = flag_attr->value();
-  RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   RUNTIME_ASSERT(!obj->IsNull());
-  LookupResult result;
-  obj->LocalLookupRealNamedProperty(name, &result);
-
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
+  RUNTIME_ASSERT(IsValidAccessor(getter));
+  CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
+  RUNTIME_ASSERT(IsValidAccessor(setter));
+  CONVERT_SMI_ARG_CHECKED(unchecked, 4);
+  RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
   PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
-  // If an existing property is either FIELD, NORMAL or CONSTANT_FUNCTION
-  // delete it to avoid running into trouble in DefineAccessor, which
-  // handles this incorrectly if the property is readonly (does nothing)
-  if (result.IsProperty() &&
-      (result.type() == FIELD || result.type() == NORMAL
-       || result.type() == CONSTANT_FUNCTION)) {
-    Object* ok;
-    { MaybeObject* maybe_ok =
-          obj->DeleteProperty(name, JSReceiver::NORMAL_DELETION);
-      if (!maybe_ok->ToObject(&ok)) return maybe_ok;
-    }
-  }
-  return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
+
+  bool fast = obj->HasFastProperties();
+  JSObject::DefineAccessor(obj, name, getter, setter, attr);
+  if (fast) JSObject::TransformToFastProperties(obj, 0);
+  return isolate->heap()->undefined_value();
 }
 
 // Implements part of 8.12.9 DefineOwnProperty.
@@ -4302,71 +4365,36 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
   ASSERT(args.length() == 4);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSObject, js_object, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
-  Handle<Object> obj_value = args.at<Object>(2);
-
-  CONVERT_CHECKED(Smi, flag, args[3]);
-  int unchecked = flag->value();
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, obj_value, 2);
+  CONVERT_SMI_ARG_CHECKED(unchecked, 3);
   RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
-
   PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
 
-  // Check if this is an element.
-  uint32_t index;
-  bool is_element = name->AsArrayIndex(&index);
-
-  // Special case for elements if any of the flags are true.
-  // If elements are in fast case we always implicitly assume that:
-  // DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
-  if (((unchecked & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) &&
-      is_element) {
-    // Normalize the elements to enable attributes on the property.
-    if (js_object->IsJSGlobalProxy()) {
-      // We do not need to do access checks here since these has already
-      // been performed by the call to GetOwnProperty.
-      Handle<Object> proto(js_object->GetPrototype());
-      // If proxy is detached, ignore the assignment. Alternatively,
-      // we could throw an exception.
-      if (proto->IsNull()) return *obj_value;
-      js_object = Handle<JSObject>::cast(proto);
-    }
-
-    // Don't allow element properties to be redefined on objects with external
-    // array elements.
-    if (js_object->HasExternalArrayElements()) {
-      Handle<Object> args[2] = { js_object, name };
-      Handle<Object> error =
-          isolate->factory()->NewTypeError("redef_external_array_element",
-                                           HandleVector(args, 2));
-      return isolate->Throw(*error);
-    }
-
-    Handle<SeededNumberDictionary> dictionary = NormalizeElements(js_object);
-    // Make sure that we never go back to fast case.
-    dictionary->set_requires_slow_elements();
-    PropertyDetails details = PropertyDetails(attr, NORMAL);
-    Handle<SeededNumberDictionary> extended_dictionary =
-        SeededNumberDictionarySet(dictionary, index, obj_value, details);
-    if (*extended_dictionary != *dictionary) {
-      if (js_object->GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
-        FixedArray::cast(js_object->elements())->set(1, *extended_dictionary);
-      } else {
-        js_object->set_elements(*extended_dictionary);
-      }
-    }
-    return *obj_value;
-  }
-
-  LookupResult result;
+  LookupResult result(isolate);
   js_object->LocalLookupRealNamedProperty(*name, &result);
 
-  // To be compatible with safari we do not change the value on API objects
-  // in defineProperty. Firefox disagrees here, and actually changes the value.
-  if (result.IsProperty() &&
-      (result.type() == CALLBACKS) &&
-      result.GetCallbackObject()->IsAccessorInfo()) {
-    return isolate->heap()->undefined_value();
+  // Special case for callback properties.
+  if (result.IsFound() && result.type() == CALLBACKS) {
+    Object* callback = result.GetCallbackObject();
+    // To be compatible with Safari we do not change the value on API objects
+    // in Object.defineProperty(). Firefox disagrees here, and actually changes
+    // the value.
+    if (callback->IsAccessorInfo()) {
+      return isolate->heap()->undefined_value();
+    }
+    // Avoid redefining a foreign callback as a data property; just use the
+    // stored setter to update the value instead.
+    // TODO(mstarzinger): So far this only works if property attributes don't
+    // change, this should be fixed once we cleanup the underlying code.
+    if (callback->IsForeign() && result.GetAttributes() == attr) {
+      return js_object->SetPropertyWithCallback(callback,
+                                                *name,
+                                                *obj_value,
+                                                result.holder(),
+                                                kStrictMode);
+    }
   }
 
   // Take special care when attributes are different and there is already
@@ -4383,7 +4411,7 @@
       // we don't have to check for null.
       js_object = Handle<JSObject>(JSObject::cast(js_object->GetPrototype()));
     }
-    NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
+    JSObject::NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
     // Use IgnoreAttributes version since a readonly property may be
     // overridden and SetProperty does not allow this.
     return js_object->SetLocalPropertyIgnoreAttributes(*name,
@@ -4399,34 +4427,13 @@
 }
 
 
-// Special case for elements if any of the flags are true.
-// If elements are in fast case we always implicitly assume that:
-// DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
-static MaybeObject* NormalizeObjectSetElement(Isolate* isolate,
-                                              Handle<JSObject> js_object,
-                                              uint32_t index,
-                                              Handle<Object> value,
-                                              PropertyAttributes attr) {
-  // Normalize the elements to enable attributes on the property.
-  Handle<SeededNumberDictionary> dictionary = NormalizeElements(js_object);
-  // Make sure that we never go back to fast case.
-  dictionary->set_requires_slow_elements();
-  PropertyDetails details = PropertyDetails(attr, NORMAL);
-  Handle<SeededNumberDictionary> extended_dictionary =
-      SeededNumberDictionarySet(dictionary, index, value, details);
-  if (*extended_dictionary != *dictionary) {
-    js_object->set_elements(*extended_dictionary);
-  }
-  return *value;
-}
-
-
 MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
                                         Handle<Object> object,
                                         Handle<Object> key,
                                         Handle<Object> value,
                                         PropertyAttributes attr,
                                         StrictModeFlag strict_mode) {
+  SetPropertyMode set_mode = attr == NONE ? SET_PROPERTY : DEFINE_PROPERTY;
   HandleScope scope(isolate);
 
   if (object->IsUndefined() || object->IsNull()) {
@@ -4437,6 +4444,14 @@
     return isolate->Throw(*error);
   }
 
+  if (object->IsJSProxy()) {
+    bool has_pending_exception = false;
+    Handle<Object> name = Execution::ToString(key, &has_pending_exception);
+    if (has_pending_exception) return Failure::Exception();
+    return JSProxy::cast(*object)->SetProperty(
+        String::cast(*name), *value, attr, strict_mode);
+  }
+
   // If the object isn't a JavaScript object, we ignore the store.
   if (!object->IsJSObject()) return *value;
 
@@ -4456,11 +4471,8 @@
       return *value;
     }
 
-    if (((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0)) {
-      return NormalizeObjectSetElement(isolate, js_object, index, value, attr);
-    }
-
-    Handle<Object> result = SetElement(js_object, index, value, strict_mode);
+    Handle<Object> result = JSObject::SetElement(
+        js_object, index, value, attr, strict_mode, set_mode);
     if (result.is_null()) return Failure::Exception();
     return *value;
   }
@@ -4468,18 +4480,13 @@
   if (key->IsString()) {
     Handle<Object> result;
     if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
-      if (((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0)) {
-        return NormalizeObjectSetElement(isolate,
-                                         js_object,
-                                         index,
-                                         value,
-                                         attr);
-      }
-      result = SetElement(js_object, index, value, strict_mode);
+      result = JSObject::SetElement(
+          js_object, index, value, attr, strict_mode, set_mode);
     } else {
       Handle<String> key_string = Handle<String>::cast(key);
       key_string->TryFlatten();
-      result = SetProperty(js_object, key_string, value, attr, strict_mode);
+      result = JSReceiver::SetProperty(
+          js_object, key_string, value, attr, strict_mode);
     }
     if (result.is_null()) return Failure::Exception();
     return *value;
@@ -4492,7 +4499,8 @@
   Handle<String> name = Handle<String>::cast(converted);
 
   if (name->AsArrayIndex(&index)) {
-    return js_object->SetElement(index, *value, strict_mode, true);
+    return js_object->SetElement(
+        index, *value, attr, strict_mode, true, set_mode);
   } else {
     return js_object->SetProperty(*name, *value, attr, strict_mode);
   }
@@ -4520,12 +4528,14 @@
       return *value;
     }
 
-    return js_object->SetElement(index, *value, kNonStrictMode, true);
+    return js_object->SetElement(
+        index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
   }
 
   if (key->IsString()) {
     if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
-      return js_object->SetElement(index, *value, kNonStrictMode, true);
+      return js_object->SetElement(
+          index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
     } else {
       Handle<String> key_string = Handle<String>::cast(key);
       key_string->TryFlatten();
@@ -4542,7 +4552,8 @@
   Handle<String> name = Handle<String>::cast(converted);
 
   if (name->AsArrayIndex(&index)) {
-    return js_object->SetElement(index, *value, kNonStrictMode, true);
+    return js_object->SetElement(
+        index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
   } else {
     return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
   }
@@ -4556,7 +4567,7 @@
 
   // Check if the given key is an array index.
   uint32_t index;
-  if (receiver->IsJSObject() && key->ToArrayIndex(&index)) {
+  if (key->ToArrayIndex(&index)) {
     // In Firefox/SpiderMonkey, Safari and Opera you can access the
     // characters of a string using [] notation.  In the case of a
     // String object we just need to redirect the deletion to the
@@ -4567,8 +4578,7 @@
       return isolate->heap()->true_value();
     }
 
-    return JSObject::cast(*receiver)->DeleteElement(
-        index, JSReceiver::FORCE_DELETION);
+    return receiver->DeleteElement(index, JSReceiver::FORCE_DELETION);
   }
 
   Handle<String> key_string;
@@ -4603,10 +4613,8 @@
 
   StrictModeFlag strict_mode = kNonStrictMode;
   if (args.length() == 5) {
-    CONVERT_SMI_ARG_CHECKED(strict_unchecked, 4);
-    RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
-                   strict_unchecked == kNonStrictMode);
-    strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
+    CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_flag, 4);
+    strict_mode = strict_mode_flag;
   }
 
   return Runtime::SetObjectProperty(isolate,
@@ -4618,6 +4626,22 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
+  NoHandleAllocation ha;
+  RUNTIME_ASSERT(args.length() == 1);
+  Handle<Object> object = args.at<Object>(0);
+  return TransitionElements(object, FAST_DOUBLE_ELEMENTS, isolate);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
+  NoHandleAllocation ha;
+  RUNTIME_ASSERT(args.length() == 1);
+  Handle<Object> object = args.at<Object>(0);
+  return TransitionElements(object, FAST_ELEMENTS, isolate);
+}
+
+
 // Set the native flag on the function.
 // This is used to decide if we should transform null and undefined
 // into the global object when doing call and apply.
@@ -4635,18 +4659,57 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
+  RUNTIME_ASSERT(args.length() == 5);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+  CONVERT_SMI_ARG_CHECKED(store_index, 1);
+  Handle<Object> value = args.at<Object>(2);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 3);
+  CONVERT_SMI_ARG_CHECKED(literal_index, 4);
+  HandleScope scope;
+
+  Object* raw_boilerplate_object = literals->get(literal_index);
+  Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
+#if DEBUG
+  ElementsKind elements_kind = object->GetElementsKind();
+#endif
+  ASSERT(elements_kind <= FAST_DOUBLE_ELEMENTS);
+  // Smis should never trigger transitions.
+  ASSERT(!value->IsSmi());
+
+  if (value->IsNumber()) {
+    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
+    JSObject::TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
+    JSObject::TransitionElementsKind(boilerplate_object, FAST_DOUBLE_ELEMENTS);
+    ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+    FixedDoubleArray* double_array =
+        FixedDoubleArray::cast(object->elements());
+    HeapNumber* number = HeapNumber::cast(*value);
+    double_array->set(store_index, number->Number());
+  } else {
+    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           elements_kind == FAST_DOUBLE_ELEMENTS);
+    JSObject::TransitionElementsKind(object, FAST_ELEMENTS);
+    JSObject::TransitionElementsKind(boilerplate_object, FAST_ELEMENTS);
+    FixedArray* object_array =
+        FixedArray::cast(object->elements());
+    object_array->set(store_index, *value);
+  }
+  return *object;
+}
+
+
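Runtime_StoreArrayLiteralElement above, like the two TransitionElements runtimes added a few hunks earlier, applies a one-way upgrade lattice for elements kinds: smi-only elements may become doubles, and anything may become plain object elements, never the reverse. A hedged sketch of the rule (enum and function names are illustrative):

    // Illustrative only: the one-way elements-kind upgrade applied above.
    enum ElementsKind { FAST_SMI_ONLY, FAST_DOUBLE, FAST_OBJECT };

    // Kind required after storing a value: smis fit everywhere, other
    // numbers need at least unboxed doubles, and any other heap object
    // forces fully boxed object elements.
    ElementsKind KindAfterStore(ElementsKind kind, bool is_smi,
                                bool is_number) {
      if (is_smi) return kind;
      if (is_number) return kind == FAST_SMI_ONLY ? FAST_DOUBLE : kind;
      return FAST_OBJECT;
    }

Because the lattice is one-way, a single store of a heap object permanently commits the array to boxed elements, which is why the runtime above transitions the boilerplate alongside the object being populated.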
 // Set a local property, even if it is READ_ONLY.  If the property does not
 // exist, it will be added with attributes NONE.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
   NoHandleAllocation ha;
   RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
-  CONVERT_CHECKED(JSObject, object, args[0]);
-  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
   // Compute attributes.
   PropertyAttributes attributes = NONE;
   if (args.length() == 4) {
-    CONVERT_CHECKED(Smi, value_obj, args[3]);
-    int unchecked_value = value_obj->value();
+    CONVERT_SMI_ARG_CHECKED(unchecked_value, 3);
     // Only attribute bits should be set.
     RUNTIME_ASSERT(
         (unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
@@ -4662,10 +4725,10 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
-  CONVERT_CHECKED(JSReceiver, object, args[0]);
-  CONVERT_CHECKED(String, key, args[1]);
-  CONVERT_SMI_ARG_CHECKED(strict, 2);
-  return object->DeleteProperty(key, (strict == kStrictMode)
+  CONVERT_ARG_CHECKED(JSReceiver, object, 0);
+  CONVERT_ARG_CHECKED(String, key, 1);
+  CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2);
+  return object->DeleteProperty(key, (strict_mode == kStrictMode)
                                       ? JSReceiver::STRICT_DELETION
                                       : JSReceiver::NORMAL_DELETION);
 }
@@ -4692,7 +4755,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(String, key, args[1]);
+  CONVERT_ARG_CHECKED(String, key, 1);
 
   uint32_t index;
   const bool key_is_array_index = key->AsArrayIndex(&index);
@@ -4730,29 +4793,24 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
+  CONVERT_ARG_CHECKED(String, key, 1);
 
-  // Only JS receivers can have properties.
-  if (args[0]->IsJSReceiver()) {
-    JSReceiver* receiver = JSReceiver::cast(args[0]);
-    CONVERT_CHECKED(String, key, args[1]);
-    if (receiver->HasProperty(key)) return isolate->heap()->true_value();
-  }
-  return isolate->heap()->false_value();
+  bool result = receiver->HasProperty(key);
+  if (isolate->has_pending_exception()) return Failure::Exception();
+  return isolate->heap()->ToBoolean(result);
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
+  CONVERT_SMI_ARG_CHECKED(index, 1);
 
-  // Only JS objects can have elements.
-  if (args[0]->IsJSObject()) {
-    JSObject* object = JSObject::cast(args[0]);
-    CONVERT_CHECKED(Smi, index_obj, args[1]);
-    uint32_t index = index_obj->value();
-    if (object->HasElement(index)) return isolate->heap()->true_value();
-  }
-  return isolate->heap()->false_value();
+  bool result = receiver->HasElement(index);
+  if (isolate->has_pending_exception()) return Failure::Exception();
+  return isolate->heap()->ToBoolean(result);
 }
 
 
@@ -4760,12 +4818,42 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_CHECKED(JSObject, object, args[0]);
-  CONVERT_CHECKED(String, key, args[1]);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_CHECKED(String, key, 1);
 
   uint32_t index;
   if (key->AsArrayIndex(&index)) {
-    return isolate->heap()->ToBoolean(object->HasElement(index));
+    JSObject::LocalElementType type = object->HasLocalElement(index);
+    switch (type) {
+      case JSObject::UNDEFINED_ELEMENT:
+      case JSObject::STRING_CHARACTER_ELEMENT:
+        return isolate->heap()->false_value();
+      case JSObject::INTERCEPTED_ELEMENT:
+      case JSObject::FAST_ELEMENT:
+        return isolate->heap()->true_value();
+      case JSObject::DICTIONARY_ELEMENT: {
+        if (object->IsJSGlobalProxy()) {
+          Object* proto = object->GetPrototype();
+          if (proto->IsNull()) {
+            return isolate->heap()->false_value();
+          }
+          ASSERT(proto->IsJSGlobalObject());
+          object = JSObject::cast(proto);
+        }
+        FixedArray* elements = FixedArray::cast(object->elements());
+        SeededNumberDictionary* dictionary = NULL;
+        if (elements->map() ==
+            isolate->heap()->non_strict_arguments_elements_map()) {
+          dictionary = SeededNumberDictionary::cast(elements->get(1));
+        } else {
+          dictionary = SeededNumberDictionary::cast(elements);
+        }
+        int entry = dictionary->FindEntry(index);
+        ASSERT(entry != SeededNumberDictionary::kNotFound);
+        PropertyDetails details = dictionary->DetailsAt(entry);
+        return isolate->heap()->ToBoolean(!details.IsDontEnum());
+      }
+    }
   }
 
   PropertyAttributes att = object->GetLocalPropertyAttribute(key);
@@ -4776,8 +4864,11 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
-  return *GetKeysFor(object);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+  bool threw = false;
+  Handle<JSArray> result = GetKeysFor(object, &threw);
+  if (threw) return Failure::Exception();
+  return *result;
 }
 
 
@@ -4789,14 +4880,16 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSObject, raw_object, args[0]);
+  CONVERT_ARG_CHECKED(JSReceiver, raw_object, 0);
 
   if (raw_object->IsSimpleEnum()) return raw_object->map();
 
   HandleScope scope(isolate);
-  Handle<JSObject> object(raw_object);
-  Handle<FixedArray> content = GetKeysInFixedArrayFor(object,
-                                                      INCLUDE_PROTOS);
+  Handle<JSReceiver> object(raw_object);
+  bool threw = false;
+  Handle<FixedArray> content =
+      GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, &threw);
+  if (threw) return Failure::Exception();
 
   // Test again, since cache may have been built by preceding call.
   if (object->IsSimpleEnum()) return object->map();
@@ -4828,7 +4921,7 @@
   if (!args[0]->IsJSObject()) {
     return isolate->heap()->undefined_value();
   }
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
 
   // Skip the global proxy as it has no properties and always delegates to the
   // real global object.
@@ -4861,7 +4954,7 @@
       return *isolate->factory()->NewJSArray(0);
     }
     int n;
-    n = jsproto->NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE));
+    n = jsproto->NumberOfLocalProperties();
     local_property_count[i] = n;
     total_property_count += n;
     if (i < length - 1) {
@@ -4915,7 +5008,7 @@
   if (!args[0]->IsJSObject()) {
     return isolate->heap()->undefined_value();
   }
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
 
   int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
   Handle<FixedArray> names = isolate->factory()->NewFixedArray(n);
@@ -4932,7 +5025,7 @@
   if (!args[0]->IsJSObject()) {
     return Smi::FromInt(0);
   }
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
 
   int result = 0;
   if (obj->HasNamedInterceptor()) result |= 2;
@@ -4947,7 +5040,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetNamedInterceptorPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
 
   if (obj->HasNamedInterceptor()) {
     v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
@@ -4962,7 +5055,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
 
   if (obj->HasIndexedInterceptor()) {
     v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
@@ -4974,7 +5067,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
   ASSERT_EQ(args.length(), 1);
-  CONVERT_CHECKED(JSObject, raw_object, args[0]);
+  CONVERT_ARG_CHECKED(JSObject, raw_object, 0);
   HandleScope scope(isolate);
   Handle<JSObject> object(raw_object);
 
@@ -4993,8 +5086,11 @@
     object = Handle<JSObject>::cast(proto);
   }
 
-  Handle<FixedArray> contents = GetKeysInFixedArrayFor(object,
-                                                       LOCAL_ONLY);
+  bool threw = false;
+  Handle<FixedArray> contents =
+      GetKeysInFixedArrayFor(object, LOCAL_ONLY, &threw);
+  if (threw) return Failure::Exception();
+
   // Some fast paths through GetKeysInFixedArrayFor reuse a cached
   // property array and since the result is mutable we have to create
   // a fresh clone on each invocation.
@@ -5058,7 +5154,7 @@
   if (key->Equals(isolate->heap()->callee_symbol())) {
     Object* function = frame->function();
     if (function->IsJSFunction() &&
-        JSFunction::cast(function)->shared()->strict_mode()) {
+        !JSFunction::cast(function)->shared()->is_classic_mode()) {
       return isolate->Throw(*isolate->factory()->NewTypeError(
           "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
     }
@@ -5071,31 +5167,20 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
-  HandleScope scope(isolate);
-
   ASSERT(args.length() == 1);
-  Handle<Object> object = args.at<Object>(0);
-  if (object->IsJSObject()) {
-    Handle<JSObject> js_object = Handle<JSObject>::cast(object);
-    if (!js_object->HasFastProperties() && !js_object->IsGlobalObject()) {
-      MaybeObject* ok = js_object->TransformToFastProperties(0);
-      if (ok->IsRetryAfterGC()) return ok;
-    }
-  }
-  return *object;
+  Object* object = args[0];
+  return (object->IsJSObject() && !object->IsGlobalObject())
+      ? JSObject::cast(object)->TransformToFastProperties(0)
+      : object;
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_ToSlowProperties) {
-  HandleScope scope(isolate);
-
   ASSERT(args.length() == 1);
-  Handle<Object> object = args.at<Object>(0);
-  if (object->IsJSObject() && !object->IsJSGlobalProxy()) {
-    Handle<JSObject> js_object = Handle<JSObject>::cast(object);
-    NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
-  }
-  return *object;
+  Object* obj = args[0];
+  return (obj->IsJSObject() && !obj->IsJSGlobalProxy())
+      ? JSObject::cast(obj)->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0)
+      : obj;
 }
 
 
@@ -5174,7 +5259,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(String, subject, args[0]);
+  CONVERT_ARG_CHECKED(String, subject, 0);
   subject->TryFlatten();
 
   // Fast case: short integer or some sorts of junk values.
@@ -5230,7 +5315,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSArray, codes, args[0]);
+  CONVERT_ARG_CHECKED(JSArray, codes, 0);
   int length = Smi::cast(codes->length())->value();
 
   // Check if the string can be ASCII.
@@ -5310,7 +5395,7 @@
   const char hex_chars[] = "0123456789ABCDEF";
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(String, source, args[0]);
+  CONVERT_ARG_CHECKED(String, source, 0);
 
   source->TryFlatten();
 
@@ -5428,7 +5513,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(String, source, args[0]);
+  CONVERT_ARG_CHECKED(String, source, 0);
 
   source->TryFlatten();
 
@@ -5579,7 +5664,7 @@
   StringType* new_string = StringType::cast(new_object);
 
   Char* write_cursor = reinterpret_cast<Char*>(
-      new_string->address() + SeqAsciiString::kHeaderSize);
+      new_string->address() + SeqString::kHeaderSize);
   if (comma) *(write_cursor++) = ',';
   *(write_cursor++) = '"';
 
@@ -5667,16 +5752,15 @@
   StringType* new_string = StringType::cast(new_object);
   ASSERT(isolate->heap()->new_space()->Contains(new_string));
 
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
   Char* write_cursor = reinterpret_cast<Char*>(
-      new_string->address() + SeqAsciiString::kHeaderSize);
+      new_string->address() + SeqString::kHeaderSize);
   if (comma) *(write_cursor++) = ',';
   write_cursor = WriteQuoteJsonString<Char, Char>(isolate,
                                                   write_cursor,
                                                   characters);
   int final_length = static_cast<int>(
       write_cursor - reinterpret_cast<Char*>(
-          new_string->address() + SeqAsciiString::kHeaderSize));
+          new_string->address() + SeqString::kHeaderSize));
   isolate->heap()->new_space()->
       template ShrinkStringAtAllocationBoundary<StringType>(
           new_string, final_length);
@@ -5686,7 +5770,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
   NoHandleAllocation ha;
-  CONVERT_CHECKED(String, str, args[0]);
+  CONVERT_ARG_CHECKED(String, str, 0);
   if (!str->IsFlat()) {
     MaybeObject* try_flatten = str->TryFlatten();
     Object* flat;
@@ -5710,7 +5794,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) {
   NoHandleAllocation ha;
-  CONVERT_CHECKED(String, str, args[0]);
+  CONVERT_ARG_CHECKED(String, str, 0);
   if (!str->IsFlat()) {
     MaybeObject* try_flatten = str->TryFlatten();
     Object* flat;
@@ -5754,9 +5838,8 @@
   StringType* new_string = StringType::cast(new_object);
   ASSERT(isolate->heap()->new_space()->Contains(new_string));
 
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
   Char* write_cursor = reinterpret_cast<Char*>(
-      new_string->address() + SeqAsciiString::kHeaderSize);
+      new_string->address() + SeqString::kHeaderSize);
   *(write_cursor++) = '[';
   for (int i = 0; i < length; i++) {
     if (i != 0) *(write_cursor++) = ',';
@@ -5777,7 +5860,7 @@
 
   int final_length = static_cast<int>(
       write_cursor - reinterpret_cast<Char*>(
-          new_string->address() + SeqAsciiString::kHeaderSize));
+          new_string->address() + SeqString::kHeaderSize));
   isolate->heap()->new_space()->
       template ShrinkStringAtAllocationBoundary<StringType>(
           new_string, final_length);
@@ -5788,7 +5871,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSArray, array, args[0]);
+  CONVERT_ARG_CHECKED(JSArray, array, 0);
 
   if (!array->HasFastElements()) return isolate->heap()->undefined_value();
   FixedArray* elements = FixedArray::cast(array->elements());
@@ -5830,7 +5913,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
   NoHandleAllocation ha;
 
-  CONVERT_CHECKED(String, s, args[0]);
+  CONVERT_ARG_CHECKED(String, s, 0);
   CONVERT_SMI_ARG_CHECKED(radix, 1);
 
   s->TryFlatten();
@@ -5843,7 +5926,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
   NoHandleAllocation ha;
-  CONVERT_CHECKED(String, str, args[0]);
+  CONVERT_ARG_CHECKED(String, str, 0);
 
   // ECMA-262 section 15.1.2.3, empty string is NaN
   double value = StringToDouble(isolate->unicode_cache(),
@@ -5868,8 +5951,8 @@
   //
   // Allocate the resulting string.
   //
-  // NOTE: This assumes that the upper/lower case of an ascii
-  // character is also ascii.  This is currently the case, but it
+  // NOTE: This assumes that the upper/lower case of an ASCII
+  // character is also ASCII.  This is currently the case, but it
   // might break in the future if we implement more context and locale
   // dependent upper/lower conversions.
   Object* o;
@@ -5969,9 +6052,9 @@
 // This function is only useful when it can be inlined and the
 // boundaries are statically known.
 // Requires: all bytes in the input word and the boundaries must be
-// ascii (less than 0x7F).
+// ASCII (less than 0x7F).
 static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
-  // Every byte in an ascii string is less than or equal to 0x7F.
+  // Every byte in an ASCII string is less than or equal to 0x7F.
   ASSERT((w & (kOneInEveryByte * 0x7F)) == w);
   // Use strict inequalities since in edge cases the function could be
   // further simplified.
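The function this header comment introduces tests a whole word of ASCII bytes against a character range without branching. A standalone rendering of the underlying SWAR trick with 64-bit constants (the V8 helper is the same idea over uintptr_t):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static const uint64_t kOneInEveryByte = UINT64_C(0x0101010101010101);

    // Sets 0x80 in every byte of w that is strictly between m and n.
    // Preconditions: every byte of w is at most 0x7F and 0 < m < n < 0x7F,
    // so neither the addition nor the subtraction carries between bytes.
    static inline uint64_t AsciiRangeMask(uint64_t w, char m, char n) {
      assert((w & (kOneInEveryByte * 0x7F)) == w);
      assert(0 < m && m < n && n < 0x7F);
      uint64_t lt_n = kOneInEveryByte * (0x7F + n) - w;  // high bit: byte < n
      uint64_t gt_m = w + kOneInEveryByte * (0x7F - m);  // high bit: byte > m
      return lt_n & gt_m & (kOneInEveryByte * 0x80);
    }

    int main() {
      uint64_t w = UINT64_C(0x0041006100420062);  // bytes 'A', 'a', 'B', 'b'
      // Mark lowercase letters: bytes strictly between 'a' - 1 and 'z' + 1.
      printf("%016llx\n", static_cast<unsigned long long>(
                              AsciiRangeMask(w, 'a' - 1, 'z' + 1)));
      // Prints 0000008000000080: only the 0x61 and 0x62 bytes are flagged.
    }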
@@ -6092,17 +6175,17 @@
     Isolate* isolate,
     unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
   NoHandleAllocation ha;
-  CONVERT_CHECKED(String, s, args[0]);
+  CONVERT_ARG_CHECKED(String, s, 0);
   s = s->TryFlattenGetString();
 
   const int length = s->length();
   // Assume that the string is not empty; we need this assumption later
   if (length == 0) return s;
 
-  // Simpler handling of ascii strings.
+  // Simpler handling of ASCII strings.
   //
-  // NOTE: This assumes that the upper/lower case of an ascii
-  // character is also ascii.  This is currently the case, but it
+  // NOTE: This assumes that the upper/lower case of an ASCII
+  // character is also ASCII.  This is currently the case, but it
   // might break in the future if we implement more context and locale
   // dependent upper/lower conversions.
   if (s->IsSeqAsciiString()) {
@@ -6146,7 +6229,7 @@
 
 
 static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
-  return unibrow::WhiteSpace::Is(c) || c == 0x200b;
+  return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff;
 }
 
 
@@ -6154,9 +6237,9 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
-  CONVERT_CHECKED(String, s, args[0]);
-  CONVERT_BOOLEAN_CHECKED(trimLeft, args[1]);
-  CONVERT_BOOLEAN_CHECKED(trimRight, args[2]);
+  CONVERT_ARG_CHECKED(String, s, 0);
+  CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1);
+  CONVERT_BOOLEAN_ARG_CHECKED(trimRight, 2);
 
   s->TryFlatten();
   int length = s->length();
@@ -6181,8 +6264,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
   ASSERT(args.length() == 3);
   HandleScope handle_scope(isolate);
-  CONVERT_ARG_CHECKED(String, subject, 0);
-  CONVERT_ARG_CHECKED(String, pattern, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
 
   int subject_length = subject->length();
@@ -6229,6 +6312,8 @@
   int part_count = indices.length();
 
   Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
+  MaybeObject* maybe_result = result->EnsureCanContainHeapObjectElements();
+  if (maybe_result->IsFailure()) return maybe_result;
   result->set_length(Smi::FromInt(part_count));
 
   ASSERT(result->HasFastElements());
@@ -6263,7 +6348,7 @@
 }
 
 
-// Copies ascii characters to the given fixed array looking up
+// Copies ASCII characters to the given fixed array looking up
 // one-char strings in the cache. Gives up on the first char that is
 // not in the cache and fills the remainder with smi zeros. Returns
 // the length of the successfully copied prefix.
@@ -6275,11 +6360,11 @@
   FixedArray* ascii_cache = heap->single_character_string_cache();
   Object* undefined = heap->undefined_value();
   int i;
+  WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
   for (i = 0; i < length; ++i) {
     Object* value = ascii_cache->get(chars[i]);
     if (value == undefined) break;
-    ASSERT(!heap->InNewSpace(value));
-    elements->set(i, value, SKIP_WRITE_BARRIER);
+    elements->set(i, value, mode);
   }
   if (i < length) {
     ASSERT(Smi::FromInt(0) == 0);
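As the header comment above specifies, the copy gives up at the first character missing from the one-char string cache and zero-fills the remainder. A standalone sketch of that contract, with plain containers standing in for FixedArray and the heap cache:

    #include <string>
    #include <vector>

    // Copies cache[chars[i]] into *elements for the longest prefix of chars
    // that hits the cache; the rest is filled with nullptr (the runtime
    // uses smi zero). Returns the length of the copied prefix.
    int CopyCachedCharsToArray(const std::string* const cache[256],
                               const char* chars, int length,
                               std::vector<const std::string*>* elements) {
      int i = 0;
      while (i < length) {
        const std::string* value =
            cache[static_cast<unsigned char>(chars[i])];
        if (value == nullptr) break;  // first miss: give up, like the original
        (*elements)[i++] = value;
      }
      for (int j = i; j < length; ++j) (*elements)[j] = nullptr;
      return i;
    }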
@@ -6301,7 +6386,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(String, s, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
 
   s = FlattenGetString(s);
@@ -6352,7 +6437,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(String, value, args[0]);
+  CONVERT_ARG_CHECKED(String, value, 0);
   return value->ToObject();
 }
 
@@ -6543,8 +6628,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(String, str1, args[0]);
-  CONVERT_CHECKED(String, str2, args[1]);
+  CONVERT_ARG_CHECKED(String, str1, 0);
+  CONVERT_ARG_CHECKED(String, str2, 1);
   isolate->counters()->string_add_runtime()->Increment();
   return isolate->heap()->AllocateConsString(str1, str2);
 }
@@ -6592,17 +6677,20 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
-  CONVERT_CHECKED(JSArray, array, args[0]);
+  CONVERT_ARG_CHECKED(JSArray, array, 0);
   if (!args[1]->IsSmi()) {
     isolate->context()->mark_out_of_memory();
     return Failure::OutOfMemoryException();
   }
   int array_length = args.smi_at(1);
-  CONVERT_CHECKED(String, special, args[2]);
+  CONVERT_ARG_CHECKED(String, special, 2);
 
   // This assumption is used by the slice encoding in one or two smis.
   ASSERT(Smi::kMaxValue >= String::kMaxLength);
 
+  MaybeObject* maybe_result = array->EnsureCanContainHeapObjectElements();
+  if (maybe_result->IsFailure()) return maybe_result;
+
   int special_length = special->length();
   if (!array->HasFastElements()) {
     return isolate->Throw(isolate->heap()->illegal_argument_symbol());
@@ -6664,6 +6752,7 @@
         ascii = false;
       }
     } else {
+      ASSERT(!elt->IsTheHole());
       return isolate->Throw(isolate->heap()->illegal_argument_symbol());
     }
     if (increment > String::kMaxLength - position) {
@@ -6705,13 +6794,13 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
-  CONVERT_CHECKED(JSArray, array, args[0]);
+  CONVERT_ARG_CHECKED(JSArray, array, 0);
   if (!args[1]->IsSmi()) {
     isolate->context()->mark_out_of_memory();
     return Failure::OutOfMemoryException();
   }
   int array_length = args.smi_at(1);
-  CONVERT_CHECKED(String, separator, args[2]);
+  CONVERT_ARG_CHECKED(String, separator, 2);
 
   if (!array->HasFastElements()) {
     return isolate->Throw(isolate->heap()->illegal_argument_symbol());
@@ -6829,10 +6918,11 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
-  CONVERT_CHECKED(JSArray, elements_array, args[0]);
-  RUNTIME_ASSERT(elements_array->HasFastElements());
+  CONVERT_ARG_CHECKED(JSArray, elements_array, 0);
+  RUNTIME_ASSERT(elements_array->HasFastElements() ||
+                 elements_array->HasFastSmiOnlyElements());
   CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
-  CONVERT_CHECKED(String, separator, args[2]);
+  CONVERT_ARG_CHECKED(String, separator, 2);
   // elements_array is a fast-mode JSArray of alternating positions
   // (in increasing order) and strings.
   // array_length is the length of the original array (used to add
   // separators);
@@ -6854,7 +6944,8 @@
   FixedArray* elements = FixedArray::cast(elements_array->elements());
   for (int i = 0; i < elements_length; i += 2) {
     RUNTIME_ASSERT(elements->get(i)->IsNumber());
-    CONVERT_CHECKED(String, string, elements->get(i + 1));
+    RUNTIME_ASSERT(elements->get(i + 1)->IsString());
+    String* string = String::cast(elements->get(i + 1));
     int length = string->length();
     if (is_ascii && !string->IsAsciiRepresentation()) {
       is_ascii = false;
@@ -7012,8 +7103,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_CHECKED(String, x, args[0]);
-  CONVERT_CHECKED(String, y, args[1]);
+  CONVERT_ARG_CHECKED(String, x, 0);
+  CONVERT_ARG_CHECKED(String, y, 1);
 
   bool not_equal = !x->Equals(y);
   // This is slightly convoluted because the value that signifies
@@ -7044,12 +7135,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-
-  // Extract the integer values from the Smis.
-  CONVERT_CHECKED(Smi, x, args[0]);
-  CONVERT_CHECKED(Smi, y, args[1]);
-  int x_value = x->value();
-  int y_value = y->value();
+  CONVERT_SMI_ARG_CHECKED(x_value, 0);
+  CONVERT_SMI_ARG_CHECKED(y_value, 1);
 
   // If the integers are equal so are the string representations.
   if (x_value == y_value) return Smi::FromInt(EQUAL);
@@ -7189,8 +7276,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_CHECKED(String, x, args[0]);
-  CONVERT_CHECKED(String, y, args[1]);
+  CONVERT_ARG_CHECKED(String, x, 0);
+  CONVERT_ARG_CHECKED(String, y, 1);
 
   isolate->counters()->string_compare_runtime()->Increment();
 
@@ -7325,7 +7412,8 @@
   return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
 }
 
-
+// Slow version of Math.pow.  We dispatch to fast paths for special cases.
+// Used if SSE2/VFP3 is not available.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
@@ -7341,22 +7429,38 @@
   }
 
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
-  return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
+  int y_int = static_cast<int>(y);
+  double result;
+  if (y == y_int) {
+    result = power_double_int(x, y_int);  // Returns 1 if exponent is 0.
+  } else if (y == 0.5) {
+    result = (isinf(x)) ? V8_INFINITY
+                        : fast_sqrt(x + 0.0);  // Convert -0 to +0.
+  } else if (y == -0.5) {
+    result = (isinf(x)) ? 0
+                        : 1.0 / fast_sqrt(x + 0.0);  // Convert -0 to +0.
+  } else {
+    result = power_double_double(x, y);
+  }
+  if (isnan(result)) return isolate->heap()->nan_value();
+  return isolate->heap()->AllocateHeapNumber(result);
 }
 
-// Fast version of Math.pow if we know that y is not an integer and
-// y is not -0.5 or 0.5. Used as slowcase from codegen.
+// Fast version of Math.pow if we know that y is not an integer and y is not
+// -0.5 or 0.5.  Used as slow case from full codegen.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
+  isolate->counters()->math_pow()->Increment();
+
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   if (y == 0) {
     return Smi::FromInt(1);
-  } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
-    return isolate->heap()->nan_value();
   } else {
-    return isolate->heap()->AllocateHeapNumber(pow(x, y));
+    double result = power_double_double(x, y);
+    if (isnan(result)) return isolate->heap()->nan_value();
+    return isolate->heap()->AllocateHeapNumber(result);
   }
 }
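Both Math.pow runtimes above funnel through the same special-case analysis: integral exponents, the square-root cases +/-0.5 with their IEEE corner cases, and a generic fallback. A standalone sketch, with std::pow and std::sqrt standing in for power_double_int, power_double_double and fast_sqrt:

    #include <cmath>
    #include <cstdio>

    // Mirrors the dispatch above: integral exponents take the integer
    // path, +/-0.5 use sqrt with two IEEE fixes, everything else falls
    // through to the generic routine.
    double PowSketch(double x, double y) {
      int y_int = static_cast<int>(y);  // as above; assumes y fits an int
      if (y == y_int) return std::pow(x, y_int);
      if (y == 0.5) {
        // sqrt(-0) is -0 and sqrt(-inf) is NaN, but pow wants +0 and +inf:
        // adding 0.0 turns -0 into +0; infinities are handled explicitly.
        return std::isinf(x) ? INFINITY : std::sqrt(x + 0.0);
      }
      if (y == -0.5) {
        return std::isinf(x) ? 0.0 : 1.0 / std::sqrt(x + 0.0);
      }
      return std::pow(x, y);
    }

    int main() {
      printf("%g %g %g\n",
             PowSketch(-0.0, 0.5),       // 0, not -0
             PowSketch(-INFINITY, 0.5),  // inf, not nan
             PowSketch(4.0, -0.5));      // 0.5
    }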
 
@@ -7386,7 +7490,7 @@
 
   // We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
   // should be rounded to 2^30, which is not smi (for 31-bit smis, similar
-  // agument holds for 32-bit smis).
+  // argument holds for 32-bit smis).
   if (!sign && exponent < kSmiValueSize - 2) {
     return Smi::FromInt(static_cast<int>(value + 0.5));
   }
@@ -7420,7 +7524,7 @@
   isolate->counters()->math_sqrt()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  return isolate->heap()->AllocateHeapNumber(sqrt(x));
+  return isolate->heap()->AllocateHeapNumber(fast_sqrt(x));
 }
 
 
@@ -7434,370 +7538,51 @@
 }
 
 
-static int MakeDay(int year, int month, int day) {
-  static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
-                                       181, 212, 243, 273, 304, 334};
-  static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
-                                            182, 213, 244, 274, 305, 335};
-
-  year += month / 12;
-  month %= 12;
-  if (month < 0) {
-    year--;
-    month += 12;
-  }
-
-  ASSERT(month >= 0);
-  ASSERT(month < 12);
-
-  // year_delta is an arbitrary number such that:
-  // a) year_delta = -1 (mod 400)
-  // b) year + year_delta > 0 for years in the range defined by
-  //    ECMA 262 - 15.9.1.1, i.e. upto 100,000,000 days on either side of
-  //    Jan 1 1970. This is required so that we don't run into integer
-  //    division of negative numbers.
-  // c) there shouldn't be an overflow for 32-bit integers in the following
-  //    operations.
-  static const int year_delta = 399999;
-  static const int base_day = 365 * (1970 + year_delta) +
-                              (1970 + year_delta) / 4 -
-                              (1970 + year_delta) / 100 +
-                              (1970 + year_delta) / 400;
-
-  int year1 = year + year_delta;
-  int day_from_year = 365 * year1 +
-                      year1 / 4 -
-                      year1 / 100 +
-                      year1 / 400 -
-                      base_day;
-
-  if (year % 4 || (year % 100 == 0 && year % 400 != 0)) {
-    return day_from_year + day_from_month[month] + day - 1;
-  }
-
-  return day_from_year + day_from_month_leap[month] + day - 1;
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
   NoHandleAllocation ha;
-  ASSERT(args.length() == 3);
-
-  CONVERT_SMI_ARG_CHECKED(year, 0);
-  CONVERT_SMI_ARG_CHECKED(month, 1);
-  CONVERT_SMI_ARG_CHECKED(date, 2);
-
-  return Smi::FromInt(MakeDay(year, month, date));
-}
-
-
-static const int kDays4Years[] = {0, 365, 2 * 365, 3 * 365 + 1};
-static const int kDaysIn4Years = 4 * 365 + 1;
-static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
-static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
-static const int kDays1970to2000 = 30 * 365 + 7;
-static const int kDaysOffset = 1000 * kDaysIn400Years + 5 * kDaysIn400Years -
-                               kDays1970to2000;
-static const int kYearsOffset = 400000;
-
-static const char kDayInYear[] = {
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30,
-      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
-      22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
-
-static const char kMonthInYear[] = {
-      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-      0, 0, 0, 0, 0, 0,
-      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-      1, 1, 1,
-      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-      2, 2, 2, 2, 2, 2,
-      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
-      3, 3, 3, 3, 3,
-      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
-      4, 4, 4, 4, 4, 4,
-      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-      5, 5, 5, 5, 5,
-      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
-      6, 6, 6, 6, 6, 6,
-      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-      7, 7, 7, 7, 7, 7,
-      8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-      8, 8, 8, 8, 8,
-      9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-      9, 9, 9, 9, 9, 9,
-      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
-      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
-      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-
-      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-      0, 0, 0, 0, 0, 0,
-      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-      1, 1, 1,
-      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-      2, 2, 2, 2, 2, 2,
-      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
-      3, 3, 3, 3, 3,
-      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
-      4, 4, 4, 4, 4, 4,
-      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-      5, 5, 5, 5, 5,
-      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
-      6, 6, 6, 6, 6, 6,
-      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-      7, 7, 7, 7, 7, 7,
-      8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-      8, 8, 8, 8, 8,
-      9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-      9, 9, 9, 9, 9, 9,
-      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
-      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
-      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-
-      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-      0, 0, 0, 0, 0, 0,
-      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-      1, 1, 1, 1,
-      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-      2, 2, 2, 2, 2, 2,
-      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
-      3, 3, 3, 3, 3,
-      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
-      4, 4, 4, 4, 4, 4,
-      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-      5, 5, 5, 5, 5,
-      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
-      6, 6, 6, 6, 6, 6,
-      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-      7, 7, 7, 7, 7, 7,
-      8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-      8, 8, 8, 8, 8,
-      9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-      9, 9, 9, 9, 9, 9,
-      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
-      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
-      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-
-      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-      0, 0, 0, 0, 0, 0,
-      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-      1, 1, 1,
-      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-      2, 2, 2, 2, 2, 2,
-      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
-      3, 3, 3, 3, 3,
-      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
-      4, 4, 4, 4, 4, 4,
-      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-      5, 5, 5, 5, 5,
-      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
-      6, 6, 6, 6, 6, 6,
-      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-      7, 7, 7, 7, 7, 7,
-      8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-      8, 8, 8, 8, 8,
-      9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-      9, 9, 9, 9, 9, 9,
-      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
-      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
-      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
-      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11};
-
-
-// This function works for dates from 1970 to 2099.
-static inline void DateYMDFromTimeAfter1970(int date,
-                                            int& year, int& month, int& day) {
-#ifdef DEBUG
-  int save_date = date;  // Need this for ASSERT in the end.
-#endif
-
-  year = 1970 + (4 * date + 2) / kDaysIn4Years;
-  date %= kDaysIn4Years;
-
-  month = kMonthInYear[date];
-  day = kDayInYear[date];
-
-  ASSERT(MakeDay(year, month, day) == save_date);
-}
-
-
-static inline void DateYMDFromTimeSlow(int date,
-                                       int& year, int& month, int& day) {
-#ifdef DEBUG
-  int save_date = date;  // Need this for ASSERT in the end.
-#endif
-
-  date += kDaysOffset;
-  year = 400 * (date / kDaysIn400Years) - kYearsOffset;
-  date %= kDaysIn400Years;
-
-  ASSERT(MakeDay(year, 0, 1) + date == save_date);
-
-  date--;
-  int yd1 = date / kDaysIn100Years;
-  date %= kDaysIn100Years;
-  year += 100 * yd1;
-
-  date++;
-  int yd2 = date / kDaysIn4Years;
-  date %= kDaysIn4Years;
-  year += 4 * yd2;
-
-  date--;
-  int yd3 = date / 365;
-  date %= 365;
-  year += yd3;
-
-  bool is_leap = (!yd1 || yd2) && !yd3;
-
-  ASSERT(date >= -1);
-  ASSERT(is_leap || (date >= 0));
-  ASSERT((date < 365) || (is_leap && (date < 366)));
-  ASSERT(is_leap == ((year % 4 == 0) && (year % 100 || (year % 400 == 0))));
-  ASSERT(is_leap || ((MakeDay(year, 0, 1) + date) == save_date));
-  ASSERT(!is_leap || ((MakeDay(year, 0, 1) + date + 1) == save_date));
-
-  if (is_leap) {
-    day = kDayInYear[2*365 + 1 + date];
-    month = kMonthInYear[2*365 + 1 + date];
-  } else {
-    day = kDayInYear[date];
-    month = kMonthInYear[date];
-  }
-
-  ASSERT(MakeDay(year, month, day) == save_date);
-}
-
-
-static inline void DateYMDFromTime(int date,
-                                   int& year, int& month, int& day) {
-  if (date >= 0 && date < 32 * kDaysIn4Years) {
-    DateYMDFromTimeAfter1970(date, year, month, day);
-  } else {
-    DateYMDFromTimeSlow(date, year, month, day);
-  }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateYMDFromTime) {
-  NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_DOUBLE_ARG_CHECKED(t, 0);
-  CONVERT_CHECKED(JSArray, res_array, args[1]);
+  CONVERT_SMI_ARG_CHECKED(year, 0);
+  CONVERT_SMI_ARG_CHECKED(month, 1);
 
-  int year, month, day;
-  DateYMDFromTime(static_cast<int>(floor(t / 86400000)), year, month, day);
+  return Smi::FromInt(isolate->date_cache()->DaysFromYearMonth(year, month));
+}
 
-  RUNTIME_ASSERT(res_array->elements()->map() ==
-                 isolate->heap()->fixed_array_map());
-  FixedArray* elms = FixedArray::cast(res_array->elements());
-  RUNTIME_ASSERT(elms->length() == 3);
 
-  elms->set(0, Smi::FromInt(year));
-  elms->set(1, Smi::FromInt(month));
-  elms->set(2, Smi::FromInt(day));
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateSetValue) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 3);
 
-  return isolate->heap()->undefined_value();
+  CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 0);
+  CONVERT_DOUBLE_ARG_CHECKED(time, 1);
+  CONVERT_SMI_ARG_CHECKED(is_utc, 2);
+
+  DateCache* date_cache = isolate->date_cache();
+
+  Object* value = NULL;
+  bool is_value_nan = false;
+  if (isnan(time)) {
+    value = isolate->heap()->nan_value();
+    is_value_nan = true;
+  } else if (!is_utc &&
+             (time < -DateCache::kMaxTimeBeforeUTCInMs ||
+              time > DateCache::kMaxTimeBeforeUTCInMs)) {
+    value = isolate->heap()->nan_value();
+    is_value_nan = true;
+  } else {
+    time = is_utc ? time : date_cache->ToUTC(static_cast<int64_t>(time));
+    if (time < -DateCache::kMaxTimeInMs ||
+        time > DateCache::kMaxTimeInMs) {
+      value = isolate->heap()->nan_value();
+      is_value_nan = true;
+    } else {
+      MaybeObject* maybe_result =
+          isolate->heap()->AllocateHeapNumber(DoubleToInteger(time));
+      if (!maybe_result->ToObject(&value)) return maybe_result;
+    }
+  }
+  date->SetValue(value, is_value_nan);
+  return *date;
 }
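
The table-driven MakeDay and the large kDayInYear/kMonthInYear arrays above move into the new DateCache (src/date.cc, added elsewhere in this merge), which Runtime_DateMakeDay now reaches through DaysFromYearMonth. For reference, an equivalent standalone day-number computation, using the well-known days_from_civil algorithm (Howard Hinnant) rather than V8's table-assisted one; both count days from the 1970-01-01 epoch:

#include <cstdio>

// Days since 1970-01-01 for a proleptic Gregorian date; m is 1-based here,
// unlike the 0-based month taken by Runtime_DateMakeDay.
static int DaysFromCivil(int y, int m, int d) {
  y -= m <= 2;  // treat Jan/Feb as months 13/14 of the previous year
  const int era = (y >= 0 ? y : y - 399) / 400;
  const unsigned yoe = static_cast<unsigned>(y - era * 400);        // [0, 399]
  const unsigned doy = (153u * (m + (m > 2 ? -3 : 9)) + 2) / 5 + d - 1;
  const unsigned doe = yoe * 365 + yoe / 4 - yoe / 100 + doy;  // [0, 146096]
  return era * 146097 + static_cast<int>(doe) - 719468;
}

int main() {
  std::printf("%d\n", DaysFromCivil(1970, 1, 1));  // 0 (the epoch)
  std::printf("%d\n", DaysFromCivil(2000, 3, 1));  // 11017
  return 0;
}
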
 
 
@@ -7846,14 +7631,14 @@
         --index;
       }
 
-      ScopeInfo<> scope_info(callee->shared()->scope_info());
+      Handle<ScopeInfo> scope_info(callee->shared()->scope_info());
       while (index >= 0) {
         // Detect duplicate names to the right in the parameter list.
-        Handle<String> name = scope_info.parameter_name(index);
-        int context_slot_count = scope_info.number_of_context_slots();
+        Handle<String> name(scope_info->ParameterName(index));
+        int context_local_count = scope_info->ContextLocalCount();
         bool duplicate = false;
         for (int j = index + 1; j < parameter_count; ++j) {
-          if (scope_info.parameter_name(j).is_identical_to(name)) {
+          if (scope_info->ParameterName(j) == *name) {
             duplicate = true;
             break;
           }
@@ -7868,17 +7653,16 @@
           // The context index goes in the parameter map with a hole in the
           // arguments array.
           int context_index = -1;
-          for (int j = Context::MIN_CONTEXT_SLOTS;
-               j < context_slot_count;
-               ++j) {
-            if (scope_info.context_slot_name(j).is_identical_to(name)) {
+          for (int j = 0; j < context_local_count; ++j) {
+            if (scope_info->ContextLocalName(j) == *name) {
               context_index = j;
               break;
             }
           }
           ASSERT(context_index >= 0);
           arguments->set_the_hole(index);
-          parameter_map->set(index + 2, Smi::FromInt(context_index));
+          parameter_map->set(index + 2, Smi::FromInt(
+              Context::MIN_CONTEXT_SLOTS + context_index));
         }
 
         --index;
@@ -7921,7 +7705,7 @@
 
     AssertNoAllocation no_gc;
     FixedArray* array = reinterpret_cast<FixedArray*>(obj);
-    array->set_map(isolate->heap()->fixed_array_map());
+    array->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
     array->set_length(length);
 
     WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@@ -7937,9 +7721,9 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(Context, context, 0);
-  CONVERT_ARG_CHECKED(SharedFunctionInfo, shared, 1);
-  CONVERT_BOOLEAN_CHECKED(pretenure, args[2]);
+  CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
+  CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 1);
+  CONVERT_BOOLEAN_ARG_CHECKED(pretenure, 2);
 
   // The caller ensures that we pretenure closures that are assigned
   // directly to properties.
@@ -7952,76 +7736,170 @@
 }
 
 
-static SmartArrayPointer<Object**> GetNonBoundArguments(int bound_argc,
-                                                        int* total_argc) {
+// Find the arguments of the JavaScript function invocation that called
+// into C++ code. Collect these in a newly allocated array of handles (possibly
+// prefixed by a number of empty handles).
+static SmartArrayPointer<Handle<Object> > GetCallerArguments(
+    int prefix_argc,
+    int* total_argc) {
   // Find frame containing arguments passed to the caller.
   JavaScriptFrameIterator it;
   JavaScriptFrame* frame = it.frame();
   List<JSFunction*> functions(2);
   frame->GetFunctions(&functions);
   if (functions.length() > 1) {
-    int inlined_frame_index = functions.length() - 1;
-    JSFunction* inlined_function = functions[inlined_frame_index];
-    int args_count = inlined_function->shared()->formal_parameter_count();
-    ScopedVector<SlotRef> args_slots(args_count);
-    SlotRef::ComputeSlotMappingForArguments(frame,
-                                            inlined_frame_index,
-                                            &args_slots);
+    int inlined_jsframe_index = functions.length() - 1;
+    JSFunction* inlined_function = functions[inlined_jsframe_index];
+    Vector<SlotRef> args_slots =
+        SlotRef::ComputeSlotMappingForArguments(
+            frame,
+            inlined_jsframe_index,
+            inlined_function->shared()->formal_parameter_count());
 
-    *total_argc = bound_argc + args_count;
-    SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+    int args_count = args_slots.length();
+
+    *total_argc = prefix_argc + args_count;
+    SmartArrayPointer<Handle<Object> > param_data(
+        NewArray<Handle<Object> >(*total_argc));
     for (int i = 0; i < args_count; i++) {
       Handle<Object> val = args_slots[i].GetValue();
-      param_data[bound_argc + i] = val.location();
+      param_data[prefix_argc + i] = val;
     }
+
+    args_slots.Dispose();
+
     return param_data;
   } else {
     it.AdvanceToArgumentsFrame();
     frame = it.frame();
     int args_count = frame->ComputeParametersCount();
 
-    *total_argc = bound_argc + args_count;
-    SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+    *total_argc = prefix_argc + args_count;
+    SmartArrayPointer<Handle<Object> > param_data(
+        NewArray<Handle<Object> >(*total_argc));
     for (int i = 0; i < args_count; i++) {
       Handle<Object> val = Handle<Object>(frame->GetParameter(i));
-      param_data[bound_argc + i] = val.location();
+      param_data[prefix_argc + i] = val;
     }
     return param_data;
   }
 }
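
The switch from raw Object** locations to Handle<Object> matters because the collected values may be used after further allocation: a handle adds a level of indirection that the garbage collector keeps up to date, whereas a raw location can be left dangling by a moving collection. A toy model of that indirection (hypothetical types; real handles live in HandleScope-managed blocks):

#include <cstdio>
#include <vector>

// Toy heap: a "handle" is an index into a table that the collector fixes
// up when an object moves, so dereferencing stays valid across a move.
struct ToyHeap {
  std::vector<int> objects;     // pretend objects are ints
  std::vector<size_t> handles;  // handle table: handle -> object slot

  size_t NewHandle(size_t slot) {
    handles.push_back(slot);
    return handles.size() - 1;
  }
  int Deref(size_t handle) { return objects[handles[handle]]; }
  void MoveObject(size_t from, size_t to) {  // one compacting-GC step
    objects[to] = objects[from];
    for (size_t& slot : handles) {
      if (slot == from) slot = to;           // collector updates the table
    }
  }
};

int main() {
  ToyHeap heap;
  heap.objects = {41, 42, 0};
  size_t h = heap.NewHandle(1);
  heap.MoveObject(1, 2);               // the object moves...
  std::printf("%d\n", heap.Deref(h));  // ...but the handle still sees 42
  return 0;
}
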
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 4);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, bound_function, 0);
+  RUNTIME_ASSERT(args[3]->IsNumber());
+  Handle<Object> bindee = args.at<Object>(1);
+
+  // TODO(lrn): Create bound function in C++ code from premade shared info.
+  bound_function->shared()->set_bound(true);
+  // Get all arguments of calling function (Function.prototype.bind).
+  int argc = 0;
+  SmartArrayPointer<Handle<Object> > arguments = GetCallerArguments(0, &argc);
+  // Don't count the this-arg.
+  if (argc > 0) {
+    ASSERT(*arguments[0] == args[2]);
+    argc--;
+  } else {
+    ASSERT(args[2]->IsUndefined());
+  }
+  // Initialize array of bindings (function, this, and any existing arguments
+  // if the function was already bound).
+  Handle<FixedArray> new_bindings;
+  int i;
+  if (bindee->IsJSFunction() && JSFunction::cast(*bindee)->shared()->bound()) {
+    Handle<FixedArray> old_bindings(
+        JSFunction::cast(*bindee)->function_bindings());
+    new_bindings =
+        isolate->factory()->NewFixedArray(old_bindings->length() + argc);
+    bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex));
+    i = 0;
+    for (int n = old_bindings->length(); i < n; i++) {
+      new_bindings->set(i, old_bindings->get(i));
+    }
+  } else {
+    int array_size = JSFunction::kBoundArgumentsStartIndex + argc;
+    new_bindings = isolate->factory()->NewFixedArray(array_size);
+    new_bindings->set(JSFunction::kBoundFunctionIndex, *bindee);
+    new_bindings->set(JSFunction::kBoundThisIndex, args[2]);
+    i = 2;
+  }
+  // Copy arguments, skipping the first, which is the receiver ("this_arg").
+  for (int j = 0; j < argc; j++, i++) {
+    new_bindings->set(i, *arguments[j + 1]);
+  }
+  new_bindings->set_map_no_write_barrier(
+      isolate->heap()->fixed_cow_array_map());
+  bound_function->set_function_bindings(*new_bindings);
+
+  // Update length.
+  Handle<String> length_symbol = isolate->factory()->length_symbol();
+  Handle<Object> new_length(args.at<Object>(3));
+  PropertyAttributes attr =
+      static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
+  ForceSetProperty(bound_function, length_symbol, new_length, attr);
+  return *bound_function;
+}
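
The bindings array written here is flat: slot 0 holds the target callable, slot 1 the bound receiver, and slots 2 onward the bound arguments (matching kBoundFunctionIndex, kBoundThisIndex, and the `i = 2` start above). Binding an already-bound function copies the old array, keeps the old receiver (calling the inner bound function would discard a new one anyway), and appends the new arguments, so the stored target is never itself bound. A minimal model of that flattening, with std::string standing in for heap objects:

#include <cstdio>
#include <string>
#include <vector>

// Model of the bindings FixedArray: [target, this, arg0, arg1, ...].
struct BoundFn {
  bool bound;
  std::vector<std::string> bindings;  // empty unless bound
};

BoundFn Bind(const BoundFn& callee, const std::string& this_arg,
             const std::vector<std::string>& args) {
  BoundFn result{true, {}};
  if (callee.bound) {
    result.bindings = callee.bindings;  // reuse old target/this/args; the
                                        // new this_arg is ignored, matching
                                        // observable bound-call semantics
  } else {
    result.bindings = {"<target>", this_arg};
  }
  result.bindings.insert(result.bindings.end(), args.begin(), args.end());
  return result;
}

int main() {
  BoundFn f{false, {}};
  BoundFn g = Bind(f, "thisA", {"x"});  // [<target>, thisA, x]
  BoundFn h = Bind(g, "thisB", {"y"});  // [<target>, thisA, x, y] -- flat
  for (const std::string& s : h.bindings) std::printf("%s ", s.c_str());
  std::printf("\n");
  return 0;
}
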
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionGetBindings) {
+  HandleScope handles(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callable, 0);
+  if (callable->IsJSFunction()) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
+    if (function->shared()->bound()) {
+      Handle<FixedArray> bindings(function->function_bindings());
+      ASSERT(bindings->map() == isolate->heap()->fixed_cow_array_map());
+      return *isolate->factory()->NewJSArrayWithElements(bindings);
+    }
+  }
+  return isolate->heap()->undefined_value();
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  ASSERT(args.length() == 1);
   // First argument is a function to use as a constructor.
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  RUNTIME_ASSERT(function->shared()->bound());
 
-  // Second argument is either null or an array of bound arguments.
-  Handle<FixedArray> bound_args;
-  int bound_argc = 0;
-  if (!args[1]->IsNull()) {
-    CONVERT_ARG_CHECKED(JSArray, params, 1);
-    RUNTIME_ASSERT(params->HasFastElements());
-    bound_args = Handle<FixedArray>(FixedArray::cast(params->elements()));
-    bound_argc = Smi::cast(params->length())->value();
-  }
+  // The argument is a bound function. Extract its bound arguments
+  // and callable.
+  Handle<FixedArray> bound_args =
+      Handle<FixedArray>(FixedArray::cast(function->function_bindings()));
+  int bound_argc = bound_args->length() - JSFunction::kBoundArgumentsStartIndex;
+  Handle<Object> bound_function(
+      JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)));
+  ASSERT(!bound_function->IsJSFunction() ||
+         !Handle<JSFunction>::cast(bound_function)->shared()->bound());
 
   int total_argc = 0;
-  SmartArrayPointer<Object**> param_data =
-      GetNonBoundArguments(bound_argc, &total_argc);
+  SmartArrayPointer<Handle<Object> > param_data =
+      GetCallerArguments(bound_argc, &total_argc);
   for (int i = 0; i < bound_argc; i++) {
-    Handle<Object> val = Handle<Object>(bound_args->get(i));
-    param_data[i] = val.location();
+    param_data[i] = Handle<Object>(bound_args->get(
+        JSFunction::kBoundArgumentsStartIndex + i));
   }
 
+  if (!bound_function->IsJSFunction()) {
+    bool exception_thrown;
+    bound_function = Execution::TryGetConstructorDelegate(bound_function,
+                                                          &exception_thrown);
+    if (exception_thrown) return Failure::Exception();
+  }
+  ASSERT(bound_function->IsJSFunction());
+
   bool exception = false;
   Handle<Object> result =
-      Execution::New(function, total_argc, *param_data, &exception);
+      Execution::New(Handle<JSFunction>::cast(bound_function),
+                     total_argc, *param_data, &exception);
   if (exception) {
-      return Failure::Exception();
+    return Failure::Exception();
   }
-
   ASSERT(!result.is_null());
   return *result;
 }
@@ -8034,12 +7912,9 @@
     prototype = Handle<Object>(function->instance_prototype(), isolate);
   }
   if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
-    ConstructStubCompiler compiler;
-    MaybeObject* code = compiler.CompileConstructStub(*function);
-    if (!code->IsFailure()) {
-      function->shared()->set_construct_stub(
-          Code::cast(code->ToObjectUnchecked()));
-    }
+    ConstructStubCompiler compiler(isolate);
+    Handle<Code> code = compiler.CompileConstructStub(function);
+    function->shared()->set_construct_stub(*code);
   }
 }
 
@@ -8098,9 +7973,11 @@
   // available. We cannot use EnsureCompiled because that forces a
   // compilation through the shared function info which makes it
   // impossible for us to optimize.
-  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
-  if (!function->is_compiled()) CompileLazy(function, CLEAR_EXCEPTION);
+  if (!function->is_compiled()) {
+    JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+  }
 
+  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
   if (!function->has_initial_map() &&
       shared->IsInobjectSlackTrackingInProgress()) {
     // The tracking is already in progress for another function. We can only
@@ -8128,7 +8005,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   function->shared()->CompleteInobjectSlackTracking();
   TrySettingInlineConstructStub(isolate, function);
 
@@ -8151,7 +8028,7 @@
 
   // Compile the target function.
   ASSERT(!function->is_compiled());
-  if (!CompileLazy(function, KEEP_EXCEPTION)) {
+  if (!JSFunction::CompileLazy(function, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
 
@@ -8166,6 +8043,8 @@
   ASSERT(args.length() == 1);
   Handle<JSFunction> function = args.at<JSFunction>(0);
 
+  function->shared()->set_profiler_ticks(0);
+
   // If the function is not compiled ignore the lazy
   // recompilation. This can happen if the debugger is activated and
   // the function is returned to the not compiled state.
@@ -8188,7 +8067,9 @@
     function->ReplaceCode(function->shared()->code());
     return function->code();
   }
-  if (CompileOptimized(function, AstNode::kNoNumber, CLEAR_EXCEPTION)) {
+  if (JSFunction::CompileOptimized(function,
+                                   AstNode::kNoNumber,
+                                   CLEAR_EXCEPTION)) {
     return function->code();
   }
   if (FLAG_trace_opt) {
@@ -8201,25 +8082,33 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  RUNTIME_ASSERT(args[0]->IsSmi());
-  Deoptimizer::BailoutType type =
-      static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
-  Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
-  ASSERT(isolate->heap()->IsAllocationAllowed());
-  int frames = deoptimizer->output_count();
+class ActivationsFinder : public ThreadVisitor {
+ public:
+  explicit ActivationsFinder(JSFunction* function)
+      : function_(function), has_activations_(false) {}
 
-  deoptimizer->MaterializeHeapNumbers();
-  delete deoptimizer;
+  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+    if (has_activations_) return;
 
-  JavaScriptFrameIterator it(isolate);
-  JavaScriptFrame* frame = NULL;
-  for (int i = 0; i < frames - 1; i++) it.Advance();
-  frame = it.frame();
+    for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+      JavaScriptFrame* frame = it.frame();
+      if (frame->is_optimized() && frame->function() == function_) {
+        has_activations_ = true;
+        return;
+      }
+    }
+  }
 
-  RUNTIME_ASSERT(frame->function()->IsJSFunction());
+  bool has_activations() { return has_activations_; }
+
+ private:
+  JSFunction* function_;
+  bool has_activations_;
+};
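
ActivationsFinder complements the stack walk further down: frames of the current thread are iterated inline, while IterateArchivedThreads runs this visitor over the saved state of every other thread, so optimized code is only discarded when no thread can still return into it. The visitor pattern in miniature (hypothetical frame/thread types):

#include <cstdio>
#include <vector>

struct Frame { int function_id; bool optimized; };
typedef std::vector<Frame> ThreadFrames;

struct Finder {
  int function_id;
  bool has_activations;
  void VisitThread(const ThreadFrames& frames) {
    if (has_activations) return;  // early out, as in the diff
    for (const Frame& f : frames) {
      if (f.optimized && f.function_id == function_id) {
        has_activations = true;
        return;
      }
    }
  }
};

int main() {
  std::vector<ThreadFrames> archived = {{{1, true}}, {{7, true}, {2, false}}};
  Finder finder{7, false};
  for (const ThreadFrames& t : archived) finder.VisitThread(t);
  std::printf("has activations: %d\n", finder.has_activations);  // 1
  return 0;
}
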
+
+
+static void MaterializeArgumentsObjectInFrame(Isolate* isolate,
+                                              JavaScriptFrame* frame) {
   Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
   Handle<Object> arguments;
   for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
@@ -8236,6 +8125,32 @@
       frame->SetExpression(i, *arguments);
     }
   }
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  RUNTIME_ASSERT(args[0]->IsSmi());
+  Deoptimizer::BailoutType type =
+      static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
+  Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+  ASSERT(isolate->heap()->IsAllocationAllowed());
+  int jsframes = deoptimizer->jsframe_count();
+
+  deoptimizer->MaterializeHeapNumbers();
+  delete deoptimizer;
+
+  JavaScriptFrameIterator it(isolate);
+  for (int i = 0; i < jsframes - 1; i++) {
+    MaterializeArgumentsObjectInFrame(isolate, it.frame());
+    it.Advance();
+  }
+
+  JavaScriptFrame* frame = it.frame();
+  RUNTIME_ASSERT(frame->function()->IsJSFunction());
+  Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
+  MaterializeArgumentsObjectInFrame(isolate, frame);
 
   if (type == Deoptimizer::EAGER) {
     RUNTIME_ASSERT(function->IsOptimized());
@@ -8247,17 +8162,24 @@
     return isolate->heap()->undefined_value();
   }
 
-  // Count the number of optimized activations of the function.
-  int activations = 0;
+  // Find other optimized activations of the function.
+  bool has_other_activations = false;
   while (!it.done()) {
     JavaScriptFrame* frame = it.frame();
     if (frame->is_optimized() && frame->function() == *function) {
-      activations++;
+      has_other_activations = true;
+      break;
     }
     it.Advance();
   }
 
-  if (activations == 0) {
+  if (!has_other_activations) {
+    ActivationsFinder activations_finder(*function);
+    isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
+    has_other_activations = activations_finder.has_activations();
+  }
+
+  if (!has_other_activations) {
     if (FLAG_trace_deopt) {
       PrintF("[removing optimized code for: ");
       function->PrintName();
@@ -8281,7 +8203,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   if (!function->IsOptimized()) return isolate->heap()->undefined_value();
 
   Deoptimizer::DeoptimizeFunction(*function);
@@ -8301,10 +8223,22 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
   if (!function->IsOptimizable()) return isolate->heap()->undefined_value();
   function->MarkForLazyRecompilation();
+
+  Code* unoptimized = function->shared()->code();
+  if (args.length() == 2 &&
+      unoptimized->kind() == Code::FUNCTION) {
+    CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
+    CHECK(type->IsEqualTo(CStrVector("osr")));
+    isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
+    unoptimized->set_allow_osr_at_loop_nesting_level(
+        Code::kMaxLoopNestingMarker);
+  }
+
   return isolate->heap()->undefined_value();
 }
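
The new two-argument form lets tests force on-stack replacement: besides marking the function, it pokes the runtime profiler and raises the unoptimized code's allowed OSR loop-nesting level to the maximum marker, so the next back-edge check can enter the OSR machinery regardless of loop depth. A toy version of that gate (the constant and the depth comparison are assumptions for illustration, not V8's exact definitions):

#include <cstdio>

const int kMaxLoopNestingMarker = 6;  // assumed cap, standing in for
                                      // Code::kMaxLoopNestingMarker

// A back edge at nesting depth `loop_depth` may enter the OSR machinery
// only if the code object's allowed level is at least that deep.
bool BackEdgeMayTriggerOsr(int loop_depth, int allowed_level) {
  return loop_depth <= allowed_level;
}

int main() {
  int allowed = 0;  // default: back edges never trigger OSR
  std::printf("%d\n", BackEdgeMayTriggerOsr(1, allowed));  // 0
  allowed = kMaxLoopNestingMarker;  // what the "osr" path above sets
  std::printf("%d\n", BackEdgeMayTriggerOsr(1, allowed));  // 1
  return 0;
}
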
 
@@ -8312,13 +8246,15 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
+  // The least significant bit (after untagging) indicates whether the
+  // function is currently optimized, regardless of reason.
   if (!V8::UseCrankshaft()) {
     return Smi::FromInt(4);  // 4 == "never".
   }
   if (FLAG_always_opt) {
     return Smi::FromInt(3);  // 3 == "always".
   }
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   return function->IsOptimized() ? Smi::FromInt(1)   // 1 == "yes".
                                  : Smi::FromInt(2);  // 2 == "no".
 }
@@ -8327,7 +8263,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   return Smi::FromInt(function->shared()->opt_count());
 }
 
@@ -8335,7 +8271,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
 
   // We're not prepared to handle a function with arguments object.
   ASSERT(!function->shared()->uses_arguments());
@@ -8395,7 +8331,7 @@
     // Try to compile the optimized code.  A true return value from
     // CompileOptimized means that compilation succeeded, not necessarily
     // that optimization succeeded.
-    if (CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
+    if (JSFunction::CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
         function->IsOptimized()) {
       DeoptimizationInputData* data = DeoptimizationInputData::cast(
           function->code()->deoptimization_data());
@@ -8421,8 +8357,19 @@
     function->PrintName();
     PrintF("]\n");
   }
-  StackCheckStub check_stub;
-  Handle<Code> check_code = check_stub.GetCode();
+  Handle<Code> check_code;
+#if defined(V8_TARGET_ARCH_IA32) || \
+    defined(V8_TARGET_ARCH_ARM) || \
+    defined(V8_TARGET_ARCH_MIPS)
+  if (FLAG_count_based_interrupts) {
+    InterruptStub interrupt_stub;
+    check_code = interrupt_stub.GetCode();
+  } else  // NOLINT
+#endif
+  {  // NOLINT
+    StackCheckStub check_stub;
+    check_code = check_stub.GetCode();
+  }
   Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
   Deoptimizer::RevertStackCheckCode(*unoptimized,
                                     *check_code,
@@ -8452,17 +8399,50 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() >= 2);
+  int argc = args.length() - 2;
+  CONVERT_ARG_CHECKED(JSReceiver, fun, argc + 1);
+  Object* receiver = args[0];
+
+  // If there are too many arguments, allocate argv via malloc.
+  const int argv_small_size = 10;
+  Handle<Object> argv_small_buffer[argv_small_size];
+  SmartArrayPointer<Handle<Object> > argv_large_buffer;
+  Handle<Object>* argv = argv_small_buffer;
+  if (argc > argv_small_size) {
+    argv = new Handle<Object>[argc];
+    if (argv == NULL) return isolate->StackOverflow();
+    argv_large_buffer = SmartArrayPointer<Handle<Object> >(argv);
+  }
+
+  for (int i = 0; i < argc; ++i) {
+    MaybeObject* maybe = args[1 + i];
+    Object* object;
+    if (!maybe->To<Object>(&object)) return maybe;
+    argv[i] = Handle<Object>(object);
+  }
+
+  bool threw;
+  Handle<JSReceiver> hfun(fun);
+  Handle<Object> hreceiver(receiver);
+  Handle<Object> result =
+      Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
+
+  if (threw) return Failure::Exception();
+  return *result;
+}
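
Runtime_Call (like Runtime_Apply below) uses a small-buffer optimization for argv: calls with at most 10 arguments fill a stack array, larger calls fall back to a heap allocation owned by a smart pointer, and both paths share one argv pointer. The idiom in isolation, with unique_ptr standing in for V8's SmartArrayPointer:

#include <cstdio>
#include <memory>

void CallWithArgs(int argc) {
  const int kSmallSize = 10;
  int small_buffer[kSmallSize];
  std::unique_ptr<int[]> large_buffer;  // engages only for big calls
  int* argv = small_buffer;
  if (argc > kSmallSize) {
    large_buffer.reset(new int[argc]);  // heap fallback, freed on return
    argv = large_buffer.get();
  }
  for (int i = 0; i < argc; ++i) argv[i] = i;  // stand-in for argument setup
  std::printf("argc=%d last=%d heap=%d\n", argc, argv[argc - 1],
              argv != small_buffer);
}

int main() {
  CallWithArgs(3);   // stays on the stack
  CallWithArgs(32);  // spills to the heap
  return 0;
}
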
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 5);
-  CONVERT_CHECKED(JSReceiver, fun, args[0]);
-  Object* receiver = args[1];
-  CONVERT_CHECKED(JSObject, arguments, args[2]);
-  CONVERT_CHECKED(Smi, shift, args[3]);
-  CONVERT_CHECKED(Smi, arity, args[4]);
-
-  int offset = shift->value();
-  int argc = arity->value();
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, fun, 0);
+  Handle<Object> receiver = args.at<Object>(1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, arguments, 2);
+  CONVERT_SMI_ARG_CHECKED(offset, 3);
+  CONVERT_SMI_ARG_CHECKED(argc, 4);
   ASSERT(offset >= 0);
   ASSERT(argc >= 0);
 
@@ -8478,17 +8458,12 @@
   }
 
   for (int i = 0; i < argc; ++i) {
-     MaybeObject* maybe = arguments->GetElement(offset + i);
-     Object* object;
-     if (!maybe->To<Object>(&object)) return maybe;
-     argv[i] = Handle<Object>(object);
+    argv[i] = Object::GetElement(arguments, offset + i);
   }
 
-  bool threw = false;
-  Handle<JSReceiver> hfun(fun);
-  Handle<Object> hreceiver(receiver);
-  Handle<Object> result = Execution::Call(
-      hfun, hreceiver, argc, reinterpret_cast<Object***>(argv), &threw, true);
+  bool threw;
+  Handle<Object> result =
+      Execution::Call(fun, receiver, argc, argv, &threw, true);
 
   if (threw) return Failure::Exception();
   return *result;
@@ -8515,8 +8490,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSFunction, function, args[0]);
-  int length = function->shared()->scope_info()->NumberOfContextSlots();
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  int length = function->shared()->scope_info()->ContextLength();
   Object* result;
   { MaybeObject* maybe_result =
         isolate->heap()->AllocateFunctionContext(length, function);
@@ -8602,7 +8577,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  SerializedScopeInfo* scope_info = SerializedScopeInfo::cast(args[0]);
+  ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
   JSFunction* function;
   if (args[1]->IsSmi()) {
     // A smi sentinel indicates a context nested inside global code rather
@@ -8627,8 +8602,8 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(Context, context, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
 
   int index;
   PropertyAttributes attributes;
@@ -8651,18 +8626,10 @@
   }
 
   // The slot was found in a JSObject, either a context extension object,
-  // the global object, or an arguments object.  Try to delete it
-  // (respecting DONT_DELETE).  For consistency with V8's usual behavior,
-  // which allows deleting all parameters in functions that mention
-  // 'arguments', we do this even for the case of slots found on an
-  // arguments object.  The slot was found on an arguments object if the
-  // index is non-negative.
+  // the global object, or the subject of a with.  Try to delete it
+  // (respecting DONT_DELETE).
   Handle<JSObject> object = Handle<JSObject>::cast(holder);
-  if (index >= 0) {
-    return object->DeleteElement(index, JSReceiver::NORMAL_DELETION);
-  } else {
-    return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
-  }
+  return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
 }
 
 
@@ -8747,52 +8714,53 @@
                                           &attributes,
                                           &binding_flags);
 
-  // If the index is non-negative, the slot has been found in a local
-  // variable or a parameter. Read it from the context object or the
-  // arguments object.
+  // If the index is non-negative, the slot has been found in a context.
   if (index >= 0) {
-    // If the "property" we were looking for is a local variable or an
-    // argument in a context, the receiver is the global object; see
-    // ECMA-262, 3rd., 10.1.6 and 10.2.3.
+    ASSERT(holder->IsContext());
+    // If the "property" we were looking for is a local variable, the
+    // receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
     //
-    // Use the hole as the receiver to signal that the receiver is
-    // implicit and that the global receiver should be used.
+    // Use the hole as the receiver to signal that the receiver is implicit
+    // and that the global receiver should be used (as distinguished from an
+    // explicit receiver that happens to be a global object).
     Handle<Object> receiver = isolate->factory()->the_hole_value();
-    MaybeObject* value = (holder->IsContext())
-        ? Context::cast(*holder)->get(index)
-        : JSObject::cast(*holder)->GetElement(index);
+    Object* value = Context::cast(*holder)->get(index);
     // Check for uninitialized bindings.
-    if (holder->IsContext() &&
-        binding_flags == MUTABLE_CHECK_INITIALIZED &&
-        value->IsTheHole()) {
-      Handle<Object> reference_error =
-          isolate->factory()->NewReferenceError("not_defined",
-                                                HandleVector(&name, 1));
-      return MakePair(isolate->Throw(*reference_error), NULL);
-    } else {
-      return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
+    switch (binding_flags) {
+      case MUTABLE_CHECK_INITIALIZED:
+      case IMMUTABLE_CHECK_INITIALIZED_HARMONY:
+        if (value->IsTheHole()) {
+          Handle<Object> reference_error =
+              isolate->factory()->NewReferenceError("not_defined",
+                                                    HandleVector(&name, 1));
+          return MakePair(isolate->Throw(*reference_error), NULL);
+        }
+        // FALLTHROUGH
+      case MUTABLE_IS_INITIALIZED:
+      case IMMUTABLE_IS_INITIALIZED:
+      case IMMUTABLE_IS_INITIALIZED_HARMONY:
+        ASSERT(!value->IsTheHole());
+        return MakePair(value, *receiver);
+      case IMMUTABLE_CHECK_INITIALIZED:
+        return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
+      case MISSING_BINDING:
+        UNREACHABLE();
+        return MakePair(NULL, NULL);
     }
   }
 
-  // If the holder is found, we read the property from it.
-  if (!holder.is_null() && holder->IsJSObject()) {
-    ASSERT(Handle<JSObject>::cast(holder)->HasProperty(*name));
-    JSObject* object = JSObject::cast(*holder);
-    Object* receiver;
-    if (object->IsGlobalObject()) {
-      receiver = GlobalObject::cast(object)->global_receiver();
-    } else if (context->is_exception_holder(*holder)) {
-      // Use the hole as the receiver to signal that the receiver is
-      // implicit and that the global receiver should be used.
-      receiver = isolate->heap()->the_hole_value();
-    } else {
-      receiver = ComputeReceiverForNonGlobal(isolate, object);
-    }
-
+  // Otherwise, if the slot was found, the holder is a context extension
+  // object, the subject of a with, or a global object.  We read the named
+  // property from it.
+  if (!holder.is_null()) {
+    Handle<JSObject> object = Handle<JSObject>::cast(holder);
+    ASSERT(object->HasProperty(*name));
     // GetProperty below can cause GC.
-    Handle<Object> receiver_handle(receiver);
+    Handle<Object> receiver_handle(object->IsGlobalObject()
+        ? GlobalObject::cast(*object)->global_receiver()
+        : ComputeReceiverForNonGlobal(isolate, *object));
 
-    // No need to unhole the value here. This is taken care of by the
+    // No need to unhole the value here.  This is taken care of by the
     // GetProperty function.
     MaybeObject* value = object->GetProperty(*name);
     return MakePair(value, *receiver_handle);
@@ -8827,12 +8795,11 @@
   ASSERT(args.length() == 4);
 
   Handle<Object> value(args[0], isolate);
-  CONVERT_ARG_CHECKED(Context, context, 1);
-  CONVERT_ARG_CHECKED(String, name, 2);
-  CONVERT_SMI_ARG_CHECKED(strict_unchecked, 3);
-  RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
-                 strict_unchecked == kNonStrictMode);
-  StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
+  CONVERT_ARG_HANDLE_CHECKED(Context, context, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
+  CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
+  StrictModeFlag strict_mode = (language_mode == CLASSIC_MODE)
+      ? kNonStrictMode : kStrictMode;
 
   int index;
   PropertyAttributes attributes;
@@ -8845,45 +8812,37 @@
                                           &binding_flags);
 
   if (index >= 0) {
-    if (holder->IsContext()) {
-      Handle<Context> context = Handle<Context>::cast(holder);
-      if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
-          context->get(index)->IsTheHole()) {
-        Handle<Object> error =
-            isolate->factory()->NewReferenceError("not_defined",
-                                                  HandleVector(&name, 1));
-        return isolate->Throw(*error);
-      }
-      // Ignore if read_only variable.
-      if ((attributes & READ_ONLY) == 0) {
-        // Context is a fixed array and set cannot fail.
-        context->set(index, *value);
-      } else if (strict_mode == kStrictMode) {
-        // Setting read only property in strict mode.
-        Handle<Object> error =
-            isolate->factory()->NewTypeError("strict_cannot_assign",
-                                             HandleVector(&name, 1));
-        return isolate->Throw(*error);
-      }
-    } else {
-      ASSERT((attributes & READ_ONLY) == 0);
-      Handle<Object> result =
-          SetElement(Handle<JSObject>::cast(holder), index, value, strict_mode);
-      if (result.is_null()) {
-        ASSERT(isolate->has_pending_exception());
-        return Failure::Exception();
-      }
+    // The property was found in a context slot.
+    Handle<Context> context = Handle<Context>::cast(holder);
+    if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
+        context->get(index)->IsTheHole()) {
+      Handle<Object> error =
+          isolate->factory()->NewReferenceError("not_defined",
+                                                HandleVector(&name, 1));
+      return isolate->Throw(*error);
+    }
+    // Ignore the assignment if the variable is read-only.
+    if ((attributes & READ_ONLY) == 0) {
+      // Context is a fixed array and set cannot fail.
+      context->set(index, *value);
+    } else if (strict_mode == kStrictMode) {
+      // Setting read only property in strict mode.
+      Handle<Object> error =
+          isolate->factory()->NewTypeError("strict_cannot_assign",
+                                           HandleVector(&name, 1));
+      return isolate->Throw(*error);
     }
     return *value;
   }
 
-  // Slow case: The property is not in a FixedArray context.
-  // It is either in an JSObject extension context or it was not found.
-  Handle<JSObject> context_ext;
+  // Slow case: The property is not in a context slot.  It is either in a
+  // context extension object, a property of the subject of a with, or a
+  // property of the global object.
+  Handle<JSObject> object;
 
   if (!holder.is_null()) {
-    // The property exists in the extension context.
-    context_ext = Handle<JSObject>::cast(holder);
+    // The property exists on the holder.
+    object = Handle<JSObject>::cast(holder);
   } else {
     // The property was not found.
     ASSERT(attributes == ABSENT);
@@ -8891,22 +8850,21 @@
     if (strict_mode == kStrictMode) {
       // Throw in strict mode (assignment to undefined variable).
       Handle<Object> error =
-        isolate->factory()->NewReferenceError(
-            "not_defined", HandleVector(&name, 1));
+          isolate->factory()->NewReferenceError(
+              "not_defined", HandleVector(&name, 1));
       return isolate->Throw(*error);
     }
-    // In non-strict mode, the property is stored in the global context.
+    // In non-strict mode, the property is added to the global object.
     attributes = NONE;
-    context_ext = Handle<JSObject>(isolate->context()->global());
+    object = Handle<JSObject>(isolate->context()->global());
   }
 
-  // Set the property, but ignore if read_only variable on the context
-  // extension object itself.
+  // Set the property if it's not read only or doesn't yet exist.
   if ((attributes & READ_ONLY) == 0 ||
-      (context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) {
+      (object->GetLocalPropertyAttribute(*name) == ABSENT)) {
     RETURN_IF_EMPTY_HANDLE(
         isolate,
-        SetProperty(context_ext, name, value, NONE, strict_mode));
+        JSReceiver::SetProperty(object, name, value, NONE, strict_mode));
   } else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
     // Setting read only property in strict mode.
     Handle<Object> error =
@@ -8961,43 +8919,13 @@
     return isolate->StackOverflow();
   }
 
-  return Execution::HandleStackGuardInterrupt();
+  return Execution::HandleStackGuardInterrupt(isolate);
 }
 
 
-// NOTE: These PrintXXX functions are defined for all builds (not just
-// DEBUG builds) because we may want to be able to trace function
-// calls in all modes.
-static void PrintString(String* str) {
-  // not uncommon to have empty strings
-  if (str->length() > 0) {
-    SmartArrayPointer<char> s =
-        str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    PrintF("%s", *s);
-  }
-}
-
-
-static void PrintObject(Object* obj) {
-  if (obj->IsSmi()) {
-    PrintF("%d", Smi::cast(obj)->value());
-  } else if (obj->IsString() || obj->IsSymbol()) {
-    PrintString(String::cast(obj));
-  } else if (obj->IsNumber()) {
-    PrintF("%g", obj->Number());
-  } else if (obj->IsFailure()) {
-    PrintF("<failure>");
-  } else if (obj->IsUndefined()) {
-    PrintF("<undefined>");
-  } else if (obj->IsNull()) {
-    PrintF("<null>");
-  } else if (obj->IsTrue()) {
-    PrintF("<true>");
-  } else if (obj->IsFalse()) {
-    PrintF("<false>");
-  } else {
-    PrintF("%p", reinterpret_cast<void*>(obj));
-  }
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
+  ASSERT(args.length() == 0);
+  return Execution::HandleStackGuardInterrupt(isolate);
 }
 
 
@@ -9019,33 +8947,12 @@
   }
 
   if (result == NULL) {
-    // constructor calls
-    JavaScriptFrameIterator it;
-    JavaScriptFrame* frame = it.frame();
-    if (frame->IsConstructor()) PrintF("new ");
-    // function name
-    Object* fun = frame->function();
-    if (fun->IsJSFunction()) {
-      PrintObject(JSFunction::cast(fun)->shared()->name());
-    } else {
-      PrintObject(fun);
-    }
-    // function arguments
-    // (we are intentionally only printing the actually
-    // supplied parameters, not all parameters required)
-    PrintF("(this=");
-    PrintObject(frame->receiver());
-    const int length = frame->ComputeParametersCount();
-    for (int i = 0; i < length; i++) {
-      PrintF(", ");
-      PrintObject(frame->GetParameter(i));
-    }
-    PrintF(") {\n");
-
+    JavaScriptFrame::PrintTop(stdout, true, false);
+    PrintF(" {\n");
   } else {
     // function result
     PrintF("} -> ");
-    PrintObject(result);
+    result->ShortPrint();
     PrintF("\n");
   }
 }
@@ -9122,10 +9029,14 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(String, str, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
   FlattenString(str);
 
-  CONVERT_ARG_CHECKED(JSArray, output, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1);
+
+  MaybeObject* maybe_result_array =
+      output->EnsureCanContainHeapObjectElements();
+  if (maybe_result_array->IsFailure()) return maybe_result_array;
   RUNTIME_ASSERT(output->HasFastElements());
 
   AssertNoAllocation no_allocation;
@@ -9158,25 +9069,20 @@
   ASSERT(args.length() == 1);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  const char* zone = OS::LocalTimezone(x);
+  int64_t time = isolate->date_cache()->EquivalentTime(static_cast<int64_t>(x));
+  const char* zone = OS::LocalTimezone(static_cast<double>(time));
   return isolate->heap()->AllocateStringFromUtf8(CStrVector(zone));
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimeOffset) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 0);
-
-  return isolate->heap()->NumberFromDouble(OS::LocalTimeOffset());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateDaylightSavingsOffset) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  return isolate->heap()->NumberFromDouble(OS::DaylightSavingsOffset(x));
+  int64_t time = isolate->date_cache()->ToUTC(static_cast<int64_t>(x));
+
+  return isolate->heap()->NumberFromDouble(static_cast<double>(time));
 }
 
 
@@ -9191,10 +9097,10 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
   HandleScope scope(isolate);
   ASSERT_EQ(1, args.length());
-  CONVERT_ARG_CHECKED(String, source, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
 
   source = Handle<String>(source->TryFlattenGetString());
-  // Optimized fast case where we only have ascii characters.
+  // Optimized fast case where we only have ASCII characters.
   Handle<Object> result;
   if (source->IsSeqAsciiString()) {
     result = JsonParser<true>::Parse(source);
@@ -9212,43 +9118,40 @@
 
 bool CodeGenerationFromStringsAllowed(Isolate* isolate,
                                       Handle<Context> context) {
-  if (context->allow_code_gen_from_strings()->IsFalse()) {
-    // Check with callback if set.
-    AllowCodeGenerationFromStringsCallback callback =
-        isolate->allow_code_gen_callback();
-    if (callback == NULL) {
-      // No callback set and code generation disallowed.
-      return false;
-    } else {
-      // Callback set. Let it decide if code generation is allowed.
-      VMState state(isolate, EXTERNAL);
-      return callback(v8::Utils::ToLocal(context));
-    }
+  ASSERT(context->allow_code_gen_from_strings()->IsFalse());
+  // Check with callback if set.
+  AllowCodeGenerationFromStringsCallback callback =
+      isolate->allow_code_gen_callback();
+  if (callback == NULL) {
+    // No callback set and code generation disallowed.
+    return false;
+  } else {
+    // Callback set. Let it decide if code generation is allowed.
+    VMState state(isolate, EXTERNAL);
+    return callback(v8::Utils::ToLocal(context));
   }
-  return true;
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
   HandleScope scope(isolate);
   ASSERT_EQ(1, args.length());
-  CONVERT_ARG_CHECKED(String, source, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
 
   // Extract global context.
   Handle<Context> context(isolate->context()->global_context());
 
   // Check if global context allows code generation from
   // strings. Throw an exception if it doesn't.
-  if (!CodeGenerationFromStringsAllowed(isolate, context)) {
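+  // Fast path: consult the (potentially expensive) embedder callback only
+  // when the context flag itself disallows code generation.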
+  if (context->allow_code_gen_from_strings()->IsFalse() &&
+      !CodeGenerationFromStringsAllowed(isolate, context)) {
     return isolate->Throw(*isolate->factory()->NewError(
         "code_gen_from_strings", HandleVector<Object>(NULL, 0)));
   }
 
   // Compile source string in the global context.
-  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
-                                                            context,
-                                                            true,
-                                                            kNonStrictMode);
+  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
+      source, context, true, CLASSIC_MODE, RelocInfo::kNoPosition);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> fun =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
@@ -9261,13 +9164,15 @@
 static ObjectPair CompileGlobalEval(Isolate* isolate,
                                     Handle<String> source,
                                     Handle<Object> receiver,
-                                    StrictModeFlag strict_mode) {
+                                    LanguageMode language_mode,
+                                    int scope_position) {
   Handle<Context> context = Handle<Context>(isolate->context());
   Handle<Context> global_context = Handle<Context>(context->global_context());
 
   // Check if global context allows code generation from
   // strings. Throw an exception if it doesn't.
-  if (!CodeGenerationFromStringsAllowed(isolate, global_context)) {
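+  // Same fast path as in Runtime_CompileString: skip the callback check
+  // when the context flag already permits code generation.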
+  if (global_context->allow_code_gen_from_strings()->IsFalse() &&
+      !CodeGenerationFromStringsAllowed(isolate, global_context)) {
     isolate->Throw(*isolate->factory()->NewError(
         "code_gen_from_strings", HandleVector<Object>(NULL, 0)));
     return MakePair(Failure::Exception(), NULL);
@@ -9279,7 +9184,8 @@
       source,
       Handle<Context>(isolate->context()),
       context->IsGlobalContext(),
-      strict_mode);
+      language_mode,
+      scope_position);
   if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
   Handle<JSFunction> compiled =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -9289,91 +9195,28 @@
 
 
 RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
-  ASSERT(args.length() == 4);
+  ASSERT(args.length() == 5);
 
   HandleScope scope(isolate);
   Handle<Object> callee = args.at<Object>(0);
-  Handle<Object> receiver;  // Will be overwritten.
 
-  // Compute the calling context.
-  Handle<Context> context = Handle<Context>(isolate->context(), isolate);
-#ifdef DEBUG
-  // Make sure Isolate::context() agrees with the old code that traversed
-  // the stack frames to compute the context.
-  StackFrameLocator locator;
-  JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
-  ASSERT(Context::cast(frame->context()) == *context);
-#endif
-
-  // Find where the 'eval' symbol is bound. It is unaliased only if
-  // it is bound in the global context.
-  int index = -1;
-  PropertyAttributes attributes = ABSENT;
-  BindingFlags binding_flags;
-  while (true) {
-    receiver = context->Lookup(isolate->factory()->eval_symbol(),
-                               FOLLOW_PROTOTYPE_CHAIN,
-                               &index,
-                               &attributes,
-                               &binding_flags);
-    // Stop search when eval is found or when the global context is
-    // reached.
-    if (attributes != ABSENT || context->IsGlobalContext()) break;
-    context = Handle<Context>(context->previous(), isolate);
-  }
-
-  // If eval could not be resolved, it has been deleted and we need to
-  // throw a reference error.
-  if (attributes == ABSENT) {
-    Handle<Object> name = isolate->factory()->eval_symbol();
-    Handle<Object> reference_error =
-        isolate->factory()->NewReferenceError("not_defined",
-                                              HandleVector(&name, 1));
-    return MakePair(isolate->Throw(*reference_error), NULL);
-  }
-
-  if (!context->IsGlobalContext()) {
-    // 'eval' is not bound in the global context. Just call the function
-    // with the given arguments. This is not necessarily the global eval.
-    if (receiver->IsContext() || receiver->IsJSContextExtensionObject()) {
-      receiver = isolate->factory()->the_hole_value();
-    }
-    return MakePair(*callee, *receiver);
-  }
-
-  // 'eval' is bound in the global context, but it may have been overwritten.
-  // Compare it to the builtin 'GlobalEval' function to make sure.
+  // If "eval" didn't refer to the original GlobalEval, it's not a
+  // direct call to eval.
+  // (And even if it is, if the first argument isn't a string, just let
+  // execution default to an indirect call to eval, which will also return
+  // the first argument without doing anything.)
   if (*callee != isolate->global_context()->global_eval_fun() ||
       !args[1]->IsString()) {
     return MakePair(*callee, isolate->heap()->the_hole_value());
   }
 
-  ASSERT(args[3]->IsSmi());
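+  // args[3] carries the caller's language mode; args[4] is the position of
+  // the eval call within its scope, used to key the compilation cache.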
+  CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
+  ASSERT(args[4]->IsSmi());
   return CompileGlobalEval(isolate,
                            args.at<String>(1),
                            args.at<Object>(2),
-                           static_cast<StrictModeFlag>(args.smi_at(3)));
-}
-
-
-RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEvalNoLookup) {
-  ASSERT(args.length() == 4);
-
-  HandleScope scope(isolate);
-  Handle<Object> callee = args.at<Object>(0);
-
-  // 'eval' is bound in the global context, but it may have been overwritten.
-  // Compare it to the builtin 'GlobalEval' function to make sure.
-  if (*callee != isolate->global_context()->global_eval_fun() ||
-      !args[1]->IsString()) {
-    return MakePair(*callee, isolate->heap()->the_hole_value());
-  }
-
-  ASSERT(args[3]->IsSmi());
-  return CompileGlobalEval(isolate,
-                           args.at<String>(1),
-                           args.at<Object>(2),
-                           static_cast<StrictModeFlag>(args.smi_at(3)));
+                           language_mode,
+                           args.smi_at(4));
 }
 
 
@@ -9384,11 +9227,11 @@
   // as specified in ECMA262, 15.3.5.2.
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, func, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
 
-  Handle<Map> map = func->shared()->strict_mode()
-                        ? isolate->strict_mode_function_instance_map()
-                        : isolate->function_instance_map();
+  Handle<Map> map = func->shared()->is_classic_mode()
+      ? isolate->function_instance_map()
+      : isolate->strict_mode_function_instance_map();
 
   ASSERT(func->map()->instance_type() == map->instance_type());
   ASSERT(func->map()->instance_size() == map->instance_size());
@@ -9402,7 +9245,7 @@
   // Use as fallback for allocation in generated code when NewSpace
   // is full.
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(Smi, size_smi, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
   int size = size_smi->value();
   RUNTIME_ASSERT(IsAligned(size, kPointerSize));
   RUNTIME_ASSERT(size > 0);
@@ -9424,9 +9267,9 @@
 // false otherwise.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSArray, array, args[0]);
-  CONVERT_CHECKED(JSObject, element, args[1]);
-  RUNTIME_ASSERT(array->HasFastElements());
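+  // FAST_SMI_ONLY_ELEMENTS arrays share the FixedArray backing store of
+  // FAST_ELEMENTS arrays, so they can be scanned the same way.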
+  CONVERT_ARG_CHECKED(JSArray, array, 0);
+  CONVERT_ARG_CHECKED(JSObject, element, 1);
+  RUNTIME_ASSERT(array->HasFastElements() || array->HasFastSmiOnlyElements());
   int length = Smi::cast(array->length())->value();
   FixedArray* elements = FixedArray::cast(array->elements());
   for (int i = 0; i < length; i++) {
@@ -9510,9 +9353,11 @@
         isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
     Handle<Map> map;
     if (fast_elements_) {
-      map = isolate_->factory()->GetFastElementsMap(Handle<Map>(array->map()));
+      map = isolate_->factory()->GetElementsTransitionMap(array,
+                                                          FAST_ELEMENTS);
     } else {
-      map = isolate_->factory()->GetSlowElementsMap(Handle<Map>(array->map()));
+      map = isolate_->factory()->GetElementsTransitionMap(array,
+                                                          DICTIONARY_ELEMENTS);
     }
     array->set_map(*map);
     array->set_length(*length);
@@ -9568,6 +9413,7 @@
   uint32_t length = static_cast<uint32_t>(array->length()->Number());
   int element_count = 0;
   switch (array->GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       // Fast elements can't have lengths that are not representable by
       // a 32-bit signed integer.
@@ -9579,6 +9425,10 @@
       }
       break;
     }
+    case FAST_DOUBLE_ELEMENTS:
+      // TODO(1810): Decide if it's worthwhile to implement this.
+      UNREACHABLE();
+      break;
     case DICTIONARY_ELEMENTS: {
       Handle<SeededNumberDictionary> dictionary(
           SeededNumberDictionary::cast(array->elements()));
@@ -9591,7 +9441,16 @@
       }
       break;
     }
-    default:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
       // External arrays are always dense.
       return length;
   }
@@ -9657,6 +9516,7 @@
                                   List<uint32_t>* indices) {
   ElementsKind kind = object->GetElementsKind();
   switch (kind) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       Handle<FixedArray> elements(FixedArray::cast(object->elements()));
       uint32_t length = static_cast<uint32_t>(elements->length());
@@ -9668,6 +9528,11 @@
       }
       break;
     }
+    case FAST_DOUBLE_ELEMENTS: {
+      // TODO(1810): Decide if it's worthwhile to implement this.
+      UNREACHABLE();
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       Handle<SeededNumberDictionary> dict(
           SeededNumberDictionary::cast(object->elements()));
@@ -9777,6 +9642,7 @@
                             ArrayConcatVisitor* visitor) {
   uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
   switch (receiver->GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       // Run through the elements FixedArray and use HasElement and GetElement
       // to check the prototype for missing elements.
@@ -9791,13 +9657,18 @@
         } else if (receiver->HasElement(j)) {
           // Call GetElement on receiver, not its prototype, or getters won't
           // have the correct receiver.
-          element_value = GetElement(receiver, j);
-          if (element_value.is_null()) return false;
+          element_value = Object::GetElement(receiver, j);
+          RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
           visitor->visit(j, element_value);
         }
       }
       break;
     }
+    case FAST_DOUBLE_ELEMENTS: {
+      // TODO(1810): Decide if it's worthwhile to implement this.
+      UNREACHABLE();
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       Handle<SeededNumberDictionary> dict(receiver->element_dictionary());
       List<uint32_t> indices(dict->Capacity() / 2);
@@ -9810,8 +9681,8 @@
       while (j < n) {
         HandleScope loop_scope;
         uint32_t index = indices[j];
-        Handle<Object> element = GetElement(receiver, index);
-        if (element.is_null()) return false;
+        Handle<Object> element = Object::GetElement(receiver, index);
+        RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element, false);
         visitor->visit(index, element);
         // Skip to next different index (i.e., omit duplicates).
         do {
@@ -9888,7 +9759,7 @@
   ASSERT(args.length() == 1);
   HandleScope handle_scope(isolate);
 
-  CONVERT_ARG_CHECKED(JSArray, arguments, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 0);
   int argument_count = static_cast<int>(arguments->length()->Number());
   RUNTIME_ASSERT(arguments->HasFastElements());
   Handle<FixedArray> elements(FixedArray::cast(arguments->elements()));
@@ -9908,6 +9779,13 @@
       uint32_t element_estimate;
       if (obj->IsJSArray()) {
         Handle<JSArray> array(Handle<JSArray>::cast(obj));
+        // TODO(1810): Find out if it's worthwhile to properly support
+        // arbitrary ElementsKinds. For now, pessimistically transition to
+        // FAST_ELEMENTS.
+        if (array->HasFastDoubleElements()) {
+          array = Handle<JSArray>::cast(
+              JSObject::TransitionElementsKind(array, FAST_ELEMENTS));
+        }
         length_estimate =
             static_cast<uint32_t>(array->length()->Number());
         element_estimate =
@@ -9976,7 +9854,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(String, string, args[0]);
+  CONVERT_ARG_CHECKED(String, string, 0);
   StringInputBuffer buffer(string);
   while (buffer.has_more()) {
     uint16_t character = buffer.GetNext();
@@ -9992,7 +9870,7 @@
 // Returns the number of non-undefined elements collected.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
   return object->PrepareElementsForSort(limit);
 }
@@ -10001,19 +9879,21 @@
 // Move contents of argument 0 (an array) to argument 1 (an array)
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSArray, from, args[0]);
-  CONVERT_CHECKED(JSArray, to, args[1]);
+  CONVERT_ARG_CHECKED(JSArray, from, 0);
+  CONVERT_ARG_CHECKED(JSArray, to, 1);
   FixedArrayBase* new_elements = from->elements();
   MaybeObject* maybe_new_map;
+  ElementsKind elements_kind;
   if (new_elements->map() == isolate->heap()->fixed_array_map() ||
       new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
-    maybe_new_map = to->map()->GetFastElementsMap();
+    elements_kind = FAST_ELEMENTS;
   } else if (new_elements->map() ==
              isolate->heap()->fixed_double_array_map()) {
-    maybe_new_map = to->map()->GetFastDoubleElementsMap();
+    elements_kind = FAST_DOUBLE_ELEMENTS;
   } else {
-    maybe_new_map = to->map()->GetSlowElementsMap();
+    elements_kind = DICTIONARY_ELEMENTS;
   }
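+  // Transition the target array's map to the elements kind inferred from
+  // the source array's backing store.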
+  maybe_new_map = to->GetElementsTransitionMap(isolate, elements_kind);
   Object* new_map;
   if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
   to->set_map(Map::cast(new_map));
@@ -10031,7 +9911,7 @@
 // How many elements does this object/array have?
 RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
   HeapObject* elements = object->elements();
   if (elements->IsDictionary()) {
     int result = SeededNumberDictionary::cast(elements)->NumberOfElements();
@@ -10049,7 +9929,7 @@
 
   ASSERT_EQ(3, args.length());
 
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   Handle<Object> key1 = args.at<Object>(1);
   Handle<Object> key2 = args.at<Object>(2);
 
@@ -10060,15 +9940,15 @@
   }
 
   Handle<JSObject> jsobject = Handle<JSObject>::cast(object);
-  Handle<Object> tmp1 = GetElement(jsobject, index1);
+  Handle<Object> tmp1 = Object::GetElement(jsobject, index1);
   RETURN_IF_EMPTY_HANDLE(isolate, tmp1);
-  Handle<Object> tmp2 = GetElement(jsobject, index2);
+  Handle<Object> tmp2 = Object::GetElement(jsobject, index2);
   RETURN_IF_EMPTY_HANDLE(isolate, tmp2);
 
-  RETURN_IF_EMPTY_HANDLE(isolate,
-                         SetElement(jsobject, index1, tmp2, kStrictMode));
-  RETURN_IF_EMPTY_HANDLE(isolate,
-                         SetElement(jsobject, index2, tmp1, kStrictMode));
+  RETURN_IF_EMPTY_HANDLE(
+      isolate, JSObject::SetElement(jsobject, index1, tmp2, NONE, kStrictMode));
+  RETURN_IF_EMPTY_HANDLE(
+      isolate, JSObject::SetElement(jsobject, index2, tmp1, NONE, kStrictMode));
 
   return isolate->heap()->undefined_value();
 }
@@ -10082,12 +9962,16 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSObject, array, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
   if (array->elements()->IsDictionary()) {
     // Create an array and get all the keys into it, then remove all the
     // keys that are not integers in the range 0 to length-1.
-    Handle<FixedArray> keys = GetKeysInFixedArrayFor(array, INCLUDE_PROTOS);
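+    // Key collection can now throw (e.g. out of a proxy trap); propagate
+    // the exception rather than continuing with a partial result.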
+    bool threw = false;
+    Handle<FixedArray> keys =
+        GetKeysInFixedArrayFor(array, INCLUDE_PROTOS, &threw);
+    if (threw) return Failure::Exception();
+
     int keys_length = keys->length();
     for (int i = 0; i < keys_length; i++) {
       Object* key = keys->get(i);
@@ -10099,7 +9983,9 @@
     }
     return *isolate->factory()->NewJSArrayWithElements(keys);
   } else {
-    ASSERT(array->HasFastElements() || array->HasFastDoubleElements());
+    ASSERT(array->HasFastElements() ||
+           array->HasFastSmiOnlyElements() ||
+           array->HasFastDoubleElements());
     Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
     // -1 means start of array.
     single_interval->set(0, Smi::FromInt(-1));
@@ -10115,37 +10001,13 @@
 }
 
 
-// DefineAccessor takes an optional final argument which is the
-// property attributes (eg, DONT_ENUM, DONT_DELETE).  IMPORTANT: due
-// to the way accessors are implemented, it is set for both the getter
-// and setter on the first call to DefineAccessor and ignored on
-// subsequent calls.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineAccessor) {
-  RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
-  // Compute attributes.
-  PropertyAttributes attributes = NONE;
-  if (args.length() == 5) {
-    CONVERT_CHECKED(Smi, attrs, args[4]);
-    int value = attrs->value();
-    // Only attribute bits should be set.
-    ASSERT((value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
-    attributes = static_cast<PropertyAttributes>(value);
-  }
-
-  CONVERT_CHECKED(JSObject, obj, args[0]);
-  CONVERT_CHECKED(String, name, args[1]);
-  CONVERT_CHECKED(Smi, flag, args[2]);
-  CONVERT_CHECKED(JSFunction, fun, args[3]);
-  return obj->DefineAccessor(name, flag->value() == 0, fun, attributes);
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
   ASSERT(args.length() == 3);
-  CONVERT_CHECKED(JSObject, obj, args[0]);
-  CONVERT_CHECKED(String, name, args[1]);
-  CONVERT_CHECKED(Smi, flag, args[2]);
-  return obj->LookupAccessor(name, flag->value() == 0);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_SMI_ARG_CHECKED(flag, 2);
+  AccessorComponent component = flag == 0 ? ACCESSOR_GETTER : ACCESSOR_SETTER;
+  return obj->LookupAccessor(name, component);
 }
 
 
@@ -10163,8 +10025,8 @@
 }
 
 
-static StackFrame::Id UnwrapFrameId(Smi* wrapped) {
-  return static_cast<StackFrame::Id>(wrapped->value() << 2);
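+// Frame ids are aligned addresses, so the two low bits dropped when the id
+// was wrapped into a smi can be restored by shifting back up.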
+static StackFrame::Id UnwrapFrameId(int wrapped) {
+  return static_cast<StackFrame::Id>(wrapped << 2);
 }
 
 
@@ -10218,8 +10080,8 @@
     case CALLBACKS: {
       Object* structure = result->GetCallbackObject();
       if (structure->IsForeign() || structure->IsAccessorInfo()) {
-        MaybeObject* maybe_value = receiver->GetPropertyWithCallback(
-            receiver, structure, name, result->holder());
+        MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback(
+            receiver, structure, name);
         if (!maybe_value->ToObject(&value)) {
           if (maybe_value->IsRetryAfterGC()) return maybe_value;
           ASSERT(maybe_value->IsException());
@@ -10241,10 +10103,11 @@
     case CONSTANT_TRANSITION:
     case NULL_DESCRIPTOR:
       return heap->undefined_value();
-    default:
+    case HANDLER:
       UNREACHABLE();
+      return heap->undefined_value();
   }
-  UNREACHABLE();
+  UNREACHABLE();  // keep the compiler happy
   return heap->undefined_value();
 }
 
@@ -10266,8 +10129,8 @@
 
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
 
   // Make sure to set the current context to the context before the debugger was
   // entered (if the debugger is entered). The reason for switching context here
@@ -10310,7 +10173,7 @@
   // Try local lookup on each of the objects.
   Handle<JSObject> jsproto = obj;
   for (int i = 0; i < length; i++) {
-    LookupResult result;
+    LookupResult result(isolate);
     jsproto->LocalLookup(*name, &result);
     if (result.IsProperty()) {
       // LookupResult is not GC safe as it holds raw object pointers.
@@ -10337,15 +10200,16 @@
       // If the callback object is a fixed array then it contains JavaScript
       // getter and/or setter.
       bool hasJavaScriptAccessors = result_type == CALLBACKS &&
-                                    result_callback_obj->IsFixedArray();
+                                    result_callback_obj->IsAccessorPair();
       Handle<FixedArray> details =
           isolate->factory()->NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
       details->set(0, *value);
       details->set(1, property_details);
       if (hasJavaScriptAccessors) {
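+        // Getters and setters are now stored in a dedicated AccessorPair
+        // struct rather than a raw two-element FixedArray.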
+        AccessorPair* accessors = AccessorPair::cast(*result_callback_obj);
         details->set(2, isolate->heap()->ToBoolean(caught_exception));
-        details->set(3, FixedArray::cast(*result_callback_obj)->get(0));
-        details->set(4, FixedArray::cast(*result_callback_obj)->get(1));
+        details->set(3, accessors->GetComponent(ACCESSOR_GETTER));
+        details->set(4, accessors->GetComponent(ACCESSOR_SETTER));
       }
 
       return *isolate->factory()->NewJSArrayWithElements(details);
@@ -10364,10 +10228,10 @@
 
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
 
-  LookupResult result;
+  LookupResult result(isolate);
   obj->Lookup(*name, &result);
   if (result.IsProperty()) {
     return DebugLookupResultValue(isolate->heap(), *obj, *name, &result, NULL);
@@ -10380,9 +10244,8 @@
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(Smi, details, args[0]);
-  PropertyType type = PropertyDetails(details).type();
-  return Smi::FromInt(static_cast<int>(type));
+  CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
+  return Smi::FromInt(static_cast<int>(details.type()));
 }
 
 
@@ -10390,9 +10253,8 @@
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(Smi, details, args[0]);
-  PropertyAttributes attributes = PropertyDetails(details).attributes();
-  return Smi::FromInt(static_cast<int>(attributes));
+  CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
+  return Smi::FromInt(static_cast<int>(details.attributes()));
 }
 
 
@@ -10400,9 +10262,8 @@
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(Smi, details, args[0]);
-  int index = PropertyDetails(details).index();
-  return Smi::FromInt(index);
+  CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
+  return Smi::FromInt(details.index());
 }
 
 
@@ -10412,9 +10273,9 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   RUNTIME_ASSERT(obj->HasNamedInterceptor());
-  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
 
   PropertyAttributes attributes;
   return obj->GetPropertyWithInterceptor(*obj, *name, &attributes);
@@ -10427,7 +10288,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   RUNTIME_ASSERT(obj->HasIndexedInterceptor());
   CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
 
@@ -10478,15 +10339,16 @@
 class FrameInspector {
  public:
   FrameInspector(JavaScriptFrame* frame,
-                 int inlined_frame_index,
+                 int inlined_jsframe_index,
                  Isolate* isolate)
       : frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
     // Calculate the deoptimized frame.
     if (frame->is_optimized()) {
       deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
-          frame, inlined_frame_index, isolate);
+          frame, inlined_jsframe_index, isolate);
     }
     has_adapted_arguments_ = frame_->has_adapted_arguments();
+    is_bottommost_ = inlined_jsframe_index == 0;
     is_optimized_ = frame_->is_optimized();
   }
 
@@ -10519,6 +10381,16 @@
         ? deoptimized_frame_->GetExpression(index)
         : frame_->GetExpression(index);
   }
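+  // When inspecting an optimized frame, the source position and
+  // constructor-ness come from the materialized deoptimized frame;
+  // otherwise they are read from the physical frame directly.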
+  int GetSourcePosition() {
+    return is_optimized_
+        ? deoptimized_frame_->GetSourcePosition()
+        : frame_->LookupCode()->SourcePosition(frame_->pc());
+  }
+  bool IsConstructor() {
+    return is_optimized_ && !is_bottommost_
+        ? deoptimized_frame_->HasConstructStub()
+        : frame_->IsConstructor();
+  }
 
   // To inspect all the provided arguments the frame might need to be
   // replaced with the arguments frame.
@@ -10534,6 +10406,7 @@
   DeoptimizedFrameInfo* deoptimized_frame_;
   Isolate* isolate_;
   bool is_optimized_;
+  bool is_bottommost_;
   bool has_adapted_arguments_;
 
   DISALLOW_COPY_AND_ASSIGN(FrameInspector);
@@ -10551,6 +10424,18 @@
 static const int kFrameDetailsFlagsIndex = 8;
 static const int kFrameDetailsFirstDynamicIndex = 9;
 
+
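+// Walk the chain of saved contexts to find the one that was active just
+// below the given frame.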
+static SaveContext* FindSavedContextForFrame(Isolate* isolate,
+                                             JavaScriptFrame* frame) {
+  SaveContext* save = isolate->save_context();
+  while (save != NULL && !save->IsBelowFrame(frame)) {
+    save = save->prev();
+  }
+  ASSERT(save != NULL);
+  return save;
+}
+
+
 // Return an array with frame details
 // args[0]: number: break id
 // args[1]: number: frame index
@@ -10588,8 +10473,6 @@
     return heap->undefined_value();
   }
 
-  int inlined_frame_index = 0;  // Inlined frame index in optimized frame.
-
   int count = 0;
   JavaScriptFrameIterator it(isolate, id);
   for (; !it.done(); it.Advance()) {
@@ -10598,38 +10481,33 @@
   }
   if (it.done()) return heap->undefined_value();
 
-  if (it.frame()->is_optimized()) {
-    inlined_frame_index =
+  bool is_optimized = it.frame()->is_optimized();
+
+  int inlined_jsframe_index = 0;  // Inlined frame index in optimized frame.
+  if (is_optimized) {
+    inlined_jsframe_index =
         it.frame()->GetInlineCount() - (index - count) - 1;
   }
-  FrameInspector frame_inspector(it.frame(), inlined_frame_index, isolate);
+  FrameInspector frame_inspector(it.frame(), inlined_jsframe_index, isolate);
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
-  SaveContext* save = isolate->save_context();
-  while (save != NULL && !save->below(it.frame())) {
-    save = save->prev();
-  }
-  ASSERT(save != NULL);
+  SaveContext* save = FindSavedContextForFrame(isolate, it.frame());
 
   // Get the frame id.
   Handle<Object> frame_id(WrapFrameId(it.frame()->id()), isolate);
 
-  // Find source position.
-  int position =
-      it.frame()->LookupCode()->SourcePosition(it.frame()->pc());
+  // Find source position in unoptimized code.
+  int position = frame_inspector.GetSourcePosition();
 
-  // Check for constructor frame. Inlined frames cannot be construct calls.
-  bool inlined_frame =
-      it.frame()->is_optimized() && inlined_frame_index != 0;
-  bool constructor = !inlined_frame && it.frame()->IsConstructor();
+  // Check for constructor frame.
+  bool constructor = frame_inspector.IsConstructor();
 
   // Get scope info and read from it for local variable information.
-  Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
+  Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
   Handle<SharedFunctionInfo> shared(function->shared());
-  Handle<SerializedScopeInfo> scope_info(shared->scope_info());
-  ASSERT(*scope_info != SerializedScopeInfo::Empty());
-  ScopeInfo<> info(*scope_info);
+  Handle<ScopeInfo> scope_info(shared->scope_info());
+  ASSERT(*scope_info != ScopeInfo::Empty());
 
   // Get the locals names and values into a temporary array.
   //
@@ -10637,31 +10515,33 @@
   // (e.g. .result)?  For users of the debugger, they will probably be
   // confusing.
   Handle<FixedArray> locals =
-      isolate->factory()->NewFixedArray(info.NumberOfLocals() * 2);
+      isolate->factory()->NewFixedArray(scope_info->LocalCount() * 2);
 
   // Fill in the values of the locals.
   int i = 0;
-  for (; i < info.number_of_stack_slots(); ++i) {
+  for (; i < scope_info->StackLocalCount(); ++i) {
     // Use the value from the stack.
-    locals->set(i * 2, *info.LocalName(i));
+    locals->set(i * 2, scope_info->LocalName(i));
     locals->set(i * 2 + 1, frame_inspector.GetExpression(i));
   }
-  if (i < info.NumberOfLocals()) {
+  if (i < scope_info->LocalCount()) {
     // Get the context containing declarations.
     Handle<Context> context(
         Context::cast(it.frame()->context())->declaration_context());
-    for (; i < info.NumberOfLocals(); ++i) {
-      Handle<String> name = info.LocalName(i);
+    for (; i < scope_info->LocalCount(); ++i) {
+      Handle<String> name(scope_info->LocalName(i));
+      VariableMode mode;
+      InitializationFlag init_flag;
       locals->set(i * 2, *name);
-      locals->set(i * 2 + 1,
-                  context->get(scope_info->ContextSlotIndex(*name, NULL)));
+      locals->set(i * 2 + 1, context->get(
+          scope_info->ContextSlotIndex(*name, &mode, &init_flag)));
     }
   }
 
   // Check whether this frame is positioned at return. If not top
   // frame or if the frame is optimized it cannot be at a return.
   bool at_return = false;
-  if (!it.frame()->is_optimized() && index == 0) {
+  if (!is_optimized && index == 0) {
     at_return = isolate->debug()->IsBreakAtReturn(it.frame());
   }
 
@@ -10701,26 +10581,21 @@
   // the provided parameters whereas the function frame always have the number
   // of arguments matching the functions parameters. The rest of the
   // information (except for what is collected above) is the same.
-  if (it.frame()->has_adapted_arguments()) {
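+  // Only the outermost function of an optimized frame can have an arguments
+  // adaptor frame below it; inlined frames never do.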
+  if ((inlined_jsframe_index == 0) && it.frame()->has_adapted_arguments()) {
     it.AdvanceToArgumentsFrame();
     frame_inspector.SetArgumentsFrame(it.frame());
   }
 
   // Find the number of arguments to fill. At least fill the number of
   // parameters for the function and fill more if more parameters are provided.
-  int argument_count = info.number_of_parameters();
+  int argument_count = scope_info->ParameterCount();
   if (argument_count < frame_inspector.GetParametersCount()) {
     argument_count = frame_inspector.GetParametersCount();
   }
-#ifdef DEBUG
-  if (it.frame()->is_optimized()) {
-    ASSERT_EQ(argument_count, frame_inspector.GetParametersCount());
-  }
-#endif
 
   // Calculate the size of the result.
   int details_size = kFrameDetailsFirstDynamicIndex +
-                     2 * (argument_count + info.NumberOfLocals()) +
+                     2 * (argument_count + scope_info->LocalCount()) +
                      (at_return ? 1 : 0);
   Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
 
@@ -10735,7 +10610,7 @@
 
   // Add the locals count
   details->set(kFrameDetailsLocalCountIndex,
-               Smi::FromInt(info.NumberOfLocals()));
+               Smi::FromInt(scope_info->LocalCount()));
 
   // Add the source position.
   if (position != RelocInfo::kNoPosition) {
@@ -10758,9 +10633,9 @@
   if (*save->context() == *isolate->debug()->debug_context()) {
     flags |= 1 << 0;
   }
-  if (it.frame()->is_optimized()) {
+  if (is_optimized) {
     flags |= 1 << 1;
-    flags |= inlined_frame_index << 2;
+    flags |= inlined_jsframe_index << 2;
   }
   details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
 
@@ -10770,14 +10645,14 @@
   // Add arguments name and value.
   for (int i = 0; i < argument_count; i++) {
     // Name of the argument.
-    if (i < info.number_of_parameters()) {
-      details->set(details_index++, *info.parameter_name(i));
+    if (i < scope_info->ParameterCount()) {
+      details->set(details_index++, scope_info->ParameterName(i));
     } else {
       details->set(details_index++, heap->undefined_value());
     }
 
     // Parameter value.
-    if (i < it.frame()->ComputeParametersCount()) {
+    if (i < frame_inspector.GetParametersCount()) {
       // Get the value from the stack.
       details->set(details_index++, frame_inspector.GetParameter(i));
     } else {
@@ -10786,7 +10661,7 @@
   }
 
   // Add locals name and value from the temporary copy from the function frame.
-  for (int i = 0; i < info.NumberOfLocals() * 2; i++) {
+  for (int i = 0; i < scope_info->LocalCount() * 2; i++) {
     details->set(details_index++, locals->get(i));
   }
 
@@ -10799,7 +10674,9 @@
   // THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
   // THE FRAME ITERATOR TO WRAP THE RECEIVER.
   Handle<Object> receiver(it.frame()->receiver(), isolate);
-  if (!receiver->IsJSObject() && !shared->strict_mode() && !shared->native()) {
+  if (!receiver->IsJSObject() &&
+      shared->is_classic_mode() &&
+      !shared->native()) {
     // If the receiver is not a JSObject and the function is not a
     // builtin or strict-mode we have hit an optimization where a
     // value object is not converted into a wrapped JS objects. To
@@ -10822,21 +10699,20 @@
 // Copy all the context locals into an object used to materialize a scope.
 static bool CopyContextLocalsToScopeObject(
     Isolate* isolate,
-    Handle<SerializedScopeInfo> serialized_scope_info,
-    ScopeInfo<>& scope_info,
+    Handle<ScopeInfo> scope_info,
     Handle<Context> context,
     Handle<JSObject> scope_object) {
   // Fill all context locals to the context extension.
-  for (int i = Context::MIN_CONTEXT_SLOTS;
-       i < scope_info.number_of_context_slots();
-       i++) {
-    int context_index = serialized_scope_info->ContextSlotIndex(
-        *scope_info.context_slot_name(i), NULL);
+  for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
+    VariableMode mode;
+    InitializationFlag init_flag;
+    int context_index = scope_info->ContextSlotIndex(
+        scope_info->ContextLocalName(i), &mode, &init_flag);
 
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(scope_object,
-                    scope_info.context_slot_name(i),
+                    Handle<String>(scope_info->ContextLocalName(i)),
                     Handle<Object>(context->get(context_index), isolate),
                     NONE,
                     kNonStrictMode),
@@ -10849,15 +10725,13 @@
 
 // Create a plain JSObject which materializes the local scope for the specified
 // frame.
-static Handle<JSObject> MaterializeLocalScope(
+static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
     Isolate* isolate,
     JavaScriptFrame* frame,
-    int inlined_frame_index) {
-  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+    FrameInspector* frame_inspector) {
+  Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
   Handle<SharedFunctionInfo> shared(function->shared());
-  Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
-  ScopeInfo<> scope_info(*serialized_scope_info);
-  FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
+  Handle<ScopeInfo> scope_info(shared->scope_info());
 
   // Allocate and initialize a JSObject with all the arguments, stack locals
   // heap locals and extension properties of the debugged function.
@@ -10865,36 +10739,39 @@
       isolate->factory()->NewJSObject(isolate->object_function());
 
   // First fill all parameters.
-  for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
+  for (int i = 0; i < scope_info->ParameterCount(); ++i) {
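+    // An adapted frame may supply fewer actual arguments than the function
+    // declares; missing parameters show up as undefined.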
+    Handle<Object> value(
+        i < frame_inspector->GetParametersCount() ?
+        frame_inspector->GetParameter(i) : isolate->heap()->undefined_value());
+
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(local_scope,
-                    scope_info.parameter_name(i),
-                    Handle<Object>(frame_inspector.GetParameter(i)),
+                    Handle<String>(scope_info->ParameterName(i)),
+                    value,
                     NONE,
                     kNonStrictMode),
         Handle<JSObject>());
   }
 
   // Second fill all stack locals.
-  for (int i = 0; i < scope_info.number_of_stack_slots(); ++i) {
+  for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(local_scope,
-                    scope_info.stack_slot_name(i),
-                    Handle<Object>(frame_inspector.GetExpression(i)),
+                    Handle<String>(scope_info->StackLocalName(i)),
+                    Handle<Object>(frame_inspector->GetExpression(i)),
                     NONE,
                     kNonStrictMode),
         Handle<JSObject>());
   }
 
-  if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
+  if (scope_info->HasContext()) {
     // Third fill all context locals.
     Handle<Context> frame_context(Context::cast(frame->context()));
     Handle<Context> function_context(frame_context->declaration_context());
-    if (!CopyContextLocalsToScopeObject(isolate,
-                                        serialized_scope_info, scope_info,
-                                        function_context, local_scope)) {
+    if (!CopyContextLocalsToScopeObject(
+            isolate, scope_info, function_context, local_scope)) {
       return Handle<JSObject>();
     }
 
@@ -10904,7 +10781,11 @@
       if (function_context->has_extension() &&
           !function_context->IsGlobalContext()) {
         Handle<JSObject> ext(JSObject::cast(function_context->extension()));
-        Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
+        bool threw = false;
+        Handle<FixedArray> keys =
+            GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
+        if (threw) return Handle<JSObject>();
+
         for (int i = 0; i < keys->length(); i++) {
           // Names of variables introduced by eval are strings.
           ASSERT(keys->get(i)->IsString());
@@ -10926,6 +10807,17 @@
 }
 
 
+static Handle<JSObject> MaterializeLocalScope(
+    Isolate* isolate,
+    JavaScriptFrame* frame,
+    int inlined_jsframe_index) {
+  FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
+  return MaterializeLocalScopeWithFrameInspector(isolate,
+                                                 frame,
+                                                 &frame_inspector);
+}
+
+
 // Create a plain JSObject which materializes the closure content for the
 // context.
 static Handle<JSObject> MaterializeClosure(Isolate* isolate,
@@ -10933,18 +10825,16 @@
   ASSERT(context->IsFunctionContext());
 
   Handle<SharedFunctionInfo> shared(context->closure()->shared());
-  Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
-  ScopeInfo<> scope_info(*serialized_scope_info);
+  Handle<ScopeInfo> scope_info(shared->scope_info());
 
-  // Allocate and initialize a JSObject with all the content of theis function
+  // Allocate and initialize a JSObject with all the content of this function
   // closure.
   Handle<JSObject> closure_scope =
       isolate->factory()->NewJSObject(isolate->object_function());
 
   // Fill all context locals to the context extension.
-  if (!CopyContextLocalsToScopeObject(isolate,
-                                      serialized_scope_info, scope_info,
-                                      context, closure_scope)) {
+  if (!CopyContextLocalsToScopeObject(
+          isolate, scope_info, context, closure_scope)) {
     return Handle<JSObject>();
   }
 
@@ -10952,7 +10842,11 @@
   // be variables introduced by eval.
   if (context->has_extension()) {
     Handle<JSObject> ext(JSObject::cast(context->extension()));
-    Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
+    bool threw = false;
+    Handle<FixedArray> keys =
+        GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
+    if (threw) return Handle<JSObject>();
+
     for (int i = 0; i < keys->length(); i++) {
       // Names of variables introduced by eval are strings.
       ASSERT(keys->get(i)->IsString());
@@ -10995,9 +10889,7 @@
     Isolate* isolate,
     Handle<Context> context) {
   ASSERT(context->IsBlockContext());
-  Handle<SerializedScopeInfo> serialized_scope_info(
-      SerializedScopeInfo::cast(context->extension()));
-  ScopeInfo<> scope_info(*serialized_scope_info);
+  Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
 
   // Allocate and initialize a JSObject with all the arguments, stack locals
   // heap locals and extension properties of the debugged function.
@@ -11005,21 +10897,42 @@
       isolate->factory()->NewJSObject(isolate->object_function());
 
   // Fill all context locals.
-  if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
-    if (!CopyContextLocalsToScopeObject(isolate,
-                                        serialized_scope_info, scope_info,
-                                        context, block_scope)) {
-      return Handle<JSObject>();
-    }
+  if (!CopyContextLocalsToScopeObject(
+          isolate, scope_info, context, block_scope)) {
+    return Handle<JSObject>();
   }
 
   return block_scope;
 }
 
 
-// Iterate over the actual scopes visible from a stack frame. All scopes are
+// Create a plain JSObject which materializes the module scope for the specified
+// module context.
+static Handle<JSObject> MaterializeModuleScope(
+    Isolate* isolate,
+    Handle<Context> context) {
+  ASSERT(context->IsModuleContext());
+  Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
+
+  // Allocate and initialize a JSObject with all the members of the debugged
+  // module.
+  Handle<JSObject> module_scope =
+      isolate->factory()->NewJSObject(isolate->object_function());
+
+  // Fill all context locals.
+  if (!CopyContextLocalsToScopeObject(
+          isolate, scope_info, context, module_scope)) {
+    return Handle<JSObject>();
+  }
+
+  return module_scope;
+}
+
+
+// Iterate over the actual scopes visible from a stack frame. The iteration
+// proceeds from the innermost visible nested scope outwards. All scopes are
 // backed by an actual context except the local scope, which is inserted
-// "artifically" in the context chain.
+// "artificially" in the context chain.
 class ScopeIterator {
  public:
   enum ScopeType {
@@ -11028,38 +10941,95 @@
     ScopeTypeWith,
     ScopeTypeClosure,
     ScopeTypeCatch,
-    ScopeTypeBlock
+    ScopeTypeBlock,
+    ScopeTypeModule
   };
 
   ScopeIterator(Isolate* isolate,
                 JavaScriptFrame* frame,
-                int inlined_frame_index)
+                int inlined_jsframe_index)
     : isolate_(isolate),
       frame_(frame),
-      inlined_frame_index_(inlined_frame_index),
+      inlined_jsframe_index_(inlined_jsframe_index),
       function_(JSFunction::cast(frame->function())),
       context_(Context::cast(frame->context())),
-      local_done_(false),
-      at_local_(false) {
+      nested_scope_chain_(4) {
 
-    // Check whether the first scope is actually a local scope.
-    // If there is a stack slot for .result then this local scope has been
-    // created for evaluating top level code and it is not a real local scope.
-    // Checking for the existence of .result seems fragile, but the scope info
-    // saved with the code object does not otherwise have that information.
-    int index = function_->shared()->scope_info()->
-        StackSlotIndex(isolate_->heap()->result_symbol());
-    if (index >= 0) {
-      local_done_ = true;
-    } else if (context_->IsGlobalContext() ||
-               context_->IsFunctionContext()) {
-      at_local_ = true;
-    } else if (context_->closure() != *function_) {
-      // The context_ is a block or with or catch block from the outer function.
-      ASSERT(context_->IsWithContext() ||
-             context_->IsCatchContext() ||
-             context_->IsBlockContext());
-      at_local_ = true;
+    // Catch the case when the debugger stops in an internal function.
+    Handle<SharedFunctionInfo> shared_info(function_->shared());
+    Handle<ScopeInfo> scope_info(shared_info->scope_info());
+    if (shared_info->script() == isolate->heap()->undefined_value()) {
+      while (context_->closure() == *function_) {
+        context_ = Handle<Context>(context_->previous(), isolate_);
+      }
+      return;
+    }
+
+    // Get the debug info (create it if it does not exist).
+    if (!isolate->debug()->EnsureDebugInfo(shared_info)) {
+      // Return if ensuring debug info failed.
+      return;
+    }
+    Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
+
+    // Find the break point where execution has stopped.
+    BreakLocationIterator break_location_iterator(debug_info,
+                                                  ALL_BREAK_LOCATIONS);
+    break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+    if (break_location_iterator.IsExit()) {
+      // We are within the return sequence. At the moment it is not possible to
+      // get a source position which is consistent with the current scope chain.
+      // Thus all nested with, catch and block contexts are skipped and we only
+      // provide the function scope.
+      if (scope_info->HasContext()) {
+        context_ = Handle<Context>(context_->declaration_context(), isolate_);
+      } else {
+        while (context_->closure() == *function_) {
+          context_ = Handle<Context>(context_->previous(), isolate_);
+        }
+      }
+      if (scope_info->Type() != EVAL_SCOPE) nested_scope_chain_.Add(scope_info);
+    } else {
+      // Reparse the code and analyze the scopes.
+      ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+      Handle<Script> script(Script::cast(shared_info->script()));
+      Scope* scope = NULL;
+
+      // Check whether we are in global, eval or function code.
+      Handle<ScopeInfo> scope_info(shared_info->scope_info());
+      if (scope_info->Type() != FUNCTION_SCOPE) {
+        // Global or eval code.
+        CompilationInfo info(script);
+        if (scope_info->Type() == GLOBAL_SCOPE) {
+          info.MarkAsGlobal();
+        } else {
+          ASSERT(scope_info->Type() == EVAL_SCOPE);
+          info.MarkAsEval();
+          info.SetCallingContext(Handle<Context>(function_->context()));
+        }
+        if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
+          scope = info.function()->scope();
+        }
+      } else {
+        // Function code
+        CompilationInfo info(shared_info);
+        if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
+          scope = info.function()->scope();
+        }
+      }
+
+      // Retrieve the scope chain for the current position.
+      if (scope != NULL) {
+        int source_position = shared_info->code()->SourcePosition(frame_->pc());
+        scope->GetNestedScopeChain(&nested_scope_chain_, source_position);
+      } else {
+        // A failed reparse indicates that the preparser has diverged from the
+        // parser, or that the preparse data given to the initial parse was
+        // faulty. We fail in debug mode, but in release mode we only provide
+        // the information we get from the context chain, with nothing about
+        // completely stack-allocated scopes or stack-allocated locals.
+        UNREACHABLE();
+      }
     }
   }
 
@@ -11068,40 +11038,52 @@
 
   // Move to the next scope.
   void Next() {
-    // If at a local scope mark the local scope as passed.
-    if (at_local_) {
-      at_local_ = false;
-      local_done_ = true;
-
-      // If the current context is not associated with the local scope the
-      // current context is the next real scope, so don't move to the next
-      // context in this case.
-      if (context_->closure() != *function_) {
-        return;
-      }
-    }
-
-    // The global scope is always the last in the chain.
-    if (context_->IsGlobalContext()) {
+    ScopeType scope_type = Type();
+    if (scope_type == ScopeTypeGlobal) {
+      // The global scope is always the last in the chain.
+      ASSERT(context_->IsGlobalContext());
       context_ = Handle<Context>();
       return;
     }
-
-    // Move to the next context.
-    context_ = Handle<Context>(context_->previous(), isolate_);
-
-    // If passing the local scope indicate that the current scope is now the
-    // local scope.
-    if (!local_done_ &&
-        (context_->IsGlobalContext() || context_->IsFunctionContext())) {
-      at_local_ = true;
+    if (nested_scope_chain_.is_empty()) {
+      context_ = Handle<Context>(context_->previous(), isolate_);
+    } else {
+      if (nested_scope_chain_.last()->HasContext()) {
+        ASSERT(context_->previous() != NULL);
+        context_ = Handle<Context>(context_->previous(), isolate_);
+      }
+      nested_scope_chain_.RemoveLast();
     }
   }
 
   // Return the type of the current scope.
   ScopeType Type() {
-    if (at_local_) {
-      return ScopeTypeLocal;
+    if (!nested_scope_chain_.is_empty()) {
+      Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
+      switch (scope_info->Type()) {
+        case FUNCTION_SCOPE:
+          ASSERT(context_->IsFunctionContext() ||
+                 !scope_info->HasContext());
+          return ScopeTypeLocal;
+        case MODULE_SCOPE:
+          ASSERT(context_->IsModuleContext());
+          return ScopeTypeModule;
+        case GLOBAL_SCOPE:
+          ASSERT(context_->IsGlobalContext());
+          return ScopeTypeGlobal;
+        case WITH_SCOPE:
+          ASSERT(context_->IsWithContext());
+          return ScopeTypeWith;
+        case CATCH_SCOPE:
+          ASSERT(context_->IsCatchContext());
+          return ScopeTypeCatch;
+        case BLOCK_SCOPE:
+          ASSERT(!scope_info->HasContext() ||
+                 context_->IsBlockContext());
+          return ScopeTypeBlock;
+        case EVAL_SCOPE:
+          UNREACHABLE();
+      }
     }
     if (context_->IsGlobalContext()) {
       ASSERT(context_->global()->IsGlobalObject());
@@ -11116,6 +11098,9 @@
     if (context_->IsBlockContext()) {
       return ScopeTypeBlock;
     }
+    if (context_->IsModuleContext()) {
+      return ScopeTypeModule;
+    }
     ASSERT(context_->IsWithContext());
     return ScopeTypeWith;
   }
@@ -11127,7 +11112,8 @@
         return Handle<JSObject>(CurrentContext()->global());
       case ScopeIterator::ScopeTypeLocal:
         // Materialize the content of the local scope into a JSObject.
-        return MaterializeLocalScope(isolate_, frame_, inlined_frame_index_);
+        ASSERT(nested_scope_chain_.length() == 1);
+        return MaterializeLocalScope(isolate_, frame_, inlined_jsframe_index_);
       case ScopeIterator::ScopeTypeWith:
         // Return the with object.
         return Handle<JSObject>(JSObject::cast(CurrentContext()->extension()));
@@ -11138,18 +11124,35 @@
         return MaterializeClosure(isolate_, CurrentContext());
       case ScopeIterator::ScopeTypeBlock:
         return MaterializeBlockScope(isolate_, CurrentContext());
+      case ScopeIterator::ScopeTypeModule:
+        return MaterializeModuleScope(isolate_, CurrentContext());
     }
     UNREACHABLE();
     return Handle<JSObject>();
   }
 
+  Handle<ScopeInfo> CurrentScopeInfo() {
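+    // Prefer the statically known scope info from the nested chain; fall
+    // back to the context for block and function scopes, and return null
+    // for scope types that carry no scope info.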
+    if (!nested_scope_chain_.is_empty()) {
+      return nested_scope_chain_.last();
+    } else if (context_->IsBlockContext()) {
+      return Handle<ScopeInfo>(ScopeInfo::cast(context_->extension()));
+    } else if (context_->IsFunctionContext()) {
+      return Handle<ScopeInfo>(context_->closure()->shared()->scope_info());
+    }
+    return Handle<ScopeInfo>::null();
+  }
+
   // Return the context for this scope. For the local context there might not
   // be an actual context.
   Handle<Context> CurrentContext() {
-    if (at_local_ && context_->closure() != *function_) {
+    if (Type() == ScopeTypeGlobal ||
+        nested_scope_chain_.is_empty()) {
+      return context_;
+    } else if (nested_scope_chain_.last()->HasContext()) {
+      return context_;
+    } else {
       return Handle<Context>();
     }
-    return context_;
   }
 
 #ifdef DEBUG
@@ -11163,8 +11166,7 @@
 
       case ScopeIterator::ScopeTypeLocal: {
         PrintF("Local:\n");
-        ScopeInfo<> scope_info(function_->shared()->scope_info());
-        scope_info.Print();
+        function_->shared()->scope_info()->Print();
         if (!CurrentContext().is_null()) {
           CurrentContext()->Print();
           if (CurrentContext()->has_extension()) {
@@ -11209,11 +11211,10 @@
  private:
   Isolate* isolate_;
   JavaScriptFrame* frame_;
-  int inlined_frame_index_;
+  int inlined_jsframe_index_;
   Handle<JSFunction> function_;
   Handle<Context> context_;
-  bool local_done_;
-  bool at_local_;
+  List<Handle<ScopeInfo> > nested_scope_chain_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
 };
@@ -11229,7 +11230,7 @@
       RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
-  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+  CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
 
   // Get the frame where the debugging is performed.
   StackFrame::Id id = UnwrapFrameId(wrapped_id);
@@ -11271,8 +11272,8 @@
       RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
-  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
-  CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
+  CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
+  CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
   CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
 
   // Get the frame where the debugging is performed.
@@ -11282,7 +11283,7 @@
 
   // Find the requested scope.
   int n = 0;
-  ScopeIterator it(isolate, frame, inlined_frame_index);
+  ScopeIterator it(isolate, frame, inlined_jsframe_index);
   for (; !it.Done() && n < index; it.Next()) {
     n++;
   }
@@ -11411,7 +11412,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDisableBreak) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_BOOLEAN_CHECKED(disable_break, args[0]);
+  CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 0);
   isolate->debug()->set_disable_break(disable_break);
   return isolate->heap()->undefined_value();
 }
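
This patch systematically converts the argument macros used by the runtime
functions. As a rough guide (a sketch of the conventions, with hypothetical
variable names): CONVERT_ARG_CHECKED binds a raw pointer,
CONVERT_ARG_HANDLE_CHECKED binds a Handle, and the SMI/BOOLEAN variants bind
plain C++ values taken from the indexed argument:

    CONVERT_ARG_CHECKED(JSFunction, raw_fun, 0);     // JSFunction* raw_fun
    CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);  // Handle<JSFunction> fun
    CONVERT_SMI_ARG_CHECKED(id, 1);                  // int id from a Smi
    CONVERT_BOOLEAN_ARG_CHECKED(flag, 2);            // bool flag
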
@@ -11421,7 +11422,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
   Handle<SharedFunctionInfo> shared(fun->shared());
   // Find the number of break points
   Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
@@ -11439,7 +11440,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
   Handle<SharedFunctionInfo> shared(fun->shared());
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
   RUNTIME_ASSERT(source_position >= 0);
@@ -11469,48 +11470,53 @@
   int target_start_position = RelocInfo::kNoPosition;
   Handle<SharedFunctionInfo> target;
   while (!done) {
-    HeapIterator iterator;
-    for (HeapObject* obj = iterator.next();
-         obj != NULL; obj = iterator.next()) {
-      if (obj->IsSharedFunctionInfo()) {
-        Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
-        if (shared->script() == *script) {
-          // If the SharedFunctionInfo found has the requested script data and
-          // contains the source position it is a candidate.
-          int start_position = shared->function_token_position();
-          if (start_position == RelocInfo::kNoPosition) {
-            start_position = shared->start_position();
-          }
-          if (start_position <= position &&
-              position <= shared->end_position()) {
-            // If there is no candidate or this function is within the current
-            // candidate this is the new candidate.
-            if (target.is_null()) {
-              target_start_position = start_position;
-              target = shared;
-            } else {
-              if (target_start_position == start_position &&
-                  shared->end_position() == target->end_position()) {
-                  // If a top-level function contain only one function
-                  // declartion the source for the top-level and the function is
-                  // the same. In that case prefer the non top-level function.
-                if (!shared->is_toplevel()) {
+    { // Extra scope for iterator and no-allocation.
+      isolate->heap()->EnsureHeapIsIterable();
+      AssertNoAllocation no_alloc_during_heap_iteration;
+      HeapIterator iterator;
+      for (HeapObject* obj = iterator.next();
+           obj != NULL; obj = iterator.next()) {
+        if (obj->IsSharedFunctionInfo()) {
+          Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
+          if (shared->script() == *script) {
+            // If the SharedFunctionInfo found has the requested script data and
+            // contains the source position it is a candidate.
+            int start_position = shared->function_token_position();
+            if (start_position == RelocInfo::kNoPosition) {
+              start_position = shared->start_position();
+            }
+            if (start_position <= position &&
+                position <= shared->end_position()) {
+              // If there is no candidate or this function is within the current
+              // candidate this is the new candidate.
+              if (target.is_null()) {
+                target_start_position = start_position;
+                target = shared;
+              } else {
+                if (target_start_position == start_position &&
+                    shared->end_position() == target->end_position()) {
+                    // If a top-level function contains only one function
+                    // declaration the source for the top-level and the
+                    // function is the same. In that case prefer the non
+                    // top-level function.
+                  if (!shared->is_toplevel()) {
+                    target_start_position = start_position;
+                    target = shared;
+                  }
+                } else if (target_start_position <= start_position &&
+                           shared->end_position() <= target->end_position()) {
+                  // This containment check includes equality as a function
+                  // inside a top-level function can share either start or end
+                  // position with the top-level function.
                   target_start_position = start_position;
                   target = shared;
                 }
-              } else if (target_start_position <= start_position &&
-                         shared->end_position() <= target->end_position()) {
-                // This containment check includes equality as a function inside
-                // a top-level function can share either start or end position
-                // with the top-level function.
-                target_start_position = start_position;
-                target = shared;
               }
             }
           }
         }
-      }
-    }
+      }  // End for loop.
+    }  // End no-allocation scope.
 
     if (target.is_null()) {
       return isolate->heap()->undefined_value();
@@ -11523,9 +11529,9 @@
     if (!done) {
       // If the candidate is not compiled compile it to reveal any inner
       // functions which might contain the requested source position.
-      CompileLazyShared(target, KEEP_EXCEPTION);
+      SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
     }
-  }
+  }  // End while loop.
 
   return *target;
 }
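
The hunk above wraps the heap walk in the pattern this patch applies at every
HeapIterator use: make the heap iterable first, then forbid allocation for
the duration of the iteration. In isolation (a sketch built from the same
calls):

    isolate->heap()->EnsureHeapIsIterable();  // may trigger a GC
    AssertNoAllocation no_alloc;              // no allocation while iterating
    HeapIterator iterator;
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      // Inspect obj here; allocating would invalidate the iteration.
    }
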
@@ -11540,7 +11546,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(JSValue, wrapper, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
   RUNTIME_ASSERT(source_position >= 0);
   Handle<Object> break_point_object_arg = args.at<Object>(2);
@@ -11590,7 +11596,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   RUNTIME_ASSERT(args[0]->IsNumber());
-  CONVERT_BOOLEAN_CHECKED(enable, args[1]);
+  CONVERT_BOOLEAN_ARG_CHECKED(enable, 1);
 
   // If the number doesn't match an enum value, the ChangeBreakOnException
   // function will default to affecting caught exceptions.
@@ -11671,46 +11677,65 @@
 
 // Creates a copy of the with context chain. The copy of the context chain
 // is linked to the function context supplied.
-static Handle<Context> CopyWithContextChain(Isolate* isolate,
-                                            Handle<JSFunction> function,
-                                            Handle<Context> current,
-                                            Handle<Context> base) {
-  // At the end of the chain. Return the base context to link to.
-  if (current->IsFunctionContext() || current->IsGlobalContext()) {
-    return base;
+static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
+                                                   Handle<JSFunction> function,
+                                                   Handle<Context> base,
+                                                   JavaScriptFrame* frame,
+                                                   int inlined_jsframe_index) {
+  HandleScope scope(isolate);
+  List<Handle<ScopeInfo> > scope_chain;
+  List<Handle<Context> > context_chain;
+
+  ScopeIterator it(isolate, frame, inlined_jsframe_index);
+  for (; it.Type() != ScopeIterator::ScopeTypeGlobal &&
+         it.Type() != ScopeIterator::ScopeTypeLocal; it.Next()) {
+    ASSERT(!it.Done());
+    scope_chain.Add(it.CurrentScopeInfo());
+    context_chain.Add(it.CurrentContext());
   }
 
-  // Recursively copy the with and catch contexts.
-  HandleScope scope(isolate);
-  Handle<Context> previous(current->previous());
-  Handle<Context> new_previous =
-      CopyWithContextChain(isolate, function, previous, base);
-  Handle<Context> new_current;
-  if (current->IsCatchContext()) {
-    Handle<String> name(String::cast(current->extension()));
-    Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
-    new_current =
-        isolate->factory()->NewCatchContext(function,
-                                            new_previous,
-                                            name,
-                                            thrown_object);
-  } else if (current->IsBlockContext()) {
-    Handle<SerializedScopeInfo> scope_info(
-        SerializedScopeInfo::cast(current->extension()));
-    new_current =
-        isolate->factory()->NewBlockContext(function, new_previous, scope_info);
-    // Copy context slots.
-    int num_context_slots = scope_info->NumberOfContextSlots();
-    for (int i = Context::MIN_CONTEXT_SLOTS; i < num_context_slots; ++i) {
-      new_current->set(i, current->get(i));
+  // Start from the base context; the copied scopes are linked on top of it.
+  Handle<Context> context = base;
+
+  // Iteratively copy and/or materialize the nested contexts.
+  while (!scope_chain.is_empty()) {
+    Handle<ScopeInfo> scope_info = scope_chain.RemoveLast();
+    Handle<Context> current = context_chain.RemoveLast();
+    ASSERT(!(scope_info->HasContext() && current.is_null()));
+
+    if (scope_info->Type() == CATCH_SCOPE) {
+      Handle<String> name(String::cast(current->extension()));
+      Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
+      context =
+          isolate->factory()->NewCatchContext(function,
+                                              context,
+                                              name,
+                                              thrown_object);
+    } else if (scope_info->Type() == BLOCK_SCOPE) {
+      // Materialize the contents of the block scope into a JSObject.
+      Handle<JSObject> block_scope_object =
+          MaterializeBlockScope(isolate, current);
+      if (block_scope_object.is_null()) {
+        return Handle<Context>::null();
+      }
+      // Allocate a new function context for the debug evaluation and set the
+      // extension object.
+      Handle<Context> new_context =
+          isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
+                                                 function);
+      new_context->set_extension(*block_scope_object);
+      new_context->set_previous(*context);
+      context = new_context;
+    } else {
+      ASSERT(scope_info->Type() == WITH_SCOPE);
+      ASSERT(current->IsWithContext());
+      Handle<JSObject> extension(JSObject::cast(current->extension()));
+      context =
+          isolate->factory()->NewWithContext(function, context, extension);
     }
-  } else {
-    ASSERT(current->IsWithContext());
-    Handle<JSObject> extension(JSObject::cast(current->extension()));
-    new_current =
-        isolate->factory()->NewWithContext(function, new_previous, extension);
   }
-  return scope.CloseAndEscape(new_current);
+
+  return scope.CloseAndEscape(context);
 }
 
 
@@ -11718,33 +11743,32 @@
 // Runtime_DebugEvaluate.
 static Handle<Object> GetArgumentsObject(Isolate* isolate,
                                          JavaScriptFrame* frame,
-                                         int inlined_frame_index,
-                                         Handle<JSFunction> function,
-                                         Handle<SerializedScopeInfo> scope_info,
-                                         const ScopeInfo<>* sinfo,
+                                         FrameInspector* frame_inspector,
+                                         Handle<ScopeInfo> scope_info,
                                          Handle<Context> function_context) {
   // Try to find the value of 'arguments' to pass as parameter. If it is not
   // found (that is the debugged function does not reference 'arguments' and
   // does not support eval) then create an 'arguments' object.
   int index;
-  if (sinfo->number_of_stack_slots() > 0) {
+  if (scope_info->StackLocalCount() > 0) {
     index = scope_info->StackSlotIndex(isolate->heap()->arguments_symbol());
     if (index != -1) {
       return Handle<Object>(frame->GetExpression(index), isolate);
     }
   }
 
-  if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
-    index = scope_info->ContextSlotIndex(isolate->heap()->arguments_symbol(),
-                                         NULL);
+  if (scope_info->HasHeapAllocatedLocals()) {
+    VariableMode mode;
+    InitializationFlag init_flag;
+    index = scope_info->ContextSlotIndex(
+        isolate->heap()->arguments_symbol(), &mode, &init_flag);
     if (index != -1) {
       return Handle<Object>(function_context->get(index), isolate);
     }
   }
 
-  FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
-
-  int length = frame_inspector.GetParametersCount();
+  Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
+  int length = frame_inspector->GetParametersCount();
   Handle<JSObject> arguments =
       isolate->factory()->NewArgumentsObject(function, length);
   Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
@@ -11752,7 +11776,7 @@
   AssertNoAllocation no_gc;
   WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
   for (int i = 0; i < length; i++) {
-    array->set(i, frame_inspector.GetParameter(i), mode);
+    array->set(i, frame_inspector->GetParameter(i), mode);
   }
   arguments->set_elements(*array);
   return arguments;
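
The lookups above use the flattened ScopeInfo interface: StackLocalCount()
and StackSlotIndex() for stack-allocated locals, and ContextSlotIndex() with
VariableMode/InitializationFlag out-parameters for heap-allocated ones. The
context lookup in isolation (a sketch of the call from this hunk):

    VariableMode mode;
    InitializationFlag init_flag;
    int slot = scope_info->ContextSlotIndex(
        isolate->heap()->arguments_symbol(), &mode, &init_flag);
    if (slot != -1) {
      // The variable lives in the context; mode/init_flag describe how it
      // was declared.
      Handle<Object> value(function_context->get(slot), isolate);
    }
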
@@ -11787,10 +11811,10 @@
       return maybe_check_result;
     }
   }
-  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
-  CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
-  CONVERT_ARG_CHECKED(String, source, 3);
-  CONVERT_BOOLEAN_CHECKED(disable_break, args[4]);
+  CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
+  CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
+  CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
+  CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4);
   Handle<Object> additional_context(args[5]);
 
   // Handle the processing of break.
@@ -11800,17 +11824,14 @@
   StackFrame::Id id = UnwrapFrameId(wrapped_id);
   JavaScriptFrameIterator it(isolate, id);
   JavaScriptFrame* frame = it.frame();
-  Handle<JSFunction> function(JSFunction::cast(frame->function()));
-  Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
-  ScopeInfo<> sinfo(*scope_info);
+  FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
+  Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
+  Handle<ScopeInfo> scope_info(function->shared()->scope_info());
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
-  SaveContext* save = isolate->save_context();
-  while (save != NULL && !save->below(frame)) {
-    save = save->prev();
-  }
-  ASSERT(save != NULL);
+  SaveContext* save = FindSavedContextForFrame(isolate, frame);
+
   SaveContext savex(isolate);
   isolate->set_context(*(save->context()));
 
@@ -11825,14 +11846,14 @@
                                       isolate->factory()->undefined_value());
   go_between->set_context(function->context());
 #ifdef DEBUG
-  ScopeInfo<> go_between_sinfo(go_between->shared()->scope_info());
-  ASSERT(go_between_sinfo.number_of_parameters() == 0);
-  ASSERT(go_between_sinfo.number_of_context_slots() == 0);
+  Handle<ScopeInfo> go_between_scope_info(go_between->shared()->scope_info());
+  ASSERT(go_between_scope_info->ParameterCount() == 0);
+  ASSERT(go_between_scope_info->ContextLocalCount() == 0);
 #endif
 
   // Materialize the content of the local scope into a JSObject.
-  Handle<JSObject> local_scope = MaterializeLocalScope(
-      isolate, frame, inlined_frame_index);
+  Handle<JSObject> local_scope = MaterializeLocalScopeWithFrameInspector(
+      isolate, frame, &frame_inspector);
   RETURN_IF_EMPTY_HANDLE(isolate, local_scope);
 
   // Allocate a new context for the debug evaluation and set the extension
@@ -11845,10 +11866,14 @@
   Handle<Context> frame_context(Context::cast(frame->context()));
   Handle<Context> function_context;
   // Get the function's context if it has one.
-  if (scope_info->HasHeapAllocatedLocals()) {
+  if (scope_info->HasContext()) {
     function_context = Handle<Context>(frame_context->declaration_context());
   }
-  context = CopyWithContextChain(isolate, go_between, frame_context, context);
+  context = CopyNestedScopeContextChain(isolate,
+                                        go_between,
+                                        context,
+                                        frame,
+                                        inlined_jsframe_index);
 
   if (additional_context->IsJSObject()) {
     Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
@@ -11872,7 +11897,8 @@
       Compiler::CompileEval(function_source,
                             context,
                             context->IsGlobalContext(),
-                            kNonStrictMode);
+                            CLASSIC_MODE,
+                            RelocInfo::kNoPosition);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
@@ -11886,17 +11912,19 @@
   if (has_pending_exception) return Failure::Exception();
 
   Handle<Object> arguments = GetArgumentsObject(isolate,
-                                                frame, inlined_frame_index,
-                                                function, scope_info,
-                                                &sinfo, function_context);
+                                                frame,
+                                                &frame_inspector,
+                                                scope_info,
+                                                function_context);
 
   // Invoke the evaluation function and return the result.
-  const int argc = 2;
-  Object** argv[argc] = { arguments.location(),
-                          Handle<Object>::cast(source).location() };
+  Handle<Object> argv[] = { arguments, source };
   Handle<Object> result =
-      Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver,
-                      argc, argv, &has_pending_exception);
+      Execution::Call(Handle<JSFunction>::cast(evaluation_function),
+                      receiver,
+                      ARRAY_SIZE(argv),
+                      argv,
+                      &has_pending_exception);
   if (has_pending_exception) return Failure::Exception();
 
   // Skip the global proxy as it has no properties and always delegates to the
@@ -11922,8 +11950,8 @@
       return maybe_check_result;
     }
   }
-  CONVERT_ARG_CHECKED(String, source, 1);
-  CONVERT_BOOLEAN_CHECKED(disable_break, args[2]);
+  CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
+  CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
   Handle<Object> additional_context(args[3]);
 
   // Handle the processing of break.
@@ -11946,15 +11974,12 @@
   bool is_global = true;
 
   if (additional_context->IsJSObject()) {
-    // Create a function context first, than put 'with' context on top of it.
-    Handle<JSFunction> go_between = isolate->factory()->NewFunction(
-        isolate->factory()->empty_string(),
-        isolate->factory()->undefined_value());
-    go_between->set_context(*context);
-    context =
-        isolate->factory()->NewFunctionContext(
-            Context::MIN_CONTEXT_SLOTS, go_between);
-    context->set_extension(JSObject::cast(*additional_context));
+    // Create a new 'with' context holding the additional context
+    // information, placed between the context of the debugged function and
+    // the eval code to be executed.
+    context = isolate->factory()->NewWithContext(
+        Handle<JSFunction>(context->closure()),
+        context,
+        Handle<JSObject>::cast(additional_context));
     is_global = false;
   }
 
@@ -11962,7 +11987,11 @@
   // Currently, the eval code will be executed in non-strict mode,
   // even in the strict code context.
   Handle<SharedFunctionInfo> shared =
-      Compiler::CompileEval(source, context, is_global, kNonStrictMode);
+      Compiler::CompileEval(source,
+                            context,
+                            is_global,
+                            CLASSIC_MODE,
+                            RelocInfo::kNoPosition);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       Handle<JSFunction>(
@@ -11975,6 +12004,8 @@
   Handle<Object> result =
     Execution::Call(compiled_function, receiver, 0, NULL,
                     &has_pending_exception);
+  // Clear the oneshot breakpoints so that the debugger does not step further.
+  isolate->debug()->ClearStepping();
   if (has_pending_exception) return Failure::Exception();
   return *result;
 }
@@ -11994,7 +12025,7 @@
     // because using
     //   instances->set(i, *GetScriptWrapper(script))
     // is unsafe as GetScriptWrapper might call GC and the C++ compiler might
-    // already have deferenced the instances handle.
+    // already have dereferenced the instances handle.
     Handle<JSValue> wrapper = GetScriptWrapper(script);
     instances->set(i, *wrapper);
   }
@@ -12002,13 +12033,14 @@
   // Return result as a JS array.
   Handle<JSObject> result =
       isolate->factory()->NewJSObject(isolate->array_function());
-  Handle<JSArray>::cast(result)->SetContent(*instances);
+  isolate->factory()->SetContent(Handle<JSArray>::cast(result), instances);
   return *result;
 }
 
 
 // Helper function used by Runtime_DebugReferencedBy below.
-static int DebugReferencedBy(JSObject* target,
+static int DebugReferencedBy(HeapIterator* iterator,
+                             JSObject* target,
                              Object* instance_filter, int max_references,
                              FixedArray* instances, int instances_size,
                              JSFunction* arguments_function) {
@@ -12018,9 +12050,8 @@
   // Iterate the heap.
   int count = 0;
   JSObject* last = NULL;
-  HeapIterator iterator;
   HeapObject* heap_obj = NULL;
-  while (((heap_obj = iterator.next()) != NULL) &&
+  while (((heap_obj = iterator->next()) != NULL) &&
          (max_references == 0 || count < max_references)) {
     // Only look at all JSObjects.
     if (heap_obj->IsJSObject()) {
@@ -12085,16 +12116,22 @@
   ASSERT(args.length() == 3);
 
   // First perform a full GC in order to avoid references from dead objects.
-  isolate->heap()->CollectAllGarbage(false);
+  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                                     "%DebugReferencedBy");
+  // The heap iterator reserves the right to do a GC to make the heap iterable.
+  // Due to the GC above we know it won't need to do that, but it seems cleaner
+  // to get the heap iterator constructed before we start having raw Object*
+  // locals that are not protected by handles.
 
   // Check parameters.
-  CONVERT_CHECKED(JSObject, target, args[0]);
+  CONVERT_ARG_CHECKED(JSObject, target, 0);
   Object* instance_filter = args[1];
   RUNTIME_ASSERT(instance_filter->IsUndefined() ||
                  instance_filter->IsJSObject());
   CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
   RUNTIME_ASSERT(max_references >= 0);
 
+
   // Get the constructor function for context extension and arguments array.
   JSObject* arguments_boilerplate =
       isolate->context()->global_context()->arguments_boilerplate();
@@ -12103,7 +12140,9 @@
 
   // Get the number of referencing objects.
   int count;
-  count = DebugReferencedBy(target, instance_filter, max_references,
+  HeapIterator heap_iterator;
+  count = DebugReferencedBy(&heap_iterator,
+                            target, instance_filter, max_references,
                             NULL, 0, arguments_function);
 
   // Allocate an array to hold the result.
@@ -12114,30 +12153,34 @@
   FixedArray* instances = FixedArray::cast(object);
 
   // Fill the referencing objects.
-  count = DebugReferencedBy(target, instance_filter, max_references,
+  // AllocateFixedArray above does not make the heap non-iterable.
+  ASSERT(HEAP->IsHeapIterable());
+  HeapIterator heap_iterator2;
+  count = DebugReferencedBy(&heap_iterator2,
+                            target, instance_filter, max_references,
                             instances, count, arguments_function);
 
   // Return result as JS array.
   Object* result;
-  { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+  MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
       isolate->context()->global_context()->array_function());
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  JSArray::cast(result)->SetContent(instances);
-  return result;
+  if (!maybe_result->ToObject(&result)) return maybe_result;
+  return JSArray::cast(result)->SetContent(instances);
 }
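
Threading the HeapIterator through the helper makes the two-pass structure
explicit: pass one counts matches against a NULL buffer, a FixedArray of
that size is allocated (which keeps the heap iterable), and pass two fills
it with a second iterator, since an iterator cannot be rewound. In sketch
form:

    HeapIterator count_pass;
    int count = DebugReferencedBy(&count_pass, target, instance_filter,
                                  max_references, NULL, 0, arguments_function);
    // ... allocate a FixedArray 'instances' with room for 'count' items ...
    ASSERT(HEAP->IsHeapIterable());
    HeapIterator fill_pass;
    count = DebugReferencedBy(&fill_pass, target, instance_filter,
                              max_references, instances, count,
                              arguments_function);
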
 
 
 // Helper function used by Runtime_DebugConstructedBy below.
-static int DebugConstructedBy(JSFunction* constructor, int max_references,
-                              FixedArray* instances, int instances_size) {
+static int DebugConstructedBy(HeapIterator* iterator,
+                              JSFunction* constructor,
+                              int max_references,
+                              FixedArray* instances,
+                              int instances_size) {
   AssertNoAllocation no_alloc;
 
   // Iterate the heap.
   int count = 0;
-  HeapIterator iterator;
   HeapObject* heap_obj = NULL;
-  while (((heap_obj = iterator.next()) != NULL) &&
+  while (((heap_obj = iterator->next()) != NULL) &&
          (max_references == 0 || count < max_references)) {
     // Only look at all JSObjects.
     if (heap_obj->IsJSObject()) {
@@ -12165,16 +12208,22 @@
   ASSERT(args.length() == 2);
 
   // First perform a full GC in order to avoid dead objects.
-  isolate->heap()->CollectAllGarbage(false);
+  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                                     "%DebugConstructedBy");
 
   // Check parameters.
-  CONVERT_CHECKED(JSFunction, constructor, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, constructor, 0);
   CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
   RUNTIME_ASSERT(max_references >= 0);
 
   // Get the number of referencing objects.
   int count;
-  count = DebugConstructedBy(constructor, max_references, NULL, 0);
+  HeapIterator heap_iterator;
+  count = DebugConstructedBy(&heap_iterator,
+                             constructor,
+                             max_references,
+                             NULL,
+                             0);
 
   // Allocate an array to hold the result.
   Object* object;
@@ -12183,8 +12232,14 @@
   }
   FixedArray* instances = FixedArray::cast(object);
 
+  ASSERT(HEAP->IsHeapIterable());
   // Fill the referencing objects.
-  count = DebugConstructedBy(constructor, max_references, instances, count);
+  HeapIterator heap_iterator2;
+  count = DebugConstructedBy(&heap_iterator2,
+                             constructor,
+                             max_references,
+                             instances,
+                             count);
 
   // Return result as JS array.
   Object* result;
@@ -12192,8 +12247,7 @@
           isolate->context()->global_context()->array_function());
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  JSArray::cast(result)->SetContent(instances);
-  return result;
+  return JSArray::cast(result)->SetContent(instances);
 }
 
 
@@ -12202,13 +12256,32 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSObject, obj, args[0]);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
 
   // Use the __proto__ accessor.
   return Accessors::ObjectPrototype.getter(obj, NULL);
 }
 
 
+// Patches the script source (should be called upon the BeforeCompile event).
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0);
+  Handle<String> source(String::cast(args[1]));
+
+  RUNTIME_ASSERT(script_wrapper->value()->IsScript());
+  Handle<Script> script(Script::cast(script_wrapper->value()));
+
+  int compilation_state = Smi::cast(script->compilation_state())->value();
+  RUNTIME_ASSERT(compilation_state == Script::COMPILATION_STATE_INITIAL);
+  script->set_source(*source);
+
+  return isolate->heap()->undefined_value();
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
   ASSERT(args.length() == 0);
   CPU::DebugBreak();
@@ -12221,9 +12294,9 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   // Get the function and make sure it is compiled.
-  CONVERT_ARG_CHECKED(JSFunction, func, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
   Handle<SharedFunctionInfo> shared(func->shared());
-  if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
+  if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
   func->code()->PrintLn();
@@ -12237,9 +12310,9 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   // Get the function and make sure it is compiled.
-  CONVERT_ARG_CHECKED(JSFunction, func, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
   Handle<SharedFunctionInfo> shared(func->shared());
-  if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
+  if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
   shared->construct_stub()->PrintLn();
@@ -12252,19 +12325,20 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return f->shared()->inferred_name();
 }
 
 
-static int FindSharedFunctionInfosForScript(Script* script,
+static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
+                                            Script* script,
                                             FixedArray* buffer) {
   AssertNoAllocation no_allocations;
-
   int counter = 0;
   int buffer_size = buffer->length();
-  HeapIterator iterator;
-  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+  for (HeapObject* obj = iterator->next();
+       obj != NULL;
+       obj = iterator->next()) {
     ASSERT(obj != NULL);
     if (!obj->IsSharedFunctionInfo()) {
       continue;
@@ -12288,7 +12362,8 @@
                  Runtime_LiveEditFindSharedFunctionInfosForScript) {
   ASSERT(args.length() == 1);
   HandleScope scope(isolate);
-  CONVERT_CHECKED(JSValue, script_value, args[0]);
+  CONVERT_ARG_CHECKED(JSValue, script_value, 0);
+
 
   Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
 
@@ -12296,10 +12371,23 @@
 
   Handle<FixedArray> array;
   array = isolate->factory()->NewFixedArray(kBufferSize);
-  int number = FindSharedFunctionInfosForScript(*script, *array);
+  int number;
+  {
+    isolate->heap()->EnsureHeapIsIterable();
+    AssertNoAllocation no_allocations;
+    HeapIterator heap_iterator;
+    Script* scr = *script;
+    FixedArray* arr = *array;
+    number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
+  }
   if (number > kBufferSize) {
     array = isolate->factory()->NewFixedArray(number);
-    FindSharedFunctionInfosForScript(*script, *array);
+    isolate->heap()->EnsureHeapIsIterable();
+    AssertNoAllocation no_allocations;
+    HeapIterator heap_iterator;
+    Script* scr = *script;
+    FixedArray* arr = *array;
+    FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
   }
 
   Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(array);
@@ -12320,8 +12408,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_CHECKED(JSValue, script, args[0]);
-  CONVERT_ARG_CHECKED(String, source, 1);
+  CONVERT_ARG_CHECKED(JSValue, script, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
   Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
 
   JSArray* result =  LiveEdit::GatherCompileInfo(script_handle, source);
@@ -12339,13 +12427,12 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
   ASSERT(args.length() == 3);
   HandleScope scope(isolate);
-  CONVERT_CHECKED(JSValue, original_script_value, args[0]);
-  CONVERT_ARG_CHECKED(String, new_source, 1);
+  CONVERT_ARG_CHECKED(JSValue, original_script_value, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
   Handle<Object> old_script_name(args[2], isolate);
 
-  CONVERT_CHECKED(Script, original_script_pointer,
-                  original_script_value->value());
-  Handle<Script> original_script(original_script_pointer);
+  RUNTIME_ASSERT(original_script_value->value()->IsScript());
+  Handle<Script> original_script(Script::cast(original_script_value->value()));
 
   Object* old_script = LiveEdit::ChangeScriptSource(original_script,
                                                     new_source,
@@ -12363,7 +12450,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
   ASSERT(args.length() == 1);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSArray, shared_info, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0);
   return LiveEdit::FunctionSourceUpdated(shared_info);
 }
 
@@ -12372,8 +12459,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSArray, new_compile_info, 0);
-  CONVERT_ARG_CHECKED(JSArray, shared_info, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1);
 
   return LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
 }
@@ -12388,7 +12475,8 @@
   if (function_object->IsJSValue()) {
     Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object);
     if (script_object->IsJSValue()) {
-      CONVERT_CHECKED(Script, script, JSValue::cast(*script_object)->value());
+      RUNTIME_ASSERT(JSValue::cast(*script_object)->value()->IsScript());
+      Script* script = Script::cast(JSValue::cast(*script_object)->value());
       script_object = Handle<Object>(script, isolate);
     }
 
@@ -12408,9 +12496,9 @@
   ASSERT(args.length() == 3);
   HandleScope scope(isolate);
 
-  CONVERT_ARG_CHECKED(JSValue, parent_wrapper, 0);
-  CONVERT_ARG_CHECKED(JSValue, orig_wrapper, 1);
-  CONVERT_ARG_CHECKED(JSValue, subst_wrapper, 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSValue, subst_wrapper, 2);
 
   LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper,
                                        subst_wrapper);
@@ -12427,8 +12515,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
-  CONVERT_ARG_CHECKED(JSArray, position_change_array, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1);
 
   return LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
 }
@@ -12441,8 +12529,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
-  CONVERT_BOOLEAN_CHECKED(do_drop, args[1]);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
+  CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
 
   return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
 }
@@ -12453,8 +12541,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(String, s1, 0);
-  CONVERT_ARG_CHECKED(String, s2, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
 
   return *LiveEdit::CompareStrings(s1, s2);
 }
@@ -12465,7 +12553,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
 
   Handle<Code> code(function->code(), isolate);
@@ -12502,8 +12590,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
-  CONVERT_BOOLEAN_CHECKED(without_debugger, args[1]);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_BOOLEAN_ARG_CHECKED(without_debugger, 1);
 
   Handle<Object> result;
   bool pending_exception;
@@ -12527,7 +12615,7 @@
 
 // Sets a v8 flag.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
-  CONVERT_CHECKED(String, arg, args[0]);
+  CONVERT_ARG_CHECKED(String, arg, 0);
   SmartArrayPointer<char> flags =
       arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   FlagList::SetFlagsFromString(*flags, StrLength(*flags));
@@ -12538,7 +12626,7 @@
 // Performs a GC.
 // Presently, it only does a full GC.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
-  isolate->heap()->CollectAllGarbage(true);
+  isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "%CollectGarbage");
   return isolate->heap()->undefined_value();
 }
 
@@ -12597,7 +12685,7 @@
   CONVERT_SMI_ARG_CHECKED(id2, 1);
   CONVERT_SMI_ARG_CHECKED(start, 2);
   CONVERT_SMI_ARG_CHECKED(count, 3);
-  CONVERT_ARG_CHECKED(JSObject, filter_obj, 4);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 4);
   EnterDebugger enter_debugger;
   return LiveObjectList::Dump(id1, id2, start, count, filter_obj);
 #else
@@ -12624,7 +12712,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjId) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
-  CONVERT_ARG_CHECKED(String, address, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, address, 0);
   Object* result = LiveObjectList::GetObjId(address);
   return result;
 #else
@@ -12642,7 +12730,7 @@
   RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsBoolean());
   RUNTIME_ASSERT(args[3]->IsUndefined() || args[3]->IsSmi());
   RUNTIME_ASSERT(args[4]->IsUndefined() || args[4]->IsSmi());
-  CONVERT_ARG_CHECKED(JSObject, filter_obj, 5);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 5);
 
   Handle<JSObject> instance_filter;
   if (args[1]->IsJSObject()) {
@@ -12743,7 +12831,7 @@
   HandleScope scope;
   CONVERT_SMI_ARG_CHECKED(id1, 0);
   CONVERT_SMI_ARG_CHECKED(id2, 1);
-  CONVERT_ARG_CHECKED(JSObject, filter_obj, 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 2);
 
   EnterDebugger enter_debugger;
   return LiveObjectList::Summarize(id1, id2, filter_obj);
@@ -12780,6 +12868,8 @@
   // Scan the heap for Script objects to find the script with the requested
   // script data.
   Handle<Script> script;
+  script_name->GetHeap()->EnsureHeapIsIterable();
+  AssertNoAllocation no_allocation_during_heap_iteration;
   HeapIterator iterator;
   HeapObject* obj = NULL;
   while (script.is_null() && ((obj = iterator.next()) != NULL)) {
@@ -12809,7 +12899,7 @@
 
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(String, script_name, args[0]);
+  CONVERT_ARG_CHECKED(String, script_name, 0);
 
   // Find the requested script.
   Handle<Object> result =
@@ -12828,34 +12918,32 @@
                                   Object* caller,
                                   bool* seen_caller) {
   // Only display JS frames.
-  if (!raw_frame->is_java_script())
+  if (!raw_frame->is_java_script()) {
     return false;
+  }
   JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
   Object* raw_fun = frame->function();
   // Not sure when this can happen, but skip it just in case.
-  if (!raw_fun->IsJSFunction())
+  if (!raw_fun->IsJSFunction()) {
     return false;
+  }
   if ((raw_fun == caller) && !(*seen_caller)) {
     *seen_caller = true;
     return false;
   }
   // Skip all frames until we've seen the caller.
   if (!(*seen_caller)) return false;
-  // Also, skip the most obvious builtin calls. We recognize builtins
-  // as (1) functions called with the builtins object as the receiver and
-  // as (2) functions from native scripts called with undefined as the
-  // receiver (direct calls to helper functions in the builtins
-  // code). Some builtin calls (such as Number.ADD which is invoked
-  // using 'call') are very difficult to recognize so we're leaving
-  // them in for now.
-  if (frame->receiver()->IsJSBuiltinsObject()) {
-    return false;
-  }
-  JSFunction* fun = JSFunction::cast(raw_fun);
-  Object* raw_script = fun->shared()->script();
-  if (frame->receiver()->IsUndefined() && raw_script->IsScript()) {
-    int script_type = Script::cast(raw_script)->type()->value();
-    return script_type != Script::TYPE_NATIVE;
+  // Also, skip non-visible built-in functions and any call with the builtins
+  // object as receiver, so as not to reveal either the builtins object or
+  // an internal function.
+  // The --builtins-in-stack-traces command line flag allows including
+  // internal call sites in the stack trace for debugging purposes.
+  if (!FLAG_builtins_in_stack_traces) {
+    JSFunction* fun = JSFunction::cast(raw_fun);
+    if (frame->receiver()->IsJSBuiltinsObject() ||
+        (fun->IsBuiltin() && !fun->shared()->native())) {
+      return false;
+    }
   }
   return true;
 }
@@ -12865,9 +12953,10 @@
 // element segments each containing a receiver, function, code and
 // native code offset.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
-  ASSERT_EQ(args.length(), 2);
-  Handle<Object> caller = args.at<Object>(0);
-  CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]);
+  ASSERT_EQ(args.length(), 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
+  Handle<Object> caller = args.at<Object>(1);
+  CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[2]);
 
   HandleScope scope(isolate);
   Factory* factory = isolate->factory();
@@ -12917,6 +13006,8 @@
     iter.Advance();
   }
   Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
+  // Capture and attach a more detailed stack trace if necessary.
+  isolate->CaptureAndSetCurrentStackTraceFor(error_object);
   result->set_length(Smi::FromInt(cursor));
   return *result;
 }
@@ -12948,7 +13039,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
   // This is only called from codegen, so checks might be more lax.
-  CONVERT_CHECKED(JSFunctionResultCache, cache, args[0]);
+  CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
   Object* key = args[1];
 
   int finger_index = cache->finger_index();
@@ -12991,18 +13082,20 @@
     // TODO(antonm): consider passing a receiver when constructing a cache.
     Handle<Object> receiver(isolate->global_context()->global());
     // This handle is neither shared nor used later, so it's safe.
-    Object** argv[] = { key_handle.location() };
-    bool pending_exception = false;
+    Handle<Object> argv[] = { key_handle };
+    bool pending_exception;
     value = Execution::Call(factory,
                             receiver,
-                            1,
+                            ARRAY_SIZE(argv),
                             argv,
                             &pending_exception);
     if (pending_exception) return Failure::Exception();
   }
 
 #ifdef DEBUG
-  cache_handle->JSFunctionResultCacheVerify();
+  if (FLAG_verify_heap) {
+    cache_handle->JSFunctionResultCacheVerify();
+  }
 #endif
 
   // Function invocation may have cleared the cache.  Reread all the data.
@@ -13031,7 +13124,9 @@
   cache_handle->set_finger_index(index);
 
 #ifdef DEBUG
-  cache_handle->JSFunctionResultCacheVerify();
+  if (FLAG_verify_heap) {
+    cache_handle->JSFunctionResultCacheVerify();
+  }
 #endif
 
   return *value;
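
The call sites in this hunk and in Runtime_DebugEvaluate show the new
Execution::Call convention: arguments travel as a plain array of
Handle<Object> with the count from ARRAY_SIZE, replacing the old array of
raw Object** locations. In sketch form (callee, receiver and arg are
placeholders):

    Handle<Object> argv[] = { arg };
    bool has_pending_exception = false;
    Handle<Object> result = Execution::Call(callee,
                                            receiver,
                                            ARRAY_SIZE(argv),
                                            argv,
                                            &has_pending_exception);
    if (has_pending_exception) return Failure::Exception();
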
@@ -13040,8 +13135,8 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NewMessageObject) {
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(String, type, 0);
-  CONVERT_ARG_CHECKED(JSArray, arguments, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, type, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 1);
   return *isolate->factory()->NewJSMessageObject(
       type,
       arguments,
@@ -13054,25 +13149,25 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetType) {
-  CONVERT_CHECKED(JSMessageObject, message, args[0]);
+  CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
   return message->type();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetArguments) {
-  CONVERT_CHECKED(JSMessageObject, message, args[0]);
+  CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
   return message->arguments();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
-  CONVERT_CHECKED(JSMessageObject, message, args[0]);
+  CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
   return Smi::FromInt(message->start_position());
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
-  CONVERT_CHECKED(JSMessageObject, message, args[0]);
+  CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
   return message->script();
 }
 
@@ -13126,8 +13221,8 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(String, format, args[0]);
-  CONVERT_CHECKED(JSArray, elms, args[1]);
+  CONVERT_ARG_CHECKED(String, format, 0);
+  CONVERT_ARG_CHECKED(JSArray, elms, 1);
   String::FlatContent format_content = format->GetFlatContent();
   RUNTIME_ASSERT(format_content.IsAscii());
   Vector<const char> chars = format_content.ToAsciiVector();
@@ -13144,10 +13239,11 @@
 
 #define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name)        \
   RUNTIME_FUNCTION(MaybeObject*, Runtime_Has##Name) {     \
-    CONVERT_CHECKED(JSObject, obj, args[0]);              \
+    CONVERT_ARG_CHECKED(JSObject, obj, 0);                \
     return isolate->heap()->ToBoolean(obj->Has##Name());  \
   }
 
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOnlyElements)
 ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
 ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
 ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
@@ -13164,6 +13260,14 @@
 
 #undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
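
For reference, expanding ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION for one kind
shows the shape of each generated predicate (FastElements as the example):

    RUNTIME_FUNCTION(MaybeObject*, Runtime_HasFastElements) {
      CONVERT_ARG_CHECKED(JSObject, obj, 0);
      return isolate->heap()->ToBoolean(obj->HasFastElements());
    }
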
 
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSObject, obj1, 0);
+  CONVERT_ARG_CHECKED(JSObject, obj2, 1);
+  return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
+}
+
 // ----------------------------------------------------------------------------
 // Implementation of Runtime
 
@@ -13231,14 +13335,20 @@
   Isolate* isolate = Isolate::Current();
   Failure* failure = Failure::cast(result);
   if (failure->IsRetryAfterGC()) {
+    if (isolate->heap()->new_space()->AddFreshPage()) {
+      return;
+    }
+
     // Try to do a garbage collection; ignore it if it fails. The C
     // entry stub will throw an out-of-memory exception in that case.
-    isolate->heap()->CollectGarbage(failure->allocation_space());
+    isolate->heap()->CollectGarbage(failure->allocation_space(),
+                                    "Runtime::PerformGC");
   } else {
     // Handle last resort GC and make sure to allow future allocations
     // to grow the heap without causing GCs (if possible).
     isolate->counters()->gc_last_resort_from_js()->Increment();
-    isolate->heap()->CollectAllGarbage(false);
+    isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+                                       "Runtime::PerformGC");
   }
 }
 
diff --git a/src/runtime.h b/src/runtime.h
index 1538b7d..fe9cfd9 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -69,9 +69,6 @@
   \
   F(GetPrototype, 1, 1) \
   F(IsInPrototypeChain, 2, 1) \
-  F(SetHiddenPrototype, 2, 1) \
-  \
-  F(IsConstructCall, 0, 1) \
   \
   F(GetOwnProperty, 2, 1) \
   \
@@ -80,6 +77,7 @@
   \
   /* Utilities */ \
   F(CheckIsBootstrapping, 0, 1) \
+  F(Call, -1 /* >= 2 */, 1) \
   F(Apply, 5, 1) \
   F(GetFunctionDelegate, 1, 1) \
   F(GetConstructorDelegate, 1, 1) \
@@ -91,13 +89,14 @@
   F(NotifyOSR, 0, 1) \
   F(DeoptimizeFunction, 1, 1) \
   F(RunningInSimulator, 0, 1) \
-  F(OptimizeFunctionOnNextCall, 1, 1) \
+  F(OptimizeFunctionOnNextCall, -1, 1) \
   F(GetOptimizationStatus, 1, 1) \
   F(GetOptimizationCount, 1, 1) \
   F(CompileForOnStackReplacement, 1, 1) \
   F(SetNewFunctionAttributes, 1, 1) \
   F(AllocateInNewSpace, 1, 1) \
   F(SetNativeFlag, 1, 1) \
+  F(StoreArrayLiteralElement, 5, 1) \
   \
   /* Array join support */ \
   F(PushIfAbsent, 2, 1) \
@@ -142,7 +141,7 @@
   F(StringAdd, 2, 1) \
   F(StringBuilderConcat, 3, 1) \
   F(StringBuilderJoin, 3, 1) \
-  F(SparseJoinWithSeparator, 3, 1)            \
+  F(SparseJoinWithSeparator, 3, 1) \
   \
   /* Bit operations */ \
   F(NumberOr, 2, 1) \
@@ -196,6 +195,7 @@
   F(StringLocaleCompare, 2, 1) \
   F(SubString, 3, 1) \
   F(StringReplaceRegExpWithString, 4, 1) \
+  F(StringReplaceOneCharWithString, 3, 1) \
   F(StringMatch, 3, 1) \
   F(StringTrim, 3, 1) \
   F(StringToArray, 2, 1) \
@@ -211,14 +211,14 @@
   /* Reflection */ \
   F(FunctionSetInstanceClassName, 2, 1) \
   F(FunctionSetLength, 2, 1) \
-  F(BoundFunctionSetLength, 2, 1)    \
   F(FunctionSetPrototype, 2, 1) \
   F(FunctionSetReadOnlyPrototype, 1, 1) \
   F(FunctionGetName, 1, 1) \
   F(FunctionSetName, 2, 1) \
   F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
   F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
-  F(FunctionSetBound, 1, 1) \
+  F(FunctionBindArguments, 4, 1) \
+  F(BoundFunctionGetBindings, 1, 1) \
   F(FunctionRemovePrototype, 1, 1) \
   F(FunctionGetSourceCode, 1, 1) \
   F(FunctionGetScript, 1, 1) \
@@ -227,7 +227,7 @@
   F(FunctionIsAPIFunction, 1, 1) \
   F(FunctionIsBuiltin, 1, 1) \
   F(GetScript, 1, 1) \
-  F(CollectStackTrace, 2, 1) \
+  F(CollectStackTrace, 3, 1) \
   F(GetV8Version, 0, 1) \
   \
   F(ClassOf, 1, 1) \
@@ -244,10 +244,9 @@
   F(DateCurrentTime, 0, 1) \
   F(DateParseString, 2, 1) \
   F(DateLocalTimezone, 1, 1) \
-  F(DateLocalTimeOffset, 0, 1) \
-  F(DateDaylightSavingsOffset, 1, 1) \
-  F(DateMakeDay, 3, 1) \
-  F(DateYMDFromTime, 2, 1) \
+  F(DateToUTC, 1, 1) \
+  F(DateMakeDay, 2, 1) \
+  F(DateSetValue, 3, 1) \
   \
   /* Numbers */ \
   \
@@ -257,8 +256,7 @@
   \
   /* Eval */ \
   F(GlobalReceiver, 1, 1) \
-  F(ResolvePossiblyDirectEval, 4, 2) \
-  F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \
+  F(ResolvePossiblyDirectEval, 5, 2) \
   \
   F(SetProperty, -1 /* 4 or 5 */, 1) \
   F(DefineOrRedefineDataProperty, 4, 1) \
@@ -273,14 +271,10 @@
   F(SwapElements, 3, 1) \
   \
   /* Getters and Setters */ \
-  F(DefineAccessor, -1 /* 4 or 5 */, 1) \
   F(LookupAccessor, 3, 1) \
   \
   /* Literals */ \
   F(MaterializeRegExpLiteral, 4, 1)\
-  F(CreateArrayLiteralBoilerplate, 3, 1) \
-  F(CloneLiteralBoilerplate, 1, 1) \
-  F(CloneShallowLiteralBoilerplate, 1, 1) \
   F(CreateObjectLiteral, 4, 1) \
   F(CreateObjectLiteralShallow, 4, 1) \
   F(CreateArrayLiteral, 3, 1) \
@@ -296,6 +290,17 @@
   F(GetConstructTrap, 1, 1) \
   F(Fix, 1, 1) \
   \
+  /* Harmony sets */ \
+  F(SetInitialize, 1, 1) \
+  F(SetAdd, 2, 1) \
+  F(SetHas, 2, 1) \
+  F(SetDelete, 2, 1) \
+  \
+  /* Harmony maps */ \
+  F(MapInitialize, 1, 1) \
+  F(MapGet, 2, 1) \
+  F(MapSet, 3, 1) \
+  \
   /* Harmony weakmaps */ \
   F(WeakMapInitialize, 1, 1) \
   F(WeakMapGet, 2, 1) \
@@ -304,12 +309,13 @@
   /* Statements */ \
   F(NewClosure, 3, 1) \
   F(NewObject, 1, 1) \
-  F(NewObjectFromBound, 2, 1) \
+  F(NewObjectFromBound, 1, 1) \
   F(FinalizeInstanceSize, 1, 1) \
   F(Throw, 1, 1) \
   F(ReThrow, 1, 1) \
   F(ThrowReferenceError, 1, 1) \
   F(StackGuard, 0, 1) \
+  F(Interrupt, 0, 1) \
   F(PromoteScheduledException, 0, 1) \
   \
   /* Contexts */ \
@@ -354,6 +360,7 @@
   F(IS_VAR, 1, 1) \
   \
   /* expose boolean functions from objects-inl.h */ \
+  F(HasFastSmiOnlyElements, 1, 1) \
   F(HasFastElements, 1, 1) \
   F(HasFastDoubleElements, 1, 1) \
   F(HasDictionaryElements, 1, 1) \
@@ -367,6 +374,9 @@
   F(HasExternalUnsignedIntElements, 1, 1) \
   F(HasExternalFloatElements, 1, 1) \
   F(HasExternalDoubleElements, 1, 1) \
+  F(TransitionElementsSmiToDouble, 1, 1) \
+  F(TransitionElementsDoubleToObject, 1, 1) \
+  F(HaveSameMap, 2, 1) \
   /* profiler */ \
   F(ProfilerResume, 0, 1) \
   F(ProfilerPause, 0, 1)
@@ -408,6 +418,7 @@
   F(DebugReferencedBy, 3, 1) \
   F(DebugConstructedBy, 2, 1) \
   F(DebugGetPrototype, 1, 1) \
+  F(DebugSetScriptSource, 2, 1) \
   F(SystemBreak, 0, 1) \
   F(DebugDisassembleFunction, 1, 1) \
   F(DebugDisassembleConstructor, 1, 1) \
@@ -475,11 +486,13 @@
   F(IsNonNegativeSmi, 1, 1)                                                  \
   F(IsArray, 1, 1)                                                           \
   F(IsRegExp, 1, 1)                                                          \
+  F(IsConstructCall, 0, 1)                                                   \
   F(CallFunction, -1 /* receiver + n args + function */, 1)                  \
   F(ArgumentsLength, 0, 1)                                                   \
   F(Arguments, 1, 1)                                                         \
   F(ValueOf, 1, 1)                                                           \
   F(SetValueOf, 2, 1)                                                        \
+  F(DateField, 2 /* date object, field index */, 1)                          \
   F(StringCharFromCode, 1, 1)                                                \
   F(StringCharAt, 2, 1)                                                      \
   F(ObjectEquals, 2, 1)                                                      \
@@ -492,6 +505,7 @@
   F(MathPow, 2, 1)                                                           \
   F(MathSin, 1, 1)                                                           \
   F(MathCos, 1, 1)                                                           \
+  F(MathTan, 1, 1)                                                           \
   F(MathSqrt, 1, 1)                                                          \
   F(MathLog, 1, 1)                                                           \
   F(IsRegExpEquivalent, 2, 1)                                                \
@@ -506,7 +520,6 @@
 // a corresponding runtime function, that is called for slow cases.
 // Entries have the form F(name, number of arguments, number of return values).
 #define INLINE_RUNTIME_FUNCTION_LIST(F) \
-  F(IsConstructCall, 0, 1)                                                   \
   F(ClassOf, 1, 1)                                                           \
   F(StringCharCodeAt, 2, 1)                                                  \
   F(Log, 3, 1)                                                               \
@@ -616,6 +629,13 @@
   // Get the intrinsic function with the given FunctionId.
   static const Function* FunctionForId(FunctionId id);
 
+  static Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
+                                                       Handle<String> subject,
+                                                       Handle<String> search,
+                                                       Handle<String> replace,
+                                                       bool* found,
+                                                       int recursion_limit);
+
   // General-purpose helper functions for runtime system.
   static int StringMatch(Isolate* isolate,
                          Handle<String> sub,
@@ -624,16 +644,14 @@
 
   static bool IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch);
 
-  // TODO(1240886): The following three methods are *not* handle safe,
-  // but accept handle arguments. This seems fragile.
+  // TODO(1240886): Some of the following methods are *not* handle safe, but
+  // accept handle arguments. This seems fragile.
 
   // Support getting the characters in a string using [] notation as
   // in Firefox/SpiderMonkey, Safari and Opera.
   MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Isolate* isolate,
                                                          Handle<Object> object,
                                                          uint32_t index);
-  MUST_USE_RESULT static MaybeObject* GetElement(Handle<Object> object,
-                                                 uint32_t index);
 
   MUST_USE_RESULT static MaybeObject* SetObjectProperty(
       Isolate* isolate,
@@ -667,17 +685,21 @@
 
   // Helper functions used by stubs.
   static void PerformGC(Object* result);
+
+  // Used in runtime.cc and hydrogen's VisitArrayLiteral.
+  static Handle<Object> CreateArrayLiteralBoilerplate(
+      Isolate* isolate,
+      Handle<FixedArray> literals,
+      Handle<FixedArray> elements);
 };
 
 
 //---------------------------------------------------------------------------
 // Constants used by interface to runtime functions.
 
-enum kDeclareGlobalsFlags {
-  kDeclareGlobalsEvalFlag = 1 << 0,
-  kDeclareGlobalsStrictModeFlag = 1 << 1,
-  kDeclareGlobalsNativeFlag = 1 << 2
-};
+class DeclareGlobalsEvalFlag:     public BitField<bool,         0, 1> {};
+class DeclareGlobalsNativeFlag:   public BitField<bool,         1, 1> {};
+class DeclareGlobalsLanguageMode: public BitField<LanguageMode, 2, 2> {};
 
 } }  // namespace v8::internal
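
The three BitField specializations above replace the ad-hoc enum flags: the eval flag, the native flag, and a two-bit language mode are now packed into one small integer that fits in a Smi. A minimal, self-contained sketch of how such fields compose and decode; the BitField template below is a simplified stand-in for v8's internal one, and the LanguageMode values match the ones used elsewhere in this merge:

    #include <cassert>
    #include <cstdint>

    enum LanguageMode { CLASSIC_MODE = 0, STRICT_MODE = 1, EXTENDED_MODE = 2 };

    // Simplified stand-in for v8::internal::BitField<T, shift, size>.
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    struct DeclareGlobalsEvalFlag     : BitField<bool,         0, 1> {};
    struct DeclareGlobalsNativeFlag   : BitField<bool,         1, 1> {};
    struct DeclareGlobalsLanguageMode : BitField<LanguageMode, 2, 2> {};

    int main() {
      // Pack all three flags into one integer, recoverable field by field.
      uint32_t flags = DeclareGlobalsEvalFlag::encode(true) |
                       DeclareGlobalsNativeFlag::encode(false) |
                       DeclareGlobalsLanguageMode::encode(EXTENDED_MODE);
      assert(DeclareGlobalsEvalFlag::decode(flags) == true);
      assert(DeclareGlobalsNativeFlag::decode(flags) == false);
      assert(DeclareGlobalsLanguageMode::decode(flags) == EXTENDED_MODE);
      return 0;
    }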
 
diff --git a/src/runtime.js b/src/runtime.js
index 14ff1b6..53d9a39 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -39,16 +39,16 @@
    -----------------------------------
 */
 
-// The following const declarations are shared with other native JS files.
-// They are all declared at this one spot to avoid const redeclaration errors.
-const $Object = global.Object;
-const $Array = global.Array;
-const $String = global.String;
-const $Number = global.Number;
-const $Function = global.Function;
-const $Boolean = global.Boolean;
-const $NaN = 0/0;
-const builtins = this;
+// The following declarations are shared with other native JS files.
+// They are all declared at this one spot to avoid redeclaration errors.
+var $Object = global.Object;
+var $Array = global.Array;
+var $String = global.String;
+var $Number = global.Number;
+var $Function = global.Function;
+var $Boolean = global.Boolean;
+var $NaN = 0/0;
+var builtins = this;
 
 // ECMA-262 Section 11.9.3.
 function EQUALS(y) {
@@ -355,7 +355,7 @@
   if (!IS_SPEC_OBJECT(x)) {
     throw %MakeTypeError('invalid_in_operator_use', [this, x]);
   }
-  return %_IsNonNegativeSmi(this) && !%IsJSProxy(x) ?
+  return %_IsNonNegativeSmi(this) ?
     %HasElement(x, this) : %HasProperty(x, %ToString(this));
 }
 
@@ -375,6 +375,12 @@
     return 1;
   }
 
+  // Check if the function is bound; if so, get [[BoundFunction]] from it
+  // and use that instead of F.
+  var bindings = %BoundFunctionGetBindings(F);
+  if (bindings) {
+    F = bindings[kBoundFunctionIndex];  // Always a non-bound function.
+  }
   // Get the prototype of F; if it is not an object, throw an error.
   var O = F.prototype;
   if (!IS_SPEC_OBJECT(O)) {
@@ -386,13 +392,6 @@
 }
 
 
-// Get an array of property keys for the given object. Used in
-// for-in statements.
-function GET_KEYS() {
-  return %GetPropertyNames(this);
-}
-
-
 // Filter a given key against an object by checking if the object
 // has a property with the given key; return the key as a string if
 // it has. Otherwise returns 0 (smi). Used in for-in statements.
@@ -429,20 +428,10 @@
 }
 
 
-function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR(proxy) {
-  var arity = %_ArgumentsLength() - 1;
+function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR() {
+  var proxy = this;
   var trap = %GetConstructTrap(proxy);
-  var receiver = void 0;
-  if (!IS_UNDEFINED(trap)) {
-    trap = %GetCallTrap(proxy);
-    var proto = proxy.prototype;
-    if (!IS_SPEC_OBJECT(proto) && proto !== null) {
-      throw MakeTypeError("proto_object_or_null", [proto]);
-    }
-    receiver = new global.Object();
-    receiver.__proto__ = proto;
-  }
-  return %Apply(trap, this, arguments, 1, arity);
+  return %Apply(trap, this, arguments, 0, %_ArgumentsLength());
 }
 
 
@@ -469,11 +458,12 @@
   }
 
   if (!IS_SPEC_FUNCTION(this)) {
-    throw %MakeTypeError('apply_non_function', [ %ToString(this), typeof this ]);
+    throw %MakeTypeError('apply_non_function',
+                         [ %ToString(this), typeof this ]);
   }
 
   // Make sure the arguments list has the right type.
-  if (args != null && !IS_ARRAY(args) && !IS_ARGUMENTS(args)) {
+  if (args != null && !IS_SPEC_OBJECT(args)) {
     throw %MakeTypeError('apply_wrong_args', []);
   }
 
diff --git a/src/scanner-character-streams.cc b/src/scanner-character-streams.cc
index ee10703..56b9f03 100644
--- a/src/scanner-character-streams.cc
+++ b/src/scanner-character-streams.cc
@@ -36,19 +36,19 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// BufferedUC16CharacterStreams
+// BufferedUtf16CharacterStreams
 
-BufferedUC16CharacterStream::BufferedUC16CharacterStream()
-    : UC16CharacterStream(),
+BufferedUtf16CharacterStream::BufferedUtf16CharacterStream()
+    : Utf16CharacterStream(),
       pushback_limit_(NULL) {
   // Initialize buffer as being empty. First read will fill the buffer.
   buffer_cursor_ = buffer_;
   buffer_end_ = buffer_;
 }
 
-BufferedUC16CharacterStream::~BufferedUC16CharacterStream() { }
+BufferedUtf16CharacterStream::~BufferedUtf16CharacterStream() { }
 
-void BufferedUC16CharacterStream::PushBack(uc32 character) {
+void BufferedUtf16CharacterStream::PushBack(uc32 character) {
   if (character == kEndOfInput) {
     pos_--;
     return;
@@ -63,7 +63,7 @@
 }
 
 
-void BufferedUC16CharacterStream::SlowPushBack(uc16 character) {
+void BufferedUtf16CharacterStream::SlowPushBack(uc16 character) {
   // In pushback mode, the end of the buffer contains pushback,
   // and the start of the buffer (from buffer start to pushback_limit_)
   // contains valid data that comes just after the pushback.
@@ -89,7 +89,7 @@
 }
 
 
-bool BufferedUC16CharacterStream::ReadBlock() {
+bool BufferedUtf16CharacterStream::ReadBlock() {
   buffer_cursor_ = buffer_;
   if (pushback_limit_ != NULL) {
     // Leave pushback mode.
@@ -106,7 +106,7 @@
 }
 
 
-unsigned BufferedUC16CharacterStream::SlowSeekForward(unsigned delta) {
+unsigned BufferedUtf16CharacterStream::SlowSeekForward(unsigned delta) {
   // Leave pushback mode (i.e., ignore that there might be valid data
   // in the buffer before the pushback_limit_ point).
   pushback_limit_ = NULL;
@@ -114,10 +114,10 @@
 }
 
 // ----------------------------------------------------------------------------
-// GenericStringUC16CharacterStream
+// GenericStringUtf16CharacterStream
 
 
-GenericStringUC16CharacterStream::GenericStringUC16CharacterStream(
+GenericStringUtf16CharacterStream::GenericStringUtf16CharacterStream(
     Handle<String> data,
     unsigned start_position,
     unsigned end_position)
@@ -130,10 +130,10 @@
 }
 
 
-GenericStringUC16CharacterStream::~GenericStringUC16CharacterStream() { }
+GenericStringUtf16CharacterStream::~GenericStringUtf16CharacterStream() { }
 
 
-unsigned GenericStringUC16CharacterStream::BufferSeekForward(unsigned delta) {
+unsigned GenericStringUtf16CharacterStream::BufferSeekForward(unsigned delta) {
   unsigned old_pos = pos_;
   pos_ = Min(pos_ + delta, length_);
   ReadBlock();
@@ -141,7 +141,7 @@
 }
 
 
-unsigned GenericStringUC16CharacterStream::FillBuffer(unsigned from_pos,
+unsigned GenericStringUtf16CharacterStream::FillBuffer(unsigned from_pos,
                                                       unsigned length) {
   if (from_pos >= length_) return 0;
   if (from_pos + length > length_) {
@@ -153,10 +153,10 @@
 
 
 // ----------------------------------------------------------------------------
-// Utf8ToUC16CharacterStream
-Utf8ToUC16CharacterStream::Utf8ToUC16CharacterStream(const byte* data,
-                                                     unsigned length)
-    : BufferedUC16CharacterStream(),
+// Utf8ToUtf16CharacterStream
+Utf8ToUtf16CharacterStream::Utf8ToUtf16CharacterStream(const byte* data,
+                                                       unsigned length)
+    : BufferedUtf16CharacterStream(),
       raw_data_(data),
       raw_data_length_(length),
       raw_data_pos_(0),
@@ -165,10 +165,10 @@
 }
 
 
-Utf8ToUC16CharacterStream::~Utf8ToUC16CharacterStream() { }
+Utf8ToUtf16CharacterStream::~Utf8ToUtf16CharacterStream() { }
 
 
-unsigned Utf8ToUC16CharacterStream::BufferSeekForward(unsigned delta) {
+unsigned Utf8ToUtf16CharacterStream::BufferSeekForward(unsigned delta) {
   unsigned old_pos = pos_;
   unsigned target_pos = pos_ + delta;
   SetRawPosition(target_pos);
@@ -178,9 +178,9 @@
 }
 
 
-unsigned Utf8ToUC16CharacterStream::FillBuffer(unsigned char_position,
-                                               unsigned length) {
-  static const unibrow::uchar kMaxUC16Character = 0xffff;
+unsigned Utf8ToUtf16CharacterStream::FillBuffer(unsigned char_position,
+                                                unsigned length) {
+  static const unibrow::uchar kMaxUtf16Character = 0xffff;
   SetRawPosition(char_position);
   if (raw_character_position_ != char_position) {
     // char_position was not a valid position in the stream (hit the end
@@ -188,7 +188,7 @@
     return 0u;
   }
   unsigned i = 0;
-  while (i < length) {
+  while (i < length - 1) {
     if (raw_data_pos_ == raw_data_length_) break;
     unibrow::uchar c = raw_data_[raw_data_pos_];
     if (c <= unibrow::Utf8::kMaxOneByteChar) {
@@ -197,12 +197,13 @@
       c =  unibrow::Utf8::CalculateValue(raw_data_ + raw_data_pos_,
                                          raw_data_length_ - raw_data_pos_,
                                          &raw_data_pos_);
-      // Don't allow characters outside of the BMP.
-      if (c > kMaxUC16Character) {
-        c = unibrow::Utf8::kBadChar;
-      }
     }
-    buffer_[i++] = static_cast<uc16>(c);
+    if (c > kMaxUtf16Character) {
+      buffer_[i++] = unibrow::Utf16::LeadSurrogate(c);
+      buffer_[i++] = unibrow::Utf16::TrailSurrogate(c);
+    } else {
+      buffer_[i++] = static_cast<uc16>(c);
+    }
   }
   raw_character_position_ = char_position + i;
   return i;
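
FillBuffer now emits a lead/trail surrogate pair for code points above 0xffff instead of clamping them to kBadChar, and the new loop bound (i < length - 1) keeps room for both code units of a pair. A sketch of the standard UTF-16 surrogate math that unibrow::Utf16::LeadSurrogate and TrailSurrogate can be assumed to compute:

    #include <cassert>
    #include <cstdint>

    typedef uint16_t uc16;

    // Standard UTF-16 encoding of a supplementary-plane code point
    // (U+10000..U+10FFFF) as a lead/trail surrogate pair.
    static uc16 LeadSurrogate(uint32_t c) {
      return static_cast<uc16>(0xD800 + (((c - 0x10000) >> 10) & 0x3FF));
    }
    static uc16 TrailSurrogate(uint32_t c) {
      return static_cast<uc16>(0xDC00 + ((c - 0x10000) & 0x3FF));
    }

    int main() {
      uint32_t c = 0x1F600;  // Outside the BMP: needs two code units.
      assert(LeadSurrogate(c) == 0xD83D);
      assert(TrailSurrogate(c) == 0xDE00);
      return 0;
    }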
@@ -266,37 +267,52 @@
 }
 
 
-void Utf8ToUC16CharacterStream::SetRawPosition(unsigned target_position) {
+// This can't set a raw position between the two halves of a surrogate
+// pair, since there is no position in the UTF-8 stream that corresponds
+// to that.  This assumes that the surrogate pair is correctly coded as a
+// 4 byte UTF-8 sequence.  If it is illegally coded as two 3 byte
+// sequences then there is no problem here.
+void Utf8ToUtf16CharacterStream::SetRawPosition(unsigned target_position) {
   if (raw_character_position_ > target_position) {
     // Spool backwards in utf8 buffer.
     do {
+      int old_pos = raw_data_pos_;
       Utf8CharacterBack(raw_data_, &raw_data_pos_);
       raw_character_position_--;
+      ASSERT(old_pos - raw_data_pos_ <= 4);
+      // Step back over both code units for surrogate pairs.
+      if (old_pos - raw_data_pos_ == 4) raw_character_position_--;
     } while (raw_character_position_ > target_position);
+    // No surrogate pair splitting.
+    ASSERT(raw_character_position_ == target_position);
     return;
   }
   // Spool forwards in the utf8 buffer.
   while (raw_character_position_ < target_position) {
     if (raw_data_pos_ == raw_data_length_) return;
+    int old_pos = raw_data_pos_;
     Utf8CharacterForward(raw_data_, &raw_data_pos_);
     raw_character_position_++;
+    ASSERT(raw_data_pos_ - old_pos <= 4);
+    if (raw_data_pos_ - old_pos == 4) raw_character_position_++;
   }
+  // No surrogate pair splitting.
+  ASSERT(raw_character_position_ == target_position);
 }
 
 
 // ----------------------------------------------------------------------------
-// ExternalTwoByteStringUC16CharacterStream
+// ExternalTwoByteStringUtf16CharacterStream
 
-ExternalTwoByteStringUC16CharacterStream::
-    ~ExternalTwoByteStringUC16CharacterStream() { }
+ExternalTwoByteStringUtf16CharacterStream::
+    ~ExternalTwoByteStringUtf16CharacterStream() { }
 
 
-ExternalTwoByteStringUC16CharacterStream
-    ::ExternalTwoByteStringUC16CharacterStream(
+ExternalTwoByteStringUtf16CharacterStream
+    ::ExternalTwoByteStringUtf16CharacterStream(
         Handle<ExternalTwoByteString> data,
         int start_position,
         int end_position)
-    : UC16CharacterStream(),
+    : Utf16CharacterStream(),
       source_(data),
       raw_data_(data->GetTwoByteData(start_position)) {
   buffer_cursor_ = raw_data_,
diff --git a/src/scanner-character-streams.h b/src/scanner-character-streams.h
index 5c4ea2c..319ee8f 100644
--- a/src/scanner-character-streams.h
+++ b/src/scanner-character-streams.h
@@ -36,10 +36,10 @@
 // A buffered character stream based on a random access character
 // source (ReadBlock can be called with pos_ pointing to any position,
 // even positions before the current).
-class BufferedUC16CharacterStream: public UC16CharacterStream {
+class BufferedUtf16CharacterStream: public Utf16CharacterStream {
  public:
-  BufferedUC16CharacterStream();
-  virtual ~BufferedUC16CharacterStream();
+  BufferedUtf16CharacterStream();
+  virtual ~BufferedUtf16CharacterStream();
 
   virtual void PushBack(uc32 character);
 
@@ -60,12 +60,12 @@
 
 
 // Generic string stream.
-class GenericStringUC16CharacterStream: public BufferedUC16CharacterStream {
+class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
  public:
-  GenericStringUC16CharacterStream(Handle<String> data,
-                                   unsigned start_position,
-                                   unsigned end_position);
-  virtual ~GenericStringUC16CharacterStream();
+  GenericStringUtf16CharacterStream(Handle<String> data,
+                                    unsigned start_position,
+                                    unsigned end_position);
+  virtual ~GenericStringUtf16CharacterStream();
 
  protected:
   virtual unsigned BufferSeekForward(unsigned delta);
@@ -77,11 +77,11 @@
 };
 
 
-// UC16 stream based on a literal UTF-8 string.
-class Utf8ToUC16CharacterStream: public BufferedUC16CharacterStream {
+// Utf16 stream based on a literal UTF-8 string.
+class Utf8ToUtf16CharacterStream: public BufferedUtf16CharacterStream {
  public:
-  Utf8ToUC16CharacterStream(const byte* data, unsigned length);
-  virtual ~Utf8ToUC16CharacterStream();
+  Utf8ToUtf16CharacterStream(const byte* data, unsigned length);
+  virtual ~Utf8ToUtf16CharacterStream();
 
  protected:
   virtual unsigned BufferSeekForward(unsigned delta);
@@ -98,12 +98,12 @@
 
 
 // UTF16 buffer to read characters from an external string.
-class ExternalTwoByteStringUC16CharacterStream: public UC16CharacterStream {
+class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
  public:
-  ExternalTwoByteStringUC16CharacterStream(Handle<ExternalTwoByteString> data,
-                                           int start_position,
-                                           int end_position);
-  virtual ~ExternalTwoByteStringUC16CharacterStream();
+  ExternalTwoByteStringUtf16CharacterStream(Handle<ExternalTwoByteString> data,
+                                            int start_position,
+                                            int end_position);
+  virtual ~ExternalTwoByteStringUtf16CharacterStream();
 
   virtual void PushBack(uc32 character) {
     ASSERT(buffer_cursor_ > raw_data_);
diff --git a/src/scanner.cc b/src/scanner.cc
old mode 100644
new mode 100755
index 69ea8ae..7901b5d
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -36,29 +36,26 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// Scanner::LiteralScope
-
-Scanner::LiteralScope::LiteralScope(Scanner* self)
-    : scanner_(self), complete_(false) {
-  self->StartLiteral();
-}
-
-
-Scanner::LiteralScope::~LiteralScope() {
-  if (!complete_) scanner_->DropLiteral();
-}
-
-
-void Scanner::LiteralScope::Complete() {
-  scanner_->TerminateLiteral();
-  complete_ = true;
-}
-
-// ----------------------------------------------------------------------------
 // Scanner
 
 Scanner::Scanner(UnicodeCache* unicode_cache)
-    : unicode_cache_(unicode_cache) { }
+    : unicode_cache_(unicode_cache),
+      octal_pos_(Location::invalid()),
+      harmony_scoping_(false),
+      harmony_modules_(false) { }
+
+
+void Scanner::Initialize(Utf16CharacterStream* source) {
+  source_ = source;
+  // Need to capture identifiers in order to recognize "get" and "set"
+  // in object literals.
+  Init();
+  // Skip initial whitespace allowing HTML comment ends just like
+  // after a newline and scan first token.
+  has_line_terminator_before_next_ = true;
+  SkipWhiteSpace();
+  Scan();
+}
 
 
 uc32 Scanner::ScanHexNumber(int expected_length) {
@@ -88,29 +85,6 @@
 }
 
 
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner
-
-JavaScriptScanner::JavaScriptScanner(UnicodeCache* scanner_contants)
-    : Scanner(scanner_contants),
-      octal_pos_(Location::invalid()),
-      harmony_block_scoping_(false) { }
-
-
-void JavaScriptScanner::Initialize(UC16CharacterStream* source) {
-  source_ = source;
-  // Need to capture identifiers in order to recognize "get" and "set"
-  // in object literals.
-  Init();
-  // Skip initial whitespace allowing HTML comment ends just like
-  // after a newline and scan first token.
-  has_line_terminator_before_next_ = true;
-  SkipWhiteSpace();
-  Scan();
-}
-
-
 // Ensure that tokens can be stored in a byte.
 STATIC_ASSERT(Token::NUM_TOKENS <= 0x100);
 
@@ -247,7 +221,7 @@
 };
 
 
-Token::Value JavaScriptScanner::Next() {
+Token::Value Scanner::Next() {
   current_ = next_;
   has_line_terminator_before_next_ = false;
   has_multiline_comment_before_next_ = false;
@@ -279,7 +253,7 @@
 }
 
 
-bool JavaScriptScanner::SkipWhiteSpace() {
+bool Scanner::SkipWhiteSpace() {
   int start_position = source_pos();
 
   while (true) {
@@ -319,7 +293,7 @@
 }
 
 
-Token::Value JavaScriptScanner::SkipSingleLineComment() {
+Token::Value Scanner::SkipSingleLineComment() {
   Advance();
 
   // The line terminator at the end of the line is not considered
@@ -335,7 +309,7 @@
 }
 
 
-Token::Value JavaScriptScanner::SkipMultiLineComment() {
+Token::Value Scanner::SkipMultiLineComment() {
   ASSERT(c0_ == '*');
   Advance();
 
@@ -361,7 +335,7 @@
 }
 
 
-Token::Value JavaScriptScanner::ScanHtmlComment() {
+Token::Value Scanner::ScanHtmlComment() {
   // Check for <!-- comments.
   ASSERT(c0_ == '!');
   Advance();
@@ -376,7 +350,7 @@
 }
 
 
-void JavaScriptScanner::Scan() {
+void Scanner::Scan() {
   next_.literal_chars = NULL;
   Token::Value token;
   do {
@@ -616,7 +590,7 @@
 }
 
 
-void JavaScriptScanner::SeekForward(int pos) {
+void Scanner::SeekForward(int pos) {
   // After this call, we will have the token at the given position as
   // the "next" token. The "current" token will be invalid.
   if (pos == next_.location.beg_pos) return;
@@ -637,7 +611,7 @@
 }
 
 
-void JavaScriptScanner::ScanEscape() {
+void Scanner::ScanEscape() {
   uc32 c = c0_;
   Advance();
 
@@ -689,7 +663,7 @@
 
 // Octal escapes of the forms '\0xx' and '\xxx' are not a part of
 // ECMA-262. Other JS VMs support them.
-uc32 JavaScriptScanner::ScanOctalEscape(uc32 c, int length) {
+uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
   uc32 x = c - '0';
   int i = 0;
   for (; i < length; i++) {
@@ -712,7 +686,7 @@
 }
 
 
-Token::Value JavaScriptScanner::ScanString() {
+Token::Value Scanner::ScanString() {
   uc32 quote = c0_;
   Advance();  // consume quote
 
@@ -736,13 +710,13 @@
 }
 
 
-void JavaScriptScanner::ScanDecimalDigits() {
+void Scanner::ScanDecimalDigits() {
   while (IsDecimalDigit(c0_))
     AddLiteralCharAdvance();
 }
 
 
-Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
+Token::Value Scanner::ScanNumber(bool seen_period) {
   ASSERT(IsDecimalDigit(c0_));  // the first digit of the number or the fraction
 
   enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
@@ -827,7 +801,7 @@
 }
 
 
-uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
+uc32 Scanner::ScanIdentifierUnicodeEscape() {
   Advance();
   if (c0_ != 'u') return -1;
   Advance();
@@ -857,7 +831,8 @@
   KEYWORD_GROUP('e')                                                \
   KEYWORD("else", Token::ELSE)                                      \
   KEYWORD("enum", Token::FUTURE_RESERVED_WORD)                      \
-  KEYWORD("export", Token::FUTURE_RESERVED_WORD)                    \
+  KEYWORD("export", harmony_modules                                 \
+                    ? Token::EXPORT : Token::FUTURE_RESERVED_WORD)  \
   KEYWORD("extends", Token::FUTURE_RESERVED_WORD)                   \
   KEYWORD_GROUP('f')                                                \
   KEYWORD("false", Token::FALSE_LITERAL)                            \
@@ -867,12 +842,13 @@
   KEYWORD_GROUP('i')                                                \
   KEYWORD("if", Token::IF)                                          \
   KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD)         \
-  KEYWORD("import", Token::FUTURE_RESERVED_WORD)                    \
+  KEYWORD("import", harmony_modules                                 \
+                    ? Token::IMPORT : Token::FUTURE_RESERVED_WORD)  \
   KEYWORD("in", Token::IN)                                          \
   KEYWORD("instanceof", Token::INSTANCEOF)                          \
   KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD)          \
   KEYWORD_GROUP('l')                                                \
-  KEYWORD("let", harmony_block_scoping                              \
+  KEYWORD("let", harmony_scoping                                    \
                  ? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
   KEYWORD_GROUP('n')                                                \
   KEYWORD("new", Token::NEW)                                        \
@@ -906,7 +882,8 @@
 
 static Token::Value KeywordOrIdentifierToken(const char* input,
                                              int input_length,
-                                             bool harmony_block_scoping) {
+                                             bool harmony_scoping,
+                                             bool harmony_modules) {
   ASSERT(input_length >= 1);
   const int kMinLength = 2;
   const int kMaxLength = 10;
@@ -944,7 +921,7 @@
 }
 
 
-Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
+Token::Value Scanner::ScanIdentifierOrKeyword() {
   ASSERT(unicode_cache_->IsIdentifierStart(c0_));
   LiteralScope literal(this);
   // Scan identifier start character.
@@ -982,14 +959,15 @@
     Vector<const char> chars = next_.literal_chars->ascii_literal();
     return KeywordOrIdentifierToken(chars.start(),
                                     chars.length(),
-                                    harmony_block_scoping_);
+                                    harmony_scoping_,
+                                    harmony_modules_);
   }
 
   return Token::IDENTIFIER;
 }
 
 
-Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
+Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal) {
   // Scan the rest of the identifier characters.
   while (unicode_cache_->IsIdentifierPart(c0_)) {
     if (c0_ == '\\') {
@@ -1012,7 +990,7 @@
 }
 
 
-bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
+bool Scanner::ScanRegExpPattern(bool seen_equal) {
   // Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
   bool in_character_class = false;
 
@@ -1059,7 +1037,7 @@
 }
 
 
-bool JavaScriptScanner::ScanLiteralUnicodeEscape() {
+bool Scanner::ScanLiteralUnicodeEscape() {
   ASSERT(c0_ == '\\');
   uc32 chars_read[6] = {'\\', 'u', 0, 0, 0, 0};
   Advance();
@@ -1089,7 +1067,7 @@
 }
 
 
-bool JavaScriptScanner::ScanRegExpFlags() {
+bool Scanner::ScanRegExpFlags() {
   // Scan regular expression flags.
   LiteralScope literal(this);
   while (unicode_cache_->IsIdentifierPart(c0_)) {
diff --git a/src/scanner.h b/src/scanner.h
index 16c3a42..045e7d2 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -41,6 +41,26 @@
 namespace v8 {
 namespace internal {
 
+
+// General collection of (multi-)bit-flags that can be passed to scanners and
+// parsers to signify their (initial) mode of operation.
+enum ParsingFlags {
+  kNoParsingFlags = 0,
+  // Embed LanguageMode values in parsing flags, i.e., equivalent to:
+  // CLASSIC_MODE = 0,
+  // STRICT_MODE,
+  // EXTENDED_MODE,
+  kLanguageModeMask = 0x03,
+  kAllowLazy = 0x04,
+  kAllowNativesSyntax = 0x08,
+  kAllowModules = 0x10
+};
+
+STATIC_ASSERT((kLanguageModeMask & CLASSIC_MODE) == CLASSIC_MODE);
+STATIC_ASSERT((kLanguageModeMask & STRICT_MODE) == STRICT_MODE);
+STATIC_ASSERT((kLanguageModeMask & EXTENDED_MODE) == EXTENDED_MODE);
+
+
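Because LanguageMode occupies the two low bits of ParsingFlags (which the STATIC_ASSERTs above verify), a caller can OR a language mode together with the boolean flags and recover each piece with a mask. A small sketch using the values declared above:

    #include <cassert>

    enum LanguageMode { CLASSIC_MODE = 0, STRICT_MODE = 1, EXTENDED_MODE = 2 };
    enum ParsingFlags {
      kNoParsingFlags = 0,
      kLanguageModeMask = 0x03,
      kAllowLazy = 0x04,
      kAllowNativesSyntax = 0x08,
      kAllowModules = 0x10
    };

    int main() {
      int flags = EXTENDED_MODE | kAllowLazy | kAllowModules;
      // The low two bits carry the language mode; the rest are booleans.
      LanguageMode mode = static_cast<LanguageMode>(flags & kLanguageModeMask);
      assert(mode == EXTENDED_MODE);
      assert((flags & kAllowLazy) != 0);
      assert((flags & kAllowNativesSyntax) == 0);
      return 0;
    }
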
 // Returns the value (0 .. 15) of a hexadecimal character c.
 // If c is not a legal hexadecimal character, returns a value < 0.
 inline int HexValue(uc32 c) {
@@ -53,15 +73,17 @@
 
 
 // ---------------------------------------------------------------------
-// Buffered stream of characters, using an internal UC16 buffer.
+// Buffered stream of UTF-16 code units, using an internal UTF-16 buffer.
+// A code unit is a 16 bit value representing either a 16 bit code point
+// or one half of a surrogate pair that encodes a single 21 bit code point.
 
-class UC16CharacterStream {
+class Utf16CharacterStream {
  public:
-  UC16CharacterStream() : pos_(0) { }
-  virtual ~UC16CharacterStream() { }
+  Utf16CharacterStream() : pos_(0) { }
+  virtual ~Utf16CharacterStream() { }
 
-  // Returns and advances past the next UC16 character in the input
-  // stream. If there are no more characters, it returns a negative
+  // Returns and advances past the next UTF-16 code unit in the input
+  // stream. If there are no more code units, it returns a negative
   // value.
   inline uc32 Advance() {
     if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
@@ -70,47 +92,47 @@
     }
     // Note: currently the following increment is necessary to avoid a
     // parser problem! The scanner treats the final kEndOfInput as
-    // a character with a position, and does math relative to that
+    // a code unit with a position, and does math relative to that
     // position.
     pos_++;
 
     return kEndOfInput;
   }
 
-  // Return the current position in the character stream.
+  // Return the current position in the code unit stream.
   // Starts at zero.
   inline unsigned pos() const { return pos_; }
 
-  // Skips forward past the next character_count UC16 characters
+  // Skips forward past the next code_unit_count UTF-16 code units
   // in the input, or until the end of input if that comes sooner.
-  // Returns the number of characters actually skipped. If less
-  // than character_count,
-  inline unsigned SeekForward(unsigned character_count) {
+  // Returns the number of code units actually skipped. If less
+  // than code_unit_count, the end of the input was reached.
+  inline unsigned SeekForward(unsigned code_unit_count) {
     unsigned buffered_chars =
         static_cast<unsigned>(buffer_end_ - buffer_cursor_);
-    if (character_count <= buffered_chars) {
-      buffer_cursor_ += character_count;
-      pos_ += character_count;
-      return character_count;
+    if (code_unit_count <= buffered_chars) {
+      buffer_cursor_ += code_unit_count;
+      pos_ += code_unit_count;
+      return code_unit_count;
     }
-    return SlowSeekForward(character_count);
+    return SlowSeekForward(code_unit_count);
   }
 
-  // Pushes back the most recently read UC16 character (or negative
+  // Pushes back the most recently read UTF-16 code unit (or negative
   // value if at end of input), i.e., the value returned by the most recent
   // call to Advance.
   // Must not be used right after calling SeekForward.
-  virtual void PushBack(int32_t character) = 0;
+  virtual void PushBack(int32_t code_unit) = 0;
 
  protected:
   static const uc32 kEndOfInput = -1;
 
-  // Ensures that the buffer_cursor_ points to the character at
+  // Ensures that the buffer_cursor_ points to the code_unit at
   // position pos_ of the input, if possible. If the position
   // is at or after the end of the input, return false. If there
-  // are more characters available, return true.
+  // are more code_units available, return true.
   virtual bool ReadBlock() = 0;
-  virtual unsigned SlowSeekForward(unsigned character_count) = 0;
+  virtual unsigned SlowSeekForward(unsigned code_unit_count) = 0;
 
   const uc16* buffer_cursor_;
   const uc16* buffer_end_;
@@ -158,23 +180,24 @@
     }
   }
 
-  inline void AddChar(uc16 character) {
+  INLINE(void AddChar(uint32_t code_unit)) {
     if (position_ >= backing_store_.length()) ExpandBuffer();
     if (is_ascii_) {
-      if (character < kMaxAsciiCharCodeU) {
-        backing_store_[position_] = static_cast<byte>(character);
+      if (code_unit < kMaxAsciiCharCodeU) {
+        backing_store_[position_] = static_cast<byte>(code_unit);
         position_ += kASCIISize;
         return;
       }
-      ConvertToUC16();
+      ConvertToUtf16();
     }
-    *reinterpret_cast<uc16*>(&backing_store_[position_]) = character;
+    ASSERT(code_unit < 0x10000u);
+    *reinterpret_cast<uc16*>(&backing_store_[position_]) = code_unit;
     position_ += kUC16Size;
   }
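
AddChar keeps the literal buffer in a compact one-byte-per-character form until the first wide code unit arrives, then widens the whole buffer via ConvertToUtf16. A self-contained sketch of that lazy-widening pattern (the class name and the 0x80 threshold are illustrative, not v8's exact constants):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Stores ASCII as single bytes; transparently widens the whole
    // buffer to 16-bit units on the first non-ASCII code unit.
    class LiteralBufferSketch {
     public:
      void AddChar(uint32_t code_unit) {
        if (is_ascii_ && code_unit < 0x80u) {
          bytes_.push_back(static_cast<uint8_t>(code_unit));
          return;
        }
        if (is_ascii_) ConvertToUtf16();
        assert(code_unit < 0x10000u);  // Callers pass single code units.
        wide_.push_back(static_cast<uint16_t>(code_unit));
      }
      bool is_ascii() const { return is_ascii_; }
      size_t length() const { return is_ascii_ ? bytes_.size() : wide_.size(); }

     private:
      void ConvertToUtf16() {
        // Re-encode everything read so far as 16-bit units.
        for (size_t i = 0; i < bytes_.size(); ++i) wide_.push_back(bytes_[i]);
        bytes_.clear();
        is_ascii_ = false;
      }
      bool is_ascii_ = true;
      std::vector<uint8_t> bytes_;
      std::vector<uint16_t> wide_;
    };

    int main() {
      LiteralBufferSketch buf;
      buf.AddChar('a');
      assert(buf.is_ascii());
      buf.AddChar(0x00E9);  // U+00E9 forces widening.
      assert(!buf.is_ascii() && buf.length() == 2);
      return 0;
    }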
 
   bool is_ascii() { return is_ascii_; }
 
-  Vector<const uc16> uc16_literal() {
+  Vector<const uc16> utf16_literal() {
     ASSERT(!is_ascii_);
     ASSERT((position_ & 0x1) == 0);
     return Vector<const uc16>(
@@ -216,13 +239,13 @@
     backing_store_ = new_store;
   }
 
-  void ConvertToUC16() {
+  void ConvertToUtf16() {
     ASSERT(is_ascii_);
     Vector<byte> new_store;
     int new_content_size = position_ * kUC16Size;
     if (new_content_size >= backing_store_.length()) {
-      // Ensure room for all currently read characters as UC16 as well
-      // as the character about to be stored.
+      // Ensure room for all currently read code units as UC16 as well
+      // as the code unit about to be stored.
       new_store = Vector<byte>::New(NewCapacity(new_content_size));
     } else {
       new_store = backing_store_;
@@ -249,35 +272,32 @@
 
 
 // ----------------------------------------------------------------------------
-// Scanner base-class.
+// JavaScript Scanner.
 
-// Generic functionality used by both JSON and JavaScript scanners.
 class Scanner {
  public:
-  // -1 is outside of the range of any real source code.
-  static const int kNoOctalLocation = -1;
-
-  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
+  // Scoped helper for literal recording. Automatically drops the literal
+  // if scanning is aborted before the literal is complete.
   class LiteralScope {
    public:
-    explicit LiteralScope(Scanner* self);
-    ~LiteralScope();
-    void Complete();
+    explicit LiteralScope(Scanner* self)
+        : scanner_(self), complete_(false) {
+      scanner_->StartLiteral();
+    }
+    ~LiteralScope() {
+      if (!complete_) scanner_->DropLiteral();
+    }
+    void Complete() {
+      scanner_->TerminateLiteral();
+      complete_ = true;
+    }
 
    private:
     Scanner* scanner_;
     bool complete_;
   };
 
-  explicit Scanner(UnicodeCache* scanner_contants);
-
-  // Returns the current token again.
-  Token::Value current_token() { return current_.token; }
-
-  // One token look-ahead (past the token returned by Next()).
-  Token::Value peek() const { return next_.token; }
-
+  // Representation of an interval of source positions.
   struct Location {
     Location(int b, int e) : beg_pos(b), end_pos(e) { }
     Location() : beg_pos(0), end_pos(0) { }
@@ -292,28 +312,39 @@
     int end_pos;
   };
 
-  // Returns the location information for the current token
-  // (the token returned by Next()).
-  Location location() const { return current_.location; }
-  Location peek_location() const { return next_.location; }
+  // -1 is outside of the range of any real source code.
+  static const int kNoOctalLocation = -1;
 
+  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+
+  explicit Scanner(UnicodeCache* scanner_constants);
+
+  void Initialize(Utf16CharacterStream* source);
+
+  // Returns the next token and advances input.
+  Token::Value Next();
+  // Returns the current token again.
+  Token::Value current_token() { return current_.token; }
+  // Returns the location information for the current token
+  // (the token last returned by Next()).
+  Location location() const { return current_.location; }
   // Returns the literal string, if any, for the current token (the
-  // token returned by Next()). The string is 0-terminated and in
-  // UTF-8 format; they may contain 0-characters. Literal strings are
-  // collected for identifiers, strings, and numbers.
+  // token last returned by Next()). The string is 0-terminated.
+  // Literal strings are collected for identifiers, strings, and
+  // numbers.
   // These functions only give the correct result if the literal
   // was scanned between calls to StartLiteral() and TerminateLiteral().
-  bool is_literal_ascii() {
-    ASSERT_NOT_NULL(current_.literal_chars);
-    return current_.literal_chars->is_ascii();
-  }
   Vector<const char> literal_ascii_string() {
     ASSERT_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->ascii_literal();
   }
-  Vector<const uc16> literal_uc16_string() {
+  Vector<const uc16> literal_utf16_string() {
     ASSERT_NOT_NULL(current_.literal_chars);
-    return current_.literal_chars->uc16_literal();
+    return current_.literal_chars->utf16_literal();
+  }
+  bool is_literal_ascii() {
+    ASSERT_NOT_NULL(current_.literal_chars);
+    return current_.literal_chars->is_ascii();
   }
   int literal_length() const {
     ASSERT_NOT_NULL(current_.literal_chars);
@@ -330,19 +361,26 @@
     return current_.literal_chars->length() != source_length;
   }
 
+  // Similar functions for the upcoming token.
+
+  // One token look-ahead (past the token returned by Next()).
+  Token::Value peek() const { return next_.token; }
+
+  Location peek_location() const { return next_.location; }
+
   // Returns the literal string for the next token (the token that
   // would be returned if Next() were called).
-  bool is_next_literal_ascii() {
-    ASSERT_NOT_NULL(next_.literal_chars);
-    return next_.literal_chars->is_ascii();
-  }
   Vector<const char> next_literal_ascii_string() {
     ASSERT_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->ascii_literal();
   }
-  Vector<const uc16> next_literal_uc16_string() {
+  Vector<const uc16> next_literal_utf16_string() {
     ASSERT_NOT_NULL(next_.literal_chars);
-    return next_.literal_chars->uc16_literal();
+    return next_.literal_chars->utf16_literal();
+  }
+  bool is_next_literal_ascii() {
+    ASSERT_NOT_NULL(next_.literal_chars);
+    return next_.literal_chars->is_ascii();
   }
   int next_literal_length() const {
     ASSERT_NOT_NULL(next_.literal_chars);
@@ -353,7 +391,52 @@
 
   static const int kCharacterLookaheadBufferSize = 1;
 
- protected:
+  // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
+  uc32 ScanOctalEscape(uc32 c, int length);
+
+  // Returns the location of the last seen octal literal.
+  Location octal_position() const { return octal_pos_; }
+  void clear_octal_position() { octal_pos_ = Location::invalid(); }
+
+  // Seek forward to the given position.  This operation does not
+  // work in general, for instance when there are pushed back
+  // characters, but works for seeking forward until simple delimiter
+  // tokens, which is what it is used for.
+  void SeekForward(int pos);
+
+  bool HarmonyScoping() const {
+    return harmony_scoping_;
+  }
+  void SetHarmonyScoping(bool scoping) {
+    harmony_scoping_ = scoping;
+  }
+  bool HarmonyModules() const {
+    return harmony_modules_;
+  }
+  void SetHarmonyModules(bool modules) {
+    harmony_modules_ = modules;
+  }
+
+
+  // Returns true if there was a line terminator before the peeked token,
+  // possibly inside a multi-line comment.
+  bool HasAnyLineTerminatorBeforeNext() const {
+    return has_line_terminator_before_next_ ||
+           has_multiline_comment_before_next_;
+  }
+
+  // Scans the input as a regular expression pattern, previous
+  // character(s) must be /(=). Returns true if a pattern is scanned.
+  bool ScanRegExpPattern(bool seen_equal);
+  // Returns true if regexp flags are scanned (always since flags can
+  // be empty).
+  bool ScanRegExpFlags();
+
+  // Tells whether the buffer contains an identifier (no escapes).
+  // Used for checking if a property name is an identifier.
+  static bool IsIdentifier(unibrow::CharacterStream* buffer);
+
+ private:
   // The current and look-ahead token.
   struct TokenDesc {
     Token::Value token;
@@ -378,7 +461,7 @@
     next_.literal_chars = free_buffer;
   }
 
-  inline void AddLiteralChar(uc32 c) {
+  INLINE(void AddLiteralChar(uc32 c)) {
     ASSERT_NOT_NULL(next_.literal_chars);
     next_.literal_chars->AddChar(c);
   }
@@ -423,6 +506,31 @@
 
   uc32 ScanHexNumber(int expected_length);
 
+  // Scans a single JavaScript token.
+  void Scan();
+
+  bool SkipWhiteSpace();
+  Token::Value SkipSingleLineComment();
+  Token::Value SkipMultiLineComment();
+  // Scans a possible HTML comment -- begins with '<!'.
+  Token::Value ScanHtmlComment();
+
+  void ScanDecimalDigits();
+  Token::Value ScanNumber(bool seen_period);
+  Token::Value ScanIdentifierOrKeyword();
+  Token::Value ScanIdentifierSuffix(LiteralScope* literal);
+
+  void ScanEscape();
+  Token::Value ScanString();
+
+  // Decodes a unicode escape-sequence which is part of an identifier.
+  // If the escape sequence cannot be decoded the result is kBadChar.
+  uc32 ScanIdentifierUnicodeEscape();
+  // Recognizes a unicode escape-sequence and adds its characters,
+  // uninterpreted, to the current literal. Used for parsing RegExp
+  // flags.
+  bool ScanLiteralUnicodeEscape();
+
   // Return the current source position.
   int source_pos() {
     return source_->pos() - kCharacterLookaheadBufferSize;
@@ -437,116 +545,16 @@
   TokenDesc current_;  // desc for current token (as returned by Next())
   TokenDesc next_;     // desc for next token (one token look-ahead)
 
-  // Input stream. Must be initialized to an UC16CharacterStream.
-  UC16CharacterStream* source_;
+  // Input stream. Must be initialized to a Utf16CharacterStream.
+  Utf16CharacterStream* source_;
 
-  // One Unicode character look-ahead; c0_ < 0 at the end of the input.
-  uc32 c0_;
-};
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner - base logic for JavaScript scanning.
-
-class JavaScriptScanner : public Scanner {
- public:
-  // A LiteralScope that disables recording of some types of JavaScript
-  // literals. If the scanner is configured to not record the specific
-  // type of literal, the scope will not call StartLiteral.
-  class LiteralScope {
-   public:
-    explicit LiteralScope(JavaScriptScanner* self)
-        : scanner_(self), complete_(false) {
-      scanner_->StartLiteral();
-    }
-     ~LiteralScope() {
-       if (!complete_) scanner_->DropLiteral();
-     }
-    void Complete() {
-      scanner_->TerminateLiteral();
-      complete_ = true;
-    }
-
-   private:
-    JavaScriptScanner* scanner_;
-    bool complete_;
-  };
-
-  explicit JavaScriptScanner(UnicodeCache* scanner_contants);
-
-  void Initialize(UC16CharacterStream* source);
-
-  // Returns the next token.
-  Token::Value Next();
-
-  // Returns true if there was a line terminator before the peek'ed token,
-  // possibly inside a multi-line comment.
-  bool HasAnyLineTerminatorBeforeNext() const {
-    return has_line_terminator_before_next_ ||
-           has_multiline_comment_before_next_;
-  }
-
-  // Scans the input as a regular expression pattern, previous
-  // character(s) must be /(=). Returns true if a pattern is scanned.
-  bool ScanRegExpPattern(bool seen_equal);
-  // Returns true if regexp flags are scanned (always since flags can
-  // be empty).
-  bool ScanRegExpFlags();
-
-  // Tells whether the buffer contains an identifier (no escapes).
-  // Used for checking if a property name is an identifier.
-  static bool IsIdentifier(unibrow::CharacterStream* buffer);
-
-  // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
-  uc32 ScanOctalEscape(uc32 c, int length);
-
-  // Returns the location of the last seen octal literal
-  Location octal_position() const { return octal_pos_; }
-  void clear_octal_position() { octal_pos_ = Location::invalid(); }
-
-  // Seek forward to the given position.  This operation does not
-  // work in general, for instance when there are pushed back
-  // characters, but works for seeking forward until simple delimiter
-  // tokens, which is what it is used for.
-  void SeekForward(int pos);
-
-  bool HarmonyBlockScoping() const {
-    return harmony_block_scoping_;
-  }
-  void SetHarmonyBlockScoping(bool block_scoping) {
-    harmony_block_scoping_ = block_scoping;
-  }
-
-
- protected:
-  bool SkipWhiteSpace();
-  Token::Value SkipSingleLineComment();
-  Token::Value SkipMultiLineComment();
-
-  // Scans a single JavaScript token.
-  void Scan();
-
-  void ScanDecimalDigits();
-  Token::Value ScanNumber(bool seen_period);
-  Token::Value ScanIdentifierOrKeyword();
-  Token::Value ScanIdentifierSuffix(LiteralScope* literal);
-
-  void ScanEscape();
-  Token::Value ScanString();
-
-  // Scans a possible HTML comment -- begins with '<!'.
-  Token::Value ScanHtmlComment();
-
-  // Decodes a unicode escape-sequence which is part of an identifier.
-  // If the escape sequence cannot be decoded the result is kBadChar.
-  uc32 ScanIdentifierUnicodeEscape();
-  // Recognizes a uniocde escape-sequence and adds its characters,
-  // uninterpreted, to the current literal. Used for parsing RegExp
-  // flags.
-  bool ScanLiteralUnicodeEscape();
 
   // Start position of the octal literal last scanned.
   Location octal_pos_;
 
+  // One Unicode character look-ahead; c0_ < 0 at the end of the input.
+  uc32 c0_;
+
   // Whether there is a line terminator whitespace character after
   // the current token, and before the next. Does not count newlines
   // inside multiline comments.
@@ -554,9 +562,10 @@
   // Whether there is a multi-line comment that contains a
   // line-terminator after the current token, and before the next.
   bool has_multiline_comment_before_next_;
-  // Whether we scan 'let' as a keyword for harmony block scoped
-  // let bindings.
-  bool harmony_block_scoping_;
+  // Whether we scan 'let' as a keyword for harmony block-scoped let bindings.
+  bool harmony_scoping_;
+  // Whether we scan 'module', 'import', 'export' as keywords.
+  bool harmony_modules_;
 };
 
 } }  // namespace v8::internal
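
The LiteralScope helper folded into Scanner above is a plain RAII guard: the literal being recorded is dropped unless Complete() is called before the scope exits. A self-contained sketch of the pattern against a hypothetical scanner (SketchScanner and its members are illustrative stand-ins, not v8 API):

    #include <cassert>
    #include <string>

    // Hypothetical scanner that records whether the literal being
    // built was kept or dropped.
    class SketchScanner {
     public:
      void StartLiteral() { buffer_.clear(); }
      void TerminateLiteral() { kept_ = true; }
      void DropLiteral() { buffer_.clear(); kept_ = false; }
      void AddChar(char c) { buffer_ += c; }
      bool kept() const { return kept_; }

      // Same shape as Scanner::LiteralScope in this patch.
      class LiteralScope {
       public:
        explicit LiteralScope(SketchScanner* self)
            : scanner_(self), complete_(false) {
          scanner_->StartLiteral();
        }
        ~LiteralScope() {
          if (!complete_) scanner_->DropLiteral();
        }
        void Complete() {
          scanner_->TerminateLiteral();
          complete_ = true;
        }
       private:
        SketchScanner* scanner_;
        bool complete_;
      };

     private:
      std::string buffer_;
      bool kept_ = false;
    };

    int main() {
      SketchScanner s;
      {
        SketchScanner::LiteralScope literal(&s);
        s.AddChar('x');
        // Scanning aborted: no Complete(), so the destructor drops it.
      }
      assert(!s.kept());
      {
        SketchScanner::LiteralScope literal(&s);
        s.AddChar('y');
        literal.Complete();  // Literal terminated and kept.
      }
      assert(s.kept());
      return 0;
    }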
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index ad31ca4..0f36234 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -38,456 +38,297 @@
 namespace internal {
 
 
-static int CompareLocal(Variable* const* v, Variable* const* w) {
-  int x = (*v)->index();
-  int y = (*w)->index();
-  // Consider sorting them according to type as well?
-  return x - y;
-}
+Handle<ScopeInfo> ScopeInfo::Create(Scope* scope) {
+  // Collect stack and context locals.
+  ZoneList<Variable*> stack_locals(scope->StackLocalCount());
+  ZoneList<Variable*> context_locals(scope->ContextLocalCount());
+  scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
+  const int stack_local_count = stack_locals.length();
+  const int context_local_count = context_locals.length();
+  // Make sure we allocate the correct amount.
+  ASSERT(scope->StackLocalCount() == stack_local_count);
+  ASSERT(scope->ContextLocalCount() == context_local_count);
 
+  // Determine use and location of the function variable if it is present.
+  FunctionVariableInfo function_name_info;
+  VariableMode function_variable_mode;
+  if (scope->is_function_scope() && scope->function() != NULL) {
+    Variable* var = scope->function()->var();
+    if (!var->is_used()) {
+      function_name_info = UNUSED;
+    } else if (var->IsContextSlot()) {
+      function_name_info = CONTEXT;
+    } else {
+      ASSERT(var->IsStackLocal());
+      function_name_info = STACK;
+    }
+    function_variable_mode = var->mode();
+  } else {
+    function_name_info = NONE;
+    function_variable_mode = VAR;
+  }
 
-template<class Allocator>
-ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
-    : function_name_(FACTORY->empty_symbol()),
-      calls_eval_(scope->calls_eval()),
-      is_strict_mode_(scope->is_strict_mode()),
-      parameters_(scope->num_parameters()),
-      stack_slots_(scope->num_stack_slots()),
-      context_slots_(scope->num_heap_slots()),
-      context_modes_(scope->num_heap_slots()) {
+  const bool has_function_name = function_name_info != NONE;
+  const int parameter_count = scope->num_parameters();
+  const int length = kVariablePartIndex
+      + parameter_count + stack_local_count + 2 * context_local_count
+      + (has_function_name ? 2 : 0);
+
+  Handle<ScopeInfo> scope_info = FACTORY->NewScopeInfo(length);
+
+  // Encode the flags.
+  int flags = TypeField::encode(scope->type()) |
+      CallsEvalField::encode(scope->calls_eval()) |
+      LanguageModeField::encode(scope->language_mode()) |
+      FunctionVariableField::encode(function_name_info) |
+      FunctionVariableMode::encode(function_variable_mode);
+  scope_info->SetFlags(flags);
+  scope_info->SetParameterCount(parameter_count);
+  scope_info->SetStackLocalCount(stack_local_count);
+  scope_info->SetContextLocalCount(context_local_count);
+
+  int index = kVariablePartIndex;
   // Add parameters.
-  for (int i = 0; i < scope->num_parameters(); i++) {
-    ASSERT(parameters_.length() == i);
-    parameters_.Add(scope->parameter(i)->name());
+  ASSERT(index == scope_info->ParameterEntriesIndex());
+  for (int i = 0; i < parameter_count; ++i) {
+    scope_info->set(index++, *scope->parameter(i)->name());
   }
 
-  // Add stack locals and collect heap locals.
-  // We are assuming that the locals' slots are allocated in
-  // increasing order, so we can simply add them to the
-  // ScopeInfo lists. However, due to usage analysis, this is
-  // not true for context-allocated locals: Some of them
-  // may be parameters which are allocated before the
-  // non-parameter locals. When the non-parameter locals are
-  // sorted according to usage, the allocated slot indices may
-  // not be in increasing order with the variable list anymore.
-  // Thus, we first collect the context-allocated locals, and then
-  // sort them by context slot index before adding them to the
-  // ScopeInfo list.
-  List<Variable*, Allocator> locals(32);  // 32 is a wild guess
-  ASSERT(locals.is_empty());
-  scope->CollectUsedVariables(&locals);
-  locals.Sort(&CompareLocal);
-
-  List<Variable*, Allocator> heap_locals(locals.length());
-  for (int i = 0; i < locals.length(); i++) {
-    Variable* var = locals[i];
-    if (var->is_used()) {
-      switch (var->location()) {
-        case Variable::UNALLOCATED:
-        case Variable::PARAMETER:
-          break;
-
-        case Variable::LOCAL:
-          ASSERT(stack_slots_.length() == var->index());
-          stack_slots_.Add(var->name());
-          break;
-
-        case Variable::CONTEXT:
-          heap_locals.Add(var);
-          break;
-
-        case Variable::LOOKUP:
-          // We don't expect lookup variables in the locals list.
-          UNREACHABLE();
-          break;
-      }
-    }
+  // Add stack locals' names. We are assuming that the stack locals'
+  // slots are allocated in increasing order, so we can simply add
+  // them to the ScopeInfo object.
+  ASSERT(index == scope_info->StackLocalEntriesIndex());
+  for (int i = 0; i < stack_local_count; ++i) {
+    ASSERT(stack_locals[i]->index() == i);
+    scope_info->set(index++, *stack_locals[i]->name());
   }
 
-  // Add heap locals.
-  if (scope->num_heap_slots() > 0) {
-    // Add user-defined slots.
-    for (int i = 0; i < heap_locals.length(); i++) {
-      ASSERT(heap_locals[i]->index() - Context::MIN_CONTEXT_SLOTS ==
-             context_slots_.length());
-      ASSERT(heap_locals[i]->index() - Context::MIN_CONTEXT_SLOTS ==
-             context_modes_.length());
-      context_slots_.Add(heap_locals[i]->name());
-      context_modes_.Add(heap_locals[i]->mode());
-    }
+  // Due to usage analysis, context-allocated locals are not necessarily in
+  // increasing order: Some of them may be parameters which are allocated before
+  // the non-parameter locals. When the non-parameter locals are sorted
+  // according to usage, the allocated slot indices may not be in increasing
+  // order with the variable list anymore. Thus, we first need to sort them by
+  // context slot index before adding them to the ScopeInfo object.
+  context_locals.Sort(&Variable::CompareIndex);
 
-  } else {
-    ASSERT(heap_locals.length() == 0);
+  // Add context locals' names.
+  ASSERT(index == scope_info->ContextLocalNameEntriesIndex());
+  for (int i = 0; i < context_local_count; ++i) {
+    scope_info->set(index++, *context_locals[i]->name());
   }
 
-  // Add the function context slot, if present.
-  // For now, this must happen at the very end because of the
-  // ordering of the scope info slots and the respective slot indices.
-  if (scope->is_function_scope()) {
-    VariableProxy* proxy = scope->function();
-    if (proxy != NULL &&
-        proxy->var()->is_used() &&
-        proxy->var()->IsContextSlot()) {
-      function_name_ = proxy->name();
-      // Note that we must not find the function name in the context slot
-      // list - instead it must be handled separately in the
-      // Contexts::Lookup() function. Thus record an empty symbol here so we
-      // get the correct number of context slots.
-      ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
-             context_slots_.length());
-      ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
-             context_modes_.length());
-      context_slots_.Add(FACTORY->empty_symbol());
-      context_modes_.Add(Variable::INTERNAL);
-    }
+  // Add context locals' info.
+  ASSERT(index == scope_info->ContextLocalInfoEntriesIndex());
+  for (int i = 0; i < context_local_count; ++i) {
+    Variable* var = context_locals[i];
+    uint32_t value = ContextLocalMode::encode(var->mode()) |
+        ContextLocalInitFlag::encode(var->initialization_flag());
+    scope_info->set(index++, Smi::FromInt(value));
   }
-}
 
-
-// Encoding format in a FixedArray object:
-//
-// - function name
-//
-// - calls eval boolean flag
-//
-// - number of variables in the context object (smi) (= function context
-//   slot index + 1)
-// - list of pairs (name, Var mode) of context-allocated variables (starting
-//   with context slot 0)
-//
-// - number of parameters (smi)
-// - list of parameter names (starting with parameter 0 first)
-//
-// - number of variables on the stack (smi)
-// - list of names of stack-allocated variables (starting with stack slot 0)
-
-// The ScopeInfo representation could be simplified and the ScopeInfo
-// re-implemented (with almost the same interface). Here is a
-// suggestion for the new format:
-//
-// - have a single list with all variable names (parameters, stack locals,
-//   context locals), followed by a list of non-Object* values containing
-//   the variables information (what kind, index, attributes)
-// - searching the linear list of names is fast and yields an index into the
-//   list if the variable name is found
-// - that list index is then used to find the variable information in the
-//   subsequent list
-// - the list entries don't have to be in any particular order, so all the
-//   current sorting business can go away
-// - the ScopeInfo lookup routines can be reduced to perhaps a single lookup
-//   which returns all information at once
-// - when gathering the information from a Scope, we only need to iterate
-//   through the local variables (parameters and context info is already
-//   present)
-
-
-static inline Object** ReadInt(Object** p, int* x) {
-  *x = (reinterpret_cast<Smi*>(*p++))->value();
-  return p;
-}
-
-
-static inline Object** ReadBool(Object** p, bool* x) {
-  *x = (reinterpret_cast<Smi*>(*p++))->value() != 0;
-  return p;
-}
-
-
-static inline Object** ReadSymbol(Object** p, Handle<String>* s) {
-  *s = Handle<String>(reinterpret_cast<String*>(*p++));
-  return p;
-}
-
-
-template <class Allocator>
-static Object** ReadList(Object** p, List<Handle<String>, Allocator >* list) {
-  ASSERT(list->is_empty());
-  int n;
-  p = ReadInt(p, &n);
-  while (n-- > 0) {
-    Handle<String> s;
-    p = ReadSymbol(p, &s);
-    list->Add(s);
+  // If present, add the function variable name and its index.
+  ASSERT(index == scope_info->FunctionNameEntryIndex());
+  if (has_function_name) {
+    int var_index = scope->function()->var()->index();
+    scope_info->set(index++, *scope->function()->name());
+    scope_info->set(index++, Smi::FromInt(var_index));
+    ASSERT(function_name_info != STACK ||
+           (var_index == scope_info->StackLocalCount() &&
+            var_index == scope_info->StackSlotCount() - 1));
+    ASSERT(function_name_info != CONTEXT ||
+           var_index == scope_info->ContextLength() - 1);
   }
-  return p;
+
+  ASSERT(index == scope_info->length());
+  ASSERT(scope->num_parameters() == scope_info->ParameterCount());
+  ASSERT(scope->num_stack_slots() == scope_info->StackSlotCount());
+  ASSERT(scope->num_heap_slots() == scope_info->ContextLength());
+  return scope_info;
 }
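
The file-static CompareLocal comparator removed above now lives on Variable as CompareIndex. Its likely shape, mirroring the deleted code and the List::Sort callback signature (a sketch; the struct below is a pared-down stand-in for v8's Variable):

    #include <cassert>
    #include <cstdlib>

    struct Variable {
      int index_;
      int index() const { return index_; }
      // Sort context locals by allocated slot index, as CompareLocal did.
      static int CompareIndex(Variable* const* v, Variable* const* w) {
        return (*v)->index() - (*w)->index();
      }
    };

    int main() {
      Variable a = {2}, b = {0}, c = {1};
      Variable* locals[] = {&a, &b, &c};
      qsort(locals, 3, sizeof(Variable*),
            [](const void* x, const void* y) {
              return Variable::CompareIndex(
                  static_cast<Variable* const*>(x),
                  static_cast<Variable* const*>(y));
            });
      assert(locals[0]->index() == 0 && locals[2]->index() == 2);
      return 0;
    }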
 
 
-template <class Allocator>
-static Object** ReadList(Object** p,
-                         List<Handle<String>, Allocator>* list,
-                         List<Variable::Mode, Allocator>* modes) {
-  ASSERT(list->is_empty());
-  int n;
-  p = ReadInt(p, &n);
-  while (n-- > 0) {
-    Handle<String> s;
-    int m;
-    p = ReadSymbol(p, &s);
-    p = ReadInt(p, &m);
-    list->Add(s);
-    modes->Add(static_cast<Variable::Mode>(m));
-  }
-  return p;
+ScopeInfo* ScopeInfo::Empty() {
+  return reinterpret_cast<ScopeInfo*>(HEAP->empty_fixed_array());
 }
 
 
-template<class Allocator>
-ScopeInfo<Allocator>::ScopeInfo(SerializedScopeInfo* data)
-  : function_name_(FACTORY->empty_symbol()),
-    parameters_(4),
-    stack_slots_(8),
-    context_slots_(8),
-    context_modes_(8) {
-  if (data->length() > 0) {
-    Object** p0 = data->data_start();
-    Object** p = p0;
-    p = ReadSymbol(p, &function_name_);
-    p = ReadBool(p, &calls_eval_);
-    p = ReadBool(p, &is_strict_mode_);
-    p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
-    p = ReadList<Allocator>(p, &parameters_);
-    p = ReadList<Allocator>(p, &stack_slots_);
-    ASSERT((p - p0) == FixedArray::cast(data)->length());
-  }
-}
-
-
-static inline Object** WriteInt(Object** p, int x) {
-  *p++ = Smi::FromInt(x);
-  return p;
-}
-
-
-static inline Object** WriteBool(Object** p, bool b) {
-  *p++ = Smi::FromInt(b ? 1 : 0);
-  return p;
-}
-
-
-static inline Object** WriteSymbol(Object** p, Handle<String> s) {
-  *p++ = *s;
-  return p;
-}
-
-
-template <class Allocator>
-static Object** WriteList(Object** p, List<Handle<String>, Allocator >* list) {
-  const int n = list->length();
-  p = WriteInt(p, n);
-  for (int i = 0; i < n; i++) {
-    p = WriteSymbol(p, list->at(i));
-  }
-  return p;
-}
-
-
-template <class Allocator>
-static Object** WriteList(Object** p,
-                          List<Handle<String>, Allocator>* list,
-                          List<Variable::Mode, Allocator>* modes) {
-  const int n = list->length();
-  p = WriteInt(p, n);
-  for (int i = 0; i < n; i++) {
-    p = WriteSymbol(p, list->at(i));
-    p = WriteInt(p, modes->at(i));
-  }
-  return p;
-}
-
-
-template<class Allocator>
-Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
-  // function name, calls eval, is_strict_mode, length for 3 tables:
-  const int extra_slots = 1 + 1 + 1 + 3;
-  int length = extra_slots +
-               context_slots_.length() * 2 +
-               parameters_.length() +
-               stack_slots_.length();
-
-  Handle<SerializedScopeInfo> data(
-      SerializedScopeInfo::cast(*FACTORY->NewSerializedScopeInfo(length)));
-  AssertNoAllocation nogc;
-
-  Object** p0 = data->data_start();
-  Object** p = p0;
-  p = WriteSymbol(p, function_name_);
-  p = WriteBool(p, calls_eval_);
-  p = WriteBool(p, is_strict_mode_);
-  p = WriteList(p, &context_slots_, &context_modes_);
-  p = WriteList(p, &parameters_);
-  p = WriteList(p, &stack_slots_);
-  ASSERT((p - p0) == length);
-
-  return data;
-}
-
-
-template<class Allocator>
-Handle<String> ScopeInfo<Allocator>::LocalName(int i) const {
-  // A local variable can be allocated either on the stack or in the context.
-  // For variables allocated in the context they are always preceded by
-  // Context::MIN_CONTEXT_SLOTS of fixed allocated slots in the context.
-  if (i < number_of_stack_slots()) {
-    return stack_slot_name(i);
-  } else {
-    return context_slot_name(i - number_of_stack_slots() +
-                             Context::MIN_CONTEXT_SLOTS);
-  }
-}
-
-
-template<class Allocator>
-int ScopeInfo<Allocator>::NumberOfLocals() const {
-  int number_of_locals = number_of_stack_slots();
-  if (number_of_context_slots() > 0) {
-    ASSERT(number_of_context_slots() >= Context::MIN_CONTEXT_SLOTS);
-    number_of_locals += number_of_context_slots() - Context::MIN_CONTEXT_SLOTS;
-  }
-  return number_of_locals;
-}
-
-
-Handle<SerializedScopeInfo> SerializedScopeInfo::Create(Scope* scope) {
-  ScopeInfo<ZoneListAllocationPolicy> sinfo(scope);
-  return sinfo.Serialize();
-}
-
-
-SerializedScopeInfo* SerializedScopeInfo::Empty() {
-  return reinterpret_cast<SerializedScopeInfo*>(HEAP->empty_fixed_array());
-}
-
-
-Object** SerializedScopeInfo::ContextEntriesAddr() {
+ScopeType ScopeInfo::Type() {
   ASSERT(length() > 0);
-  // +3 for function name, calls eval, strict mode.
-  return data_start() + 3;
+  return TypeField::decode(Flags());
 }
 
 
-Object** SerializedScopeInfo::ParameterEntriesAddr() {
-  ASSERT(length() > 0);
-  Object** p = ContextEntriesAddr();
-  int number_of_context_slots;
-  p = ReadInt(p, &number_of_context_slots);
-  return p + number_of_context_slots*2;  // *2 for pairs
+bool ScopeInfo::CallsEval() {
+  return length() > 0 && CallsEvalField::decode(Flags());
 }
 
 
-Object** SerializedScopeInfo::StackSlotEntriesAddr() {
-  ASSERT(length() > 0);
-  Object** p = ParameterEntriesAddr();
-  int number_of_parameter_slots;
-  p = ReadInt(p, &number_of_parameter_slots);
-  return p + number_of_parameter_slots;
+LanguageMode ScopeInfo::language_mode() {
+  return length() > 0 ? LanguageModeField::decode(Flags()) : CLASSIC_MODE;
 }
 
 
-bool SerializedScopeInfo::CallsEval() {
+int ScopeInfo::LocalCount() {
+  return StackLocalCount() + ContextLocalCount();
+}
+
+
+int ScopeInfo::StackSlotCount() {
   if (length() > 0) {
-    Object** p = data_start() + 1;  // +1 for function name.
-    bool calls_eval;
-    p = ReadBool(p, &calls_eval);
-    return calls_eval;
-  }
-  return false;
-}
-
-
-bool SerializedScopeInfo::IsStrictMode() {
-  if (length() > 0) {
-    Object** p = data_start() + 2;  // +2 for function name, calls eval.
-    bool strict_mode;
-    p = ReadBool(p, &strict_mode);
-    return strict_mode;
-  }
-  return false;
-}
-
-
-int SerializedScopeInfo::NumberOfStackSlots() {
-  if (length() > 0) {
-    Object** p = StackSlotEntriesAddr();
-    int number_of_stack_slots;
-    ReadInt(p, &number_of_stack_slots);
-    return number_of_stack_slots;
+    bool function_name_stack_slot =
+        FunctionVariableField::decode(Flags()) == STACK;
+    return StackLocalCount() + (function_name_stack_slot ? 1 : 0);
   }
   return 0;
 }
 
 
-int SerializedScopeInfo::NumberOfContextSlots() {
+int ScopeInfo::ContextLength() {
   if (length() > 0) {
-    Object** p = ContextEntriesAddr();
-    int number_of_context_slots;
-    ReadInt(p, &number_of_context_slots);
-    return number_of_context_slots + Context::MIN_CONTEXT_SLOTS;
+    int context_locals = ContextLocalCount();
+    bool function_name_context_slot =
+        FunctionVariableField::decode(Flags()) == CONTEXT;
+    bool has_context = context_locals > 0 ||
+        function_name_context_slot ||
+        Type() == WITH_SCOPE ||
+        (Type() == FUNCTION_SCOPE && CallsEval());
+    if (has_context) {
+      return Context::MIN_CONTEXT_SLOTS + context_locals +
+          (function_name_context_slot ? 1 : 0);
+    }
   }
   return 0;
 }
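
ContextLength() above encodes when a scope materializes a heap context at all. A minimal standalone restatement of that rule follows; NeedsContext, ContextLengthSketch and min_context_slots are illustrative names for this sketch, not part of the patch.

// Sketch of the decision in ContextLength(): a context exists iff the scope
// has context-allocated locals, a context-allocated function name, is a
// 'with' scope, or is a function scope that calls eval.
bool NeedsContext(int context_locals, bool function_name_in_context,
                  bool is_with_scope, bool function_scope_calls_eval) {
  return context_locals > 0 || function_name_in_context ||
         is_with_scope || function_scope_calls_eval;
}

int ContextLengthSketch(int min_context_slots, int context_locals,
                        bool function_name_in_context, bool is_with_scope,
                        bool function_scope_calls_eval) {
  if (!NeedsContext(context_locals, function_name_in_context, is_with_scope,
                    function_scope_calls_eval)) {
    return 0;  // no context is allocated for this scope
  }
  return min_context_slots + context_locals +
         (function_name_in_context ? 1 : 0);
}

int main() {
  // A 'with' scope with no locals still reserves the fixed minimum slots.
  return ContextLengthSketch(4, 0, false, true, false) == 4 ? 0 : 1;
}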
 
 
-bool SerializedScopeInfo::HasHeapAllocatedLocals() {
+bool ScopeInfo::HasFunctionName() {
   if (length() > 0) {
-    Object** p = ContextEntriesAddr();
-    int number_of_context_slots;
-    ReadInt(p, &number_of_context_slots);
-    return number_of_context_slots > 0;
+    return NONE != FunctionVariableField::decode(Flags());
+  } else {
+    return false;
   }
-  return false;
 }
 
 
-int SerializedScopeInfo::StackSlotIndex(String* name) {
+bool ScopeInfo::HasHeapAllocatedLocals() {
+  if (length() > 0) {
+    return ContextLocalCount() > 0;
+  } else {
+    return false;
+  }
+}
+
+
+bool ScopeInfo::HasContext() {
+  if (length() > 0) {
+    return ContextLength() > 0;
+  } else {
+    return false;
+  }
+}
+
+
+String* ScopeInfo::FunctionName() {
+  ASSERT(HasFunctionName());
+  return String::cast(get(FunctionNameEntryIndex()));
+}
+
+
+String* ScopeInfo::ParameterName(int var) {
+  ASSERT(0 <= var && var < ParameterCount());
+  int info_index = ParameterEntriesIndex() + var;
+  return String::cast(get(info_index));
+}
+
+
+String* ScopeInfo::LocalName(int var) {
+  ASSERT(0 <= var && var < LocalCount());
+  ASSERT(StackLocalEntriesIndex() + StackLocalCount() ==
+         ContextLocalNameEntriesIndex());
+  int info_index = StackLocalEntriesIndex() + var;
+  return String::cast(get(info_index));
+}
+
+
+String* ScopeInfo::StackLocalName(int var) {
+  ASSERT(0 <= var && var < StackLocalCount());
+  int info_index = StackLocalEntriesIndex() + var;
+  return String::cast(get(info_index));
+}
+
+
+String* ScopeInfo::ContextLocalName(int var) {
+  ASSERT(0 <= var && var < ContextLocalCount());
+  int info_index = ContextLocalNameEntriesIndex() + var;
+  return String::cast(get(info_index));
+}
+
+
+VariableMode ScopeInfo::ContextLocalMode(int var) {
+  ASSERT(0 <= var && var < ContextLocalCount());
+  int info_index = ContextLocalInfoEntriesIndex() + var;
+  int value = Smi::cast(get(info_index))->value();
+  return ContextLocalMode::decode(value);
+}
+
+
+InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
+  ASSERT(0 <= var && var < ContextLocalCount());
+  int info_index = ContextLocalInfoEntriesIndex() + var;
+  int value = Smi::cast(get(info_index))->value();
+  return ContextLocalInitFlag::decode(value);
+}
+
+
+int ScopeInfo::StackSlotIndex(String* name) {
   ASSERT(name->IsSymbol());
   if (length() > 0) {
-    // Slots start after length entry.
-    Object** p0 = StackSlotEntriesAddr();
-    int number_of_stack_slots;
-    p0 = ReadInt(p0, &number_of_stack_slots);
-    Object** p = p0;
-    Object** end = p0 + number_of_stack_slots;
-    while (p != end) {
-      if (*p == name) return static_cast<int>(p - p0);
-      p++;
+    int start = StackLocalEntriesIndex();
+    int end = StackLocalEntriesIndex() + StackLocalCount();
+    for (int i = start; i < end; ++i) {
+      if (name == get(i)) {
+        return i - start;
+      }
     }
   }
   return -1;
 }
 
-int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) {
+
+int ScopeInfo::ContextSlotIndex(String* name,
+                                VariableMode* mode,
+                                InitializationFlag* init_flag) {
   ASSERT(name->IsSymbol());
-  Isolate* isolate = GetIsolate();
-  int result = isolate->context_slot_cache()->Lookup(this, name, mode);
-  if (result != ContextSlotCache::kNotFound) return result;
+  ASSERT(mode != NULL);
+  ASSERT(init_flag != NULL);
   if (length() > 0) {
-    // Slots start after length entry.
-    Object** p0 = ContextEntriesAddr();
-    int number_of_context_slots;
-    p0 = ReadInt(p0, &number_of_context_slots);
-    Object** p = p0;
-    Object** end = p0 + number_of_context_slots * 2;
-    while (p != end) {
-      if (*p == name) {
-        ASSERT(((p - p0) & 1) == 0);
-        int v;
-        ReadInt(p + 1, &v);
-        Variable::Mode mode_value = static_cast<Variable::Mode>(v);
-        if (mode != NULL) *mode = mode_value;
-        result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
-        isolate->context_slot_cache()->Update(this, name, mode_value, result);
+    ContextSlotCache* context_slot_cache = GetIsolate()->context_slot_cache();
+    int result = context_slot_cache->Lookup(this, name, mode, init_flag);
+    if (result != ContextSlotCache::kNotFound) {
+      ASSERT(result < ContextLength());
+      return result;
+    }
+
+    int start = ContextLocalNameEntriesIndex();
+    int end = ContextLocalNameEntriesIndex() + ContextLocalCount();
+    for (int i = start; i < end; ++i) {
+      if (name == get(i)) {
+        int var = i - start;
+        *mode = ContextLocalMode(var);
+        *init_flag = ContextLocalInitFlag(var);
+        result = Context::MIN_CONTEXT_SLOTS + var;
+        context_slot_cache->Update(this, name, *mode, *init_flag, result);
+        ASSERT(result < ContextLength());
         return result;
       }
-      p += 2;
     }
+    context_slot_cache->Update(this, name, INTERNAL, kNeedsInitialization, -1);
   }
-  isolate->context_slot_cache()->Update(this, name, Variable::INTERNAL, -1);
   return -1;
 }
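
Note that the slot cache records misses as well as hits: the final Update call above stores a sentinel entry (INTERNAL, kNeedsInitialization, -1) so repeated lookups of an absent name skip the linear scan. A rough standalone model of that negative-caching pattern, using std::unordered_map in place of the fixed-size ContextSlotCache:

#include <string>
#include <unordered_map>

std::unordered_map<std::string, int> g_cache;  // -1 plays the cached-miss role

int SlowLookup(const std::string& name) {
  return name == "x" ? 7 : -1;  // stand-in for the linear name scan
}

int CachedLookup(const std::string& name) {
  auto it = g_cache.find(name);
  if (it != g_cache.end()) return it->second;  // hit, or previously cached miss
  int result = SlowLookup(name);
  g_cache[name] = result;                      // cache both outcomes
  return result;
}

int main() {
  CachedLookup("y");                       // miss: scanned once, then cached
  return CachedLookup("y") == -1 ? 0 : 1;  // second miss served from cache
}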
 
 
-int SerializedScopeInfo::ParameterIndex(String* name) {
+int ScopeInfo::ParameterIndex(String* name) {
   ASSERT(name->IsSymbol());
   if (length() > 0) {
     // We must read parameters from the end since for
@@ -495,41 +336,58 @@
     // last declaration of that parameter is used
     // inside a function (and thus we need to look
     // at the last index). Was bug# 1110337.
-    //
-    // Eventually, we should only register such parameters
-    // once, with corresponding index. This requires a new
-    // implementation of the ScopeInfo code. See also other
-    // comments in this file regarding this.
-    Object** p = ParameterEntriesAddr();
-    int number_of_parameter_slots;
-    Object** p0 = ReadInt(p, &number_of_parameter_slots);
-    p = p0 + number_of_parameter_slots;
-    while (p > p0) {
-      p--;
-      if (*p == name) return static_cast<int>(p - p0);
+    int start = ParameterEntriesIndex();
+    int end = ParameterEntriesIndex() + ParameterCount();
+    for (int i = end - 1; i >= start; --i) {
+      if (name == get(i)) {
+        return i - start;
+      }
     }
   }
   return -1;
 }
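
The backwards scan preserves the semantics the comment above refers to: with duplicated parameter names, as in function f(x, y, x) {}, the body's x must resolve to the last parameter. A minimal standalone model of the reverse search (LastParameterIndex is an illustrative name):

#include <cstring>

// Returns the index of the *last* parameter with the given name, mirroring
// the reverse loop in ParameterIndex() above; -1 if the name is absent.
int LastParameterIndex(const char* names[], int count, const char* name) {
  for (int i = count - 1; i >= 0; --i) {
    if (std::strcmp(names[i], name) == 0) return i;
  }
  return -1;
}

int main() {
  const char* params[] = { "x", "y", "x" };  // duplicated "x"
  return LastParameterIndex(params, 3, "x") == 2 ? 0 : 1;
}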
 
 
-int SerializedScopeInfo::FunctionContextSlotIndex(String* name) {
+int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
   ASSERT(name->IsSymbol());
+  ASSERT(mode != NULL);
   if (length() > 0) {
-    Object** p = data_start();
-    if (*p == name) {
-      p = ContextEntriesAddr();
-      int number_of_context_slots;
-      ReadInt(p, &number_of_context_slots);
-      ASSERT(number_of_context_slots != 0);
-      // The function context slot is the last entry.
-      return number_of_context_slots + Context::MIN_CONTEXT_SLOTS - 1;
+    if (FunctionVariableField::decode(Flags()) == CONTEXT &&
+        FunctionName() == name) {
+      *mode = FunctionVariableMode::decode(Flags());
+      return Smi::cast(get(FunctionNameEntryIndex() + 1))->value();
     }
   }
   return -1;
 }
 
 
+int ScopeInfo::ParameterEntriesIndex() {
+  ASSERT(length() > 0);
+  return kVariablePartIndex;
+}
+
+
+int ScopeInfo::StackLocalEntriesIndex() {
+  return ParameterEntriesIndex() + ParameterCount();
+}
+
+
+int ScopeInfo::ContextLocalNameEntriesIndex() {
+  return StackLocalEntriesIndex() + StackLocalCount();
+}
+
+
+int ScopeInfo::ContextLocalInfoEntriesIndex() {
+  return ContextLocalNameEntriesIndex() + ContextLocalCount();
+}
+
+
+int ScopeInfo::FunctionNameEntryIndex() {
+  return ContextLocalInfoEntriesIndex() + ContextLocalCount();
+}
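
Taken together, these accessors fix the flat layout of a ScopeInfo array: a header, then parameter names, stack-local names, context-local names, per-context-local info smis, and finally the optional function-name pair. A standalone arithmetic walk-through with hypothetical counts; treating kVariablePartIndex as 4 (flags plus the three count fields) is an assumption of this sketch:

#include <cstdio>

int main() {
  const int kVariablePartIndex = 4;  // assumed: flags + three count fields
  // Hypothetical scope with 2 parameters, 1 stack local, 1 context local.
  int parameter_count = 2;
  int stack_local_count = 1;
  int context_local_count = 1;

  int parameters = kVariablePartIndex;
  int stack_locals = parameters + parameter_count;
  int context_local_names = stack_locals + stack_local_count;
  int context_local_infos = context_local_names + context_local_count;
  int function_name_entry = context_local_infos + context_local_count;

  std::printf("params@%d stack@%d ctx-names@%d ctx-infos@%d fn-name@%d\n",
              parameters, stack_locals, context_local_names,
              context_local_infos, function_name_entry);
  return 0;
}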
+
+
 int ContextSlotCache::Hash(Object* data, String* name) {
   // Uses only lower 32 bits if pointers are larger.
   uintptr_t addr_hash =
@@ -540,12 +398,14 @@
 
 int ContextSlotCache::Lookup(Object* data,
                              String* name,
-                             Variable::Mode* mode) {
+                             VariableMode* mode,
+                             InitializationFlag* init_flag) {
   int index = Hash(data, name);
   Key& key = keys_[index];
   if ((key.data == data) && key.name->Equals(name)) {
     Value result(values_[index]);
     if (mode != NULL) *mode = result.mode();
+    if (init_flag != NULL) *init_flag = result.initialization_flag();
     return result.index() + kNotFound;
   }
   return kNotFound;
@@ -554,7 +414,8 @@
 
 void ContextSlotCache::Update(Object* data,
                               String* name,
-                              Variable::Mode mode,
+                              VariableMode mode,
+                              InitializationFlag init_flag,
                               int slot_index) {
   String* symbol;
   ASSERT(slot_index > kNotFound);
@@ -564,9 +425,9 @@
     key.data = data;
     key.name = symbol;
     // Please note value only takes a uint as index.
-    values_[index] = Value(mode, slot_index - kNotFound).raw();
+    values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw();
 #ifdef DEBUG
-    ValidateEntry(data, name, mode, slot_index);
+    ValidateEntry(data, name, mode, init_flag, slot_index);
 #endif
   }
 }
@@ -581,7 +442,8 @@
 
 void ContextSlotCache::ValidateEntry(Object* data,
                                      String* name,
-                                     Variable::Mode mode,
+                                     VariableMode mode,
+                                     InitializationFlag init_flag,
                                      int slot_index) {
   String* symbol;
   if (HEAP->LookupSymbolIfExists(name, &symbol)) {
@@ -591,51 +453,56 @@
     ASSERT(key.name->Equals(name));
     Value result(values_[index]);
     ASSERT(result.mode() == mode);
+    ASSERT(result.initialization_flag() == init_flag);
     ASSERT(result.index() + kNotFound == slot_index);
   }
 }
 
 
-template <class Allocator>
 static void PrintList(const char* list_name,
                       int nof_internal_slots,
-                      List<Handle<String>, Allocator>& list) {
-  if (list.length() > 0) {
+                      int start,
+                      int end,
+                      ScopeInfo* scope_info) {
+  if (start < end) {
     PrintF("\n  // %s\n", list_name);
     if (nof_internal_slots > 0) {
       PrintF("  %2d - %2d [internal slots]\n", 0 , nof_internal_slots - 1);
     }
-    for (int i = 0; i < list.length(); i++) {
-      PrintF("  %2d ", i + nof_internal_slots);
-      list[i]->ShortPrint();
+    for (int i = nof_internal_slots; start < end; ++i, ++start) {
+      PrintF("  %2d ", i);
+      String::cast(scope_info->get(start))->ShortPrint();
       PrintF("\n");
     }
   }
 }
 
 
-template<class Allocator>
-void ScopeInfo<Allocator>::Print() {
+void ScopeInfo::Print() {
   PrintF("ScopeInfo ");
-  if (function_name_->length() > 0)
-    function_name_->ShortPrint();
-  else
+  if (HasFunctionName()) {
+    FunctionName()->ShortPrint();
+  } else {
     PrintF("/* no function name */");
+  }
   PrintF("{");
 
-  PrintList<Allocator>("parameters", 0, parameters_);
-  PrintList<Allocator>("stack slots", 0, stack_slots_);
-  PrintList<Allocator>("context slots", Context::MIN_CONTEXT_SLOTS,
-                       context_slots_);
+  PrintList("parameters", 0,
+            ParameterEntriesIndex(),
+            ParameterEntriesIndex() + ParameterCount(),
+            this);
+  PrintList("stack slots", 0,
+            StackLocalEntriesIndex(),
+            StackLocalEntriesIndex() + StackLocalCount(),
+            this);
+  PrintList("context slots",
+            Context::MIN_CONTEXT_SLOTS,
+            ContextLocalNameEntriesIndex(),
+            ContextLocalNameEntriesIndex() + ContextLocalCount(),
+            this);
 
   PrintF("}\n");
 }
 #endif  // DEBUG
 
-
-// Make sure the classes get instantiated by the template system.
-template class ScopeInfo<FreeStoreAllocationPolicy>;
-template class ScopeInfo<PreallocatedStorage>;
-template class ScopeInfo<ZoneListAllocationPolicy>;
-
 } }  // namespace v8::internal
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index 40c5c8a..93734f5 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -35,135 +35,6 @@
 namespace v8 {
 namespace internal {
 
-// Scope information represents information about a functions's
-// scopes (currently only one, because we don't do any inlining)
-// and the allocation of the scope's variables. Scope information
-// is stored in a compressed form in FixedArray objects and is used
-// at runtime (stack dumps, deoptimization, etc.).
-//
-// Historical note: In other VMs built by this team, ScopeInfo was
-// usually called DebugInfo since the information was used (among
-// other things) for on-demand debugging (Self, Smalltalk). However,
-// DebugInfo seems misleading, since this information is primarily used
-// in debugging-unrelated contexts.
-
-// Forward defined as
-// template <class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
-template<class Allocator>
-class ScopeInfo BASE_EMBEDDED {
- public:
-  // Create a ScopeInfo instance from a scope.
-  explicit ScopeInfo(Scope* scope);
-
-  // Create a ScopeInfo instance from SerializedScopeInfo.
-  explicit ScopeInfo(SerializedScopeInfo* data);
-
-  // Creates a SerializedScopeInfo holding the serialized scope info.
-  Handle<SerializedScopeInfo> Serialize();
-
-  // --------------------------------------------------------------------------
-  // Lookup
-
-  Handle<String> function_name() const { return function_name_; }
-
-  Handle<String> parameter_name(int i) const { return parameters_[i]; }
-  int number_of_parameters() const { return parameters_.length(); }
-
-  Handle<String> stack_slot_name(int i) const { return stack_slots_[i]; }
-  int number_of_stack_slots() const { return stack_slots_.length(); }
-
-  Handle<String> context_slot_name(int i) const {
-    return context_slots_[i - Context::MIN_CONTEXT_SLOTS];
-  }
-  int number_of_context_slots() const {
-    int l = context_slots_.length();
-    return l == 0 ? 0 : l + Context::MIN_CONTEXT_SLOTS;
-  }
-
-  Handle<String> LocalName(int i) const;
-  int NumberOfLocals() const;
-
-  // --------------------------------------------------------------------------
-  // Debugging support
-
-#ifdef DEBUG
-  void Print();
-#endif
-
- private:
-  Handle<String> function_name_;
-  bool calls_eval_;
-  bool is_strict_mode_;
-  List<Handle<String>, Allocator > parameters_;
-  List<Handle<String>, Allocator > stack_slots_;
-  List<Handle<String>, Allocator > context_slots_;
-  List<Variable::Mode, Allocator > context_modes_;
-};
-
-
-// This object provides quick access to scope info details for runtime
-// routines w/o the need to explicitly create a ScopeInfo object.
-class SerializedScopeInfo : public FixedArray {
- public :
-
-  static SerializedScopeInfo* cast(Object* object) {
-    ASSERT(object->IsSerializedScopeInfo());
-    return reinterpret_cast<SerializedScopeInfo*>(object);
-  }
-
-  // Does this scope call eval?
-  bool CallsEval();
-
-  // Is this scope a strict mode scope?
-  bool IsStrictMode();
-
-  // Return the number of stack slots for code.
-  int NumberOfStackSlots();
-
-  // Return the number of context slots for code.
-  int NumberOfContextSlots();
-
-  // Return if this has context slots besides MIN_CONTEXT_SLOTS;
-  bool HasHeapAllocatedLocals();
-
-  // Lookup support for serialized scope info. Returns the
-  // the stack slot index for a given slot name if the slot is
-  // present; otherwise returns a value < 0. The name must be a symbol
-  // (canonicalized).
-  int StackSlotIndex(String* name);
-
-  // Lookup support for serialized scope info. Returns the
-  // context slot index for a given slot name if the slot is present; otherwise
-  // returns a value < 0. The name must be a symbol (canonicalized).
-  // If the slot is present and mode != NULL, sets *mode to the corresponding
-  // mode for that variable.
-  int ContextSlotIndex(String* name, Variable::Mode* mode);
-
-  // Lookup support for serialized scope info. Returns the
-  // parameter index for a given parameter name if the parameter is present;
-  // otherwise returns a value < 0. The name must be a symbol (canonicalized).
-  int ParameterIndex(String* name);
-
-  // Lookup support for serialized scope info. Returns the
-  // function context slot index if the function name is present (named
-  // function expressions, only), otherwise returns a value < 0. The name
-  // must be a symbol (canonicalized).
-  int FunctionContextSlotIndex(String* name);
-
-  static Handle<SerializedScopeInfo> Create(Scope* scope);
-
-  // Serializes empty scope info.
-  static SerializedScopeInfo* Empty();
-
- private:
-  inline Object** ContextEntriesAddr();
-
-  inline Object** ParameterEntriesAddr();
-
-  inline Object** StackSlotEntriesAddr();
-};
-
-
 // Cache for mapping (data, property name) into context slot index.
 // The cache contains both positive and negative results.
 // Slot index equals -1 means the property is absent.
@@ -174,12 +45,14 @@
   // If absent, kNotFound is returned.
   int Lookup(Object* data,
              String* name,
-             Variable::Mode* mode);
+             VariableMode* mode,
+             InitializationFlag* init_flag);
 
   // Update an element in the cache.
   void Update(Object* data,
               String* name,
-              Variable::Mode mode,
+              VariableMode mode,
+              InitializationFlag init_flag,
               int slot_index);
 
   // Clear the cache.
@@ -201,7 +74,8 @@
 #ifdef DEBUG
   void ValidateEntry(Object* data,
                      String* name,
-                     Variable::Mode mode,
+                     VariableMode mode,
+                     InitializationFlag init_flag,
                      int slot_index);
 #endif
 
@@ -212,11 +86,17 @@
   };
 
   struct Value {
-    Value(Variable::Mode mode, int index) {
+    Value(VariableMode mode,
+          InitializationFlag init_flag,
+          int index) {
       ASSERT(ModeField::is_valid(mode));
+      ASSERT(InitField::is_valid(init_flag));
       ASSERT(IndexField::is_valid(index));
-      value_ = ModeField::encode(mode) | IndexField::encode(index);
+      value_ = ModeField::encode(mode) |
+          IndexField::encode(index) |
+          InitField::encode(init_flag);
       ASSERT(mode == this->mode());
+      ASSERT(init_flag == this->initialization_flag());
       ASSERT(index == this->index());
     }
 
@@ -224,14 +104,20 @@
 
     uint32_t raw() { return value_; }
 
-    Variable::Mode mode() { return ModeField::decode(value_); }
+    VariableMode mode() { return ModeField::decode(value_); }
+
+    InitializationFlag initialization_flag() {
+      return InitField::decode(value_);
+    }
 
     int index() { return IndexField::decode(value_); }
 
     // Bit fields in value_ (type, shift, size). Must be public so the
     // constants can be embedded in generated code.
-    class ModeField:  public BitField<Variable::Mode, 0, 3> {};
-    class IndexField: public BitField<int,            3, 32-3> {};
+    class ModeField:  public BitField<VariableMode,       0, 3> {};
+    class InitField:  public BitField<InitializationFlag, 3, 1> {};
+    class IndexField: public BitField<int,                4, 32-4> {};
+
    private:
     uint32_t value_;
   };
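
With the widened fields, Value packs the variable mode (3 bits), the new initialization flag (1 bit) and the slot index (28 bits) into a single uint32_t. A standalone sketch of the same packing written with plain shifts and masks:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kModeBits = 3, kInitBits = 1;  // matches ModeField/InitField
  uint32_t mode = 5;       // some VariableMode enumerator value (fits 3 bits)
  uint32_t init_flag = 1;  // some InitializationFlag enumerator value
  uint32_t index = 42;     // slot index, must fit in 28 bits

  uint32_t value = mode |
                   (init_flag << kModeBits) |
                   (index << (kModeBits + kInitBits));

  assert((value & 0x7u) == mode);              // what ModeField::decode reads
  assert(((value >> 3) & 0x1u) == init_flag);  // what InitField::decode reads
  assert((value >> 4) == index);               // what IndexField::decode reads
  return 0;
}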
diff --git a/src/scopes.cc b/src/scopes.cc
index d5a7a9f..859cbd1 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,6 +31,7 @@
 
 #include "bootstrapper.h"
 #include "compiler.h"
+#include "messages.h"
 #include "scopeinfo.h"
 
 #include "allocation-inl.h"
@@ -39,26 +40,6 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// A Zone allocator for use with LocalsMap.
-
-// TODO(isolates): It is probably worth it to change the Allocator class to
-//                 take a pointer to an isolate.
-class ZoneAllocator: public Allocator {
- public:
-  /* nothing to do */
-  virtual ~ZoneAllocator()  {}
-
-  virtual void* New(size_t size)  { return ZONE->New(static_cast<int>(size)); }
-
-  /* ignored - Zone is freed in one fell swoop */
-  virtual void Delete(void* p)  {}
-};
-
-
-static ZoneAllocator LocalsMapAllocator;
-
-
-// ----------------------------------------------------------------------------
 // Implementation of LocalsMap
 //
 // Note: We are storing the handle locations as key values in the hash map.
@@ -76,30 +57,36 @@
 }
 
 
-// Dummy constructor
-VariableMap::VariableMap(bool gotta_love_static_overloading) : HashMap() {}
-
-VariableMap::VariableMap() : HashMap(Match, &LocalsMapAllocator, 8) {}
+VariableMap::VariableMap() : ZoneHashMap(Match, 8) {}
 VariableMap::~VariableMap() {}
 
 
-Variable* VariableMap::Declare(Scope* scope,
-                               Handle<String> name,
-                               Variable::Mode mode,
-                               bool is_valid_lhs,
-                               Variable::Kind kind) {
-  HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
+Variable* VariableMap::Declare(
+    Scope* scope,
+    Handle<String> name,
+    VariableMode mode,
+    bool is_valid_lhs,
+    Variable::Kind kind,
+    InitializationFlag initialization_flag,
+    Interface* interface) {
+  Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), true);
   if (p->value == NULL) {
     // The variable has not been declared yet -> insert it.
     ASSERT(p->key == name.location());
-    p->value = new Variable(scope, name, mode, is_valid_lhs, kind);
+    p->value = new Variable(scope,
+                            name,
+                            mode,
+                            is_valid_lhs,
+                            kind,
+                            initialization_flag,
+                            interface);
   }
   return reinterpret_cast<Variable*>(p->value);
 }
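
Declare follows the usual lookup-with-insert idiom: Lookup(..., true) creates the entry when the key is absent, and a NULL value marks a fresh insertion that still needs its Variable. A rough standalone equivalent using std::unordered_map, with Variable as a placeholder type:

#include <string>
#include <unordered_map>

struct Variable {};  // placeholder, not v8::internal::Variable

Variable* Declare(std::unordered_map<std::string, Variable*>* map,
                  const std::string& name) {
  Variable*& slot = (*map)[name];              // inserts nullptr when absent
  if (slot == nullptr) slot = new Variable();  // first declaration creates it
  return slot;                                 // later declarations reuse it
}

int main() {
  std::unordered_map<std::string, Variable*> map;
  return Declare(&map, "a") == Declare(&map, "a") ? 0 : 1;  // same Variable
}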
 
 
 Variable* VariableMap::Lookup(Handle<String> name) {
-  HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), false);
+  Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), false);
   if (p != NULL) {
     ASSERT(*reinterpret_cast<String**>(p->key) == *name);
     ASSERT(p->value != NULL);
@@ -112,22 +99,7 @@
 // ----------------------------------------------------------------------------
 // Implementation of Scope
 
-
-// Dummy constructor
-Scope::Scope(Type type)
-    : isolate_(Isolate::Current()),
-      inner_scopes_(0),
-      variables_(false),
-      temps_(0),
-      params_(0),
-      unresolved_(0),
-      decls_(0),
-      already_resolved_(false) {
-  SetDefaults(type, NULL, Handle<SerializedScopeInfo>::null());
-}
-
-
-Scope::Scope(Scope* outer_scope, Type type)
+Scope::Scope(Scope* outer_scope, ScopeType type)
     : isolate_(Isolate::Current()),
       inner_scopes_(4),
       variables_(),
@@ -135,19 +107,22 @@
       params_(4),
       unresolved_(16),
       decls_(4),
+      interface_(FLAG_harmony_modules &&
+                 (type == MODULE_SCOPE || type == GLOBAL_SCOPE)
+                     ? Interface::NewModule() : NULL),
       already_resolved_(false) {
-  SetDefaults(type, outer_scope, Handle<SerializedScopeInfo>::null());
+  SetDefaults(type, outer_scope, Handle<ScopeInfo>::null());
   // At some point we might want to provide outer scopes to
   // eval scopes (by walking the stack and reading the scope info).
   // In that case, the ASSERT below needs to be adjusted.
-  ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
+  ASSERT_EQ(type == GLOBAL_SCOPE, outer_scope == NULL);
   ASSERT(!HasIllegalRedeclaration());
 }
 
 
 Scope::Scope(Scope* inner_scope,
-             Type type,
-             Handle<SerializedScopeInfo> scope_info)
+             ScopeType type,
+             Handle<ScopeInfo> scope_info)
     : isolate_(Isolate::Current()),
       inner_scopes_(4),
       variables_(),
@@ -155,12 +130,15 @@
       params_(4),
       unresolved_(16),
       decls_(4),
+      interface_(NULL),
       already_resolved_(true) {
-  ASSERT(!scope_info.is_null());
   SetDefaults(type, NULL, scope_info);
-  if (scope_info->HasHeapAllocatedLocals()) {
-    num_heap_slots_ = scope_info_->NumberOfContextSlots();
+  if (!scope_info.is_null()) {
+    num_heap_slots_ = scope_info_->ContextLength();
   }
+  // Ensure at least MIN_CONTEXT_SLOTS to indicate a materialized context.
+  num_heap_slots_ = Max(num_heap_slots_,
+                        static_cast<int>(Context::MIN_CONTEXT_SLOTS));
   AddInnerScope(inner_scope);
 }
 
@@ -173,22 +151,25 @@
       params_(0),
       unresolved_(0),
       decls_(0),
+      interface_(NULL),
       already_resolved_(true) {
-  SetDefaults(CATCH_SCOPE, NULL, Handle<SerializedScopeInfo>::null());
+  SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null());
   AddInnerScope(inner_scope);
   ++num_var_or_const_;
+  num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
   Variable* variable = variables_.Declare(this,
                                           catch_variable_name,
-                                          Variable::VAR,
+                                          VAR,
                                           true,  // Valid left-hand side.
-                                          Variable::NORMAL);
+                                          Variable::NORMAL,
+                                          kCreatedInitialized);
   AllocateHeapSlot(variable);
 }
 
 
-void Scope::SetDefaults(Type type,
+void Scope::SetDefaults(ScopeType type,
                         Scope* outer_scope,
-                        Handle<SerializedScopeInfo> scope_info) {
+                        Handle<ScopeInfo> scope_info) {
   outer_scope_ = outer_scope;
   type_ = type;
   scope_name_ = isolate_->factory()->empty_symbol();
@@ -201,53 +182,57 @@
   scope_contains_with_ = false;
   scope_calls_eval_ = false;
   // Inherit the strict mode from the parent scope.
-  strict_mode_ = (outer_scope != NULL) && outer_scope->strict_mode_;
-  outer_scope_calls_eval_ = false;
+  language_mode_ = (outer_scope != NULL)
+      ? outer_scope->language_mode_ : CLASSIC_MODE;
   outer_scope_calls_non_strict_eval_ = false;
   inner_scope_calls_eval_ = false;
-  outer_scope_is_eval_scope_ = false;
   force_eager_compilation_ = false;
   num_var_or_const_ = 0;
   num_stack_slots_ = 0;
   num_heap_slots_ = 0;
   scope_info_ = scope_info;
+  start_position_ = RelocInfo::kNoPosition;
+  end_position_ = RelocInfo::kNoPosition;
+  if (!scope_info.is_null()) {
+    scope_calls_eval_ = scope_info->CallsEval();
+    language_mode_ = scope_info->language_mode();
+  }
 }
 
 
-Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
-                                    Scope* global_scope) {
+Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope) {
   // Reconstruct the outer scope chain from a closure's context chain.
-  ASSERT(!info->closure().is_null());
-  Context* context = info->closure()->context();
   Scope* current_scope = NULL;
   Scope* innermost_scope = NULL;
   bool contains_with = false;
   while (!context->IsGlobalContext()) {
     if (context->IsWithContext()) {
+      Scope* with_scope = new Scope(current_scope,
+                                    WITH_SCOPE,
+                                    Handle<ScopeInfo>::null());
+      current_scope = with_scope;
       // All the inner scopes are inside a with.
       contains_with = true;
       for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
         s->scope_inside_with_ = true;
       }
+    } else if (context->IsFunctionContext()) {
+      ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+      current_scope = new Scope(current_scope,
+                                FUNCTION_SCOPE,
+                                Handle<ScopeInfo>(scope_info));
+    } else if (context->IsBlockContext()) {
+      ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
+      current_scope = new Scope(current_scope,
+                                BLOCK_SCOPE,
+                                Handle<ScopeInfo>(scope_info));
     } else {
-      if (context->IsFunctionContext()) {
-        SerializedScopeInfo* scope_info =
-            context->closure()->shared()->scope_info();
-        current_scope = new Scope(current_scope, FUNCTION_SCOPE,
-            Handle<SerializedScopeInfo>(scope_info));
-      } else if (context->IsBlockContext()) {
-        SerializedScopeInfo* scope_info =
-            SerializedScopeInfo::cast(context->extension());
-        current_scope = new Scope(current_scope, BLOCK_SCOPE,
-            Handle<SerializedScopeInfo>(scope_info));
-      } else {
-        ASSERT(context->IsCatchContext());
-        String* name = String::cast(context->extension());
-        current_scope = new Scope(current_scope, Handle<String>(name));
-      }
-      if (contains_with) current_scope->RecordWithStatement();
-      if (innermost_scope == NULL) innermost_scope = current_scope;
+      ASSERT(context->IsCatchContext());
+      String* name = String::cast(context->extension());
+      current_scope = new Scope(current_scope, Handle<String>(name));
     }
+    if (contains_with) current_scope->RecordWithStatement();
+    if (innermost_scope == NULL) innermost_scope = current_scope;
 
     // Forget about a with when we move to a context for a different function.
     if (context->previous()->closure() != context->closure()) {
@@ -257,39 +242,73 @@
   }
 
   global_scope->AddInnerScope(current_scope);
+  global_scope->PropagateScopeInfo(false);
   return (innermost_scope == NULL) ? global_scope : innermost_scope;
 }
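
The reconstruction walks contexts from the innermost outward; each iteration wraps the scope built so far as an inner scope of a newly created outer one, and the first scope created is remembered as the innermost. A much-simplified standalone model of that shape; Ctx and Sco are stand-ins, and the with/function/block/catch distinctions are omitted:

struct Ctx { Ctx* previous; bool is_global; };
struct Sco { Sco* inner; explicit Sco(Sco* i) : inner(i) {} };

Sco* DeserializeChainSketch(Ctx* context, Sco* global_scope) {
  Sco* current = nullptr;
  Sco* innermost = nullptr;
  while (!context->is_global) {
    current = new Sco(current);  // new outer scope adopts 'current' as inner
    if (innermost == nullptr) innermost = current;
    context = context->previous;
  }
  if (current != nullptr) global_scope->inner = current;  // AddInnerScope
  return innermost != nullptr ? innermost : global_scope;
}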
 
 
 bool Scope::Analyze(CompilationInfo* info) {
   ASSERT(info->function() != NULL);
-  Scope* top = info->function()->scope();
+  Scope* scope = info->function()->scope();
+  Scope* top = scope;
 
-  while (top->outer_scope() != NULL) top = top->outer_scope();
-  top->AllocateVariables(info->calling_context());
+  // Traverse the scope tree up to the first unresolved scope or the global
+  // scope and start scope resolution and variable allocation from that scope.
+  while (!top->is_global_scope() &&
+         !top->outer_scope()->already_resolved()) {
+    top = top->outer_scope();
+  }
+
+  // Allocate the variables.
+  {
+    AstNodeFactory<AstNullVisitor> ast_node_factory(info->isolate());
+    if (!top->AllocateVariables(info, &ast_node_factory)) return false;
+  }
 
 #ifdef DEBUG
   if (info->isolate()->bootstrapper()->IsActive()
           ? FLAG_print_builtin_scopes
           : FLAG_print_scopes) {
-    info->function()->scope()->Print();
+    scope->Print();
+  }
+
+  if (FLAG_harmony_modules && FLAG_print_interfaces && top->is_global_scope()) {
+    PrintF("global : ");
+    top->interface()->Print();
   }
 #endif
 
-  info->SetScope(info->function()->scope());
-  return true;  // Can not fail.
+  if (FLAG_harmony_scoping) {
+    VariableProxy* proxy = scope->CheckAssignmentToConst();
+    if (proxy != NULL) {
+      // Found an assignment to const. Throw a syntax error.
+      MessageLocation location(info->script(),
+                               proxy->position(),
+                               proxy->position());
+      Isolate* isolate = info->isolate();
+      Factory* factory = isolate->factory();
+      Handle<JSArray> array = factory->NewJSArray(0);
+      Handle<Object> result =
+          factory->NewSyntaxError("harmony_const_assign", array);
+      isolate->Throw(*result, &location);
+      return false;
+    }
+  }
+
+  info->SetScope(scope);
+  return true;
 }
 
 
-void Scope::Initialize(bool inside_with) {
+void Scope::Initialize() {
   ASSERT(!already_resolved());
 
   // Add this scope as a new inner scope of the outer scope.
   if (outer_scope_ != NULL) {
     outer_scope_->inner_scopes_.Add(this);
-    scope_inside_with_ = outer_scope_->scope_inside_with_ || inside_with;
+    scope_inside_with_ = outer_scope_->scope_inside_with_ || is_with_scope();
   } else {
-    scope_inside_with_ = inside_with;
+    scope_inside_with_ = is_with_scope();
   }
 
   // Declare convenience variables.
@@ -300,21 +319,19 @@
   // instead load them directly from the stack. Currently, the only
   // such parameter is 'this', which is passed on the stack when
   // invoking scripts.
-  if (is_catch_scope() || is_block_scope()) {
-    ASSERT(outer_scope() != NULL);
-    receiver_ = outer_scope()->receiver();
-  } else {
-    ASSERT(is_function_scope() ||
-           is_global_scope() ||
-           is_eval_scope());
+  if (is_declaration_scope()) {
     Variable* var =
         variables_.Declare(this,
                            isolate_->factory()->this_symbol(),
-                           Variable::VAR,
+                           VAR,
                            false,
-                           Variable::THIS);
+                           Variable::THIS,
+                           kCreatedInitialized);
     var->AllocateTo(Variable::PARAMETER, -1);
     receiver_ = var;
+  } else {
+    ASSERT(outer_scope() != NULL);
+    receiver_ = outer_scope()->receiver();
   }
 
   if (is_function_scope()) {
@@ -323,9 +340,10 @@
     // allocated during variable allocation.
     variables_.Declare(this,
                        isolate_->factory()->arguments_symbol(),
-                       Variable::VAR,
+                       VAR,
                        true,
-                       Variable::ARGUMENTS);
+                       Variable::ARGUMENTS,
+                       kCreatedInitialized);
   }
 }
 
@@ -365,34 +383,51 @@
     return result;
   }
   // If we have a serialized scope info, we might find the variable there.
-  //
-  // We should never lookup 'arguments' in this scope as it is implicitly
-  // present in every scope.
-  ASSERT(*name != *isolate_->factory()->arguments_symbol());
   // There should be no local slot with the given name.
   ASSERT(scope_info_->StackSlotIndex(*name) < 0);
 
   // Check context slot lookup.
-  Variable::Mode mode;
-  int index = scope_info_->ContextSlotIndex(*name, &mode);
+  VariableMode mode;
+  InitializationFlag init_flag;
+  int index = scope_info_->ContextSlotIndex(*name, &mode, &init_flag);
   if (index < 0) {
     // Check parameters.
-    mode = Variable::VAR;
+    mode = VAR;
+    init_flag = kCreatedInitialized;
     index = scope_info_->ParameterIndex(*name);
-    if (index < 0) {
-      // Check the function name.
-      index = scope_info_->FunctionContextSlotIndex(*name);
-      if (index < 0) return NULL;
-    }
+    if (index < 0) return NULL;
   }
 
   Variable* var =
-      variables_.Declare(this, name, mode, true, Variable::NORMAL);
+      variables_.Declare(this,
+                         name,
+                         mode,
+                         true,
+                         Variable::NORMAL,
+                         init_flag);
   var->AllocateTo(Variable::CONTEXT, index);
   return var;
 }
 
 
+Variable* Scope::LookupFunctionVar(Handle<String> name,
+                                   AstNodeFactory<AstNullVisitor>* factory) {
+  if (function_ != NULL && function_->name().is_identical_to(name)) {
+    return function_->var();
+  } else if (!scope_info_.is_null()) {
+    // If we are backed by a scope info, try to lookup the variable there.
+    VariableMode mode;
+    int index = scope_info_->FunctionContextSlotIndex(*name, &mode);
+    if (index < 0) return NULL;
+    Variable* var = DeclareFunctionVar(name, mode, factory);
+    var->AllocateTo(Variable::CONTEXT, index);
+    return var;
+  } else {
+    return NULL;
+  }
+}
+
+
 Variable* Scope::Lookup(Handle<String> name) {
   for (Scope* scope = this;
        scope != NULL;
@@ -404,56 +439,41 @@
 }
 
 
-Variable* Scope::DeclareFunctionVar(Handle<String> name) {
-  ASSERT(is_function_scope() && function_ == NULL);
-  Variable* function_var =
-      new Variable(this, name, Variable::CONST, true, Variable::NORMAL);
-  function_ = new(isolate_->zone()) VariableProxy(isolate_, function_var);
-  return function_var;
-}
-
-
-void Scope::DeclareParameter(Handle<String> name, Variable::Mode mode) {
+void Scope::DeclareParameter(Handle<String> name, VariableMode mode) {
   ASSERT(!already_resolved());
   ASSERT(is_function_scope());
-  Variable* var =
-      variables_.Declare(this, name, mode, true, Variable::NORMAL);
+  Variable* var = variables_.Declare(
+      this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
   params_.Add(var);
 }
 
 
-Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
+Variable* Scope::DeclareLocal(Handle<String> name,
+                              VariableMode mode,
+                              InitializationFlag init_flag,
+                              Interface* interface) {
   ASSERT(!already_resolved());
   // This function handles VAR and CONST modes.  DYNAMIC variables are
   // introduced during variable allocation, INTERNAL variables are allocated
   // explicitly, and TEMPORARY variables are allocated via NewTemporary().
-  ASSERT(mode == Variable::VAR ||
-         mode == Variable::CONST ||
-         mode == Variable::LET);
+  ASSERT(mode == VAR ||
+         mode == CONST ||
+         mode == CONST_HARMONY ||
+         mode == LET);
   ++num_var_or_const_;
-  return variables_.Declare(this, name, mode, true, Variable::NORMAL);
+  return variables_.Declare(
+      this, name, mode, true, Variable::NORMAL, init_flag, interface);
 }
 
 
 Variable* Scope::DeclareGlobal(Handle<String> name) {
   ASSERT(is_global_scope());
-  return variables_.Declare(this, name, Variable::DYNAMIC_GLOBAL,
+  return variables_.Declare(this,
+                            name,
+                            DYNAMIC_GLOBAL,
                             true,
-                            Variable::NORMAL);
-}
-
-
-VariableProxy* Scope::NewUnresolved(Handle<String> name,
-                                    bool inside_with,
-                                    int position) {
-  // Note that we must not share the unresolved variables with
-  // the same name because they may be removed selectively via
-  // RemoveUnresolved().
-  ASSERT(!already_resolved());
-  VariableProxy* proxy = new(isolate_->zone()) VariableProxy(
-      isolate_, name, false, inside_with, position);
-  unresolved_.Add(proxy);
-  return proxy;
+                            Variable::NORMAL,
+                            kCreatedInitialized);
 }
 
 
@@ -473,9 +493,10 @@
   ASSERT(!already_resolved());
   Variable* var = new Variable(this,
                                name,
-                               Variable::TEMPORARY,
+                               TEMPORARY,
                                true,
-                               Variable::NORMAL);
+                               Variable::NORMAL,
+                               kCreatedInitialized);
   temps_.Add(var);
   return var;
 }
@@ -505,84 +526,97 @@
   int length = decls_.length();
   for (int i = 0; i < length; i++) {
     Declaration* decl = decls_[i];
-    if (decl->mode() != Variable::VAR) continue;
+    if (decl->mode() != VAR) continue;
     Handle<String> name = decl->proxy()->name();
-    bool cond = true;
-    for (Scope* scope = decl->scope(); cond ; scope = scope->outer_scope_) {
+
+    // Iterate through all scopes until and including the declaration scope.
+    Scope* previous = NULL;
+    Scope* current = decl->scope();
+    do {
       // There is a conflict if there exists a non-VAR binding.
-      Variable* other_var = scope->variables_.Lookup(name);
-      if (other_var != NULL && other_var->mode() != Variable::VAR) {
+      Variable* other_var = current->variables_.Lookup(name);
+      if (other_var != NULL && other_var->mode() != VAR) {
         return decl;
       }
-
-      // Include declaration scope in the iteration but stop after.
-      if (!scope->is_block_scope() && !scope->is_catch_scope()) cond = false;
-    }
+      previous = current;
+      current = current->outer_scope_;
+    } while (!previous->is_declaration_scope());
   }
   return NULL;
 }
 
 
-template<class Allocator>
-void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) {
-  // Collect variables in this scope.
-  // Note that the function_ variable - if present - is not
-  // collected here but handled separately in ScopeInfo
-  // which is the current user of this function).
+VariableProxy* Scope::CheckAssignmentToConst() {
+  // Check this scope.
+  if (is_extended_mode()) {
+    for (int i = 0; i < unresolved_.length(); i++) {
+      ASSERT(unresolved_[i]->var() != NULL);
+      if (unresolved_[i]->var()->is_const_mode() &&
+          unresolved_[i]->IsLValue()) {
+        return unresolved_[i];
+      }
+    }
+  }
+
+  // Check inner scopes.
+  for (int i = 0; i < inner_scopes_.length(); i++) {
+    VariableProxy* proxy = inner_scopes_[i]->CheckAssignmentToConst();
+    if (proxy != NULL) return proxy;
+  }
+
+  // No assignments to const found.
+  return NULL;
+}
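
Structurally, the const-assignment check is a depth-first search over the scope tree: report an offending proxy in the current scope first, otherwise recurse into the inner scopes and return the first hit. A minimal standalone model:

#include <vector>

struct ScopeNode {
  bool has_offender = false;        // stand-in for a const-assigning proxy
  std::vector<ScopeNode*> inner;

  ScopeNode* FindOffender() {
    if (has_offender) return this;  // check this scope first
    for (ScopeNode* s : inner) {    // then inner scopes, depth-first
      if (ScopeNode* hit = s->FindOffender()) return hit;
    }
    return nullptr;                 // no assignment to const found
  }
};

int main() {
  ScopeNode root, child;
  child.has_offender = true;
  root.inner.push_back(&child);
  return root.FindOffender() == &child ? 0 : 1;
}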
+
+
+void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
+                                         ZoneList<Variable*>* context_locals) {
+  ASSERT(stack_locals != NULL);
+  ASSERT(context_locals != NULL);
+
+  // Collect temporaries which are always allocated on the stack.
   for (int i = 0; i < temps_.length(); i++) {
     Variable* var = temps_[i];
     if (var->is_used()) {
-      locals->Add(var);
+      ASSERT(var->IsStackLocal());
+      stack_locals->Add(var);
     }
   }
+
+  // Collect declared local variables.
   for (VariableMap::Entry* p = variables_.Start();
        p != NULL;
        p = variables_.Next(p)) {
     Variable* var = reinterpret_cast<Variable*>(p->value);
     if (var->is_used()) {
-      locals->Add(var);
+      if (var->IsStackLocal()) {
+        stack_locals->Add(var);
+      } else if (var->IsContextSlot()) {
+        context_locals->Add(var);
+      }
     }
   }
 }
 
 
-// Make sure the method gets instantiated by the template system.
-template void Scope::CollectUsedVariables(
-    List<Variable*, FreeStoreAllocationPolicy>* locals);
-template void Scope::CollectUsedVariables(
-    List<Variable*, PreallocatedStorage>* locals);
-template void Scope::CollectUsedVariables(
-    List<Variable*, ZoneListAllocationPolicy>* locals);
-
-
-void Scope::AllocateVariables(Handle<Context> context) {
-  ASSERT(outer_scope_ == NULL);  // eval or global scopes only
-
+bool Scope::AllocateVariables(CompilationInfo* info,
+                              AstNodeFactory<AstNullVisitor>* factory) {
   // 1) Propagate scope information.
-  // If we are in an eval scope, we may have other outer scopes about
-  // which we don't know anything at this point. Thus we must be conservative
-  // and assume they may invoke eval themselves. Eventually we could capture
-  // this information in the ScopeInfo and then use it here (by traversing
-  // the call chain stack, at compile time).
-
-  bool eval_scope = is_eval_scope();
-  bool outer_scope_calls_eval = false;
   bool outer_scope_calls_non_strict_eval = false;
-  if (!is_global_scope()) {
-    context->ComputeEvalScopeInfo(&outer_scope_calls_eval,
-                                  &outer_scope_calls_non_strict_eval);
+  if (outer_scope_ != NULL) {
+    outer_scope_calls_non_strict_eval =
+        outer_scope_->outer_scope_calls_non_strict_eval() |
+        outer_scope_->calls_non_strict_eval();
   }
-  PropagateScopeInfo(outer_scope_calls_eval,
-                     outer_scope_calls_non_strict_eval,
-                     eval_scope);
+  PropagateScopeInfo(outer_scope_calls_non_strict_eval);
 
   // 2) Resolve variables.
-  Scope* global_scope = NULL;
-  if (is_global_scope()) global_scope = this;
-  ResolveVariablesRecursively(global_scope, context);
+  if (!ResolveVariablesRecursively(info, factory)) return false;
 
   // 3) Allocate variables.
   AllocateVariablesRecursively();
+
+  return true;
 }
 
 
@@ -627,30 +661,49 @@
 
 Scope* Scope::DeclarationScope() {
   Scope* scope = this;
-  while (scope->is_catch_scope() ||
-         scope->is_block_scope()) {
+  while (!scope->is_declaration_scope()) {
     scope = scope->outer_scope();
   }
   return scope;
 }
 
 
-Handle<SerializedScopeInfo> Scope::GetSerializedScopeInfo() {
+Handle<ScopeInfo> Scope::GetScopeInfo() {
   if (scope_info_.is_null()) {
-    scope_info_ = SerializedScopeInfo::Create(this);
+    scope_info_ = ScopeInfo::Create(this);
   }
   return scope_info_;
 }
 
 
+void Scope::GetNestedScopeChain(
+    List<Handle<ScopeInfo> >* chain,
+    int position) {
+  if (!is_eval_scope()) chain->Add(Handle<ScopeInfo>(GetScopeInfo()));
+
+  for (int i = 0; i < inner_scopes_.length(); i++) {
+    Scope* scope = inner_scopes_[i];
+    int beg_pos = scope->start_position();
+    int end_pos = scope->end_position();
+    ASSERT(beg_pos >= 0 && end_pos >= 0);
+    if (beg_pos <= position && position < end_pos) {
+      scope->GetNestedScopeChain(chain, position);
+      return;
+    }
+  }
+}
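
GetNestedScopeChain descends into whichever inner scope's source interval [start_position, end_position) covers the requested position, collecting a handle at each level (eval scopes are skipped). A standalone sketch of the interval descent; ScopeIvl and NestedChainSketch are illustrative names:

#include <vector>

struct ScopeIvl {
  int beg, end;                  // source range [beg, end)
  std::vector<ScopeIvl*> inner;
};

void NestedChainSketch(ScopeIvl* s, int position,
                       std::vector<ScopeIvl*>* chain) {
  chain->push_back(s);           // the real code skips eval scopes here
  for (ScopeIvl* i : s->inner) {
    if (i->beg <= position && position < i->end) {
      NestedChainSketch(i, position, chain);  // at most one range can match
      return;
    }
  }
}

int main() {
  ScopeIvl outer{0, 100, {}};
  ScopeIvl nested{10, 20, {}};
  outer.inner.push_back(&nested);
  std::vector<ScopeIvl*> chain;
  NestedChainSketch(&outer, 15, &chain);
  return chain.size() == 2 ? 0 : 1;  // outer, then the covering inner scope
}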
+
+
 #ifdef DEBUG
-static const char* Header(Scope::Type type) {
+static const char* Header(ScopeType type) {
   switch (type) {
-    case Scope::EVAL_SCOPE: return "eval";
-    case Scope::FUNCTION_SCOPE: return "function";
-    case Scope::GLOBAL_SCOPE: return "global";
-    case Scope::CATCH_SCOPE: return "catch";
-    case Scope::BLOCK_SCOPE: return "block";
+    case EVAL_SCOPE: return "eval";
+    case FUNCTION_SCOPE: return "function";
+    case MODULE_SCOPE: return "module";
+    case GLOBAL_SCOPE: return "global";
+    case CATCH_SCOPE: return "catch";
+    case BLOCK_SCOPE: return "block";
+    case WITH_SCOPE: return "with";
   }
   UNREACHABLE();
   return NULL;
@@ -695,9 +748,9 @@
     PrintName(var->name());
     PrintF(";  // ");
     PrintLocation(var);
-    if (var->is_accessed_from_inner_scope()) {
+    if (var->has_forced_context_allocation()) {
       if (!var->IsUnallocated()) PrintF(", ");
-      PrintF("inner scope access");
+      PrintF("forced context allocation");
     }
     PrintF("\n");
   }
@@ -733,7 +786,7 @@
     PrintF(")");
   }
 
-  PrintF(" {\n");
+  PrintF(" { // (%d, %d)\n", start_position(), end_position());
 
   // Function name, if any (named function literals, only).
   if (function_ != NULL) {
@@ -746,18 +799,23 @@
   if (HasTrivialOuterContext()) {
     Indent(n1, "// scope has trivial outer context\n");
   }
-  if (is_strict_mode()) Indent(n1, "// strict mode scope\n");
+  switch (language_mode()) {
+    case CLASSIC_MODE:
+      break;
+    case STRICT_MODE:
+      Indent(n1, "// strict mode scope\n");
+      break;
+    case EXTENDED_MODE:
+      Indent(n1, "// extended mode scope\n");
+      break;
+  }
   if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
   if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
   if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
-  if (outer_scope_calls_eval_) Indent(n1, "// outer scope calls 'eval'\n");
   if (outer_scope_calls_non_strict_eval_) {
     Indent(n1, "// outer scope calls 'eval' in non-strict context\n");
   }
   if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
-  if (outer_scope_is_eval_scope_) {
-    Indent(n1, "// outer scope is 'eval' scope\n");
-  }
   if (num_stack_slots_ > 0) { Indent(n1, "// ");
   PrintF("%d stack slots\n", num_stack_slots_); }
   if (num_heap_slots_ > 0) { Indent(n1, "// ");
@@ -779,9 +837,9 @@
 
   Indent(n1, "// dynamic vars\n");
   if (dynamics_ != NULL) {
-    PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC));
-    PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_LOCAL));
-    PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_GLOBAL));
+    PrintMap(n1, dynamics_->GetMap(DYNAMIC));
+    PrintMap(n1, dynamics_->GetMap(DYNAMIC_LOCAL));
+    PrintMap(n1, dynamics_->GetMap(DYNAMIC_GLOBAL));
   }
 
   // Print inner scopes (disable by providing negative n).
@@ -797,13 +855,20 @@
 #endif  // DEBUG
 
 
-Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
+Variable* Scope::NonLocal(Handle<String> name, VariableMode mode) {
   if (dynamics_ == NULL) dynamics_ = new DynamicScopePart();
   VariableMap* map = dynamics_->GetMap(mode);
   Variable* var = map->Lookup(name);
   if (var == NULL) {
     // Declare a new non-local.
-    var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
+    InitializationFlag init_flag = (mode == VAR)
+        ? kCreatedInitialized : kNeedsInitialization;
+    var = map->Declare(NULL,
+                       name,
+                       mode,
+                       true,
+                       Variable::NORMAL,
+                       init_flag);
     // Allocate it by giving it a dynamic lookup.
     var->AllocateTo(Variable::LOOKUP, -1);
   }
@@ -811,198 +876,178 @@
 }
 
 
-// Lookup a variable starting with this scope. The result is either
-// the statically resolved variable belonging to an outer scope, or
-// NULL. It may be NULL because a) we couldn't find a variable, or b)
-// because the variable is just a guess (and may be shadowed by
-// another variable that is introduced dynamically via an 'eval' call
-// or a 'with' statement).
 Variable* Scope::LookupRecursive(Handle<String> name,
-                                 bool from_inner_scope,
-                                 Variable** invalidated_local) {
-  // If we find a variable, but the current scope calls 'eval', the found
-  // variable may not be the correct one (the 'eval' may introduce a
-  // property with the same name). In that case, remember that the variable
-  // found is just a guess.
-  bool guess = scope_calls_eval_;
-
+                                 BindingKind* binding_kind,
+                                 AstNodeFactory<AstNullVisitor>* factory) {
+  ASSERT(binding_kind != NULL);
   // Try to find the variable in this scope.
   Variable* var = LocalLookup(name);
 
+  // We found a variable and we are done. (Even if there is an 'eval' in
+  // this scope which introduces the same variable again, the resulting
+  // variable remains the same.)
   if (var != NULL) {
-    // We found a variable. If this is not an inner lookup, we are done.
-    // (Even if there is an 'eval' in this scope which introduces the
-    // same variable again, the resulting variable remains the same.
-    // Note that enclosing 'with' statements are handled at the call site.)
-    if (!from_inner_scope)
-      return var;
+    *binding_kind = BOUND;
+    return var;
+  }
 
-  } else {
-    // We did not find a variable locally. Check against the function variable,
-    // if any. We can do this for all scopes, since the function variable is
-    // only present - if at all - for function scopes.
-    //
-    // This lookup corresponds to a lookup in the "intermediate" scope sitting
-    // between this scope and the outer scope. (ECMA-262, 3rd., requires that
-    // the name of named function literal is kept in an intermediate scope
-    // in between this scope and the next outer scope.)
-    if (function_ != NULL && function_->name().is_identical_to(name)) {
-      var = function_->var();
-
-    } else if (outer_scope_ != NULL) {
-      var = outer_scope_->LookupRecursive(name, true, invalidated_local);
-      // We may have found a variable in an outer scope. However, if
-      // the current scope is inside a 'with', the actual variable may
-      // be a property introduced via the 'with' statement. Then, the
-      // variable we may have found is just a guess.
-      if (scope_inside_with_)
-        guess = true;
+  // We did not find a variable locally. Check against the function variable,
+  // if any. We can do this for all scopes, since the function variable is
+  // only present - if at all - for function scopes.
+  *binding_kind = UNBOUND;
+  var = LookupFunctionVar(name, factory);
+  if (var != NULL) {
+    *binding_kind = BOUND;
+  } else if (outer_scope_ != NULL) {
+    var = outer_scope_->LookupRecursive(name, binding_kind, factory);
+    if (*binding_kind == BOUND && (is_function_scope() || is_with_scope())) {
+      var->ForceContextAllocation();
     }
-
-    // If we did not find a variable, we are done.
-    if (var == NULL)
-      return NULL;
+  } else {
+    ASSERT(is_global_scope());
   }
 
-  ASSERT(var != NULL);
-
-  // If this is a lookup from an inner scope, mark the variable.
-  if (from_inner_scope) {
-    var->MarkAsAccessedFromInnerScope();
+  if (is_with_scope()) {
+    // The current scope is a with scope, so the variable binding cannot be
+    // statically resolved. However, note that it was necessary to do a lookup
+    // in the outer scope anyway, because if a binding exists in an outer scope,
+    // the associated variable has to be marked as potentially being accessed
+    // from inside of an inner with scope (the property may not be in the 'with'
+    // object).
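+    // For example, in 'with (obj) { f(x); }' the 'x' may or may not be
+    // a property of 'obj'; only a runtime lookup can decide.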
+    *binding_kind = DYNAMIC_LOOKUP;
+    return NULL;
+  } else if (calls_non_strict_eval()) {
+    // A variable binding may have been found in an outer scope, but the current
+    // scope makes a non-strict 'eval' call, so the found variable may not be
+    // the correct one (the 'eval' may introduce a binding with the same name).
+    // In that case, change the lookup result to reflect this situation.
+    if (*binding_kind == BOUND) {
+      *binding_kind = BOUND_EVAL_SHADOWED;
+    } else if (*binding_kind == UNBOUND) {
+      *binding_kind = UNBOUND_EVAL_SHADOWED;
+    }
   }
-
-  // If the variable we have found is just a guess, invalidate the
-  // result. If the found variable is local, record that fact so we
-  // can generate fast code to get it if it is not shadowed by eval.
-  if (guess) {
-    if (!var->is_global()) *invalidated_local = var;
-    var = NULL;
-  }
-
   return var;
 }
 
 
-void Scope::ResolveVariable(Scope* global_scope,
-                            Handle<Context> context,
-                            VariableProxy* proxy) {
-  ASSERT(global_scope == NULL || global_scope->is_global_scope());
+bool Scope::ResolveVariable(CompilationInfo* info,
+                            VariableProxy* proxy,
+                            AstNodeFactory<AstNullVisitor>* factory) {
+  ASSERT(info->global_scope()->is_global_scope());
 
   // If the proxy is already resolved there's nothing to do
   // (functions and consts may be resolved by the parser).
-  if (proxy->var() != NULL) return;
+  if (proxy->var() != NULL) return true;
 
   // Otherwise, try to resolve the variable.
-  Variable* invalidated_local = NULL;
-  Variable* var = LookupRecursive(proxy->name(), false, &invalidated_local);
+  BindingKind binding_kind;
+  Variable* var = LookupRecursive(proxy->name(), &binding_kind, factory);
+  switch (binding_kind) {
+    case BOUND:
+      // We found a variable binding.
+      break;
 
-  if (proxy->inside_with()) {
-    // If we are inside a local 'with' statement, all bets are off
-    // and we cannot resolve the proxy to a local variable even if
-    // we found an outer matching variable.
-    // Note that we must do a lookup anyway, because if we find one,
-    // we must mark that variable as potentially accessed from this
-    // inner scope (the property may not be in the 'with' object).
-    var = NonLocal(proxy->name(), Variable::DYNAMIC);
-
-  } else {
-    // We are not inside a local 'with' statement.
-
-    if (var == NULL) {
-      // We did not find the variable. We have a global variable
-      // if we are in the global scope (we know already that we
-      // are outside a 'with' statement) or if there is no way
-      // that the variable might be introduced dynamically (through
-      // a local or outer eval() call, or an outer 'with' statement),
-      // or we don't know about the outer scope (because we are
-      // in an eval scope).
-      if (is_global_scope() ||
-          !(scope_inside_with_ || outer_scope_is_eval_scope_ ||
-            scope_calls_eval_ || outer_scope_calls_eval_)) {
-        // We must have a global variable.
-        ASSERT(global_scope != NULL);
-        var = global_scope->DeclareGlobal(proxy->name());
-
-      } else if (scope_inside_with_) {
-        // If we are inside a with statement we give up and look up
-        // the variable at runtime.
-        var = NonLocal(proxy->name(), Variable::DYNAMIC);
-
-      } else if (invalidated_local != NULL) {
-        // No with statements are involved and we found a local
-        // variable that might be shadowed by eval introduced
-        // variables.
-        var = NonLocal(proxy->name(), Variable::DYNAMIC_LOCAL);
-        var->set_local_if_not_shadowed(invalidated_local);
-
-      } else if (outer_scope_is_eval_scope_) {
-        // No with statements and we did not find a local and the code
-        // is executed with a call to eval.  The context contains
-        // scope information that we can use to determine if the
-        // variable is global if it is not shadowed by eval-introduced
-        // variables.
-        if (context->GlobalIfNotShadowedByEval(proxy->name())) {
-          var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
-
-        } else {
-          var = NonLocal(proxy->name(), Variable::DYNAMIC);
-        }
-
+    case BOUND_EVAL_SHADOWED:
+      // We found a variable binding that might be shadowed by
+      // 'eval'-introduced variable bindings.
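+      // For example, in
+      //   function f() { var x; function g() { eval(s); return x; } }
+      // the 'x' in g statically resolves to f's 'x', but the 'eval' in g
+      // may introduce a shadowing 'var x' at runtime.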
+      if (var->is_global()) {
+        var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
       } else {
-        // No with statements and we did not find a local and the code
-        // is not executed with a call to eval.  We know that this
-        // variable is global unless it is shadowed by eval-introduced
-        // variables.
-        var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
+        Variable* invalidated = var;
+        var = NonLocal(proxy->name(), DYNAMIC_LOCAL);
+        var->set_local_if_not_shadowed(invalidated);
       }
+      break;
+
+    case UNBOUND:
+      // No binding has been found. Declare a variable in global scope.
+      var = info->global_scope()->DeclareGlobal(proxy->name());
+      break;
+
+    case UNBOUND_EVAL_SHADOWED:
+      // No binding has been found, but some scope makes a non-strict
+      // 'eval' call that might introduce a shadowing binding at runtime.
+      var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
+      break;
+
+    case DYNAMIC_LOOKUP:
+      // The variable could not be resolved statically.
+      var = NonLocal(proxy->name(), DYNAMIC);
+      break;
+  }
+
+  ASSERT(var != NULL);
+  proxy->BindTo(var);
+
+  if (FLAG_harmony_modules) {
+    bool ok;
+#ifdef DEBUG
+    if (FLAG_print_interface_details)
+      PrintF("# Resolve %s:\n", var->name()->ToAsciiArray());
+#endif
+    proxy->interface()->Unify(var->interface(), &ok);
+    if (!ok) {
+#ifdef DEBUG
+      if (FLAG_print_interfaces) {
+        PrintF("SCOPES TYPE ERROR\n");
+        PrintF("proxy: ");
+        proxy->interface()->Print();
+        PrintF("var: ");
+        var->interface()->Print();
+      }
+#endif
+
+      // Inconsistent use of module. Throw a syntax error.
+      // TODO(rossberg): generate more helpful error message.
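+      // (Unification fails, for example, when the same name is used both
+      // as a module and as an ordinary value.)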
+      MessageLocation location(info->script(),
+                               proxy->position(),
+                               proxy->position());
+      Isolate* isolate = Isolate::Current();
+      Factory* factory = isolate->factory();
+      Handle<JSArray> array = factory->NewJSArray(1);
+      USE(JSObject::SetElement(array, 0, var->name(), NONE, kStrictMode));
+      Handle<Object> result =
+          factory->NewSyntaxError("module_type_error", array);
+      isolate->Throw(*result, &location);
+      return false;
     }
   }
 
-  proxy->BindTo(var);
+  return true;
 }
 
 
-void Scope::ResolveVariablesRecursively(Scope* global_scope,
-                                        Handle<Context> context) {
-  ASSERT(global_scope == NULL || global_scope->is_global_scope());
+bool Scope::ResolveVariablesRecursively(
+    CompilationInfo* info,
+    AstNodeFactory<AstNullVisitor>* factory) {
+  ASSERT(info->global_scope()->is_global_scope());
 
   // Resolve unresolved variables for this scope.
   for (int i = 0; i < unresolved_.length(); i++) {
-    ResolveVariable(global_scope, context, unresolved_[i]);
+    if (!ResolveVariable(info, unresolved_[i], factory)) return false;
   }
 
   // Resolve unresolved variables for inner scopes.
   for (int i = 0; i < inner_scopes_.length(); i++) {
-    inner_scopes_[i]->ResolveVariablesRecursively(global_scope, context);
+    if (!inner_scopes_[i]->ResolveVariablesRecursively(info, factory))
+      return false;
   }
+
+  return true;
 }
 
 
-bool Scope::PropagateScopeInfo(bool outer_scope_calls_eval,
-                               bool outer_scope_calls_non_strict_eval,
-                               bool outer_scope_is_eval_scope) {
-  if (outer_scope_calls_eval) {
-    outer_scope_calls_eval_ = true;
-  }
-
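+// Non-strict 'eval' information flows both ways: the flag is pushed from
+// outer to inner scopes through the parameter, and whether any inner scope
+// calls 'eval' is reported back up through the return value.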
+bool Scope::PropagateScopeInfo(bool outer_scope_calls_non_strict_eval) {
   if (outer_scope_calls_non_strict_eval) {
     outer_scope_calls_non_strict_eval_ = true;
   }
 
-  if (outer_scope_is_eval_scope) {
-    outer_scope_is_eval_scope_ = true;
-  }
-
-  bool calls_eval = scope_calls_eval_ || outer_scope_calls_eval_;
-  bool is_eval = is_eval_scope() || outer_scope_is_eval_scope_;
   bool calls_non_strict_eval =
-      (scope_calls_eval_ && !is_strict_mode()) ||
-      outer_scope_calls_non_strict_eval_;
+      this->calls_non_strict_eval() || outer_scope_calls_non_strict_eval_;
   for (int i = 0; i < inner_scopes_.length(); i++) {
     Scope* inner_scope = inner_scopes_[i];
-    if (inner_scope->PropagateScopeInfo(calls_eval,
-                                        calls_non_strict_eval,
-                                        is_eval)) {
+    if (inner_scope->PropagateScopeInfo(calls_non_strict_eval)) {
       inner_scope_calls_eval_ = true;
     }
     if (inner_scope->force_eager_compilation_) {
@@ -1019,7 +1064,7 @@
   // via an eval() call.  This is only possible if the variable has a
   // visible name.
   if ((var->is_this() || var->name()->length() > 0) &&
-      (var->is_accessed_from_inner_scope() ||
+      (var->has_forced_context_allocation() ||
        scope_calls_eval_ ||
        inner_scope_calls_eval_ ||
        scope_contains_with_ ||
@@ -1040,9 +1085,9 @@
   //
   // Exceptions: temporary variables are never allocated in a context;
   // catch-bound variables are always allocated in a context.
-  if (var->mode() == Variable::TEMPORARY) return false;
+  if (var->mode() == TEMPORARY) return false;
   if (is_catch_scope() || is_block_scope()) return true;
-  return var->is_accessed_from_inner_scope() ||
+  return var->has_forced_context_allocation() ||
       scope_calls_eval_ ||
       inner_scope_calls_eval_ ||
       scope_contains_with_ ||
@@ -1095,7 +1140,7 @@
     // In strict mode 'arguments' does not alias formal parameters.
     // Therefore in strict mode we allocate parameters as if 'arguments'
     // were not used.
-    uses_nonstrict_arguments = !is_strict_mode();
+    uses_nonstrict_arguments = is_classic_mode();
   }
 
   // The same parameter may occur multiple times in the parameters_ list.
@@ -1106,9 +1151,8 @@
     Variable* var = params_[i];
     ASSERT(var->scope() == this);
     if (uses_nonstrict_arguments) {
-      // Give the parameter a use from an inner scope, to force allocation
-      // to the context.
-      var->MarkAsAccessedFromInnerScope();
+      // Force context allocation of the parameter.
+      var->ForceContextAllocation();
     }
 
     if (MustAllocate(var)) {
@@ -1183,21 +1227,15 @@
   if (is_function_scope()) AllocateParameterLocals();
   AllocateNonParameterLocals();
 
-  // Allocate context if necessary.
-  bool must_have_local_context = false;
-  if (scope_calls_eval_ || scope_contains_with_) {
-    // The context for the eval() call or 'with' statement in this scope.
-    // Unless we are in the global or an eval scope, we need a local
-    // context even if we didn't statically allocate any locals in it,
-    // and the compiler will access the context variable. If we are
-    // not in an inner scope, the scope is provided from the outside.
-    must_have_local_context = is_function_scope();
-  }
+  // Force allocation of a context for this scope if necessary. For a 'with'
+  // scope and for a function scope that makes an 'eval' call we need a context,
+  // even if no local variables were statically allocated in the scope.
+  bool must_have_context = is_with_scope() ||
+      (is_function_scope() && calls_eval());
 
   // If we didn't allocate any locals in the local context, then we only
-  // need the minimal number of slots if we must have a local context.
-  if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS &&
-      !must_have_local_context) {
+  // need the minimal number of slots if we must have a context.
+  if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS && !must_have_context) {
     num_heap_slots_ = 0;
   }
 
@@ -1205,4 +1243,17 @@
   ASSERT(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
 }
 
+
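+// Both counts exclude the slot, if any, that holds the function name
+// binding of a named function literal; that variable is handled
+// separately (cf. CollectStackAndContextLocals).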
+int Scope::StackLocalCount() const {
+  return num_stack_slots() -
+      (function_ != NULL && function_->var()->IsStackLocal() ? 1 : 0);
+}
+
+
+int Scope::ContextLocalCount() const {
+  if (num_heap_slots() == 0) return 0;
+  return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
+      (function_ != NULL && function_->var()->IsContextSlot() ? 1 : 0);
+}
+
 } }  // namespace v8::internal
diff --git a/src/scopes.h b/src/scopes.h
index 2917a63..d315b7e 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 #define V8_SCOPES_H_
 
 #include "ast.h"
-#include "hashmap.h"
+#include "zone.h"
 
 namespace v8 {
 namespace internal {
@@ -38,21 +38,19 @@
 
 
 // A hash map to support fast variable declaration and lookup.
-class VariableMap: public HashMap {
+class VariableMap: public ZoneHashMap {
  public:
   VariableMap();
 
-  // Dummy constructor.  This constructor doesn't set up the map
-  // properly so don't use it unless you have a good reason.
-  explicit VariableMap(bool gotta_love_static_overloading);
-
   virtual ~VariableMap();
 
   Variable* Declare(Scope* scope,
                     Handle<String> name,
-                    Variable::Mode mode,
+                    VariableMode mode,
                     bool is_valid_lhs,
-                    Variable::Kind kind);
+                    Variable::Kind kind,
+                    InitializationFlag initialization_flag,
+                    Interface* interface = Interface::NewValue());
 
   Variable* Lookup(Handle<String> name);
 };
@@ -64,8 +62,8 @@
 // and setup time for scopes that don't need them.
 class DynamicScopePart : public ZoneObject {
  public:
-  VariableMap* GetMap(Variable::Mode mode) {
-    int index = mode - Variable::DYNAMIC;
+  VariableMap* GetMap(VariableMode mode) {
+    int index = mode - DYNAMIC;
     ASSERT(index >= 0 && index < 3);
     return &maps_[index];
   }
@@ -89,28 +87,19 @@
   // ---------------------------------------------------------------------------
   // Construction
 
-  enum Type {
-    EVAL_SCOPE,      // The top-level scope for an eval source.
-    FUNCTION_SCOPE,  // The top-level scope for a function.
-    GLOBAL_SCOPE,    // The top-level scope for a program or a top-level eval.
-    CATCH_SCOPE,     // The scope introduced by catch.
-    BLOCK_SCOPE      // The scope introduced by a new block.
-  };
-
-  Scope(Scope* outer_scope, Type type);
+  Scope(Scope* outer_scope, ScopeType type);
 
   // Compute top scope and allocate variables. For lazy compilation the top
   // scope only contains the single lazily compiled function, so this
   // doesn't re-allocate variables repeatedly.
   static bool Analyze(CompilationInfo* info);
 
-  static Scope* DeserializeScopeChain(CompilationInfo* info,
-                                      Scope* innermost_scope);
+  static Scope* DeserializeScopeChain(Context* context, Scope* global_scope);
 
   // The scope name is only used for printing/debugging.
   void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
 
-  void Initialize(bool inside_with);
+  void Initialize();
 
   // Checks if the block scope is redundant, i.e. it does not contain any
   // block scoped declarations. In that case it is removed from the scope
@@ -123,6 +112,13 @@
   // Lookup a variable in this scope. Returns the variable or NULL if not found.
   Variable* LocalLookup(Handle<String> name);
 
+  // This lookup corresponds to a lookup in the "intermediate" scope sitting
+  // between this scope and the outer scope. (ECMA-262, 3rd., requires that
+  // the name of named function literal is kept in an intermediate scope
+  // in between this scope and the next outer scope.)
+  Variable* LookupFunctionVar(Handle<String> name,
+                              AstNodeFactory<AstNullVisitor>* factory);
+
   // Lookup a variable in this scope or outer scopes.
   // Returns the variable or NULL if not found.
   Variable* Lookup(Handle<String> name);
@@ -130,16 +126,28 @@
   // Declare the function variable for a function literal. This variable
   // is in an intermediate scope between this function scope and the
   // outer scope. Only possible for function scopes; at most one variable.
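+  // (For example, in 'var f = function g() { return g; };' the name 'g'
+  // is bound in that intermediate scope.)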
-  Variable* DeclareFunctionVar(Handle<String> name);
+  template<class Visitor>
+  Variable* DeclareFunctionVar(Handle<String> name,
+                               VariableMode mode,
+                               AstNodeFactory<Visitor>* factory) {
+    ASSERT(is_function_scope() && function_ == NULL);
+    Variable* function_var = new Variable(
+        this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
+    function_ = factory->NewVariableProxy(function_var);
+    return function_var;
+  }
 
   // Declare a parameter in this scope.  When there are duplicated
   // parameters the rightmost one 'wins'.  However, the implementation
   // expects all parameters to be declared and from left to right.
-  void DeclareParameter(Handle<String> name, Variable::Mode mode);
+  void DeclareParameter(Handle<String> name, VariableMode mode);
 
   // Declare a local variable in this scope. If the variable has been
   // declared before, the previously declared variable is returned.
-  Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
+  Variable* DeclareLocal(Handle<String> name,
+                         VariableMode mode,
+                         InitializationFlag init_flag,
+                         Interface* interface = Interface::NewValue());
 
   // Declare an implicit global variable in this scope which must be a
   // global scope.  The variable was introduced (possibly from an inner
@@ -148,9 +156,20 @@
   Variable* DeclareGlobal(Handle<String> name);
 
   // Create a new unresolved variable.
-  VariableProxy* NewUnresolved(Handle<String> name,
-                               bool inside_with,
-                               int position = RelocInfo::kNoPosition);
+  template<class Visitor>
+  VariableProxy* NewUnresolved(AstNodeFactory<Visitor>* factory,
+                               Handle<String> name,
+                               int position = RelocInfo::kNoPosition,
+                               Interface* interface = Interface::NewValue()) {
+    // Note that we must not share the unresolved variables with
+    // the same name because they may be removed selectively via
+    // RemoveUnresolved().
+    ASSERT(!already_resolved());
+    VariableProxy* proxy =
+        factory->NewVariableProxy(name, false, position, interface);
+    unresolved_.Add(proxy);
+    return proxy;
+  }
 
   // Remove an unresolved variable. During parsing, an unresolved variable
   // may have been added optimistically, but then only the variable name
@@ -192,6 +211,11 @@
   // scope over a let binding of the same name.
   Declaration* CheckConflictingVarDeclarations();
 
+  // For harmony block scoping mode: Check if the scope has variable proxies
+  // that are used as lvalues and point to const variables. Assumes that scopes
+  // have been analyzed and variables been resolved.
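+  // (For example, in extended mode code 'const c = 0; c = 1;' the proxy
+  // for the second 'c' would be returned.)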
+  VariableProxy* CheckAssignmentToConst();
+
   // ---------------------------------------------------------------------------
   // Scope-specific info.
 
@@ -199,11 +223,42 @@
   void RecordWithStatement() { scope_contains_with_ = true; }
 
   // Inform the scope that the corresponding code contains an eval call.
-  void RecordEvalCall() { scope_calls_eval_ = true; }
+  void RecordEvalCall() { if (!is_global_scope()) scope_calls_eval_ = true; }
 
-  // Enable strict mode for the scope (unless disabled by a global flag).
-  void EnableStrictMode() {
-    strict_mode_ = FLAG_strict_mode;
+  // Set the language mode flag for the scope.
+  void SetLanguageMode(LanguageMode language_mode) {
+    language_mode_ = language_mode;
+  }
+
+  // Position in the source where this scope begins and ends.
+  //
+  // * For the scope of a with statement
+  //     with (obj) stmt
+  //   start position: start position of first token of 'stmt'
+  //   end position: end position of last token of 'stmt'
+  // * For the scope of a block
+  //     { stmts }
+  //   start position: start position of '{'
+  //   end position: end position of '}'
+  // * For the scope of a function literal or declaration
+  //     function fun(a,b) { stmts }
+  //   start position: start position of '('
+  //   end position: end position of '}'
+  // * For the scope of a catch block
+  //     try { stmts } catch(e) { stmts }
+  //   start position: start position of '('
+  //   end position: end position of ')'
+  // * For the scope of a for-statement
+  //     for (let x ...) stmt
+  //   start position: start position of '('
+  //   end position: end position of last token of 'stmt'
+  int start_position() const { return start_position_; }
+  void set_start_position(int statement_pos) {
+    start_position_ = statement_pos;
+  }
+  int end_position() const { return end_position_; }
+  void set_end_position(int statement_pos) {
+    end_position_ = statement_pos;
   }
 
   // ---------------------------------------------------------------------------
@@ -212,17 +267,29 @@
   // Specific scope types.
   bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
   bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
+  bool is_module_scope() const { return type_ == MODULE_SCOPE; }
   bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
   bool is_catch_scope() const { return type_ == CATCH_SCOPE; }
   bool is_block_scope() const { return type_ == BLOCK_SCOPE; }
-  bool is_strict_mode() const { return strict_mode_; }
-  bool is_strict_mode_eval_scope() const {
-    return is_eval_scope() && is_strict_mode();
+  bool is_with_scope() const { return type_ == WITH_SCOPE; }
+  bool is_declaration_scope() const {
+    return is_eval_scope() || is_function_scope() || is_global_scope();
+  }
+  bool is_classic_mode() const {
+    return language_mode() == CLASSIC_MODE;
+  }
+  bool is_extended_mode() const {
+    return language_mode() == EXTENDED_MODE;
+  }
+  bool is_strict_or_extended_eval_scope() const {
+    return is_eval_scope() && !is_classic_mode();
   }
 
   // Information about which scopes calls eval.
   bool calls_eval() const { return scope_calls_eval_; }
-  bool outer_scope_calls_eval() const { return outer_scope_calls_eval_; }
+  bool calls_non_strict_eval() {
+    return scope_calls_eval_ && is_classic_mode();
+  }
   bool outer_scope_calls_non_strict_eval() const {
     return outer_scope_calls_non_strict_eval_;
   }
@@ -232,12 +299,15 @@
   // Does this scope contain a with statement.
   bool contains_with() const { return scope_contains_with_; }
 
-  // The scope immediately surrounding this scope, or NULL.
-  Scope* outer_scope() const { return outer_scope_; }
-
   // ---------------------------------------------------------------------------
   // Accessors.
 
+  // The type of this scope.
+  ScopeType type() const { return type_; }
+
+  // The language mode of this scope.
+  LanguageMode language_mode() const { return language_mode_; }
+
   // The variable corresponding the 'this' value.
   Variable* receiver() { return receiver_; }
 
@@ -264,23 +334,23 @@
   // Declarations list.
   ZoneList<Declaration*>* declarations() { return &decls_; }
 
+  // Inner scope list.
+  ZoneList<Scope*>* inner_scopes() { return &inner_scopes_; }
+
+  // The scope immediately surrounding this scope, or NULL.
+  Scope* outer_scope() const { return outer_scope_; }
+
+  // The interface as inferred so far; only for module scopes.
+  Interface* interface() const { return interface_; }
 
   // ---------------------------------------------------------------------------
   // Variable allocation.
 
-  // Collect all used locals in this scope.
-  template<class Allocator>
-  void CollectUsedVariables(List<Variable*, Allocator>* locals);
-
-  // Resolve and fill in the allocation information for all variables
-  // in this scopes. Must be called *after* all scopes have been
-  // processed (parsed) to ensure that unresolved variables can be
-  // resolved properly.
-  //
-  // In the case of code compiled and run using 'eval', the context
-  // parameter is the context in which eval was called.  In all other
-  // cases the context parameter is an empty handle.
-  void AllocateVariables(Handle<Context> context);
+  // Collect stack and context allocated local variables in this scope. Note
+  // that the function variable - if present - is not collected and should be
+  // handled separately.
+  void CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
+                                    ZoneList<Variable*>* context_locals);
 
   // Current number of var or const locals.
   int num_var_or_const() { return num_var_or_const_; }
@@ -289,12 +359,20 @@
   int num_stack_slots() const { return num_stack_slots_; }
   int num_heap_slots() const { return num_heap_slots_; }
 
+  int StackLocalCount() const;
+  int ContextLocalCount() const;
+
   // Make sure this scope and all outer scopes are eagerly compiled.
   void ForceEagerCompilation()  { force_eager_compilation_ = true; }
 
   // Determine if we can use lazy compilation for this scope.
   bool AllowsLazyCompilation() const;
 
+  // True if we can lazily recompile functions with this scope.
+  bool allows_lazy_recompilation() const {
+    return !force_eager_compilation_;
+  }
+
   // True if the outer context of this scope is always the global context.
   bool HasTrivialOuterContext() const;
 
@@ -305,7 +383,14 @@
   // where var declarations will be hoisted to in the implementation.
   Scope* DeclarationScope();
 
-  Handle<SerializedScopeInfo> GetSerializedScopeInfo();
+  Handle<ScopeInfo> GetScopeInfo();
+
+  // Get the chain of nested scopes within this scope for the source statement
+  // position. The scopes will be added to the list from the outermost scope to
+  // the innermost scope. Only nested block, catch or with scopes are tracked
+  // and will be returned, but no inner function scopes.
+  void GetNestedScopeChain(List<Handle<ScopeInfo> >* chain,
+                           int statement_position);
 
   // ---------------------------------------------------------------------------
   // Strict mode support.
@@ -330,8 +415,6 @@
  protected:
   friend class ParserFactory;
 
-  explicit Scope(Type type);
-
   Isolate* const isolate_;
 
   // Scope tree.
@@ -339,7 +422,7 @@
   ZoneList<Scope*> inner_scopes_;  // the immediately enclosed inner scopes
 
   // The scope type.
-  Type type_;
+  ScopeType type_;
 
   // Debugging support.
   Handle<String> scope_name_;
@@ -366,6 +449,8 @@
   VariableProxy* function_;
   // Convenience variable; function scopes only.
   Variable* arguments_;
+  // Interface; module scopes only.
+  Interface* interface_;
 
   // Illegal redeclaration.
   Expression* illegal_redecl_;
@@ -379,14 +464,15 @@
   // This scope or a nested catch scope or with scope contain an 'eval' call. At
   // the 'eval' call site this scope is the declaration scope.
   bool scope_calls_eval_;
-  // This scope is a strict mode scope.
-  bool strict_mode_;
+  // The language mode of this scope.
+  LanguageMode language_mode_;
+  // Source positions.
+  int start_position_;
+  int end_position_;
 
   // Computed via PropagateScopeInfo.
-  bool outer_scope_calls_eval_;
   bool outer_scope_calls_non_strict_eval_;
   bool inner_scope_calls_eval_;
-  bool outer_scope_is_eval_scope_;
   bool force_eager_compilation_;
 
   // True if it doesn't need scope resolution (e.g., if the scope was
@@ -396,32 +482,80 @@
   // Computed as variables are declared.
   int num_var_or_const_;
 
-  // Computed via AllocateVariables; function scopes only.
+  // Computed via AllocateVariables; function, block and catch scopes only.
   int num_stack_slots_;
   int num_heap_slots_;
 
-  // Serialized scopes support.
-  Handle<SerializedScopeInfo> scope_info_;
+  // Serialized scope info support.
+  Handle<ScopeInfo> scope_info_;
   bool already_resolved() { return already_resolved_; }
 
   // Create a non-local variable with a given name.
   // These variables are looked up dynamically at runtime.
-  Variable* NonLocal(Handle<String> name, Variable::Mode mode);
+  Variable* NonLocal(Handle<String> name, VariableMode mode);
 
   // Variable resolution.
+  // Possible results of a recursive variable lookup telling if and how a
+  // variable is bound. These are returned in the output parameter *binding_kind
+  // of the LookupRecursive function.
+  enum BindingKind {
+    // The variable reference could be statically resolved to a variable binding
+    // which is returned. There is no 'with' statement between the reference and
+    // the binding and no scope between the reference scope (inclusive) and
+    // binding scope (exclusive) makes a non-strict 'eval' call.
+    BOUND,
+
+    // The variable reference could be statically resolved to a variable binding
+    // which is returned. There is no 'with' statement between the reference and
+    // the binding, but some scope between the reference scope (inclusive) and
+    // binding scope (exclusive) makes a non-strict 'eval' call, that might
+    // possibly introduce variable bindings shadowing the found one. Thus the
+    // found variable binding is just a guess.
+    BOUND_EVAL_SHADOWED,
+
+    // The variable reference could not be statically resolved to any binding
+    // and thus should be considered referencing a global variable. NULL is
+    // returned. The variable reference is not inside any 'with' statement and
+    // no scope between the reference scope (inclusive) and global scope
+    // (exclusive) makes a non-strict 'eval' call.
+    UNBOUND,
+
+    // The variable reference could not be statically resolved to any binding.
+    // NULL is returned. The variable reference is not inside any 'with'
+    // statement, but some scope between the reference scope (inclusive) and
+    // global scope (exclusive) makes a non-strict 'eval' call, that might
+    // possibly introduce a variable binding. Thus the reference should be
+    // considered referencing a global variable unless it is shadowed by an
+    // 'eval' introduced binding.
+    UNBOUND_EVAL_SHADOWED,
+
+    // The variable could not be statically resolved and needs to be looked up
+    // dynamically. NULL is returned. There are two possible reasons:
+    // * A 'with' statement has been encountered and there is no variable
+    //   binding for the name between the variable reference and the 'with'.
+    //   The variable potentially references a property of the 'with' object.
+    // * The code is being executed as part of a call to 'eval' and the calling
+    //   context chain contains either a variable binding for the name or it
+    //   contains a 'with' context.
+    DYNAMIC_LOOKUP
+  };
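+  // Informal examples, assuming classic mode, with the reference being
+  // the final 'x':
+  //   function f() { var x; return x; }            -> BOUND
+  //   var x; function f() { eval(s); return x; }   -> BOUND_EVAL_SHADOWED
+  //   function f() { return x; }                   -> UNBOUND
+  //   function f() { eval(s); return x; }          -> UNBOUND_EVAL_SHADOWED
+  //   function f(o) { with (o) return x; }         -> DYNAMIC_LOOKUP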
+
+  // Lookup a variable reference given by name recursively starting with this
+  // scope, reporting through the *binding_kind output parameter whether and
+  // how a binding was found (see BindingKind above).
   Variable* LookupRecursive(Handle<String> name,
-                            bool from_inner_function,
-                            Variable** invalidated_local);
-  void ResolveVariable(Scope* global_scope,
-                       Handle<Context> context,
-                       VariableProxy* proxy);
-  void ResolveVariablesRecursively(Scope* global_scope,
-                                   Handle<Context> context);
+                            BindingKind* binding_kind,
+                            AstNodeFactory<AstNullVisitor>* factory);
+  MUST_USE_RESULT
+  bool ResolveVariable(CompilationInfo* info,
+                       VariableProxy* proxy,
+                       AstNodeFactory<AstNullVisitor>* factory);
+  MUST_USE_RESULT
+  bool ResolveVariablesRecursively(CompilationInfo* info,
+                                   AstNodeFactory<AstNullVisitor>* factory);
 
   // Scope analysis.
-  bool PropagateScopeInfo(bool outer_scope_calls_eval,
-                          bool outer_scope_calls_non_strict_eval,
-                          bool outer_scope_is_eval_scope);
+  bool PropagateScopeInfo(bool outer_scope_calls_non_strict_eval);
   bool HasTrivialContext() const;
 
   // Predicates.
@@ -437,9 +571,21 @@
   void AllocateNonParameterLocals();
   void AllocateVariablesRecursively();
 
+  // Resolve and fill in the allocation information for all variables
+  // in this scope. Must be called *after* all scopes have been
+  // processed (parsed) to ensure that unresolved variables can be
+  // resolved properly.
+  //
+  // In the case of code compiled and run using 'eval', resolution also
+  // depends on the context in which 'eval' was called; that context is
+  // reachable through the CompilationInfo.
+  MUST_USE_RESULT
+  bool AllocateVariables(CompilationInfo* info,
+                         AstNodeFactory<AstNullVisitor>* factory);
+
  private:
-  // Construct a function or block scope based on the scope info.
-  Scope(Scope* inner_scope, Type type, Handle<SerializedScopeInfo> scope_info);
+  // Construct a scope based on the scope info.
+  Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info);
 
   // Construct a catch scope with a binding for the name.
   Scope(Scope* inner_scope, Handle<String> catch_variable_name);
@@ -451,9 +597,9 @@
     }
   }
 
-  void SetDefaults(Type type,
+  void SetDefaults(ScopeType type,
                    Scope* outer_scope,
-                   Handle<SerializedScopeInfo> scope_info);
+                   Handle<ScopeInfo> scope_info);
 };
 
 } }  // namespace v8::internal
diff --git a/src/serialize.cc b/src/serialize.cc
index ecb480a..01d5f1c 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -273,14 +273,22 @@
       STUB_CACHE_TABLE,
       2,
       "StubCache::primary_->value");
-  Add(stub_cache->key_reference(StubCache::kSecondary).address(),
+  Add(stub_cache->map_reference(StubCache::kPrimary).address(),
       STUB_CACHE_TABLE,
       3,
+      "StubCache::primary_->map");
+  Add(stub_cache->key_reference(StubCache::kSecondary).address(),
+      STUB_CACHE_TABLE,
+      4,
       "StubCache::secondary_->key");
   Add(stub_cache->value_reference(StubCache::kSecondary).address(),
       STUB_CACHE_TABLE,
-      4,
+      5,
       "StubCache::secondary_->value");
+  Add(stub_cache->map_reference(StubCache::kSecondary).address(),
+      STUB_CACHE_TABLE,
+      6,
+      "StubCache::secondary_->map");
 
   // Runtime entries
   Add(ExternalReference::perform_gc_function(isolate).address(),
@@ -300,16 +308,28 @@
       RUNTIME_ENTRY,
       4,
       "HandleScope::DeleteExtensions");
+  Add(ExternalReference::
+          incremental_marking_record_write_function(isolate).address(),
+      RUNTIME_ENTRY,
+      5,
+      "IncrementalMarking::RecordWrite");
+  Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
+      RUNTIME_ENTRY,
+      6,
+      "StoreBuffer::StoreBufferOverflow");
+  Add(ExternalReference::
+          incremental_evacuation_record_write_function(isolate).address(),
+      RUNTIME_ENTRY,
+      7,
+      "IncrementalMarking::RecordWrite");
 
   // Miscellaneous
-  Add(ExternalReference::the_hole_value_location(isolate).address(),
-      UNCLASSIFIED,
-      2,
-      "Factory::the_hole_value().location()");
-  Add(ExternalReference::roots_address(isolate).address(),
+  Add(ExternalReference::roots_array_start(isolate).address(),
       UNCLASSIFIED,
       3,
-      "Heap::roots_address()");
+      "Heap::roots_array_start()");
   Add(ExternalReference::address_of_stack_limit(isolate).address(),
       UNCLASSIFIED,
       4,
@@ -351,129 +371,145 @@
       "Heap::always_allocate_scope_depth()");
   Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
       UNCLASSIFIED,
-      13,
+      14,
       "Heap::NewSpaceAllocationLimitAddress()");
   Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
       UNCLASSIFIED,
-      14,
+      15,
       "Heap::NewSpaceAllocationTopAddress()");
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Add(ExternalReference::debug_break(isolate).address(),
       UNCLASSIFIED,
-      15,
+      16,
       "Debug::Break()");
   Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
       UNCLASSIFIED,
-      16,
+      17,
       "Debug::step_in_fp_addr()");
 #endif
   Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
       UNCLASSIFIED,
-      17,
+      18,
       "add_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
       UNCLASSIFIED,
-      18,
+      19,
       "sub_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
       UNCLASSIFIED,
-      19,
+      20,
       "mul_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
       UNCLASSIFIED,
-      20,
+      21,
       "div_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
       UNCLASSIFIED,
-      21,
+      22,
       "mod_two_doubles");
   Add(ExternalReference::compare_doubles(isolate).address(),
       UNCLASSIFIED,
-      22,
+      23,
       "compare_doubles");
 #ifndef V8_INTERPRETED_REGEXP
   Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
       UNCLASSIFIED,
-      23,
+      24,
       "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
   Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
       UNCLASSIFIED,
-      24,
+      25,
       "RegExpMacroAssembler*::CheckStackGuardState()");
   Add(ExternalReference::re_grow_stack(isolate).address(),
       UNCLASSIFIED,
-      25,
+      26,
       "NativeRegExpMacroAssembler::GrowStack()");
   Add(ExternalReference::re_word_character_map().address(),
       UNCLASSIFIED,
-      26,
+      27,
       "NativeRegExpMacroAssembler::word_character_map");
 #endif  // V8_INTERPRETED_REGEXP
   // Keyed lookup cache.
   Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
       UNCLASSIFIED,
-      27,
+      28,
       "KeyedLookupCache::keys()");
   Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
       UNCLASSIFIED,
-      28,
+      29,
       "KeyedLookupCache::field_offsets()");
   Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
       UNCLASSIFIED,
-      29,
+      30,
       "TranscendentalCache::caches()");
   Add(ExternalReference::handle_scope_next_address().address(),
       UNCLASSIFIED,
-      30,
+      31,
       "HandleScope::next");
   Add(ExternalReference::handle_scope_limit_address().address(),
       UNCLASSIFIED,
-      31,
+      32,
       "HandleScope::limit");
   Add(ExternalReference::handle_scope_level_address().address(),
       UNCLASSIFIED,
-      32,
+      33,
       "HandleScope::level");
   Add(ExternalReference::new_deoptimizer_function(isolate).address(),
       UNCLASSIFIED,
-      33,
+      34,
       "Deoptimizer::New()");
   Add(ExternalReference::compute_output_frames_function(isolate).address(),
       UNCLASSIFIED,
-      34,
+      35,
       "Deoptimizer::ComputeOutputFrames()");
   Add(ExternalReference::address_of_min_int().address(),
       UNCLASSIFIED,
-      35,
+      36,
       "LDoubleConstant::min_int");
   Add(ExternalReference::address_of_one_half().address(),
       UNCLASSIFIED,
-      36,
+      37,
       "LDoubleConstant::one_half");
   Add(ExternalReference::isolate_address().address(),
       UNCLASSIFIED,
-      37,
+      38,
       "isolate");
   Add(ExternalReference::address_of_minus_zero().address(),
       UNCLASSIFIED,
-      38,
+      39,
       "LDoubleConstant::minus_zero");
   Add(ExternalReference::address_of_negative_infinity().address(),
       UNCLASSIFIED,
-      39,
+      40,
       "LDoubleConstant::negative_infinity");
   Add(ExternalReference::power_double_double_function(isolate).address(),
       UNCLASSIFIED,
-      40,
+      41,
       "power_double_double_function");
   Add(ExternalReference::power_double_int_function(isolate).address(),
       UNCLASSIFIED,
-      41,
-      "power_double_int_function");
-  Add(ExternalReference::arguments_marker_location(isolate).address(),
-      UNCLASSIFIED,
       42,
-      "Factory::arguments_marker().location()");
+      "power_double_int_function");
+  Add(ExternalReference::store_buffer_top(isolate).address(),
+      UNCLASSIFIED,
+      43,
+      "store_buffer_top");
+  Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
+      UNCLASSIFIED,
+      44,
+      "canonical_nan");
+  Add(ExternalReference::address_of_the_hole_nan().address(),
+      UNCLASSIFIED,
+      45,
+      "the_hole_nan");
+  Add(ExternalReference::get_date_field_function(isolate).address(),
+      UNCLASSIFIED,
+      46,
+      "JSDate::GetField");
+  Add(ExternalReference::date_cache_stamp(isolate).address(),
+      UNCLASSIFIED,
+      47,
+      "date_cache_stamp");
 }
 
 
@@ -569,6 +605,7 @@
       maybe_new_allocation =
           reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
     }
+    ASSERT(!maybe_new_allocation->IsFailure());
     Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
     HeapObject* new_object = HeapObject::cast(new_allocation);
     address = new_object->address();
@@ -577,14 +614,13 @@
     ASSERT(SpaceIsLarge(space_index));
     LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
     Object* new_allocation;
-    if (space_index == kLargeData) {
-      new_allocation = lo_space->AllocateRaw(size)->ToObjectUnchecked();
-    } else if (space_index == kLargeFixedArray) {
+    if (space_index == kLargeData || space_index == kLargeFixedArray) {
       new_allocation =
-          lo_space->AllocateRawFixedArray(size)->ToObjectUnchecked();
+          lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked();
     } else {
       ASSERT_EQ(kLargeCode, space_index);
-      new_allocation = lo_space->AllocateRawCode(size)->ToObjectUnchecked();
+      new_allocation =
+          lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked();
     }
     HeapObject* new_object = HeapObject::cast(new_allocation);
     // Record all large objects in the same space.
@@ -629,6 +665,7 @@
 
 void Deserializer::Deserialize() {
   isolate_ = Isolate::Current();
+  ASSERT(isolate_ != NULL);
   // Don't GC while deserializing - just expand the heap.
   AlwaysAllocateScope always_allocate;
   // Don't use the free lists while deserializing.
@@ -648,6 +685,14 @@
 
   isolate_->heap()->set_global_contexts_list(
       isolate_->heap()->undefined_value());
+
+  // Update data pointers to the external strings containing natives sources.
+  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+    Object* source = isolate_->heap()->natives_source_cache()->get(i);
+    if (!source->IsUndefined()) {
+      ExternalAsciiString::cast(source)->update_data_cache();
+    }
+  }
 }
 
 
@@ -685,9 +730,8 @@
 // This routine writes the new object into the pointer provided and then
 // returns true if the new object was in young space and false otherwise.
 // The reason for this strange interface is that otherwise the object is
-// written very late, which means the ByteArray map is not set up by the
-// time we need to use it to mark the space at the end of a page free (by
-// making it into a byte array).
+// written very late, which means the FreeSpace map is not set up by the
+// time we need to use it to mark the space at the end of a page free.
 void Deserializer::ReadObject(int space_number,
                               Space* space,
                               Object** write_back) {
@@ -737,8 +781,13 @@
 void Deserializer::ReadChunk(Object** current,
                              Object** limit,
                              int source_space,
-                             Address address) {
+                             Address current_object_address) {
   Isolate* const isolate = isolate_;
+  bool write_barrier_needed = (current_object_address != NULL &&
+                               source_space != NEW_SPACE &&
+                               source_space != CELL_SPACE &&
+                               source_space != CODE_SPACE &&
+                               source_space != OLD_DATA_SPACE);
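+  // For objects in these spaces the deserializer never needs to record
+  // old-to-new pointers, so the write barrier can be skipped wholesale.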
   while (current < limit) {
     int data = source_->Get();
     switch (data) {
@@ -758,8 +807,7 @@
         if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
           ASSIGN_DEST_SPACE(space_number)                                      \
           ReadObject(space_number, dest_space, current);                       \
-          emit_write_barrier =                                                 \
-            (space_number == NEW_SPACE && source_space != NEW_SPACE);          \
+          emit_write_barrier = (space_number == NEW_SPACE);                    \
         } else {                                                               \
           Object* new_object = NULL;  /* May not be a real Object pointer. */  \
           if (where == kNewObject) {                                           \
@@ -767,25 +815,25 @@
             ReadObject(space_number, dest_space, &new_object);                 \
           } else if (where == kRootArray) {                                    \
             int root_id = source_->GetInt();                                   \
-            new_object = isolate->heap()->roots_address()[root_id];            \
+            new_object = isolate->heap()->roots_array_start()[root_id];        \
+            emit_write_barrier = isolate->heap()->InNewSpace(new_object);      \
           } else if (where == kPartialSnapshotCache) {                         \
             int cache_index = source_->GetInt();                               \
             new_object = isolate->serialize_partial_snapshot_cache()           \
                 [cache_index];                                                 \
+            emit_write_barrier = isolate->heap()->InNewSpace(new_object);      \
           } else if (where == kExternalReference) {                            \
             int reference_id = source_->GetInt();                              \
             Address address = external_reference_decoder_->                    \
                 Decode(reference_id);                                          \
             new_object = reinterpret_cast<Object*>(address);                   \
           } else if (where == kBackref) {                                      \
-            emit_write_barrier =                                               \
-              (space_number == NEW_SPACE && source_space != NEW_SPACE);        \
+            emit_write_barrier = (space_number == NEW_SPACE);                  \
             new_object = GetAddressFromEnd(data & kSpaceMask);                 \
           } else {                                                             \
             ASSERT(where == kFromStart);                                       \
             if (offset_from_start == kUnknownOffsetFromStart) {                \
-              emit_write_barrier =                                             \
-                (space_number == NEW_SPACE && source_space != NEW_SPACE);      \
+              emit_write_barrier = (space_number == NEW_SPACE);                \
               new_object = GetAddressFromStart(data & kSpaceMask);             \
             } else {                                                           \
               Address object_address = pages_[space_number][0] +               \
@@ -801,23 +849,24 @@
           if (how == kFromCode) {                                              \
             Address location_of_branch_data =                                  \
                 reinterpret_cast<Address>(current);                            \
-            Assembler::set_target_at(location_of_branch_data,                  \
-                                     reinterpret_cast<Address>(new_object));   \
-            if (within == kFirstInstruction) {                                 \
-              location_of_branch_data += Assembler::kCallTargetSize;           \
-              current = reinterpret_cast<Object**>(location_of_branch_data);   \
-              current_was_incremented = true;                                  \
-            }                                                                  \
+            Assembler::deserialization_set_special_target_at(                  \
+                location_of_branch_data,                                       \
+                reinterpret_cast<Address>(new_object));                        \
+            location_of_branch_data += Assembler::kSpecialTargetSize;          \
+            current = reinterpret_cast<Object**>(location_of_branch_data);     \
+            current_was_incremented = true;                                    \
           } else {                                                             \
             *current = new_object;                                             \
           }                                                                    \
         }                                                                      \
-        if (emit_write_barrier) {                                              \
-          isolate->heap()->RecordWrite(address, static_cast<int>(              \
-              reinterpret_cast<Address>(current) - address));                  \
+        if (emit_write_barrier && write_barrier_needed) {                      \
+          Address current_address = reinterpret_cast<Address>(current);        \
+          isolate->heap()->RecordWrite(                                        \
+              current_object_address,                                          \
+              static_cast<int>(current_address - current_object_address));     \
         }                                                                      \
         if (!current_was_incremented) {                                        \
-          current++;   /* Increment current if it wasn't done above. */        \
+          current++;                                                           \
         }                                                                      \
         break;                                                                 \
       }                                                                        \
@@ -864,11 +913,17 @@
   CASE_STATEMENT(where, how, within, kLargeCode)                               \
   CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart)
 
-#define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number,                    \
-                                       space_number,                           \
-                                       offset_from_start)                      \
-  CASE_STATEMENT(kFromStart, kPlain, kStartOfObject, pseudo_space_number)      \
-  CASE_BODY(kFromStart, kPlain, kStartOfObject, space_number, offset_from_start)
+#define FOUR_CASES(byte_code)             \
+  case byte_code:                         \
+  case byte_code + 1:                     \
+  case byte_code + 2:                     \
+  case byte_code + 3:
+
+#define SIXTEEN_CASES(byte_code)          \
+  FOUR_CASES(byte_code)                   \
+  FOUR_CASES(byte_code + 4)               \
+  FOUR_CASES(byte_code + 8)               \
+  FOUR_CASES(byte_code + 12)
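+// For example, SIXTEEN_CASES(kRootArrayLowConstants) expands into the
+// sixteen consecutive case labels kRootArrayLowConstants through
+// kRootArrayLowConstants + 15, all falling through to a shared body.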
 
       // We generate 15 cases and bodies that process special tags that combine
       // the raw data tag and the length into one byte.
@@ -892,6 +947,38 @@
         break;
       }
 
+      SIXTEEN_CASES(kRootArrayLowConstants)
+      SIXTEEN_CASES(kRootArrayHighConstants) {
+        int root_id = RootArrayConstantFromByteCode(data);
+        Object* object = isolate->heap()->roots_array_start()[root_id];
+        ASSERT(!isolate->heap()->InNewSpace(object));
+        *current++ = object;
+        break;
+      }
+
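+      // Run-length encoding: replicate the word preceding 'current'
+      // 'repeats' times. Only old-space objects may be repeated (hence
+      // the assert), so no write barrier is needed for these stores.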
+      case kRepeat: {
+        int repeats = source_->GetInt();
+        Object* object = current[-1];
+        ASSERT(!isolate->heap()->InNewSpace(object));
+        for (int i = 0; i < repeats; i++) current[i] = object;
+        current += repeats;
+        break;
+      }
+
+      STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
+                    Heap::kOldSpaceRoots);
+      STATIC_ASSERT(kMaxRepeats == 12);
+      FOUR_CASES(kConstantRepeat)
+      FOUR_CASES(kConstantRepeat + 4)
+      FOUR_CASES(kConstantRepeat + 8) {
+        int repeats = RepeatsForCode(data);
+        Object* object = current[-1];
+        ASSERT(!isolate->heap()->InNewSpace(object));
+        for (int i = 0; i < repeats; i++) current[i] = object;
+        current += repeats;
+        break;
+      }
+
       // Deserialize a new object and write a pointer to it to the current
       // object.
       ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
@@ -903,6 +990,21 @@
       // Find a recently deserialized object using its offset from the current
       // allocation point and write a pointer to it to the current object.
       ALL_SPACES(kBackref, kPlain, kStartOfObject)
+#if V8_TARGET_ARCH_MIPS
+      // Deserialize a new object from pointer found in code and write
+      // a pointer to it to the current object. Required only for MIPS, and
+      // omitted on the other architectures because it is fully unrolled and
+      // would cause bloat.
+      ONE_PER_SPACE(kNewObject, kFromCode, kStartOfObject)
+      // Find a recently deserialized code object using its offset from the
+      // current allocation point and write a pointer to it to the current
+      // object. Required only for MIPS.
+      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
+      // Find an already deserialized code object using its offset from
+      // the start and write a pointer to it to the current object.
+      // Required only for MIPS.
+      ALL_SPACES(kFromStart, kFromCode, kStartOfObject)
+#endif
       // Find a recently deserialized code object using its offset from the
       // current allocation point and write a pointer to its first instruction
       // to the current code object or the instruction pointer in a function
@@ -917,9 +1019,6 @@
       // start and write a pointer to its first instruction to the current code
       // object.
       ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
-      // Find an already deserialized object at one of the predetermined popular
-      // offsets from the start and write a pointer to it in the current object.
-      COMMON_REFERENCE_PATTERNS(EMIT_COMMON_REFERENCE_PATTERNS)
       // Find an object in the roots array and write a pointer to it to the
       // current object.
       CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
@@ -961,7 +1060,6 @@
 #undef CASE_BODY
 #undef ONE_PER_SPACE
 #undef ALL_SPACES
-#undef EMIT_COMMON_REFERENCE_PATTERNS
 #undef ASSIGN_DEST_SPACE
 
       case kNewPage: {
@@ -973,6 +1071,11 @@
         break;
       }
 
+      case kSkip: {
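+        // The serializer emits kSkip for words that must not be restored
+        // from the snapshot (e.g. the store buffer top pointer); leave
+        // the slot untouched.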
+        current++;
+        break;
+      }
+
       case kNativesStringResource: {
         int index = source_->Get();
         Vector<const char> source_vector = Natives::GetRawScriptSource(index);
@@ -1008,45 +1111,17 @@
   PutSection(static_cast<int>(integer & 0x7f), "IntLastPart");
 }
 
-#ifdef DEBUG
-
-void Deserializer::Synchronize(const char* tag) {
-  int data = source_->Get();
-  // If this assert fails then that indicates that you have a mismatch between
-  // the number of GC roots when serializing and deserializing.
-  ASSERT_EQ(kSynchronize, data);
-  do {
-    int character = source_->Get();
-    if (character == 0) break;
-    if (FLAG_debug_serialization) {
-      PrintF("%c", character);
-    }
-  } while (true);
-  if (FLAG_debug_serialization) {
-    PrintF("\n");
-  }
-}
-
-
-void Serializer::Synchronize(const char* tag) {
-  sink_->Put(kSynchronize, tag);
-  int character;
-  do {
-    character = *tag++;
-    sink_->PutSection(character, "TagCharacter");
-  } while (character != 0);
-}
-
-#endif
 
 Serializer::Serializer(SnapshotByteSink* sink)
     : sink_(sink),
       current_root_index_(0),
       external_reference_encoder_(new ExternalReferenceEncoder),
-      large_object_total_(0) {
+      large_object_total_(0),
+      root_index_wave_front_(0) {
+  isolate_ = Isolate::Current();
   // The serializer is meant to be used only to generate initial heap images
   // from a context in which there is only one isolate.
-  ASSERT(Isolate::Current()->IsDefaultIsolate());
+  ASSERT(isolate_->IsDefaultIsolate());
   for (int i = 0; i <= LAST_SPACE; i++) {
     fullness_[i] = 0;
   }
@@ -1066,11 +1141,8 @@
   CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
   CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
   // We don't support serializing installed extensions.
-  for (RegisteredExtension* ext = v8::RegisteredExtension::first_extension();
-       ext != NULL;
-       ext = ext->next()) {
-    CHECK_NE(v8::INSTALLED, ext->state());
-  }
+  CHECK(!isolate->has_installed_extensions());
+
   HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
 }
 
@@ -1097,8 +1169,17 @@
 
 
 void Serializer::VisitPointers(Object** start, Object** end) {
+  Isolate* isolate = Isolate::Current();
+
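+  // When walking the root list itself, advance the wave front so that
+  // RootIndex() only ever reports roots that have already been serialized.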
   for (Object** current = start; current < end; current++) {
-    if ((*current)->IsSmi()) {
+    if (start == isolate->heap()->roots_array_start()) {
+      root_index_wave_front_ =
+          Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
+    }
+    if (reinterpret_cast<Address>(current) ==
+        isolate->heap()->store_buffer()->TopAddress()) {
+      sink_->Put(kSkip, "Skip");
+    } else if ((*current)->IsSmi()) {
       sink_->Put(kRawData, "RawData");
       sink_->PutInt(kPointerSize, "length");
       for (int i = 0; i < kPointerSize; i++) {
@@ -1162,10 +1243,23 @@
 }
 
 
-int PartialSerializer::RootIndex(HeapObject* heap_object) {
-  for (int i = 0; i < Heap::kRootListLength; i++) {
-    Object* root = HEAP->roots_address()[i];
-    if (root == heap_object) return i;
+int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
+  Heap* heap = HEAP;
+  if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
+  for (int i = 0; i < root_index_wave_front_; i++) {
+    Object* root = heap->roots_array_start()[i];
+    if (!root->IsSmi() && root == heap_object) {
+#if V8_TARGET_ARCH_MIPS
+      if (from == kFromCode) {
+        // In order to avoid code bloat in the deserializer we don't have
+        // support for the encoding that specifies a particular root should
+        // be written into the lui/ori instructions on MIPS.  Therefore we
+        // should not generate such serialization data for MIPS.
+        return kInvalidRootIndex;
+      }
+#endif
+      return i;
+    }
   }
   return kInvalidRootIndex;
 }
@@ -1201,18 +1295,8 @@
   // all objects) then we should shift out the bits that are always 0.
   if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
   if (from_start) {
-#define COMMON_REFS_CASE(pseudo_space, actual_space, offset)                   \
-    if (space == actual_space && address == offset &&                          \
-        how_to_code == kPlain && where_to_point == kStartOfObject) {           \
-      sink_->Put(kFromStart + how_to_code + where_to_point +                   \
-                 pseudo_space, "RefSer");                                      \
-    } else  /* NOLINT */
-    COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
-#undef COMMON_REFS_CASE
-    {  /* NOLINT */
-      sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
-      sink_->PutInt(address, "address");
-    }
+    sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
+    sink_->PutInt(address, "address");
   } else {
     sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
     sink_->PutInt(address, "address");
@@ -1227,6 +1311,12 @@
   CHECK(o->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(o);
 
+  int root_index;
+  if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
+    PutRoot(root_index, heap_object, how_to_code, where_to_point);
+    return;
+  }
+
   if (address_mapper_.IsMapped(heap_object)) {
     int space = SpaceOfAlreadySerializedObject(heap_object);
     int address = address_mapper_.MappedTo(heap_object);
@@ -1257,6 +1347,28 @@
 }
 
 
+void Serializer::PutRoot(int root_index,
+                         HeapObject* object,
+                         SerializerDeserializer::HowToCode how_to_code,
+                         SerializerDeserializer::WhereToPoint where_to_point) {
+  if (how_to_code == kPlain &&
+      where_to_point == kStartOfObject &&
+      root_index < kRootArrayNumberOfConstantEncodings &&
+      !HEAP->InNewSpace(object)) {
+    if (root_index < kRootArrayNumberOfLowConstantEncodings) {
+      sink_->Put(kRootArrayLowConstants + root_index, "RootLoConstant");
+    } else {
+      sink_->Put(kRootArrayHighConstants + root_index -
+                     kRootArrayNumberOfLowConstantEncodings,
+                 "RootHiConstant");
+    }
+  } else {
+    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
+    sink_->PutInt(root_index, "root_index");
+  }
+}
+
+
 void PartialSerializer::SerializeObject(
     Object* o,
     HowToCode how_to_code,
@@ -1264,10 +1376,16 @@
   CHECK(o->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(o);
 
+  if (heap_object->IsMap()) {
+    // The code-caches link to context-specific code objects, which
+    // the startup and context serializers cannot currently handle.
+    ASSERT(Map::cast(heap_object)->code_cache() ==
+           heap_object->GetHeap()->raw_unchecked_empty_fixed_array());
+  }
+
   int root_index;
-  if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
-    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
-    sink_->PutInt(root_index, "root_index");
+  if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
+    PutRoot(root_index, heap_object, how_to_code, where_to_point);
     return;
   }
 
@@ -1345,14 +1463,48 @@
     if (current < end) OutputRawData(reinterpret_cast<Address>(current));
 
     while (current < end && !(*current)->IsSmi()) {
-      serializer_->SerializeObject(*current, kPlain, kStartOfObject);
-      bytes_processed_so_far_ += kPointerSize;
-      current++;
+      HeapObject* current_contents = HeapObject::cast(*current);
+      int root_index = serializer_->RootIndex(current_contents, kPlain);
+      // Repeats are not subject to the write barrier so there are only some
+      // objects that can be used in a repeat encoding.  These are the early
+      // ones in the root array that are never in new space.
+      if (current != start &&
+          root_index != kInvalidRootIndex &&
+          root_index < kRootArrayNumberOfConstantEncodings &&
+          current_contents == current[-1]) {
+        ASSERT(!HEAP->InNewSpace(current_contents));
+        int repeat_count = 1;
+        while (current + repeat_count < end &&
+               current[repeat_count] == current_contents) {
+          repeat_count++;
+        }
+        current += repeat_count;
+        bytes_processed_so_far_ += repeat_count * kPointerSize;
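+        // Runs of up to kMaxRepeats fit in a single opcode byte; longer runs
+        // use the generic kRepeat opcode followed by an explicit count.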
+        if (repeat_count > kMaxRepeats) {
+          sink_->Put(kRepeat, "SerializeRepeats");
+          sink_->PutInt(repeat_count, "SerializeRepeats");
+        } else {
+          sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
+        }
+      } else {
+        serializer_->SerializeObject(current_contents, kPlain, kStartOfObject);
+        bytes_processed_so_far_ += kPointerSize;
+        current++;
+      }
     }
   }
 }
 
 
+void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
+  Object** current = rinfo->target_object_address();
+
+  OutputRawData(rinfo->target_address_address());
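+  // Pointers that are encoded in the instruction stream itself (e.g. split
+  // across lui/ori immediates on MIPS) must be tagged kFromCode so that the
+  // deserializer patches the instructions instead of writing a plain word.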
+  HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+  serializer_->SerializeObject(*current, representation, kStartOfObject);
+  bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+
 void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
                                                            Address* end) {
   Address references_start = reinterpret_cast<Address>(start);
@@ -1367,6 +1519,20 @@
 }
 
 
+void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
+  Address references_start = rinfo->target_address_address();
+  OutputRawData(references_start);
+
+  Address* current = rinfo->target_reference_address();
+  int representation = rinfo->IsCodedSpecially() ?
+                       kFromCode + kStartOfObject : kPlain + kStartOfObject;
+  sink_->Put(kExternalReference + representation, "ExternalRef");
+  int reference_id = serializer_->EncodeExternalReference(*current);
+  sink_->PutInt(reference_id, "reference id");
+  bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+
 void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
   Address target_start = rinfo->target_address_address();
   OutputRawData(target_start);
@@ -1420,7 +1586,7 @@
     if (!source->IsUndefined()) {
       ExternalAsciiString* string = ExternalAsciiString::cast(source);
       typedef v8::String::ExternalAsciiStringResource Resource;
-      Resource* resource = string->resource();
+      const Resource* resource = string->resource();
       if (resource == *resource_pointer) {
         sink_->Put(kNativesStringResource, "NativesStringResource");
         sink_->PutSection(i, "NativesStringResourceEnd");
@@ -1518,8 +1684,8 @@
     // serialized address.
     CHECK(IsPowerOf2(Page::kPageSize));
     int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
-    CHECK(size <= Page::kObjectAreaSize);
-    if (used_in_this_page + size > Page::kObjectAreaSize) {
+    CHECK(size <= SpaceAreaSize(space));
+    if (used_in_this_page + size > SpaceAreaSize(space)) {
       *new_page = true;
       fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
     }
@@ -1530,4 +1696,13 @@
 }
 
 
+int Serializer::SpaceAreaSize(int space) {
+  if (space == CODE_SPACE) {
+    return isolate_->memory_allocator()->CodePageAreaSize();
+  } else {
+    return Page::kPageSize - Page::kObjectStartOffset;
+  }
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/serialize.h b/src/serialize.h
index 66d6fb5..f50e23e 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -187,24 +187,6 @@
 };
 
 
-// It is very common to have a reference to objects at certain offsets in the
-// heap.  These offsets have been determined experimentally.  We code
-// references to such objects in a single byte that encodes the way the pointer
-// is written (only plain pointers allowed), the space number and the offset.
-// This only works for objects in the first page of a space.  Don't use this for
-// things in newspace since it bypasses the write barrier.
-
-static const int k64 = (sizeof(uintptr_t) - 4) / 4;
-
-#define COMMON_REFERENCE_PATTERNS(f)                               \
-  f(kNumberOfSpaces, 2, (11 - k64))                                \
-  f((kNumberOfSpaces + 1), 2, 0)                                   \
-  f((kNumberOfSpaces + 2), 2, (142 - 16 * k64))                    \
-  f((kNumberOfSpaces + 3), 2, (74 - 15 * k64))                     \
-  f((kNumberOfSpaces + 4), 2, 5)                                   \
-  f((kNumberOfSpaces + 5), 1, 135)                                 \
-  f((kNumberOfSpaces + 6), 2, (228 - 39 * k64))
-
 #define COMMON_RAW_LENGTHS(f)        \
   f(1, 1)  \
   f(2, 2)  \
@@ -238,14 +220,15 @@
     kRootArray = 0x9,               // Object is found in root array.
     kPartialSnapshotCache = 0xa,    // Object is in the cache.
     kExternalReference = 0xb,       // Pointer to an external reference.
-    // 0xc-0xf                         Free.
+    kSkip = 0xc,                    // Skip a pointer-sized cell.
+    // 0xd-0xf                         Free.
     kBackref = 0x10,                 // Object is described relative to end.
     // 0x11-0x18                       One per space.
-    // 0x19-0x1f                       Common backref offsets.
+    // 0x19-0x1f                       Free.
     kFromStart = 0x20,              // Object is described relative to start.
     // 0x21-0x28                       One per space.
     // 0x29-0x2f                       Free.
-    // 0x30-0x3f                       Used by misc tags below.
+    // 0x30-0x3f                       Used by misc. tags below.
     kPointedToMask = 0x3f
   };
 
@@ -278,9 +261,29 @@
   // is referred to from external strings in the snapshot.
   static const int kNativesStringResource = 0x71;
   static const int kNewPage = 0x72;
-  // 0x73-0x7f                            Free.
-  // 0xb0-0xbf                            Free.
-  // 0xf0-0xff                            Free.
+  static const int kRepeat = 0x73;
+  static const int kConstantRepeat = 0x74;
+  // 0x74-0x7f            Repeat last word (subtract 0x73 to get the count).
+  static const int kMaxRepeats = 0x7f - 0x73;
+  static int CodeForRepeats(int repeats) {
+    ASSERT(repeats >= 1 && repeats <= kMaxRepeats);
+    return 0x73 + repeats;
+  }
+  static int RepeatsForCode(int byte_code) {
+    ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f);
+    return byte_code - 0x73;
+  }
+  static const int kRootArrayLowConstants = 0xb0;
+  // 0xb0-0xbf            Things from the first 16 elements of the root array.
+  static const int kRootArrayHighConstants = 0xf0;
+  // 0xf0-0xff            Things from the next 16 elements of the root array.
+  static const int kRootArrayNumberOfConstantEncodings = 0x20;
+  static const int kRootArrayNumberOfLowConstantEncodings = 0x10;
+  static int RootArrayConstantFromByteCode(int byte_code) {
+    int constant = (byte_code & 0xf) | ((byte_code & 0x40) >> 2);
+    ASSERT(constant >= 0 && constant < kRootArrayNumberOfConstantEncodings);
+    return constant;
+  }
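+  // A sketch of the resulting mapping: root index 0x05 is emitted as the
+  // single byte 0xb5 and root index 0x12 as 0xf2.
+  // RootArrayConstantFromByteCode inverts this by recombining the low
+  // nibble with bit 6 of the opcode.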
 
 
   static const int kLargeData = LAST_SPACE;
@@ -338,10 +341,6 @@
   // Deserialize a single object and the objects reachable from it.
   void DeserializePartial(Object** root);
 
-#ifdef DEBUG
-  virtual void Synchronize(const char* tag);
-#endif
-
  private:
   virtual void VisitPointers(Object** start, Object** end);
 
@@ -353,7 +352,13 @@
     UNREACHABLE();
   }
 
-  void ReadChunk(Object** start, Object** end, int space, Address address);
+  // Fills in some heap data in an area from start to end (non-inclusive).  The
+  // space id is used for the write barrier.  The object_address is the address
+  // of the object we are writing into, or NULL if we are not writing into an
+  // object, i.e. if we are writing a series of tagged values that are not on
+  // the heap.
+  void ReadChunk(
+      Object** start, Object** end, int space, Address object_address);
   HeapObject* GetAddressFromStart(int space);
   inline HeapObject* GetAddressFromEnd(int space);
   Address Allocate(int space_number, Space* space, int size);
@@ -474,14 +479,19 @@
   static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
   static bool enabled() { return serialization_enabled_; }
   SerializationAddressMapper* address_mapper() { return &address_mapper_; }
-#ifdef DEBUG
-  virtual void Synchronize(const char* tag);
-#endif
+  void PutRoot(
+      int index, HeapObject* object, HowToCode how, WhereToPoint where);
 
  protected:
   static const int kInvalidRootIndex = -1;
-  virtual int RootIndex(HeapObject* heap_object) = 0;
+
+  int RootIndex(HeapObject* heap_object, HowToCode from);
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
+  intptr_t root_index_wave_front() { return root_index_wave_front_; }
+  void set_root_index_wave_front(intptr_t value) {
+    ASSERT(value >= root_index_wave_front_);
+    root_index_wave_front_ = value;
+  }
 
   class ObjectSerializer : public ObjectVisitor {
    public:
@@ -497,7 +507,9 @@
         bytes_processed_so_far_(0) { }
     void Serialize();
     void VisitPointers(Object** start, Object** end);
+    void VisitEmbeddedPointer(RelocInfo* target);
     void VisitExternalReferences(Address* start, Address* end);
+    void VisitExternalReference(RelocInfo* rinfo);
     void VisitCodeTarget(RelocInfo* target);
     void VisitCodeEntry(Address entry_address);
     void VisitGlobalPropertyCell(RelocInfo* rinfo);
@@ -544,6 +556,9 @@
     return external_reference_encoder_->Encode(addr);
   }
 
+  int SpaceAreaSize(int space);
+
+  Isolate* isolate_;
   // Keep track of the fullness of each space in order to generate
   // relative addresses for back references.  Large objects are
   // just numbered sequentially since relative addresses make no
@@ -557,10 +572,12 @@
   static bool too_late_to_enable_now_;
   int large_object_total_;
   SerializationAddressMapper address_mapper_;
+  intptr_t root_index_wave_front_;
 
   friend class ObjectSerializer;
   friend class Deserializer;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(Serializer);
 };
 
@@ -571,6 +588,7 @@
                     SnapshotByteSink* sink)
     : Serializer(sink),
       startup_serializer_(startup_snapshot_serializer) {
+    set_root_index_wave_front(Heap::kStrongRootListLength);
   }
 
   // Serialize the objects reachable from a single object pointer.
@@ -580,7 +598,6 @@
                                WhereToPoint where_to_point);
 
  protected:
-  virtual int RootIndex(HeapObject* o);
   virtual int PartialSnapshotCacheIndex(HeapObject* o);
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
     // Scripts should be referred only through shared function infos.  We can't
@@ -590,7 +607,7 @@
     ASSERT(!o->IsScript());
     return o->IsString() || o->IsSharedFunctionInfo() ||
            o->IsHeapNumber() || o->IsCode() ||
-           o->IsSerializedScopeInfo() ||
+           o->IsScopeInfo() ||
            o->map() == HEAP->fixed_cow_array_map();
   }
 
@@ -605,14 +622,14 @@
   explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
     // Clear the cache of objects used by the partial snapshot.  After the
     // strong roots have been serialized we can create a partial snapshot
-    // which will repopulate the cache with objects neede by that partial
+    // which will repopulate the cache with objects needed by that partial
     // snapshot.
     Isolate::Current()->set_serialize_partial_snapshot_cache_length(0);
   }
   // Serialize the current state of the heap.  The order is:
   // 1) Strong references.
   // 2) Partial snapshot cache.
-  // 3) Weak references (eg the symbol table).
+  // 3) Weak references (e.g. the symbol table).
   virtual void SerializeStrongReferences();
   virtual void SerializeObject(Object* o,
                                HowToCode how_to_code,
@@ -624,7 +641,6 @@
   }
 
  private:
-  virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
     return false;
   }
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 35d7224..3709009 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,398 +37,253 @@
 
 
 // -----------------------------------------------------------------------------
+// Bitmap
+
+void Bitmap::Clear(MemoryChunk* chunk) {
+  Bitmap* bitmap = chunk->markbits();
+  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
+  chunk->ResetLiveBytes();
+}
+
+
+// -----------------------------------------------------------------------------
 // PageIterator
 
+
+PageIterator::PageIterator(PagedSpace* space)
+    : space_(space),
+      prev_page_(&space->anchor_),
+      next_page_(prev_page_->next_page()) { }
+
+
 bool PageIterator::has_next() {
-  return prev_page_ != stop_page_;
+  return next_page_ != &space_->anchor_;
 }
 
 
 Page* PageIterator::next() {
   ASSERT(has_next());
-  prev_page_ = (prev_page_ == NULL)
-               ? space_->first_page_
-               : prev_page_->next_page();
+  prev_page_ = next_page_;
+  next_page_ = next_page_->next_page();
   return prev_page_;
 }
 
 
 // -----------------------------------------------------------------------------
-// Page
+// NewSpacePageIterator
 
-Page* Page::next_page() {
-  return heap_->isolate()->memory_allocator()->GetNextPage(this);
+
+NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
+    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
+      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
+      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
+
+NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
+    : prev_page_(space->anchor()),
+      next_page_(prev_page_->next_page()),
+      last_page_(prev_page_->prev_page()) { }
+
+NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
+    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
+      next_page_(NewSpacePage::FromAddress(start)),
+      last_page_(NewSpacePage::FromLimit(limit)) {
+  SemiSpace::AssertValidRange(start, limit);
 }
 
 
-Address Page::AllocationTop() {
-  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
-  return owner->PageAllocationTop(this);
+bool NewSpacePageIterator::has_next() {
+  return prev_page_ != last_page_;
 }
 
 
-Address Page::AllocationWatermark() {
-  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
-  if (this == owner->AllocationTopPage()) {
-    return owner->top();
-  }
-  return address() + AllocationWatermarkOffset();
+NewSpacePage* NewSpacePageIterator::next() {
+  ASSERT(has_next());
+  prev_page_ = next_page_;
+  next_page_ = next_page_->next_page();
+  return prev_page_;
 }
 
 
-uint32_t Page::AllocationWatermarkOffset() {
-  return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
-                               kAllocationWatermarkOffsetShift);
-}
-
-
-void Page::SetAllocationWatermark(Address allocation_watermark) {
-  if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
-    // When iterating intergenerational references during scavenge
-    // we might decide to promote an encountered young object.
-    // We will allocate a space for such an object and put it
-    // into the promotion queue to process it later.
-    // If space for object was allocated somewhere beyond allocation
-    // watermark this might cause garbage pointers to appear under allocation
-    // watermark. To avoid visiting them during dirty regions iteration
-    // which might be still in progress we store a valid allocation watermark
-    // value and mark this page as having an invalid watermark.
-    SetCachedAllocationWatermark(AllocationWatermark());
-    InvalidateWatermark(true);
-  }
-
-  flags_ = (flags_ & kFlagsMask) |
-           Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
-  ASSERT(AllocationWatermarkOffset()
-         == static_cast<uint32_t>(Offset(allocation_watermark)));
-}
-
-
-void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
-  mc_first_forwarded = allocation_watermark;
-}
-
-
-Address Page::CachedAllocationWatermark() {
-  return mc_first_forwarded;
-}
-
-
-uint32_t Page::GetRegionMarks() {
-  return dirty_regions_;
-}
-
-
-void Page::SetRegionMarks(uint32_t marks) {
-  dirty_regions_ = marks;
-}
-
-
-int Page::GetRegionNumberForAddress(Address addr) {
-  // Each page is divided into 256 byte regions. Each region has a corresponding
-  // dirty mark bit in the page header. Region can contain intergenerational
-  // references iff its dirty mark is set.
-  // A normal 8K page contains exactly 32 regions so all region marks fit
-  // into 32-bit integer field. To calculate a region number we just divide
-  // offset inside page by region size.
-  // A large page can contain more then 32 regions. But we want to avoid
-  // additional write barrier code for distinguishing between large and normal
-  // pages so we just ignore the fact that addr points into a large page and
-  // calculate region number as if addr pointed into a normal 8K page. This way
-  // we get a region number modulo 32 so for large pages several regions might
-  // be mapped to a single dirty mark.
-  ASSERT_PAGE_ALIGNED(this->address());
-  STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
-
-  // We are using masking with kPageAlignmentMask instead of Page::Offset()
-  // to get an offset to the beginning of 8K page containing addr not to the
-  // beginning of actual page which can be bigger then 8K.
-  intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
-  return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
-}
-
-
-uint32_t Page::GetRegionMaskForAddress(Address addr) {
-  return 1 << GetRegionNumberForAddress(addr);
-}
-
-
-uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
-  uint32_t result = 0;
-  static const intptr_t kRegionMask = (1 << kRegionSizeLog2) - 1;
-  if (length_in_bytes + (OffsetFrom(start) & kRegionMask) >= kPageSize) {
-    result = kAllRegionsDirtyMarks;
-  } else if (length_in_bytes > 0) {
-    int start_region = GetRegionNumberForAddress(start);
-    int end_region =
-        GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
-    uint32_t start_mask = (~0) << start_region;
-    uint32_t end_mask = ~((~1) << end_region);
-    result = start_mask & end_mask;
-    // if end_region < start_region, the mask is ored.
-    if (result == 0) result = start_mask | end_mask;
-  }
-#ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    uint32_t expected = 0;
-    for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
-      expected |= GetRegionMaskForAddress(a);
+// -----------------------------------------------------------------------------
+// HeapObjectIterator
+HeapObject* HeapObjectIterator::FromCurrentPage() {
+  while (cur_addr_ != cur_end_) {
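+    // The area between the allocation top and the limit contains no objects;
+    // jump straight to the limit when the iteration reaches the top.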
+    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
+      cur_addr_ = space_->limit();
+      continue;
     }
-    ASSERT(expected == result);
+    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+    cur_addr_ += obj_size;
+    ASSERT(cur_addr_ <= cur_end_);
+    if (!obj->IsFiller()) {
+      ASSERT_OBJECT_SIZE(obj_size);
+      return obj;
+    }
   }
-#endif
-  return result;
-}
-
-
-void Page::MarkRegionDirty(Address address) {
-  SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
-}
-
-
-bool Page::IsRegionDirty(Address address) {
-  return GetRegionMarks() & GetRegionMaskForAddress(address);
-}
-
-
-void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
-  int rstart = GetRegionNumberForAddress(start);
-  int rend = GetRegionNumberForAddress(end);
-
-  if (reaches_limit) {
-    end += 1;
-  }
-
-  if ((rend - rstart) == 0) {
-    return;
-  }
-
-  uint32_t bitmask = 0;
-
-  if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
-      || (start == ObjectAreaStart())) {
-    // First region is fully covered
-    bitmask = 1 << rstart;
-  }
-
-  while (++rstart < rend) {
-    bitmask |= 1 << rstart;
-  }
-
-  if (bitmask) {
-    SetRegionMarks(GetRegionMarks() & ~bitmask);
-  }
-}
-
-
-void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
-  heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
-}
-
-
-bool Page::IsWatermarkValid() {
-  return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
-      heap_->page_watermark_invalidated_mark_;
-}
-
-
-void Page::InvalidateWatermark(bool value) {
-  if (value) {
-    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
-             heap_->page_watermark_invalidated_mark_;
-  } else {
-    flags_ =
-        (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
-        (heap_->page_watermark_invalidated_mark_ ^
-         (1 << WATERMARK_INVALIDATED));
-  }
-
-  ASSERT(IsWatermarkValid() == !value);
-}
-
-
-bool Page::GetPageFlag(PageFlag flag) {
-  return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
-}
-
-
-void Page::SetPageFlag(PageFlag flag, bool value) {
-  if (value) {
-    flags_ |= static_cast<intptr_t>(1 << flag);
-  } else {
-    flags_ &= ~static_cast<intptr_t>(1 << flag);
-  }
-}
-
-
-void Page::ClearPageFlags() {
-  flags_ = 0;
-}
-
-
-void Page::ClearGCFields() {
-  InvalidateWatermark(true);
-  SetAllocationWatermark(ObjectAreaStart());
-  if (heap_->gc_state() == Heap::SCAVENGE) {
-    SetCachedAllocationWatermark(ObjectAreaStart());
-  }
-  SetRegionMarks(kAllRegionsCleanMarks);
-}
-
-
-bool Page::WasInUseBeforeMC() {
-  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
-}
-
-
-void Page::SetWasInUseBeforeMC(bool was_in_use) {
-  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
-}
-
-
-bool Page::IsLargeObjectPage() {
-  return !GetPageFlag(IS_NORMAL_PAGE);
-}
-
-
-void Page::SetIsLargeObjectPage(bool is_large_object_page) {
-  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
-}
-
-Executability Page::PageExecutability() {
-  return GetPageFlag(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-}
-
-
-void Page::SetPageExecutability(Executability executable) {
-  SetPageFlag(IS_EXECUTABLE, executable == EXECUTABLE);
+  return NULL;
 }
 
 
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 
-void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
-  address_ = a;
-  size_ = s;
-  owner_ = o;
-  executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
-  owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
+#ifdef ENABLE_HEAP_PROTECTION
+
+void MemoryAllocator::Protect(Address start, size_t size) {
+  OS::Protect(start, size);
 }
 
 
-bool MemoryAllocator::IsValidChunk(int chunk_id) {
-  if (!IsValidChunkId(chunk_id)) return false;
-
-  ChunkInfo& c = chunks_[chunk_id];
-  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
+void MemoryAllocator::Unprotect(Address start,
+                                size_t size,
+                                Executability executable) {
+  OS::Unprotect(start, size, executable);
 }
 
 
-bool MemoryAllocator::IsValidChunkId(int chunk_id) {
-  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
+void MemoryAllocator::ProtectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  OS::Protect(chunks_[id].address(), chunks_[id].size());
 }
 
 
-bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
-  ASSERT(p->is_valid());
-
-  int chunk_id = GetChunkId(p);
-  if (!IsValidChunkId(chunk_id)) return false;
-
-  ChunkInfo& c = chunks_[chunk_id];
-  return (c.address() <= p->address()) &&
-         (p->address() < c.address() + c.size()) &&
-         (space == c.owner());
+void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
+                chunks_[id].owner()->executable() == EXECUTABLE);
 }
 
-
-Page* MemoryAllocator::GetNextPage(Page* p) {
-  ASSERT(p->is_valid());
-  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
-  return Page::FromAddress(AddressFrom<Address>(raw_addr));
-}
-
-
-int MemoryAllocator::GetChunkId(Page* p) {
-  ASSERT(p->is_valid());
-  return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
-}
-
-
-void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
-  ASSERT(prev->is_valid());
-  int chunk_id = GetChunkId(prev);
-  ASSERT_PAGE_ALIGNED(next->address());
-  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
-}
-
-
-PagedSpace* MemoryAllocator::PageOwner(Page* page) {
-  int chunk_id = GetChunkId(page);
-  ASSERT(IsValidChunk(chunk_id));
-  return chunks_[chunk_id].owner();
-}
-
-
-bool MemoryAllocator::InInitialChunk(Address address) {
-  if (initial_chunk_ == NULL) return false;
-
-  Address start = static_cast<Address>(initial_chunk_->address());
-  return (start <= address) && (address < start + initial_chunk_->size());
-}
+#endif
 
 
 // --------------------------------------------------------------------------
 // PagedSpace
+Page* Page::Initialize(Heap* heap,
+                       MemoryChunk* chunk,
+                       Executability executable,
+                       PagedSpace* owner) {
+  Page* page = reinterpret_cast<Page*>(chunk);
+  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
+  ASSERT(chunk->owner() == owner);
+  owner->IncreaseCapacity(page->area_size());
+  owner->Free(page->area_start(), page->area_size());
+
+  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+  return page;
+}
+
 
 bool PagedSpace::Contains(Address addr) {
   Page* p = Page::FromAddress(addr);
   if (!p->is_valid()) return false;
-  return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
+  return p->owner() == this;
+}
+
+
+void MemoryChunk::set_scan_on_scavenge(bool scan) {
+  if (scan) {
+    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
+    SetFlag(SCAN_ON_SCAVENGE);
+  } else {
+    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
+    ClearFlag(SCAN_ON_SCAVENGE);
+  }
+  heap_->incremental_marking()->SetOldSpacePageFlags(this);
+}
+
+
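+// Masking an address with the page alignment normally yields its owning
+// chunk directly.  Interior pointers into large-object space can miss the
+// chunk header, so those fall back to a scan of the large object space.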
+MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
+  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
+      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
+  if (maybe->owner() != NULL) return maybe;
+  LargeObjectIterator iterator(HEAP->lo_space());
+  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
+    // Fixed arrays are the only pointer-containing objects in large object
+    // space.
+    if (o->IsFixedArray()) {
+      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
+      if (chunk->Contains(addr)) {
+        return chunk;
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+PointerChunkIterator::PointerChunkIterator(Heap* heap)
+    : state_(kOldPointerState),
+      old_pointer_iterator_(heap->old_pointer_space()),
+      map_iterator_(heap->map_space()),
+      lo_iterator_(heap->lo_space()) { }
+
+
+Page* Page::next_page() {
+  ASSERT(next_chunk()->owner() == owner());
+  return static_cast<Page*>(next_chunk());
+}
+
+
+Page* Page::prev_page() {
+  ASSERT(prev_chunk()->owner() == owner());
+  return static_cast<Page*>(prev_chunk());
+}
+
+
+void Page::set_next_page(Page* page) {
+  ASSERT(page->owner() == owner());
+  set_next_chunk(page);
+}
+
+
+void Page::set_prev_page(Page* page) {
+  ASSERT(page->owner() == owner());
+  set_prev_chunk(page);
 }
 
 
 // Try linear allocation in the page of alloc_info's allocation top.  Does
-// not contain slow case logic (eg, move to the next page or try free list
+// not contain slow case logic (e.g. move to the next page or try free list
 // allocation) so it can be used by all the allocation functions and for all
 // the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
-                                         int size_in_bytes) {
-  Address current_top = alloc_info->top;
+HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
+  Address current_top = allocation_info_.top;
   Address new_top = current_top + size_in_bytes;
-  if (new_top > alloc_info->limit) return NULL;
+  if (new_top > allocation_info_.limit) return NULL;
 
-  alloc_info->top = new_top;
-  ASSERT(alloc_info->VerifyPagedAllocation());
-  accounting_stats_.AllocateBytes(size_in_bytes);
+  allocation_info_.top = new_top;
   return HeapObject::FromAddress(current_top);
 }
 
 
 // Raw allocation.
 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
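+  // Allocation strategy: try the linear allocation area, then the free
+  // list, then the slow path that may expand the space.  Code-space
+  // allocations also update the page's skip list, which maps inner
+  // pointers back to object starts.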
-  ASSERT(HasBeenSetup());
-  ASSERT_OBJECT_SIZE(size_in_bytes);
-  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
-  if (object != NULL) return object;
+  HeapObject* object = AllocateLinearly(size_in_bytes);
+  if (object != NULL) {
+    if (identity() == CODE_SPACE) {
+      SkipList::Update(object->address(), size_in_bytes);
+    }
+    return object;
+  }
+
+  object = free_list_.Allocate(size_in_bytes);
+  if (object != NULL) {
+    if (identity() == CODE_SPACE) {
+      SkipList::Update(object->address(), size_in_bytes);
+    }
+    return object;
+  }
 
   object = SlowAllocateRaw(size_in_bytes);
-  if (object != NULL) return object;
-
-  return Failure::RetryAfterGC(identity());
-}
-
-
-// Reallocating (and promoting) objects during a compacting collection.
-MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
-  ASSERT(HasBeenSetup());
-  ASSERT_OBJECT_SIZE(size_in_bytes);
-  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
-  if (object != NULL) return object;
-
-  object = SlowMCAllocateRaw(size_in_bytes);
-  if (object != NULL) return object;
+  if (object != NULL) {
+    if (identity() == CODE_SPACE) {
+      SkipList::Update(object->address(), size_in_bytes);
+    }
+    return object;
+  }
 
   return Failure::RetryAfterGC(identity());
 }
@@ -437,27 +292,29 @@
 // -----------------------------------------------------------------------------
 // NewSpace
 
-MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
-                                           AllocationInfo* alloc_info) {
-  Address new_top = alloc_info->top + size_in_bytes;
-  if (new_top > alloc_info->limit) return Failure::RetryAfterGC();
 
-  Object* obj = HeapObject::FromAddress(alloc_info->top);
-  alloc_info->top = new_top;
-#ifdef DEBUG
-  SemiSpace* space =
-      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
-  ASSERT(space->low() <= alloc_info->top
-         && alloc_info->top <= space->high()
-         && alloc_info->limit == space->high());
-#endif
+MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
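+  // Bump-pointer allocation: take the fast path while the request fits in
+  // the linear allocation area, otherwise fall back to SlowAllocateRaw.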
+  Address old_top = allocation_info_.top;
+  if (allocation_info_.limit - old_top < size_in_bytes) {
+    return SlowAllocateRaw(size_in_bytes);
+  }
+
+  Object* obj = HeapObject::FromAddress(allocation_info_.top);
+  allocation_info_.top += size_in_bytes;
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
   return obj;
 }
 
 
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
+  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+  return static_cast<LargePage*>(chunk);
+}
+
+
 intptr_t LargeObjectSpace::Available() {
-  return LargeObjectChunk::ObjectSizeFor(
-      heap()->isolate()->memory_allocator()->Available());
+  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
 }
 
 
@@ -467,16 +324,23 @@
   ASSERT(string->IsSeqString());
   ASSERT(string->address() + StringType::SizeFor(string->length()) ==
          allocation_info_.top);
+  Address old_top = allocation_info_.top;
   allocation_info_.top =
       string->address() + StringType::SizeFor(length);
   string->set_length(length);
+  if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
+    int delta = static_cast<int>(old_top - allocation_info_.top);
+    MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
+  }
 }
 
 
 bool FreeListNode::IsFreeListNode(HeapObject* object) {
-  return object->map() == HEAP->raw_unchecked_byte_array_map()
-      || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
-      || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
+  Map* map = object->map();
+  Heap* heap = object->GetHeap();
+  return map == heap->raw_unchecked_free_space_map()
+      || map == heap->raw_unchecked_one_pointer_filler_map()
+      || map == heap->raw_unchecked_two_pointer_filler_map();
 }
 
 } }  // namespace v8::internal
diff --git a/src/spaces.cc b/src/spaces.cc
index 97c6d2a..defe352 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -35,112 +35,87 @@
 namespace v8 {
 namespace internal {
 
-// For contiguous spaces, top should be in the space (or at the end) and limit
-// should be the end of the space.
-#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
-  ASSERT((space).low() <= (info).top                  \
-         && (info).top <= (space).high()              \
-         && (info).limit == (space).high())
 
 // ----------------------------------------------------------------------------
 // HeapObjectIterator
 
 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
-  Initialize(space->bottom(), space->top(), NULL);
+  // You can't actually iterate over the anchor page.  It is not a real page,
+  // just an anchor for the doubly linked page list.  Initialize as if we have
+  // reached the end of the anchor page, then the first iteration will move on
+  // to the first page.
+  Initialize(space,
+             NULL,
+             NULL,
+             kAllPagesInSpace,
+             NULL);
 }
 
 
 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                        HeapObjectCallback size_func) {
-  Initialize(space->bottom(), space->top(), size_func);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
-  Initialize(start, space->top(), NULL);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
-                                       HeapObjectCallback size_func) {
-  Initialize(start, space->top(), size_func);
+  // You can't actually iterate over the anchor page.  It is not a real page,
+  // just an anchor for the doubly linked page list.  Initialize the current
+  // address and end as NULL, then the first iteration will move on
+  // to the first page.
+  Initialize(space,
+             NULL,
+             NULL,
+             kAllPagesInSpace,
+             size_func);
 }
 
 
 HeapObjectIterator::HeapObjectIterator(Page* page,
                                        HeapObjectCallback size_func) {
-  Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
+  Space* owner = page->owner();
+  ASSERT(owner == HEAP->old_pointer_space() ||
+         owner == HEAP->old_data_space() ||
+         owner == HEAP->map_space() ||
+         owner == HEAP->cell_space() ||
+         owner == HEAP->code_space());
+  Initialize(reinterpret_cast<PagedSpace*>(owner),
+             page->area_start(),
+             page->area_end(),
+             kOnePageOnly,
+             size_func);
+  ASSERT(page->WasSweptPrecisely());
 }
 
 
-void HeapObjectIterator::Initialize(Address cur, Address end,
+void HeapObjectIterator::Initialize(PagedSpace* space,
+                                    Address cur, Address end,
+                                    HeapObjectIterator::PageMode mode,
                                     HeapObjectCallback size_f) {
+  // Check that we actually can iterate this space.
+  ASSERT(!space->was_swept_conservatively());
+
+  space_ = space;
   cur_addr_ = cur;
-  end_addr_ = end;
-  end_page_ = Page::FromAllocationTop(end);
+  cur_end_ = end;
+  page_mode_ = mode;
   size_func_ = size_f;
-  Page* p = Page::FromAllocationTop(cur_addr_);
-  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
-
-#ifdef DEBUG
-  Verify();
-#endif
 }
 
 
-HeapObject* HeapObjectIterator::FromNextPage() {
-  if (cur_addr_ == end_addr_) return NULL;
-
-  Page* cur_page = Page::FromAllocationTop(cur_addr_);
-  cur_page = cur_page->next_page();
-  ASSERT(cur_page->is_valid());
-
-  cur_addr_ = cur_page->ObjectAreaStart();
-  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
-
-  if (cur_addr_ == end_addr_) return NULL;
-  ASSERT(cur_addr_ < cur_limit_);
-#ifdef DEBUG
-  Verify();
-#endif
-  return FromCurrentPage();
-}
-
-
-#ifdef DEBUG
-void HeapObjectIterator::Verify() {
-  Page* p = Page::FromAllocationTop(cur_addr_);
-  ASSERT(p == Page::FromAllocationTop(cur_limit_));
-  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// PageIterator
-
-PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
-  prev_page_ = NULL;
-  switch (mode) {
-    case PAGES_IN_USE:
-      stop_page_ = space->AllocationTopPage();
-      break;
-    case PAGES_USED_BY_MC:
-      stop_page_ = space->MCRelocationTopPage();
-      break;
-    case ALL_PAGES:
-#ifdef DEBUG
-      // Verify that the cached last page in the space is actually the
-      // last page.
-      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
-        if (!p->next_page()->is_valid()) {
-          ASSERT(space->last_page_ == p);
-        }
-      }
-#endif
-      stop_page_ = space->last_page_;
-      break;
+// We have hit the end of the current page's object area and should advance
+// to the next block of objects, i.e. to the start of the next page.
+bool HeapObjectIterator::AdvanceToNextPage() {
+  ASSERT(cur_addr_ == cur_end_);
+  if (page_mode_ == kOnePageOnly) return false;
+  Page* cur_page;
+  if (cur_addr_ == NULL) {
+    cur_page = space_->anchor();
+  } else {
+    cur_page = Page::FromAddress(cur_addr_ - 1);
+    ASSERT(cur_addr_ == cur_page->area_end());
   }
+  cur_page = cur_page->next_page();
+  if (cur_page == space_->anchor()) return false;
+  cur_addr_ = cur_page->area_start();
+  cur_end_ = cur_page->area_end();
+  ASSERT(cur_page->WasSweptPrecisely());
+  return true;
 }
 
 
@@ -157,7 +132,7 @@
 }
 
 
-bool CodeRange::Setup(const size_t requested) {
+bool CodeRange::SetUp(const size_t requested) {
   ASSERT(code_range_ == NULL);
 
   code_range_ = new VirtualMemory(requested);
@@ -171,7 +146,12 @@
   // We are sure that we have mapped a block of requested addresses.
   ASSERT(code_range_->size() == requested);
   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
-  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
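+  // MemoryChunks must start at kAlignment boundaries, so round the start of
+  // the reserved range up and shrink the usable size accordingly.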
+  Address base = reinterpret_cast<Address>(code_range_->address());
+  Address aligned_base =
+      RoundUp(reinterpret_cast<Address>(code_range_->address()),
+              MemoryChunk::kAlignment);
+  size_t size = code_range_->size() - (aligned_base - base);
+  allocation_list_.Add(FreeBlock(aligned_base, size));
   current_allocation_block_index_ = 0;
   return true;
 }
@@ -228,7 +208,8 @@
 
 
 
-void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
+Address CodeRange::AllocateRawMemory(const size_t requested,
+                                     size_t* allocated) {
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
   if (requested > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.  This function call may
@@ -236,14 +217,19 @@
     GetNextAllocationBlock(requested);
   }
   // Commit the requested memory at the start of the current allocation block.
-  *allocated = RoundUp(requested, Page::kPageSize);
+  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
-  if (*allocated >= current.size - Page::kPageSize) {
+  if (aligned_requested >= (current.size - Page::kPageSize)) {
     // Don't leave a small free block, useless for a large object or chunk.
     *allocated = current.size;
+  } else {
+    *allocated = aligned_requested;
   }
   ASSERT(*allocated <= current.size);
-  if (!code_range_->Commit(current.start, *allocated, true)) {
+  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
+  if (!MemoryAllocator::CommitCodePage(code_range_,
+                                       current.start,
+                                       *allocated)) {
     *allocated = 0;
     return NULL;
   }
@@ -256,7 +242,8 @@
 }
 
 
-void CodeRange::FreeRawMemory(void* address, size_t length) {
+void CodeRange::FreeRawMemory(Address address, size_t length) {
+  ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
   code_range_->Uncommit(address, length);
 }
@@ -274,149 +261,379 @@
 // MemoryAllocator
 //
 
-// 270 is an estimate based on the static default heap size of a pair of 256K
-// semispaces and a 64M old generation.
-const int kEstimatedNumberOfChunks = 270;
-
-
 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
-      size_executable_(0),
-      initial_chunk_(NULL),
-      chunks_(kEstimatedNumberOfChunks),
-      free_chunk_ids_(kEstimatedNumberOfChunks),
-      max_nof_chunks_(0),
-      top_(0) {
+      size_executable_(0) {
 }
 
 
-void MemoryAllocator::Push(int free_chunk_id) {
-  ASSERT(max_nof_chunks_ > 0);
-  ASSERT(top_ < max_nof_chunks_);
-  free_chunk_ids_[top_++] = free_chunk_id;
-}
-
-
-int MemoryAllocator::Pop() {
-  ASSERT(top_ > 0);
-  return free_chunk_ids_[--top_];
-}
-
-
-bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
+bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   ASSERT_GE(capacity_, capacity_executable_);
 
-  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
-  // space is always in the unit of a chunk (kChunkSize) except the last
-  // expansion.
-  //
-  // Due to alignment, allocated space might be one page less than required
-  // number (kPagesPerChunk) of pages for old spaces.
-  //
-  // Reserve two chunk ids for semispaces, one for map space, one for old
-  // space, and one for code space.
-  max_nof_chunks_ =
-      static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
-  if (max_nof_chunks_ > kMaxNofChunks) return false;
-
   size_ = 0;
   size_executable_ = 0;
-  ChunkInfo info;  // uninitialized element.
-  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
-    chunks_.Add(info);
-    free_chunk_ids_.Add(i);
-  }
-  top_ = max_nof_chunks_;
+
   return true;
 }
 
 
 void MemoryAllocator::TearDown() {
-  for (int i = 0; i < max_nof_chunks_; i++) {
-    if (chunks_[i].address() != NULL) DeleteChunk(i);
-  }
-  chunks_.Clear();
-  free_chunk_ids_.Clear();
-
-  if (initial_chunk_ != NULL) {
-    LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
-    delete initial_chunk_;
-    initial_chunk_ = NULL;
-  }
-
-  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
-  top_ = 0;
+  // Check that spaces were torn down before MemoryAllocator.
+  ASSERT(size_ == 0);
+  // TODO(gc) this will be true again when we fix FreeMemory.
+  // ASSERT(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;
-  size_ = 0;
-  max_nof_chunks_ = 0;
 }
 
 
-void* MemoryAllocator::AllocateRawMemory(const size_t requested,
-                                         size_t* allocated,
-                                         Executability executable) {
-  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
-    return NULL;
+void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
+                                 Executability executable) {
+  // TODO(gc) make code_range part of memory allocator?
+  ASSERT(reservation->IsReserved());
+  size_t size = reservation->size();
+  ASSERT(size_ >= size);
+  size_ -= size;
+
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (executable == EXECUTABLE) {
+    ASSERT(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+  // Code which is part of the code-range does not have its own VirtualMemory.
+  ASSERT(!isolate_->code_range()->contains(
+      static_cast<Address>(reservation->address())));
+  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+  reservation->Release();
+}
+
+
+void MemoryAllocator::FreeMemory(Address base,
+                                 size_t size,
+                                 Executability executable) {
+  // TODO(gc) make code_range part of memory allocator?
+  ASSERT(size_ >= size);
+  size_ -= size;
+
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (executable == EXECUTABLE) {
+    ASSERT(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
+    ASSERT(executable == EXECUTABLE);
+    isolate_->code_range()->FreeRawMemory(base, size);
+  } else {
+    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+    bool result = VirtualMemory::ReleaseRegion(base, size);
+    USE(result);
+    ASSERT(result);
+  }
+}
+
+
+Address MemoryAllocator::ReserveAlignedMemory(size_t size,
+                                              size_t alignment,
+                                              VirtualMemory* controller) {
+  VirtualMemory reservation(size, alignment);
+
+  if (!reservation.IsReserved()) return NULL;
+  size_ += reservation.size();
+  Address base = RoundUp(static_cast<Address>(reservation.address()),
+                         alignment);
+  controller->TakeControl(&reservation);
+  return base;
+}
+
+
+Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+                                               size_t alignment,
+                                               Executability executable,
+                                               VirtualMemory* controller) {
+  VirtualMemory reservation;
+  Address base = ReserveAlignedMemory(size, alignment, &reservation);
+  if (base == NULL) return NULL;
+
+  if (executable == EXECUTABLE) {
+    CommitCodePage(&reservation, base, size);
+  } else {
+    if (!reservation.Commit(base,
+                            size,
+                            executable == EXECUTABLE)) {
+      return NULL;
+    }
   }
 
-  void* mem;
+  controller->TakeControl(&reservation);
+  return base;
+}
+
+
+void Page::InitializeAsAnchor(PagedSpace* owner) {
+  set_owner(owner);
+  set_prev_page(this);
+  set_next_page(this);
+}
+
+
+NewSpacePage* NewSpacePage::Initialize(Heap* heap,
+                                       Address start,
+                                       SemiSpace* semi_space) {
+  Address area_start = start + NewSpacePage::kObjectStartOffset;
+  Address area_end = start + Page::kPageSize;
+
+  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
+                                               start,
+                                               Page::kPageSize,
+                                               area_start,
+                                               area_end,
+                                               NOT_EXECUTABLE,
+                                               semi_space);
+  chunk->set_next_chunk(NULL);
+  chunk->set_prev_chunk(NULL);
+  chunk->initialize_scan_on_scavenge(true);
+  bool in_to_space = (semi_space->id() != kFromSpace);
+  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
+                             : MemoryChunk::IN_FROM_SPACE);
+  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
+                                       : MemoryChunk::IN_TO_SPACE));
+  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+  heap->incremental_marking()->SetNewSpacePageFlags(page);
+  return page;
+}
+
+
+void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
+  set_owner(semi_space);
+  set_next_chunk(this);
+  set_prev_chunk(this);
+  // Clearing all flags marks this fake anchor page as not being in
+  // new space; all real new-space pages have the new-space flags set.
+  SetFlags(0, ~0);
+}
+
+
+MemoryChunk* MemoryChunk::Initialize(Heap* heap,
+                                     Address base,
+                                     size_t size,
+                                     Address area_start,
+                                     Address area_end,
+                                     Executability executable,
+                                     Space* owner) {
+  MemoryChunk* chunk = FromAddress(base);
+
+  ASSERT(base == chunk->address());
+
+  chunk->heap_ = heap;
+  chunk->size_ = size;
+  chunk->area_start_ = area_start;
+  chunk->area_end_ = area_end;
+  chunk->flags_ = 0;
+  chunk->set_owner(owner);
+  chunk->InitializeReservedMemory();
+  chunk->slots_buffer_ = NULL;
+  chunk->skip_list_ = NULL;
+  chunk->ResetLiveBytes();
+  Bitmap::Clear(chunk);
+  chunk->initialize_scan_on_scavenge(false);
+  chunk->SetFlag(WAS_SWEPT_PRECISELY);
+
+  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
+  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
+
   if (executable == EXECUTABLE) {
+    chunk->SetFlag(IS_EXECUTABLE);
+  }
+
+  if (owner == heap->old_data_space()) {
+    chunk->SetFlag(CONTAINS_ONLY_DATA);
+  }
+
+  return chunk;
+}
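+
+// Editorial note (assumption, not from the original change): the OFFSET_OF
+// asserts above pin the chunk header layout; fast paths that compute flag
+// and live-byte field addresses directly from a chunk base presumably rely
+// on these fixed offsets.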
+
+
+void MemoryChunk::InsertAfter(MemoryChunk* other) {
+  next_chunk_ = other->next_chunk_;
+  prev_chunk_ = other;
+  other->next_chunk_->prev_chunk_ = this;
+  other->next_chunk_ = this;
+}
+
+
+void MemoryChunk::Unlink() {
+  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
+    heap_->decrement_scan_on_scavenge_pages();
+    ClearFlag(SCAN_ON_SCAVENGE);
+  }
+  next_chunk_->prev_chunk_ = prev_chunk_;
+  prev_chunk_->next_chunk_ = next_chunk_;
+  prev_chunk_ = NULL;
+  next_chunk_ = NULL;
+}
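+
+// Illustrative sketch (editorial, not part of the original change) of the
+// list invariant kept by InsertAfter() and Unlink(): chunks form a circular
+// doubly-linked list threaded through an anchor sentinel, so neither
+// operation needs NULL checks.
+//
+//   // anchor <-> c1 <-> c2 <-> anchor   (circular)
+//   c3->InsertAfter(c1);  // anchor <-> c1 <-> c3 <-> c2 <-> anchor
+//   c3->Unlink();         // anchor <-> c1 <-> c2 <-> anchor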
+
+
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+                                            Executability executable,
+                                            Space* owner) {
+  size_t chunk_size;
+  Heap* heap = isolate_->heap();
+  Address base = NULL;
+  VirtualMemory reservation;
+  Address area_start = NULL;
+  Address area_end = NULL;
+  if (executable == EXECUTABLE) {
+    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+                         OS::CommitPageSize()) + CodePageGuardSize();
+
     // Check executable memory limit.
-    if (size_executable_ + requested >
-        static_cast<size_t>(capacity_executable_)) {
+    if (size_executable_ + chunk_size > capacity_executable_) {
       LOG(isolate_,
           StringEvent("MemoryAllocator::AllocateRawMemory",
                       "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }
+
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
-      mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
+                       MemoryChunk::kAlignment));
+      if (base == NULL) return NULL;
+      size_ += chunk_size;
+      // Update executable memory size.
+      size_executable_ += chunk_size;
     } else {
-      mem = OS::Allocate(requested, allocated, true);
+      base = AllocateAlignedMemory(chunk_size,
+                                   MemoryChunk::kAlignment,
+                                   executable,
+                                   &reservation);
+      if (base == NULL) return NULL;
+      // Update executable memory size.
+      size_executable_ += reservation.size();
     }
-    // Update executable memory size.
-    size_executable_ += static_cast<int>(*allocated);
-  } else {
-    mem = OS::Allocate(requested, allocated, false);
-  }
-  int alloced = static_cast<int>(*allocated);
-  size_ += alloced;
 
 #ifdef DEBUG
-  ZapBlock(reinterpret_cast<Address>(mem), alloced);
+    ZapBlock(base, CodePageGuardStartOffset());
+    ZapBlock(base + CodePageAreaStartOffset(), body_size);
 #endif
-  isolate_->counters()->memory_allocated()->Increment(alloced);
-  return mem;
+    area_start = base + CodePageAreaStartOffset();
+    area_end = area_start + body_size;
+  } else {
+    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+    base = AllocateAlignedMemory(chunk_size,
+                                 MemoryChunk::kAlignment,
+                                 executable,
+                                 &reservation);
+
+    if (base == NULL) return NULL;
+
+#ifdef DEBUG
+    ZapBlock(base, chunk_size);
+#endif
+
+    area_start = base + Page::kObjectStartOffset;
+    area_end = base + chunk_size;
+  }
+
+  isolate_->counters()->memory_allocated()->
+      Increment(static_cast<int>(chunk_size));
+
+  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
+  if (owner != NULL) {
+    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
+  }
+
+  MemoryChunk* result = MemoryChunk::Initialize(heap,
+                                                base,
+                                                chunk_size,
+                                                area_start,
+                                                area_end,
+                                                executable,
+                                                owner);
+  result->set_reserved_memory(&reservation);
+  return result;
 }
 
 
-void MemoryAllocator::FreeRawMemory(void* mem,
-                                    size_t length,
+Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
                                     Executability executable) {
-#ifdef DEBUG
-  // Do not try to zap the guard page.
-  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
-  ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size);
-#endif
-  if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
-    isolate_->code_range()->FreeRawMemory(mem, length);
-  } else {
-    OS::Free(mem, length);
-  }
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
-  size_ -= static_cast<int>(length);
-  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
+  MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
+                                     executable,
+                                     owner);
 
-  ASSERT(size_ >= 0);
-  ASSERT(size_executable_ >= 0);
+  if (chunk == NULL) return NULL;
+
+  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
+}
+
+
+LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
+                                              Executability executable,
+                                              Space* owner) {
+  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+  if (chunk == NULL) return NULL;
+  return LargePage::Initialize(isolate_->heap(), chunk);
+}
+
+
+void MemoryAllocator::Free(MemoryChunk* chunk) {
+  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
+  if (chunk->owner() != NULL) {
+    ObjectSpace space =
+        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
+    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
+  }
+
+  isolate_->heap()->RememberUnmappedPage(
+      reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
+
+  delete chunk->slots_buffer();
+  delete chunk->skip_list();
+
+  VirtualMemory* reservation = chunk->reserved_memory();
+  if (reservation->IsReserved()) {
+    FreeMemory(reservation, chunk->executable());
+  } else {
+    FreeMemory(chunk->address(),
+               chunk->size(),
+               chunk->executable());
+  }
+}
+
+
+bool MemoryAllocator::CommitBlock(Address start,
+                                  size_t size,
+                                  Executability executable) {
+  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
+#ifdef DEBUG
+  ZapBlock(start, size);
+#endif
+  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
+  return true;
+}
+
+
+bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
+  if (!VirtualMemory::UncommitRegion(start, size)) return false;
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+  return true;
+}
+
+
+void MemoryAllocator::ZapBlock(Address start, size_t size) {
+  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
+    Memory::Address_at(start + s) = kZapValue;
+  }
 }
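+
+// Editorial note: ZapBlock() poisons every pointer-aligned slot with
+// kZapValue (a recognizable bit pattern, e.g. 0xdeadbeef on 32-bit targets;
+// the exact value is an assumption here) so that debug builds fail fast on
+// reads of freed or uninitialized memory.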
 
 
@@ -465,269 +682,6 @@
   UNREACHABLE();
 }
 
-void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
-  ASSERT(initial_chunk_ == NULL);
-
-  initial_chunk_ = new VirtualMemory(requested);
-  CHECK(initial_chunk_ != NULL);
-  if (!initial_chunk_->IsReserved()) {
-    delete initial_chunk_;
-    initial_chunk_ = NULL;
-    return NULL;
-  }
-
-  // We are sure that we have mapped a block of requested addresses.
-  ASSERT(initial_chunk_->size() == requested);
-  LOG(isolate_,
-      NewEvent("InitialChunk", initial_chunk_->address(), requested));
-  size_ += static_cast<int>(requested);
-  return initial_chunk_->address();
-}
-
-
-static int PagesInChunk(Address start, size_t size) {
-  // The first page starts on the first page-aligned address from start onward
-  // and the last page ends on the last page-aligned address before
-  // start+size.  Page::kPageSize is a power of two so we can divide by
-  // shifting.
-  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
-      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
-}
-
-
-Page* MemoryAllocator::AllocatePages(int requested_pages,
-                                     int* allocated_pages,
-                                     PagedSpace* owner) {
-  if (requested_pages <= 0) return Page::FromAddress(NULL);
-  size_t chunk_size = requested_pages * Page::kPageSize;
-
-  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
-  if (chunk == NULL) return Page::FromAddress(NULL);
-  LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
-
-  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
-
-  // We may 'lose' a page due to alignment.
-  ASSERT(*allocated_pages >= kPagesPerChunk - 1);
-
-  size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
-
-  // Check that we got at least one page that we can use.
-  if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
-    FreeRawMemory(chunk,
-                  chunk_size,
-                  owner->executable());
-    LOG(isolate_, DeleteEvent("PagedChunk", chunk));
-    return Page::FromAddress(NULL);
-  }
-
-  if (guard_size != 0) {
-    OS::Guard(chunk, guard_size);
-    chunk_size -= guard_size;
-    chunk = static_cast<Address>(chunk) + guard_size;
-    --*allocated_pages;
-  }
-
-  int chunk_id = Pop();
-  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
-
-  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
-  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-  Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
-
-  return new_pages;
-}
-
-
-Page* MemoryAllocator::CommitPages(Address start, size_t size,
-                                   PagedSpace* owner, int* num_pages) {
-  ASSERT(start != NULL);
-  *num_pages = PagesInChunk(start, size);
-  ASSERT(*num_pages > 0);
-  ASSERT(initial_chunk_ != NULL);
-  ASSERT(InInitialChunk(start));
-  ASSERT(InInitialChunk(start + size - 1));
-  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
-    return Page::FromAddress(NULL);
-  }
-#ifdef DEBUG
-  ZapBlock(start, size);
-#endif
-  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
-
-  // So long as we correctly overestimated the number of chunks we should not
-  // run out of chunk ids.
-  CHECK(!OutOfChunkIds());
-  int chunk_id = Pop();
-  chunks_[chunk_id].init(start, size, owner);
-  return InitializePagesInChunk(chunk_id, *num_pages, owner);
-}
-
-
-bool MemoryAllocator::CommitBlock(Address start,
-                                  size_t size,
-                                  Executability executable) {
-  ASSERT(start != NULL);
-  ASSERT(size > 0);
-  ASSERT(initial_chunk_ != NULL);
-  ASSERT(InInitialChunk(start));
-  ASSERT(InInitialChunk(start + size - 1));
-
-  if (!initial_chunk_->Commit(start, size, executable)) return false;
-#ifdef DEBUG
-  ZapBlock(start, size);
-#endif
-  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
-  return true;
-}
-
-
-bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
-  ASSERT(start != NULL);
-  ASSERT(size > 0);
-  ASSERT(initial_chunk_ != NULL);
-  ASSERT(InInitialChunk(start));
-  ASSERT(InInitialChunk(start + size - 1));
-
-  if (!initial_chunk_->Uncommit(start, size)) return false;
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-  return true;
-}
-
-
-void MemoryAllocator::ZapBlock(Address start, size_t size) {
-  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
-    Memory::Address_at(start + s) = kZapValue;
-  }
-}
-
-
-Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
-                                              PagedSpace* owner) {
-  ASSERT(IsValidChunk(chunk_id));
-  ASSERT(pages_in_chunk > 0);
-
-  Address chunk_start = chunks_[chunk_id].address();
-
-  Address low = RoundUp(chunk_start, Page::kPageSize);
-
-#ifdef DEBUG
-  size_t chunk_size = chunks_[chunk_id].size();
-  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
-  ASSERT(pages_in_chunk <=
-        ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
-#endif
-
-  Address page_addr = low;
-  for (int i = 0; i < pages_in_chunk; i++) {
-    Page* p = Page::FromAddress(page_addr);
-    p->heap_ = owner->heap();
-    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
-    p->InvalidateWatermark(true);
-    p->SetIsLargeObjectPage(false);
-    p->SetAllocationWatermark(p->ObjectAreaStart());
-    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
-    page_addr += Page::kPageSize;
-  }
-
-  // Set the next page of the last page to 0.
-  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
-  last_page->opaque_header = OffsetFrom(0) | chunk_id;
-
-  return Page::FromAddress(low);
-}
-
-
-Page* MemoryAllocator::FreePages(Page* p) {
-  if (!p->is_valid()) return p;
-
-  // Find the first page in the same chunk as 'p'
-  Page* first_page = FindFirstPageInSameChunk(p);
-  Page* page_to_return = Page::FromAddress(NULL);
-
-  if (p != first_page) {
-    // Find the last page in the same chunk as 'prev'.
-    Page* last_page = FindLastPageInSameChunk(p);
-    first_page = GetNextPage(last_page);  // first page in next chunk
-
-    // set the next_page of last_page to NULL
-    SetNextPage(last_page, Page::FromAddress(NULL));
-    page_to_return = p;  // return 'p' when exiting
-  }
-
-  while (first_page->is_valid()) {
-    int chunk_id = GetChunkId(first_page);
-    ASSERT(IsValidChunk(chunk_id));
-
-    // Find the first page of the next chunk before deleting this chunk.
-    first_page = GetNextPage(FindLastPageInSameChunk(first_page));
-
-    // Free the current chunk.
-    DeleteChunk(chunk_id);
-  }
-
-  return page_to_return;
-}
-
-
-void MemoryAllocator::FreeAllPages(PagedSpace* space) {
-  for (int i = 0, length = chunks_.length(); i < length; i++) {
-    if (chunks_[i].owner() == space) {
-      DeleteChunk(i);
-    }
-  }
-}
-
-
-void MemoryAllocator::DeleteChunk(int chunk_id) {
-  ASSERT(IsValidChunk(chunk_id));
-
-  ChunkInfo& c = chunks_[chunk_id];
-
-  // We cannot free a chunk contained in the initial chunk because it was not
-  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
-  // memory.
-  if (InInitialChunk(c.address())) {
-    // TODO(1240712): VirtualMemory::Uncommit has a return value which
-    // is ignored here.
-    initial_chunk_->Uncommit(c.address(), c.size());
-    Counters* counters = isolate_->counters();
-    counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
-  } else {
-    LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
-    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
-    size_t size = c.size();
-    size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
-    FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
-    PerformAllocationCallback(space, kAllocationActionFree, size);
-  }
-  c.init(NULL, 0, NULL);
-  Push(chunk_id);
-}
-
-
-Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
-  int chunk_id = GetChunkId(p);
-  ASSERT(IsValidChunk(chunk_id));
-
-  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
-  return Page::FromAddress(low);
-}
-
-
-Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
-  int chunk_id = GetChunkId(p);
-  ASSERT(IsValidChunk(chunk_id));
-
-  Address chunk_start = chunks_[chunk_id].address();
-  size_t chunk_size = chunks_[chunk_id].size();
-
-  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
-  ASSERT(chunk_start <= p->address() && p->address() < high);
-
-  return Page::FromAddress(high - Page::kPageSize);
-}
-
 
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
@@ -740,74 +694,75 @@
 #endif
 
 
-void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
-                                                 Page** first_page,
-                                                 Page** last_page,
-                                                 Page** last_page_in_use) {
-  Page* first = NULL;
-  Page* last = NULL;
-
-  for (int i = 0, length = chunks_.length(); i < length; i++) {
-    ChunkInfo& chunk = chunks_[i];
-
-    if (chunk.owner() == space) {
-      if (first == NULL) {
-        Address low = RoundUp(chunk.address(), Page::kPageSize);
-        first = Page::FromAddress(low);
-      }
-      last = RelinkPagesInChunk(i,
-                                chunk.address(),
-                                chunk.size(),
-                                last,
-                                last_page_in_use);
-    }
-  }
-
-  if (first_page != NULL) {
-    *first_page = first;
-  }
-
-  if (last_page != NULL) {
-    *last_page = last;
-  }
+int MemoryAllocator::CodePageGuardStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
 }
 
 
-Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
-                                          Address chunk_start,
-                                          size_t chunk_size,
-                                          Page* prev,
-                                          Page** last_page_in_use) {
-  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
-  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
-
-  if (prev->is_valid()) {
-    SetNextPage(prev, Page::FromAddress(page_addr));
-  }
-
-  for (int i = 0; i < pages_in_chunk; i++) {
-    Page* p = Page::FromAddress(page_addr);
-    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
-    page_addr += Page::kPageSize;
-
-    p->InvalidateWatermark(true);
-    if (p->WasInUseBeforeMC()) {
-      *last_page_in_use = p;
-    }
-  }
-
-  // Set the next page of the last page to 0.
-  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
-  last_page->opaque_header = OffsetFrom(0) | chunk_id;
-
-  if (last_page->WasInUseBeforeMC()) {
-    *last_page_in_use = last_page;
-  }
-
-  return last_page;
+int MemoryAllocator::CodePageGuardSize() {
+  return static_cast<int>(OS::CommitPageSize());
 }
 
 
+int MemoryAllocator::CodePageAreaStartOffset() {
+  // The allocatable area starts after the header and the guard page
+  // that follows it.
+  return CodePageGuardStartOffset() + CodePageGuardSize();
+}
+
+
+int MemoryAllocator::CodePageAreaEndOffset() {
+  // We are guarding code pages: the last OS page will be protected as
+  // non-writable.
+  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
+}
+
+
+bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
+                                     Address start,
+                                     size_t size) {
+  // Commit page header (not executable).
+  if (!vm->Commit(start,
+                  CodePageGuardStartOffset(),
+                  false)) {
+    return false;
+  }
+
+  // Create guard page after the header.
+  if (!vm->Guard(start + CodePageGuardStartOffset())) {
+    return false;
+  }
+
+  // Commit page body (executable).
+  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
+  if (!vm->Commit(start + CodePageAreaStartOffset(),
+                  area_size,
+                  true)) {
+    return false;
+  }
+
+  // Create guard page after the allocatable area.
+  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
+    return false;
+  }
+
+  return true;
+}
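+
+// Editorial layout sketch of a code page committed by CommitCodePage(),
+// derived from the offset helpers above:
+//
+//   start
+//   +-------------------------------------+
+//   | header (committed, non-executable)  |  CodePageGuardStartOffset()
+//   +-------------------------------------+
+//   | guard page (no access)              |  CodePageGuardSize()
+//   +-------------------------------------+  <- start + CodePageAreaStartOffset()
+//   | allocatable area (committed, exec)  |  area_size
+//   +-------------------------------------+
+//   | guard page (no access)              |  CodePageGuardSize()
+//   +-------------------------------------+
+//   start + size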
+
+
+// -----------------------------------------------------------------------------
+// MemoryChunk implementation
+
+void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
+    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+  }
+  chunk->IncrementLiveBytes(by);
+}
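+
+// Editorial note: the negative increment above shrinks the owning space's
+// unswept-free-bytes estimate, since bytes that just became live on an
+// unswept page can no longer be recovered by the sweeper.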
+
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
 
@@ -815,296 +770,171 @@
                        intptr_t max_capacity,
                        AllocationSpace id,
                        Executability executable)
-    : Space(heap, id, executable) {
+    : Space(heap, id, executable),
+      free_list_(this),
+      was_swept_conservatively_(false),
+      first_unswept_page_(Page::FromAddress(NULL)),
+      unswept_free_bytes_(0) {
+  if (id == CODE_SPACE) {
+    area_size_ = heap->isolate()->memory_allocator()->
+        CodePageAreaSize();
+  } else {
+    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
+  }
   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
-                  * Page::kObjectAreaSize;
+      * AreaSize();
   accounting_stats_.Clear();
 
   allocation_info_.top = NULL;
   allocation_info_.limit = NULL;
 
-  mc_forwarding_info_.top = NULL;
-  mc_forwarding_info_.limit = NULL;
+  anchor_.InitializeAsAnchor(this);
 }
 
 
-bool PagedSpace::Setup(Address start, size_t size) {
-  if (HasBeenSetup()) return false;
-
-  int num_pages = 0;
-  // Try to use the virtual memory range passed to us.  If it is too small to
-  // contain at least one page, ignore it and allocate instead.
-  int pages_in_chunk = PagesInChunk(start, size);
-  if (pages_in_chunk > 0) {
-    first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
-        RoundUp(start, Page::kPageSize),
-        Page::kPageSize * pages_in_chunk,
-        this, &num_pages);
-  } else {
-    int requested_pages =
-        Min(MemoryAllocator::kPagesPerChunk,
-            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
-    first_page_ =
-        Isolate::Current()->memory_allocator()->AllocatePages(
-            requested_pages, &num_pages, this);
-    if (!first_page_->is_valid()) return false;
-  }
-
-  // We are sure that the first page is valid and that we have at least one
-  // page.
-  ASSERT(first_page_->is_valid());
-  ASSERT(num_pages > 0);
-  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
-  ASSERT(Capacity() <= max_capacity_);
-
-  // Sequentially clear region marks in the newly allocated
-  // pages and cache the current last page in the space.
-  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
-    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    last_page_ = p;
-  }
-
-  // Use first_page_ for allocation.
-  SetAllocationInfo(&allocation_info_, first_page_);
-
-  page_list_is_chunk_ordered_ = true;
-
+bool PagedSpace::SetUp() {
   return true;
 }
 
 
-bool PagedSpace::HasBeenSetup() {
-  return (Capacity() > 0);
+bool PagedSpace::HasBeenSetUp() {
+  return true;
 }
 
 
 void PagedSpace::TearDown() {
-  Isolate::Current()->memory_allocator()->FreeAllPages(this);
-  first_page_ = NULL;
+  PageIterator iterator(this);
+  while (iterator.has_next()) {
+    heap()->isolate()->memory_allocator()->Free(iterator.next());
+  }
+  anchor_.set_next_page(&anchor_);
+  anchor_.set_prev_page(&anchor_);
   accounting_stats_.Clear();
 }
 
 
-void PagedSpace::MarkAllPagesClean() {
-  PageIterator it(this, PageIterator::ALL_PAGES);
-  while (it.has_next()) {
-    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
-  }
-}
-
-
 MaybeObject* PagedSpace::FindObject(Address addr) {
-  // Note: this function can only be called before or after mark-compact GC
-  // because it accesses map pointers.
+  // Note: this function can only be called on precisely swept spaces.
   ASSERT(!heap()->mark_compact_collector()->in_use());
 
   if (!Contains(addr)) return Failure::Exception();
 
   Page* p = Page::FromAddress(addr);
-  ASSERT(IsUsed(p));
-  Address cur = p->ObjectAreaStart();
-  Address end = p->AllocationTop();
-  while (cur < end) {
-    HeapObject* obj = HeapObject::FromAddress(cur);
+  HeapObjectIterator it(p, NULL);
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    Address cur = obj->address();
     Address next = cur + obj->Size();
     if ((cur <= addr) && (addr < next)) return obj;
-    cur = next;
   }
 
   UNREACHABLE();
   return Failure::Exception();
 }
 
-
-bool PagedSpace::IsUsed(Page* page) {
-  PageIterator it(this, PageIterator::PAGES_IN_USE);
-  while (it.has_next()) {
-    if (page == it.next()) return true;
-  }
-  return false;
-}
-
-
-void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
-  alloc_info->top = p->ObjectAreaStart();
-  alloc_info->limit = p->ObjectAreaEnd();
-  ASSERT(alloc_info->VerifyPagedAllocation());
-}
-
-
-void PagedSpace::MCResetRelocationInfo() {
-  // Set page indexes.
-  int i = 0;
-  PageIterator it(this, PageIterator::ALL_PAGES);
-  while (it.has_next()) {
-    Page* p = it.next();
-    p->mc_page_index = i++;
-  }
-
-  // Set mc_forwarding_info_ to the first page in the space.
-  SetAllocationInfo(&mc_forwarding_info_, first_page_);
-  // All the bytes in the space are 'available'.  We will rediscover
-  // allocated and wasted bytes during GC.
-  accounting_stats_.Reset();
-}
-
-
-int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
-#ifdef DEBUG
-  // The Contains function considers the address at the beginning of a
-  // page in the page, MCSpaceOffsetForAddress considers it is in the
-  // previous page.
-  if (Page::IsAlignedToPageSize(addr)) {
-    ASSERT(Contains(addr - kPointerSize));
-  } else {
-    ASSERT(Contains(addr));
-  }
-#endif
-
-  // If addr is at the end of a page, it belongs to previous page
-  Page* p = Page::IsAlignedToPageSize(addr)
-            ? Page::FromAllocationTop(addr)
-            : Page::FromAddress(addr);
-  int index = p->mc_page_index;
-  return (index * Page::kPageSize) + p->Offset(addr);
-}
-
-
-// Slow case for reallocating and promoting objects during a compacting
-// collection.  This function is not space-specific.
-HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
-  Page* current_page = TopPageOf(mc_forwarding_info_);
-  if (!current_page->next_page()->is_valid()) {
-    if (!Expand(current_page)) {
-      return NULL;
-    }
-  }
-
-  // There are surely more pages in the space now.
-  ASSERT(current_page->next_page()->is_valid());
-  // We do not add the top of page block for current page to the space's
-  // free list---the block may contain live objects so we cannot write
-  // bookkeeping information to it.  Instead, we will recover top of page
-  // blocks when we move objects to their new locations.
-  //
-  // We do however write the allocation pointer to the page.  The encoding
-  // of forwarding addresses is as an offset in terms of live bytes, so we
-  // need quick access to the allocation top of each page to decode
-  // forwarding addresses.
-  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
-  current_page->next_page()->InvalidateWatermark(true);
-  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
-  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
-}
-
-
-bool PagedSpace::Expand(Page* last_page) {
-  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+bool PagedSpace::CanExpand() {
+  ASSERT(max_capacity_ % AreaSize() == 0);
+  ASSERT(Capacity() % AreaSize() == 0);
 
   if (Capacity() == max_capacity_) return false;
 
   ASSERT(Capacity() < max_capacity_);
-  // Last page must be valid and its next page is invalid.
-  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
 
-  int available_pages =
-      static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
-  // We don't want to have to handle small chunks near the end so if there are
-  // not kPagesPerChunk pages available without exceeding the max capacity then
-  // act as if memory has run out.
-  if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
+  // Are we going to exceed capacity for this space?
+  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
 
-  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
-  Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
-      desired_pages, &desired_pages, this);
-  if (!p->is_valid()) return false;
+  return true;
+}
 
-  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
+bool PagedSpace::Expand() {
+  if (!CanExpand()) return false;
+
+  Page* p = heap()->isolate()->memory_allocator()->
+      AllocatePage(this, executable());
+  if (p == NULL) return false;
+
   ASSERT(Capacity() <= max_capacity_);
 
-  heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
-
-  // Sequentially clear region marks of new pages and and cache the
-  // new last page in the space.
-  while (p->is_valid()) {
-    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    last_page_ = p;
-    p = p->next_page();
-  }
+  p->InsertAfter(anchor_.prev_page());
 
   return true;
 }
 
 
-#ifdef DEBUG
 int PagedSpace::CountTotalPages() {
+  PageIterator it(this);
   int count = 0;
-  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
+  while (it.has_next()) {
+    it.next();
     count++;
   }
   return count;
 }
-#endif
 
 
-void PagedSpace::Shrink() {
-  if (!page_list_is_chunk_ordered_) {
-    // We can't shrink space if pages is not chunk-ordered
-    // (see comment for class MemoryAllocator for definition).
-    return;
+void PagedSpace::ReleasePage(Page* page) {
+  ASSERT(page->LiveBytes() == 0);
+  ASSERT(AreaSize() == page->area_size());
+
+  // Adjust list of unswept pages if the page is the head of the list.
+  if (first_unswept_page_ == page) {
+    first_unswept_page_ = page->next_page();
+    if (first_unswept_page_ == anchor()) {
+      first_unswept_page_ = Page::FromAddress(NULL);
+    }
   }
 
-  // Release half of free pages.
-  Page* top_page = AllocationTopPage();
-  ASSERT(top_page->is_valid());
-
-  // Count the number of pages we would like to free.
-  int pages_to_free = 0;
-  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
-    pages_to_free++;
+  if (page->WasSwept()) {
+    intptr_t size = free_list_.EvictFreeListItems(page);
+    accounting_stats_.AllocateBytes(size);
+    ASSERT_EQ(AreaSize(), static_cast<int>(size));
+  } else {
+    DecreaseUnsweptFreeBytes(page);
   }
 
-  // Free pages after top_page.
-  Page* p = heap()->isolate()->memory_allocator()->
-      FreePages(top_page->next_page());
-  heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
-
-  // Find out how many pages we failed to free and update last_page_.
-  // Please note pages can only be freed in whole chunks.
-  last_page_ = top_page;
-  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
-    pages_to_free--;
-    last_page_ = p;
+  if (Page::FromAllocationTop(allocation_info_.top) == page) {
+    allocation_info_.top = allocation_info_.limit = NULL;
   }
 
-  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
-  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
+  page->Unlink();
+  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
+    heap()->isolate()->memory_allocator()->Free(page);
+  } else {
+    heap()->QueueMemoryChunkForFree(page);
+  }
+
+  ASSERT(Capacity() > 0);
+  ASSERT(Capacity() % AreaSize() == 0);
+  accounting_stats_.ShrinkSpace(AreaSize());
 }
 
 
-bool PagedSpace::EnsureCapacity(int capacity) {
-  if (Capacity() >= capacity) return true;
-
-  // Start from the allocation top and loop to the last page in the space.
-  Page* last_page = AllocationTopPage();
-  Page* next_page = last_page->next_page();
-  while (next_page->is_valid()) {
-    last_page = heap()->isolate()->memory_allocator()->
-        FindLastPageInSameChunk(next_page);
-    next_page = last_page->next_page();
+void PagedSpace::ReleaseAllUnusedPages() {
+  PageIterator it(this);
+  while (it.has_next()) {
+    Page* page = it.next();
+    if (!page->WasSwept()) {
+      if (page->LiveBytes() == 0) ReleasePage(page);
+    } else {
+      HeapObject* obj = HeapObject::FromAddress(page->area_start());
+      if (obj->IsFreeSpace() &&
+          FreeSpace::cast(obj)->size() == AreaSize()) {
+        // Sometimes we allocate memory from the free list but don't
+        // immediately initialize it (e.g. see PagedSpace::ReserveSpace,
+        // called from Heap::ReserveSpace, which can cause a GC before the
+        // reserved space is actually initialized). Thus we can't simply
+        // assume that obj represents a valid free-list node still owned by
+        // the free list. Instead, we verify that the page is fully covered
+        // by free-list items.
+        FreeList::SizeStats sizes;
+        free_list_.CountFreeListItems(page, &sizes);
+        if (sizes.Total() == AreaSize()) {
+          ReleasePage(page);
+        }
+      }
+    }
   }
-
-  // Expand the space until it has the required capacity or expansion fails.
-  do {
-    if (!Expand(last_page)) return false;
-    ASSERT(last_page->next_page()->is_valid());
-    last_page =
-        heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
-            last_page->next_page());
-  } while (Capacity() < capacity);
-
-  return true;
+  heap()->FreeQueuedChunks();
 }
 
 
@@ -1114,61 +944,52 @@
 
 
 #ifdef DEBUG
-// We do not assume that the PageIterator works, because it depends on the
-// invariants we are checking during verification.
 void PagedSpace::Verify(ObjectVisitor* visitor) {
-  // The allocation pointer should be valid, and it should be in a page in the
-  // space.
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
-  ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
+  // We can only iterate over the pages if they were swept precisely.
+  if (was_swept_conservatively_) return;
 
-  // Loop over all the pages.
-  bool above_allocation_top = false;
-  Page* current_page = first_page_;
-  while (current_page->is_valid()) {
-    if (above_allocation_top) {
-      // We don't care what's above the allocation top.
-    } else {
-      Address top = current_page->AllocationTop();
-      if (current_page == top_page) {
-        ASSERT(top == allocation_info_.top);
-        // The next page will be above the allocation top.
-        above_allocation_top = true;
-      }
-
-      // It should be packed with objects from the bottom to the top.
-      Address current = current_page->ObjectAreaStart();
-      while (current < top) {
-        HeapObject* object = HeapObject::FromAddress(current);
-
-        // The first word should be a map, and we expect all map pointers to
-        // be in map space.
-        Map* map = object->map();
-        ASSERT(map->IsMap());
-        ASSERT(heap()->map_space()->Contains(map));
-
-        // Perform space-specific object verification.
-        VerifyObject(object);
-
-        // The object itself should look OK.
-        object->Verify();
-
-        // All the interior pointers should be contained in the heap and
-        // have page regions covering intergenerational references should be
-        // marked dirty.
-        int size = object->Size();
-        object->IterateBody(map->instance_type(), size, visitor);
-
-        current += size;
-      }
-
-      // The allocation pointer should not be in the middle of an object.
-      ASSERT(current == top);
+  bool allocation_pointer_found_in_space =
+      (allocation_info_.top == allocation_info_.limit);
+  PageIterator page_iterator(this);
+  while (page_iterator.has_next()) {
+    Page* page = page_iterator.next();
+    ASSERT(page->owner() == this);
+    if (page == Page::FromAllocationTop(allocation_info_.top)) {
+      allocation_pointer_found_in_space = true;
     }
+    ASSERT(page->WasSweptPrecisely());
+    HeapObjectIterator it(page, NULL);
+    Address end_of_previous_object = page->area_start();
+    Address top = page->area_end();
+    int black_size = 0;
+    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+      ASSERT(end_of_previous_object <= object->address());
 
-    current_page = current_page->next_page();
+      // The first word should be a map, and we expect all map pointers to
+      // be in map space.
+      Map* map = object->map();
+      ASSERT(map->IsMap());
+      ASSERT(heap()->map_space()->Contains(map));
+
+      // Perform space-specific object verification.
+      VerifyObject(object);
+
+      // The object itself should look OK.
+      object->Verify();
+
+      // All the interior pointers should be contained in the heap.
+      int size = object->Size();
+      object->IterateBody(map->instance_type(), size, visitor);
+      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
+        black_size += size;
+      }
+
+      ASSERT(object->address() + size <= top);
+      end_of_previous_object = object->address() + size;
+    }
+    ASSERT_LE(black_size, page->LiveBytes());
   }
+  ASSERT(allocation_pointer_found_in_space);
 }
 #endif
 
@@ -1177,18 +998,28 @@
 // NewSpace implementation
 
 
-bool NewSpace::Setup(Address start, int size) {
-  // Setup new space based on the preallocated memory block defined by
+bool NewSpace::SetUp(int reserved_semispace_capacity,
+                     int maximum_semispace_capacity) {
+  // Set up new space based on the preallocated memory block defined by
   // start and size. The provided space is divided into two semi-spaces.
   // To support fast containment testing in the new space, the size of
   // this chunk must be a power of two and it must be aligned to its size.
   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
-  int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
+
+  size_t size = 2 * reserved_semispace_capacity;
+  Address base =
+      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
+          size, size, &reservation_);
+  if (base == NULL) return false;
+
+  chunk_base_ = base;
+  chunk_size_ = static_cast<uintptr_t>(size);
+  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
 
   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
   ASSERT(IsPowerOf2(maximum_semispace_capacity));
 
-  // Allocate and setup the histogram arrays if necessary.
+  // Allocate and set up the histogram arrays if necessary.
   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
 
@@ -1197,31 +1028,28 @@
   INSTANCE_TYPE_LIST(SET_NAME)
 #undef SET_NAME
 
-  ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
-  ASSERT(IsAddressAligned(start, size, 0));
+  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
+  ASSERT(static_cast<intptr_t>(chunk_size_) >=
+         2 * heap()->ReservedSemiSpaceSize());
+  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
 
-  if (!to_space_.Setup(start,
-                       initial_semispace_capacity,
-                       maximum_semispace_capacity)) {
-    return false;
-  }
-  if (!from_space_.Setup(start + maximum_semispace_capacity,
-                         initial_semispace_capacity,
-                         maximum_semispace_capacity)) {
+  to_space_.SetUp(chunk_base_,
+                  initial_semispace_capacity,
+                  maximum_semispace_capacity);
+  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
+                    initial_semispace_capacity,
+                    maximum_semispace_capacity);
+  if (!to_space_.Commit()) {
     return false;
   }
 
-  start_ = start;
-  address_mask_ = ~(size - 1);
+  start_ = chunk_base_;
+  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
   object_mask_ = address_mask_ | kHeapObjectTagMask;
-  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
+  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
 
-  allocation_info_.top = to_space_.low();
-  allocation_info_.limit = to_space_.high();
-  mc_forwarding_info_.top = NULL;
-  mc_forwarding_info_.limit = NULL;
+  ResetAllocationInfo();
 
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
   return true;
 }
 
@@ -1239,28 +1067,34 @@
   start_ = NULL;
   allocation_info_.top = NULL;
   allocation_info_.limit = NULL;
-  mc_forwarding_info_.top = NULL;
-  mc_forwarding_info_.limit = NULL;
 
   to_space_.TearDown();
   from_space_.TearDown();
+
+  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
+
+  ASSERT(reservation_.IsReserved());
+  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
+                                                    NOT_EXECUTABLE);
+  chunk_base_ = NULL;
+  chunk_size_ = 0;
 }
 
 
 void NewSpace::Flip() {
-  SemiSpace tmp = from_space_;
-  from_space_ = to_space_;
-  to_space_ = tmp;
+  SemiSpace::Swap(&from_space_, &to_space_);
 }
 
 
 void NewSpace::Grow() {
+  // Double the semispace size but only up to maximum capacity.
   ASSERT(Capacity() < MaximumCapacity());
-  if (to_space_.Grow()) {
-    // Only grow from space if we managed to grow to space.
-    if (!from_space_.Grow()) {
-      // If we managed to grow to space but couldn't grow from space,
-      // attempt to shrink to space.
+  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
+  if (to_space_.GrowTo(new_capacity)) {
+    // Only grow from-space if we managed to grow to-space.
+    if (!from_space_.GrowTo(new_capacity)) {
+      // If we managed to grow to-space but couldn't grow from-space,
+      // attempt to shrink to-space.
       if (!to_space_.ShrinkTo(from_space_.Capacity())) {
         // We are in an inconsistent state because we could not
         // commit/uncommit memory from new space.
@@ -1268,21 +1102,20 @@
       }
     }
   }
-  allocation_info_.limit = to_space_.high();
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
 
 void NewSpace::Shrink() {
   int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
-  int rounded_new_capacity =
-      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
+  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
   if (rounded_new_capacity < Capacity() &&
       to_space_.ShrinkTo(rounded_new_capacity))  {
-    // Only shrink from space if we managed to shrink to space.
+    // Only shrink from-space if we managed to shrink to-space.
+    from_space_.Reset();
     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
-      // If we managed to shrink to space but couldn't shrink from
-      // space, attempt to grow to space again.
+      // If we managed to shrink to-space but couldn't shrink from-space,
+      // attempt to grow to-space again.
       if (!to_space_.GrowTo(from_space_.Capacity())) {
         // We are in an inconsistent state because we could not
         // commit/uncommit memory from new space.
@@ -1290,36 +1123,98 @@
       }
     }
   }
-  allocation_info_.limit = to_space_.high();
+  allocation_info_.limit = to_space_.page_high();
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::UpdateAllocationInfo() {
+  allocation_info_.top = to_space_.page_low();
+  allocation_info_.limit = to_space_.page_high();
+
+  // Lower the limit during incremental marking so that the allocation
+  // slow path gets a chance to do a marking step.
+  if (heap()->incremental_marking()->IsMarking() &&
+      inline_allocation_limit_step() != 0) {
+    Address new_limit =
+        allocation_info_.top + inline_allocation_limit_step();
+    allocation_info_.limit = Min(new_limit, allocation_info_.limit);
+  }
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
 
 void NewSpace::ResetAllocationInfo() {
-  allocation_info_.top = to_space_.low();
-  allocation_info_.limit = to_space_.high();
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+  to_space_.Reset();
+  UpdateAllocationInfo();
+  pages_used_ = 0;
+  // Clear all mark-bits in the to-space.
+  NewSpacePageIterator it(&to_space_);
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
 }
 
 
-void NewSpace::MCResetRelocationInfo() {
-  mc_forwarding_info_.top = from_space_.low();
-  mc_forwarding_info_.limit = from_space_.high();
-  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
+bool NewSpace::AddFreshPage() {
+  Address top = allocation_info_.top;
+  if (NewSpacePage::IsAtStart(top)) {
+    // The current page is already empty. Don't try to make another.
+
+    // We should only get here if someone asks to allocate more than can
+    // be stored in a single page.
+    // TODO(gc): Change the limit on new-space allocation to prevent this
+    // from happening (all such allocations should go directly to LOSpace).
+    return false;
+  }
+  if (!to_space_.AdvancePage()) {
+    // Failed to get a new page in to-space.
+    return false;
+  }
+
+  // Clear remainder of current page.
+  Address limit = NewSpacePage::FromLimit(top)->area_end();
+  if (heap()->gc_state() == Heap::SCAVENGE) {
+    heap()->promotion_queue()->SetNewLimit(limit);
+    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
+  }
+
+  int remaining_in_page = static_cast<int>(limit - top);
+  heap()->CreateFillerObjectAt(top, remaining_in_page);
+  pages_used_++;
+  UpdateAllocationInfo();
+
+  return true;
 }
 
 
-void NewSpace::MCCommitRelocationInfo() {
-  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
-  // valid allocation info for the to space.
-  allocation_info_.top = mc_forwarding_info_.top;
-  allocation_info_.limit = to_space_.high();
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
+  Address old_top = allocation_info_.top;
+  Address new_top = old_top + size_in_bytes;
+  Address high = to_space_.page_high();
+  if (allocation_info_.limit < high) {
+    // Incremental marking has lowered the limit so that it gets a
+    // chance to do a marking step.
+    allocation_info_.limit = Min(
+        allocation_info_.limit + inline_allocation_limit_step_,
+        high);
+    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
+    heap()->incremental_marking()->Step(bytes_allocated);
+    top_on_previous_step_ = new_top;
+    return AllocateRaw(size_in_bytes);
+  } else if (AddFreshPage()) {
+    // Switched to new page. Try allocating again.
+    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+    heap()->incremental_marking()->Step(bytes_allocated);
+    top_on_previous_step_ = to_space_.page_low();
+    return AllocateRaw(size_in_bytes);
+  } else {
+    return Failure::RetryAfterGC();
+  }
 }
 
 
 #ifdef DEBUG
-// We do not use the SemispaceIterator because verification doesn't assume
+// We do not use the SemiSpaceIterator because verification doesn't assume
 // that it works (it depends on the invariants we are checking).
 void NewSpace::Verify() {
   // The allocation pointer should be in the space or at the very end.
@@ -1327,63 +1222,57 @@
 
   // There should be objects packed in from the low address up to the
   // allocation pointer.
-  Address current = to_space_.low();
-  while (current < top()) {
-    HeapObject* object = HeapObject::FromAddress(current);
+  Address current = to_space_.first_page()->area_start();
+  CHECK_EQ(current, to_space_.space_start());
 
-    // The first word should be a map, and we expect all map pointers to
-    // be in map space.
-    Map* map = object->map();
-    ASSERT(map->IsMap());
-    ASSERT(heap()->map_space()->Contains(map));
+  while (current != top()) {
+    if (!NewSpacePage::IsAtEnd(current)) {
+      // The allocation pointer should not be in the middle of an object.
+      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
+            current < top());
 
-    // The object should not be code or a map.
-    ASSERT(!object->IsMap());
-    ASSERT(!object->IsCode());
+      HeapObject* object = HeapObject::FromAddress(current);
 
-    // The object itself should look OK.
-    object->Verify();
+      // The first word should be a map, and we expect all map pointers to
+      // be in map space.
+      Map* map = object->map();
+      CHECK(map->IsMap());
+      CHECK(heap()->map_space()->Contains(map));
 
-    // All the interior pointers should be contained in the heap.
-    VerifyPointersVisitor visitor;
-    int size = object->Size();
-    object->IterateBody(map->instance_type(), size, &visitor);
+      // The object should not be code or a map.
+      CHECK(!object->IsMap());
+      CHECK(!object->IsCode());
 
-    current += size;
+      // The object itself should look OK.
+      object->Verify();
+
+      // All the interior pointers should be contained in the heap.
+      VerifyPointersVisitor visitor;
+      int size = object->Size();
+      object->IterateBody(map->instance_type(), size, &visitor);
+
+      current += size;
+    } else {
+      // At end of page, switch to next page.
+      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
+      // Next page should be valid.
+      CHECK(!page->is_anchor());
+      current = page->area_start();
+    }
   }
 
-  // The allocation pointer should not be in the middle of an object.
-  ASSERT(current == top());
+  // Check semi-spaces.
+  ASSERT_EQ(from_space_.id(), kFromSpace);
+  ASSERT_EQ(to_space_.id(), kToSpace);
+  from_space_.Verify();
+  to_space_.Verify();
 }
 #endif
 
-
-bool SemiSpace::Commit() {
-  ASSERT(!is_committed());
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      start_, capacity_, executable())) {
-    return false;
-  }
-  committed_ = true;
-  return true;
-}
-
-
-bool SemiSpace::Uncommit() {
-  ASSERT(is_committed());
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
-      start_, capacity_)) {
-    return false;
-  }
-  committed_ = false;
-  return true;
-}
-
-
 // -----------------------------------------------------------------------------
 // SemiSpace implementation
 
-bool SemiSpace::Setup(Address start,
+void SemiSpace::SetUp(Address start,
                       int initial_capacity,
                       int maximum_capacity) {
   // Creates a space in the young generation. The constructor does not
@@ -1392,18 +1281,16 @@
   // otherwise.  In the mark-compact collector, the memory region of the from
   // space is used as the marking stack. It requires contiguous memory
   // addresses.
-  initial_capacity_ = initial_capacity;
+  ASSERT(maximum_capacity >= Page::kPageSize);
+  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
   capacity_ = initial_capacity;
-  maximum_capacity_ = maximum_capacity;
+  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
   committed_ = false;
-
   start_ = start;
   address_mask_ = ~(maximum_capacity - 1);
   object_mask_ = address_mask_ | kHeapObjectTagMask;
   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
   age_mark_ = start_;
-
-  return Commit();
 }
 
 
@@ -1413,81 +1300,266 @@
 }
 
 
-bool SemiSpace::Grow() {
-  // Double the semispace size but only up to maximum capacity.
-  int maximum_extra = maximum_capacity_ - capacity_;
-  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
-                  maximum_extra);
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      high(), extra, executable())) {
+bool SemiSpace::Commit() {
+  ASSERT(!is_committed());
+  int pages = capacity_ / Page::kPageSize;
+  Address end = start_ + maximum_capacity_;
+  Address start = end - pages * Page::kPageSize;
+  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
+                                                          capacity_,
+                                                          executable())) {
     return false;
   }
-  capacity_ += extra;
+
+  NewSpacePage* page = anchor();
+  for (int i = 1; i <= pages; i++) {
+    NewSpacePage* new_page =
+      NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
+    new_page->InsertAfter(page);
+    page = new_page;
+  }
+
+  committed_ = true;
+  Reset();
+  return true;
+}
+
+
+bool SemiSpace::Uncommit() {
+  ASSERT(is_committed());
+  Address start = start_ + maximum_capacity_ - capacity_;
+  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
+    return false;
+  }
+  anchor()->set_next_page(anchor());
+  anchor()->set_prev_page(anchor());
+
+  committed_ = false;
   return true;
 }
 
 
 bool SemiSpace::GrowTo(int new_capacity) {
+  if (!is_committed()) {
+    if (!Commit()) return false;
+  }
+  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity <= maximum_capacity_);
   ASSERT(new_capacity > capacity_);
+  int pages_before = capacity_ / Page::kPageSize;
+  int pages_after = new_capacity / Page::kPageSize;
+
+  Address end = start_ + maximum_capacity_;
+  Address start = end - new_capacity;
   size_t delta = new_capacity - capacity_;
+
   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      high(), delta, executable())) {
+      start, delta, executable())) {
     return false;
   }
   capacity_ = new_capacity;
+  NewSpacePage* last_page = anchor()->prev_page();
+  ASSERT(last_page != anchor());
+  for (int i = pages_before + 1; i <= pages_after; i++) {
+    Address page_address = end - i * Page::kPageSize;
+    NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
+                                                      page_address,
+                                                      this);
+    new_page->InsertAfter(last_page);
+    Bitmap::Clear(new_page);
+    // Duplicate the flags that were set on the old last page.
+    new_page->SetFlags(last_page->GetFlags(),
+                       NewSpacePage::kCopyOnFlipFlagsMask);
+    last_page = new_page;
+  }
   return true;
 }
 
 
 bool SemiSpace::ShrinkTo(int new_capacity) {
+  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity >= initial_capacity_);
   ASSERT(new_capacity < capacity_);
-  size_t delta = capacity_ - new_capacity;
-  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
-      high() - delta, delta)) {
-    return false;
+  if (is_committed()) {
+    // Semispaces grow backwards from the end of their allocated capacity,
+    // so we find the before and after start addresses relative to the
+    // end of the space.
+    Address space_end = start_ + maximum_capacity_;
+    Address old_start = space_end - capacity_;
+    size_t delta = capacity_ - new_capacity;
+    ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+
+    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
+    if (!allocator->UncommitBlock(old_start, delta)) {
+      return false;
+    }
+
+    int pages_after = new_capacity / Page::kPageSize;
+    NewSpacePage* new_last_page =
+        NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
+    new_last_page->set_next_page(anchor());
+    anchor()->set_prev_page(new_last_page);
+    ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
   }
+
   capacity_ = new_capacity;
+
   return true;
 }
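+
+// Worked example (editorial, assuming 1 MB pages): with maximum_capacity_
+// == 8 MB and capacity_ == 4 MB, the committed region is
+// [start_ + 4 MB, start_ + 8 MB).  ShrinkTo(2 MB) uncommits delta == 2 MB
+// at old_start == start_ + 4 MB, leaving [start_ + 6 MB, start_ + 8 MB)
+// committed: the semispace shrinks from its low end because pages are laid
+// out backwards from the end of the reservation.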
 
 
+void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
+  anchor_.set_owner(this);
+  // Fix up back-pointers to the anchor, whose address changes when the
+  // semispaces are swapped.
+  anchor_.prev_page()->set_next_page(&anchor_);
+  anchor_.next_page()->set_prev_page(&anchor_);
+
+  bool becomes_to_space = (id_ == kFromSpace);
+  id_ = becomes_to_space ? kToSpace : kFromSpace;
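+  // Retag every page in the list: copy the masked flags and switch the
+  // IN_FROM_SPACE/IN_TO_SPACE bits to match the space's new role.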
+  NewSpacePage* page = anchor_.next_page();
+  while (page != &anchor_) {
+    page->set_owner(this);
+    page->SetFlags(flags, mask);
+    if (becomes_to_space) {
+      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
+      page->SetFlag(MemoryChunk::IN_TO_SPACE);
+      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+      page->ResetLiveBytes();
+    } else {
+      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
+      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
+    }
+    ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
+    ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
+           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
+    page = page->next_page();
+  }
+}
+
+
+void SemiSpace::Reset() {
+  ASSERT(anchor_.next_page() != &anchor_);
+  current_page_ = anchor_.next_page();
+}
+
+
+void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
+  // We won't be swapping semispaces without data in them.
+  ASSERT(from->anchor_.next_page() != &from->anchor_);
+  ASSERT(to->anchor_.next_page() != &to->anchor_);
+
+  // Swap bits.
+  SemiSpace tmp = *from;
+  *from = *to;
+  *to = tmp;
+
+  // FlipPages fixes up the back-pointers to the page-list anchor (whose
+  // address changed in the swap), swaps the to/from-space bits on each page,
+  // and copies the GC flags from the old active space (from-space) to the
+  // new to-space.
+  intptr_t flags = from->current_page()->GetFlags();
+  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
+
+  from->FlipPages(0, 0);
+}
+
+
+void SemiSpace::set_age_mark(Address mark) {
+  ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
+  age_mark_ = mark;
+  // Mark all pages up to the one containing mark.
+  NewSpacePageIterator it(space_start(), mark);
+  while (it.has_next()) {
+    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+  }
+}
+
+
 #ifdef DEBUG
 void SemiSpace::Print() { }
 
 
-void SemiSpace::Verify() { }
+void SemiSpace::Verify() {
+  bool is_from_space = (id_ == kFromSpace);
+  NewSpacePage* page = anchor_.next_page();
+  CHECK(anchor_.semi_space() == this);
+  while (page != &anchor_) {
+    CHECK(page->semi_space() == this);
+    CHECK(page->InNewSpace());
+    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
+                                        : MemoryChunk::IN_TO_SPACE));
+    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
+                                         : MemoryChunk::IN_FROM_SPACE));
+    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
+    if (!is_from_space) {
+      // The pointers-from-here-are-interesting flag isn't updated dynamically
+      // on from-space pages, so it might be out of sync with the marking state.
+      if (page->heap()->incremental_marking()->IsMarking()) {
+        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+      } else {
+        CHECK(!page->IsFlagSet(
+            MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+      }
+      // TODO(gc): Check that the live_bytes_count_ field matches the
+      // black marking on the page (if we make it match in new-space).
+    }
+    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
+    CHECK(page->prev_page()->next_page() == page);
+    page = page->next_page();
+  }
+}
+
+
+void SemiSpace::AssertValidRange(Address start, Address end) {
+  // Addresses must belong to the same semi-space.
+  NewSpacePage* page = NewSpacePage::FromLimit(start);
+  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
+  SemiSpace* space = page->semi_space();
+  CHECK_EQ(space, end_page->semi_space());
+  // The start address must not be above the end address: either both are on
+  // the same page, or the end address is on a later page in the linked list
+  // of semi-space pages.
+  if (page == end_page) {
+    CHECK(start <= end);
+  } else {
+    while (page != end_page) {
+      page = page->next_page();
+      CHECK_NE(page, space->anchor());
+    }
+  }
+}
 #endif
 
 
 // -----------------------------------------------------------------------------
 // SemiSpaceIterator implementation.
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
-  Initialize(space, space->bottom(), space->top(), NULL);
+  Initialize(space->bottom(), space->top(), NULL);
 }
 
 
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                      HeapObjectCallback size_func) {
-  Initialize(space, space->bottom(), space->top(), size_func);
+  Initialize(space->bottom(), space->top(), size_func);
 }
 
 
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
-  Initialize(space, start, space->top(), NULL);
+  Initialize(start, space->top(), NULL);
 }
 
 
-void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
+SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
+  Initialize(from, to, NULL);
+}
+
+
+void SemiSpaceIterator::Initialize(Address start,
                                    Address end,
                                    HeapObjectCallback size_func) {
-  ASSERT(space->ToSpaceContains(start));
-  ASSERT(space->ToSpaceLow() <= end
-         && end <= space->ToSpaceHigh());
-  space_ = &space->to_space_;
+  SemiSpace::AssertValidRange(start, end);
   current_ = start;
   limit_ = end;
   size_func_ = size_func;
@@ -1623,7 +1695,7 @@
 void NewSpace::CollectStatistics() {
   ClearHistograms();
   SemiSpaceIterator it(this);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
     RecordAllocation(obj);
 }
 
@@ -1699,7 +1771,6 @@
   promoted_histogram_[type].increment_bytes(obj->Size());
 }
 
-
 // -----------------------------------------------------------------------------
 // Free lists for old object spaces implementation
 
@@ -1708,493 +1779,440 @@
   ASSERT(IsAligned(size_in_bytes, kPointerSize));
 
   // We write a map and possibly size information to the block.  If the block
-  // is big enough to be a ByteArray with at least one extra word (the next
-  // pointer), we set its map to be the byte array map and its size to an
+  // is big enough to be a FreeSpace with at least one extra word (the next
+  // pointer), we set its map to be the free space map and its size to an
   // appropriate array length for the desired size from HeapObject::Size().
   // If the block is too small (eg, one or two words), to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
-  if (size_in_bytes > ByteArray::kHeaderSize) {
-    set_map(heap->raw_unchecked_byte_array_map());
-    // Can't use ByteArray::cast because it fails during deserialization.
-    ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
-    this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
+  if (size_in_bytes > FreeSpace::kHeaderSize) {
+    set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
+    // Can't use FreeSpace::cast because it fails during deserialization.
+    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
+    this_as_free_space->set_size(size_in_bytes);
   } else if (size_in_bytes == kPointerSize) {
-    set_map(heap->raw_unchecked_one_pointer_filler_map());
+    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map(heap->raw_unchecked_two_pointer_filler_map());
+    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
   } else {
     UNREACHABLE();
   }
   // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
-  // deserialization because the byte array map is not done yet.
+  // deserialization because the free space map is not done yet.
 }
 
 
-Address FreeListNode::next(Heap* heap) {
+FreeListNode* FreeListNode::next() {
   ASSERT(IsFreeListNode(this));
-  if (map() == heap->raw_unchecked_byte_array_map()) {
-    ASSERT(Size() >= kNextOffset + kPointerSize);
-    return Memory::Address_at(address() + kNextOffset);
+  if (map() == HEAP->raw_unchecked_free_space_map()) {
+    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
+    return reinterpret_cast<FreeListNode*>(
+        Memory::Address_at(address() + kNextOffset));
   } else {
-    return Memory::Address_at(address() + kPointerSize);
+    return reinterpret_cast<FreeListNode*>(
+        Memory::Address_at(address() + kPointerSize));
   }
 }
 
 
-void FreeListNode::set_next(Heap* heap, Address next) {
+FreeListNode** FreeListNode::next_address() {
   ASSERT(IsFreeListNode(this));
-  if (map() == heap->raw_unchecked_byte_array_map()) {
+  if (map() == HEAP->raw_unchecked_free_space_map()) {
     ASSERT(Size() >= kNextOffset + kPointerSize);
-    Memory::Address_at(address() + kNextOffset) = next;
+    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
   } else {
-    Memory::Address_at(address() + kPointerSize) = next;
+    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
   }
 }
 
 
-OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
-  : heap_(heap),
-    owner_(owner) {
+void FreeListNode::set_next(FreeListNode* next) {
+  ASSERT(IsFreeListNode(this));
+  // While we are booting the VM the free space map will actually be null.  So
+  // we have to make sure that we don't try to use it for anything at that
+  // stage.
+  if (map() == HEAP->raw_unchecked_free_space_map()) {
+    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
+    Memory::Address_at(address() + kNextOffset) =
+        reinterpret_cast<Address>(next);
+  } else {
+    Memory::Address_at(address() + kPointerSize) =
+        reinterpret_cast<Address>(next);
+  }
+}
+
+
+FreeList::FreeList(PagedSpace* owner)
+    : owner_(owner), heap_(owner->heap()) {
   Reset();
 }
 
 
-void OldSpaceFreeList::Reset() {
+void FreeList::Reset() {
   available_ = 0;
-  for (int i = 0; i < kFreeListsLength; i++) {
-    free_[i].head_node_ = NULL;
-  }
-  needs_rebuild_ = false;
-  finger_ = kHead;
-  free_[kHead].next_size_ = kEnd;
+  small_list_ = NULL;
+  medium_list_ = NULL;
+  large_list_ = NULL;
+  huge_list_ = NULL;
 }
 
 
-void OldSpaceFreeList::RebuildSizeList() {
-  ASSERT(needs_rebuild_);
-  int cur = kHead;
-  for (int i = cur + 1; i < kFreeListsLength; i++) {
-    if (free_[i].head_node_ != NULL) {
-      free_[cur].next_size_ = i;
-      cur = i;
-    }
-  }
-  free_[cur].next_size_ = kEnd;
-  needs_rebuild_ = false;
-}
-
-
-int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
-#ifdef DEBUG
-  Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
-#endif
+int FreeList::Free(Address start, int size_in_bytes) {
+  if (size_in_bytes == 0) return 0;
   FreeListNode* node = FreeListNode::FromAddress(start);
   node->set_size(heap_, size_in_bytes);
 
-  // We don't use the freelists in compacting mode.  This makes it more like a
-  // GC that only has mark-sweep-compact and doesn't have a mark-sweep
-  // collector.
-  if (FLAG_always_compact) {
-    return size_in_bytes;
-  }
+  // Early return to drop too-small blocks on the floor.
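+  // (Such blocks keep the map written by set_size above; their size is
+  // reported back to the caller and accounted as waste.)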
+  if (size_in_bytes < kSmallListMin) return size_in_bytes;
 
-  // Early return to drop too-small blocks on the floor (one or two word
-  // blocks cannot hold a map pointer, a size field, and a pointer to the
-  // next block in the free list).
-  if (size_in_bytes < kMinBlockSize) {
-    return size_in_bytes;
+  // Insert other blocks at the head of a free list of the appropriate
+  // magnitude.
+  if (size_in_bytes <= kSmallListMax) {
+    node->set_next(small_list_);
+    small_list_ = node;
+  } else if (size_in_bytes <= kMediumListMax) {
+    node->set_next(medium_list_);
+    medium_list_ = node;
+  } else if (size_in_bytes <= kLargeListMax) {
+    node->set_next(large_list_);
+    large_list_ = node;
+  } else {
+    node->set_next(huge_list_);
+    huge_list_ = node;
   }
-
-  // Insert other blocks at the head of an exact free list.
-  int index = size_in_bytes >> kPointerSizeLog2;
-  node->set_next(heap_, free_[index].head_node_);
-  free_[index].head_node_ = node->address();
   available_ += size_in_bytes;
-  needs_rebuild_ = true;
+  ASSERT(IsVeryLong() || available_ == SumFreeLists());
   return 0;
 }
 
 
-MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
-  ASSERT(0 < size_in_bytes);
-  ASSERT(size_in_bytes <= kMaxBlockSize);
-  ASSERT(IsAligned(size_in_bytes, kPointerSize));
+FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
+  FreeListNode* node = *list;
 
-  if (needs_rebuild_) RebuildSizeList();
-  int index = size_in_bytes >> kPointerSizeLog2;
-  // Check for a perfect fit.
-  if (free_[index].head_node_ != NULL) {
-    FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
-    // If this was the last block of its size, remove the size.
-    if ((free_[index].head_node_ = node->next(heap_)) == NULL)
-      RemoveSize(index);
-    available_ -= size_in_bytes;
-    *wasted_bytes = 0;
-    ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
-    return node;
+  if (node == NULL) return NULL;
+
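+  // Skip, and deduct from the available-bytes accounting, any nodes sitting
+  // on pages that were chosen as evacuation candidates; their pages are
+  // about to be evacuated.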
+  while (node != NULL &&
+         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
+    available_ -= node->Size();
+    node = node->next();
   }
-  // Search the size list for the best fit.
-  int prev = finger_ < index ? finger_ : kHead;
-  int cur = FindSize(index, &prev);
-  ASSERT(index < cur);
-  if (cur == kEnd) {
-    // No large enough size in list.
-    *wasted_bytes = 0;
-    return Failure::RetryAfterGC(owner_);
-  }
-  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
-  int rem = cur - index;
-  int rem_bytes = rem << kPointerSizeLog2;
-  FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
-  ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
-  FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
-                                                     size_in_bytes);
-  // Distinguish the cases prev < rem < cur and rem <= prev < cur
-  // to avoid many redundant tests and calls to Insert/RemoveSize.
-  if (prev < rem) {
-    // Simple case: insert rem between prev and cur.
-    finger_ = prev;
-    free_[prev].next_size_ = rem;
-    // If this was the last block of size cur, remove the size.
-    if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
-      free_[rem].next_size_ = free_[cur].next_size_;
-    } else {
-      free_[rem].next_size_ = cur;
-    }
-    // Add the remainder block.
-    rem_node->set_size(heap_, rem_bytes);
-    rem_node->set_next(heap_, free_[rem].head_node_);
-    free_[rem].head_node_ = rem_node->address();
+
+  if (node != NULL) {
+    *node_size = node->Size();
+    *list = node->next();
   } else {
-    // If this was the last block of size cur, remove the size.
-    if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
-      finger_ = prev;
-      free_[prev].next_size_ = free_[cur].next_size_;
-    }
-    if (rem_bytes < kMinBlockSize) {
-      // Too-small remainder is wasted.
-      rem_node->set_size(heap_, rem_bytes);
-      available_ -= size_in_bytes + rem_bytes;
-      *wasted_bytes = rem_bytes;
-      return cur_node;
-    }
-    // Add the remainder block and, if needed, insert its size.
-    rem_node->set_size(heap_, rem_bytes);
-    rem_node->set_next(heap_, free_[rem].head_node_);
-    free_[rem].head_node_ = rem_node->address();
-    if (rem_node->next(heap_) == NULL) InsertSize(rem);
-  }
-  available_ -= size_in_bytes;
-  *wasted_bytes = 0;
-  return cur_node;
-}
-
-
-void OldSpaceFreeList::MarkNodes() {
-  for (int i = 0; i < kFreeListsLength; i++) {
-    Address cur_addr = free_[i].head_node_;
-    while (cur_addr != NULL) {
-      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
-      cur_addr = cur_node->next(heap_);
-      cur_node->SetMark();
-    }
-  }
-}
-
-
-#ifdef DEBUG
-bool OldSpaceFreeList::Contains(FreeListNode* node) {
-  for (int i = 0; i < kFreeListsLength; i++) {
-    Address cur_addr = free_[i].head_node_;
-    while (cur_addr != NULL) {
-      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
-      if (cur_node == node) return true;
-      cur_addr = cur_node->next(heap_);
-    }
-  }
-  return false;
-}
-#endif
-
-
-FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
-                                     AllocationSpace owner,
-                                     int object_size)
-    : heap_(heap), owner_(owner), object_size_(object_size) {
-  Reset();
-}
-
-
-void FixedSizeFreeList::Reset() {
-  available_ = 0;
-  head_ = tail_ = NULL;
-}
-
-
-void FixedSizeFreeList::Free(Address start) {
-#ifdef DEBUG
-  Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
-#endif
-  // We only use the freelists with mark-sweep.
-  ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
-  FreeListNode* node = FreeListNode::FromAddress(start);
-  node->set_size(heap_, object_size_);
-  node->set_next(heap_, NULL);
-  if (head_ == NULL) {
-    tail_ = head_ = node->address();
-  } else {
-    FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
-    tail_ = node->address();
-  }
-  available_ += object_size_;
-}
-
-
-MaybeObject* FixedSizeFreeList::Allocate() {
-  if (head_ == NULL) {
-    return Failure::RetryAfterGC(owner_);
+    *list = NULL;
   }
 
-  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
-  FreeListNode* node = FreeListNode::FromAddress(head_);
-  head_ = node->next(heap_);
-  available_ -= object_size_;
   return node;
 }
 
 
-void FixedSizeFreeList::MarkNodes() {
-  Address cur_addr = head_;
-  while (cur_addr != NULL && cur_addr != tail_) {
-    FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
-    cur_addr = cur_node->next(heap_);
-    cur_node->SetMark();
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+  FreeListNode* node = NULL;
+
+  if (size_in_bytes <= kSmallAllocationMax) {
+    node = PickNodeFromList(&small_list_, node_size);
+    if (node != NULL) return node;
+  }
+
+  if (size_in_bytes <= kMediumAllocationMax) {
+    node = PickNodeFromList(&medium_list_, node_size);
+    if (node != NULL) return node;
+  }
+
+  if (size_in_bytes <= kLargeAllocationMax) {
+    node = PickNodeFromList(&large_list_, node_size);
+    if (node != NULL) return node;
+  }
+
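+  // The huge list is not segregated by size: walk it, pruning nodes on
+  // evacuation candidate pages, and take the first node that is big enough.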
+  for (FreeListNode** cur = &huge_list_;
+       *cur != NULL;
+       cur = (*cur)->next_address()) {
+    FreeListNode* cur_node = *cur;
+    while (cur_node != NULL &&
+           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
+      available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
+      cur_node = cur_node->next();
+    }
+
+    *cur = cur_node;
+    if (cur_node == NULL) break;
+
+    ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
+    int size = cur_as_free_space->Size();
+    if (size >= size_in_bytes) {
+      // Large enough node found.  Unlink it from the list.
+      node = *cur;
+      *node_size = size;
+      *cur = node->next();
+      break;
+    }
+  }
+
+  return node;
+}
+
+
+// Allocation on the old space free list.  If it succeeds then a new linear
+// allocation space has been set up with the top and limit of the space.  If
+// the allocation fails then NULL is returned, and the caller can perform a GC
+// or allocate a new page before retrying.
+HeapObject* FreeList::Allocate(int size_in_bytes) {
+  ASSERT(0 < size_in_bytes);
+  ASSERT(size_in_bytes <= kMaxBlockSize);
+  ASSERT(IsAligned(size_in_bytes, kPointerSize));
+  // Don't allocate from the free list while linear space is still available.
+  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
+
+  int new_node_size = 0;
+  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+  if (new_node == NULL) return NULL;
+
+  available_ -= new_node_size;
+  ASSERT(IsVeryLong() || available_ == SumFreeLists());
+
+  int bytes_left = new_node_size - size_in_bytes;
+  ASSERT(bytes_left >= 0);
+
+  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
+  // Mark the old linear allocation area with a free space map so it can be
+  // skipped when scanning the heap.  This also puts it back in the free list
+  // if it is big enough.
+  owner_->Free(owner_->top(), old_linear_size);
+
+#ifdef DEBUG
+  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
+    reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
+  }
+#endif
+
+  owner_->heap()->incremental_marking()->OldSpaceStep(
+      size_in_bytes - old_linear_size);
+
+  // The old-space-step might have finished sweeping and restarted marking.
+  // Verify that it did not turn the page of the new node into an evacuation
+  // candidate.
+  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
+  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+
+  // Memory in the linear allocation area is counted as allocated.  We may free
+  // a little of this again immediately - see below.
+  owner_->Allocate(new_node_size);
+
+  if (bytes_left > kThreshold &&
+      owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+      FLAG_incremental_marking_steps) {
+    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+    // We don't want to give too large linear areas to the allocator while
+    // incremental marking is going on, because we won't check again whether
+    // we want to do another increment until the linear area is used up.
+    owner_->Free(new_node->address() + size_in_bytes + linear_size,
+                 new_node_size - size_in_bytes - linear_size);
+    owner_->SetTop(new_node->address() + size_in_bytes,
+                   new_node->address() + size_in_bytes + linear_size);
+  } else if (bytes_left > 0) {
+    // Normally we give the rest of the node to the allocator as its new
+    // linear allocation area.
+    owner_->SetTop(new_node->address() + size_in_bytes,
+                   new_node->address() + new_node_size);
+  } else {
+    // TODO(gc) Try not to free the linear allocation region when bytes_left
+    // is zero.
+    owner_->SetTop(NULL, NULL);
+  }
+
+  return new_node;
+}
+
+
+static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
+  intptr_t sum = 0;
+  while (n != NULL) {
+    if (Page::FromAddress(n->address()) == p) {
+      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
+      sum += free_space->Size();
+    }
+    n = n->next();
+  }
+  return sum;
+}
+
+
+void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
+  sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
+  if (sizes->huge_size_ < p->area_size()) {
+    sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
+    sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
+    sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
+  } else {
+    sizes->small_size_ = 0;
+    sizes->medium_size_ = 0;
+    sizes->large_size_ = 0;
   }
 }
 
 
+static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
+  intptr_t sum = 0;
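+  // Walk via the address of each next field so that nodes on page p can be
+  // unlinked in place without keeping a separate previous pointer.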
+  while (*n != NULL) {
+    if (Page::FromAddress((*n)->address()) == p) {
+      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+      sum += free_space->Size();
+      *n = (*n)->next();
+    } else {
+      n = (*n)->next_address();
+    }
+  }
+  return sum;
+}
+
+
+intptr_t FreeList::EvictFreeListItems(Page* p) {
+  intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
+
+  if (sum < p->area_size()) {
+    sum += EvictFreeListItemsInList(&small_list_, p) +
+        EvictFreeListItemsInList(&medium_list_, p) +
+        EvictFreeListItemsInList(&large_list_, p);
+  }
+
+  available_ -= static_cast<int>(sum);
+
+  return sum;
+}
+
+
+#ifdef DEBUG
+intptr_t FreeList::SumFreeList(FreeListNode* cur) {
+  intptr_t sum = 0;
+  while (cur != NULL) {
+    ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
+    sum += cur_as_free_space->Size();
+    cur = cur->next();
+  }
+  return sum;
+}
+
+
+static const int kVeryLongFreeList = 500;
+
+
+int FreeList::FreeListLength(FreeListNode* cur) {
+  int length = 0;
+  while (cur != NULL) {
+    length++;
+    cur = cur->next();
+    if (length == kVeryLongFreeList) return length;
+  }
+  return length;
+}
+
+
+bool FreeList::IsVeryLong() {
+  if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
+  if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
+  if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
+  if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
+  return false;
+}
+
+
+// This can take a very long time because it is linear in the number of entries
+// on the free list, so it should not be called if FreeListLength returns
+// kVeryLongFreeList.
+intptr_t FreeList::SumFreeLists() {
+  intptr_t sum = SumFreeList(small_list_);
+  sum += SumFreeList(medium_list_);
+  sum += SumFreeList(large_list_);
+  sum += SumFreeList(huge_list_);
+  return sum;
+}
+#endif
+
+
 // -----------------------------------------------------------------------------
 // OldSpace implementation
 
-void OldSpace::PrepareForMarkCompact(bool will_compact) {
-  // Call prepare of the super class.
-  PagedSpace::PrepareForMarkCompact(will_compact);
-
-  if (will_compact) {
-    // Reset relocation info.  During a compacting collection, everything in
-    // the space is considered 'available' and we will rediscover live data
-    // and waste during the collection.
-    MCResetRelocationInfo();
-    ASSERT(Available() == Capacity());
-  } else {
-    // During a non-compacting collection, everything below the linear
-    // allocation pointer is considered allocated (everything above is
-    // available) and we will rediscover available and wasted bytes during
-    // the collection.
-    accounting_stats_.AllocateBytes(free_list_.available());
-    accounting_stats_.FillWastedBytes(Waste());
+bool NewSpace::ReserveSpace(int bytes) {
+  // We can't reliably unpack a partial snapshot that needs more new space
+  // than the minimum NewSpace size.  The limit can be set lower than
+  // the end of new space either because there is more space on the next page
+  // or because we have lowered the limit in order to get periodic incremental
+  // marking.  The most reliable way to ensure that there is linear space is
+  // to do the allocation, then rewind the limit.
+  ASSERT(bytes <= InitialCapacity());
+  MaybeObject* maybe = AllocateRaw(bytes);
+  Object* object = NULL;
+  if (!maybe->ToObject(&object)) return false;
+  HeapObject* allocation = HeapObject::cast(object);
+  Address top = allocation_info_.top;
+  if ((top - bytes) == allocation->address()) {
+    allocation_info_.top = allocation->address();
+    return true;
   }
+  // There may be a borderline case here where the allocation succeeded, but
+  // the limit and top have moved on to a new page.  In that case we try again.
+  return ReserveSpace(bytes);
+}
+
+
+void PagedSpace::PrepareForMarkCompact() {
+  // We don't have a linear allocation area while sweeping.  It will be
+  // restored on the first allocation after the sweep.
+  // Mark the old linear allocation area with a free space map so it can be
+  // skipped when scanning the heap.
+  int old_linear_size = static_cast<int>(limit() - top());
+  Free(top(), old_linear_size);
+  SetTop(NULL, NULL);
+
+  // Stop lazy sweeping and clear marking bits for unswept pages.
+  if (first_unswept_page_ != NULL) {
+    Page* p = first_unswept_page_;
+    do {
+      // Do not use ShouldBeSweptLazily predicate here.
+      // New evacuation candidates were selected but they still have
+      // to be swept before collection starts.
+      if (!p->WasSwept()) {
+        Bitmap::Clear(p);
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+      }
+      p = p->next_page();
+    } while (p != anchor());
+  }
+  first_unswept_page_ = Page::FromAddress(NULL);
+  unswept_free_bytes_ = 0;
 
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }
 
 
-void OldSpace::MCCommitRelocationInfo() {
-  // Update fast allocation info.
-  allocation_info_.top = mc_forwarding_info_.top;
-  allocation_info_.limit = mc_forwarding_info_.limit;
-  ASSERT(allocation_info_.VerifyPagedAllocation());
+bool PagedSpace::ReserveSpace(int size_in_bytes) {
+  ASSERT(size_in_bytes <= AreaSize());
+  ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
+  Address current_top = allocation_info_.top;
+  Address new_top = current_top + size_in_bytes;
+  if (new_top <= allocation_info_.limit) return true;
 
-  // The space is compacted and we haven't yet built free lists or
-  // wasted any space.
-  ASSERT(Waste() == 0);
-  ASSERT(AvailableFree() == 0);
+  HeapObject* new_area = free_list_.Allocate(size_in_bytes);
+  if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
+  if (new_area == NULL) return false;
 
-  // Build the free list for the space.
-  int computed_size = 0;
-  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
-  while (it.has_next()) {
-    Page* p = it.next();
-    // Space below the relocation pointer is allocated.
-    computed_size +=
-        static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
-    if (it.has_next()) {
-      // Free the space at the top of the page.
-      int extra_size =
-          static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
-      if (extra_size > 0) {
-        int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
-                                           extra_size);
-        // The bytes we have just "freed" to add to the free list were
-        // already accounted as available.
-        accounting_stats_.WasteBytes(wasted_bytes);
-      }
-    }
-  }
+  int old_linear_size = static_cast<int>(limit() - top());
+  // Mark the old linear allocation area with a free space map so it can be
+  // skipped when scanning the heap.  This also puts it back in the free list
+  // if it is big enough.
+  Free(top(), old_linear_size);
 
-  // Make sure the computed size - based on the used portion of the pages in
-  // use - matches the size obtained while computing forwarding addresses.
-  ASSERT(computed_size == Size());
-}
-
-
-bool NewSpace::ReserveSpace(int bytes) {
-  // We can't reliably unpack a partial snapshot that needs more new space
-  // space than the minimum NewSpace size.
-  ASSERT(bytes <= InitialCapacity());
-  Address limit = allocation_info_.limit;
-  Address top = allocation_info_.top;
-  return limit - top >= bytes;
-}
-
-
-void PagedSpace::FreePages(Page* prev, Page* last) {
-  if (last == AllocationTopPage()) {
-    // Pages are already at the end of used pages.
-    return;
-  }
-
-  Page* first = NULL;
-
-  // Remove pages from the list.
-  if (prev == NULL) {
-    first = first_page_;
-    first_page_ = last->next_page();
-  } else {
-    first = prev->next_page();
-    heap()->isolate()->memory_allocator()->SetNextPage(
-        prev, last->next_page());
-  }
-
-  // Attach it after the last page.
-  heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
-  last_page_ = last;
-  heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
-
-  // Clean them up.
-  do {
-    first->InvalidateWatermark(true);
-    first->SetAllocationWatermark(first->ObjectAreaStart());
-    first->SetCachedAllocationWatermark(first->ObjectAreaStart());
-    first->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    first = first->next_page();
-  } while (first != NULL);
-
-  // Order of pages in this space might no longer be consistent with
-  // order of pages in chunks.
-  page_list_is_chunk_ordered_ = false;
-}
-
-
-void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
-  const bool add_to_freelist = true;
-
-  // Mark used and unused pages to properly fill unused pages
-  // after reordering.
-  PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
-  Page* last_in_use = AllocationTopPage();
-  bool in_use = true;
-
-  while (all_pages_iterator.has_next()) {
-    Page* p = all_pages_iterator.next();
-    p->SetWasInUseBeforeMC(in_use);
-    if (p == last_in_use) {
-      // We passed a page containing allocation top. All consequent
-      // pages are not used.
-      in_use = false;
-    }
-  }
-
-  if (page_list_is_chunk_ordered_) return;
-
-  Page* new_last_in_use = Page::FromAddress(NULL);
-  heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
-      this, &first_page_, &last_page_, &new_last_in_use);
-  ASSERT(new_last_in_use->is_valid());
-
-  if (new_last_in_use != last_in_use) {
-    // Current allocation top points to a page which is now in the middle
-    // of page list. We should move allocation top forward to the new last
-    // used page so various object iterators will continue to work properly.
-    int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
-                                         last_in_use->AllocationTop());
-
-    last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
-    if (size_in_bytes > 0) {
-      Address start = last_in_use->AllocationTop();
-      if (deallocate_blocks) {
-        accounting_stats_.AllocateBytes(size_in_bytes);
-        DeallocateBlock(start, size_in_bytes, add_to_freelist);
-      } else {
-        heap()->CreateFillerObjectAt(start, size_in_bytes);
-      }
-    }
-
-    // New last in use page was in the middle of the list before
-    // sorting so it full.
-    SetTop(new_last_in_use->AllocationTop());
-
-    ASSERT(AllocationTopPage() == new_last_in_use);
-    ASSERT(AllocationTopPage()->WasInUseBeforeMC());
-  }
-
-  PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
-  while (pages_in_use_iterator.has_next()) {
-    Page* p = pages_in_use_iterator.next();
-    if (!p->WasInUseBeforeMC()) {
-      // Empty page is in the middle of a sequence of used pages.
-      // Allocate it as a whole and deallocate immediately.
-      int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
-                                           p->ObjectAreaStart());
-
-      p->SetAllocationWatermark(p->ObjectAreaStart());
-      Address start = p->ObjectAreaStart();
-      if (deallocate_blocks) {
-        accounting_stats_.AllocateBytes(size_in_bytes);
-        DeallocateBlock(start, size_in_bytes, add_to_freelist);
-      } else {
-        heap()->CreateFillerObjectAt(start, size_in_bytes);
-      }
-    }
-  }
-
-  page_list_is_chunk_ordered_ = true;
-}
-
-
-void PagedSpace::PrepareForMarkCompact(bool will_compact) {
-  if (will_compact) {
-    RelinkPageListInChunkOrder(false);
-  }
-}
-
-
-bool PagedSpace::ReserveSpace(int bytes) {
-  Address limit = allocation_info_.limit;
-  Address top = allocation_info_.top;
-  if (limit - top >= bytes) return true;
-
-  // There wasn't enough space in the current page.  Lets put the rest
-  // of the page on the free list and start a fresh page.
-  PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
-
-  Page* reserved_page = TopPageOf(allocation_info_);
-  int bytes_left_to_reserve = bytes;
-  while (bytes_left_to_reserve > 0) {
-    if (!reserved_page->next_page()->is_valid()) {
-      if (heap()->OldGenerationAllocationLimitReached()) return false;
-      Expand(reserved_page);
-    }
-    bytes_left_to_reserve -= Page::kPageSize;
-    reserved_page = reserved_page->next_page();
-    if (!reserved_page->is_valid()) return false;
-  }
-  ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
-  TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
-  SetAllocationInfo(&allocation_info_,
-                    TopPageOf(allocation_info_)->next_page());
+  SetTop(new_area->address(), new_area->address() + size_in_bytes);
+  Allocate(size_in_bytes);
   return true;
 }
 
@@ -2202,47 +2220,70 @@
 // You have to call this last, since the implementation from PagedSpace
 // doesn't know that memory was 'promised' to large object space.
 bool LargeObjectSpace::ReserveSpace(int bytes) {
-  return heap()->OldGenerationSpaceAvailable() >= bytes;
+  return heap()->OldGenerationCapacityAvailable() >= bytes &&
+         (!heap()->incremental_marking()->IsStopped() ||
+           heap()->OldGenerationSpaceAvailable() >= bytes);
 }
 
 
-// Slow case for normal allocation.  Try in order: (1) allocate in the next
-// page in the space, (2) allocate off the space's free list, (3) expand the
-// space, (4) fail.
-HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
-  // Linear allocation in this space has failed.  If there is another page
-  // in the space, move to that page and allocate there.  This allocation
-  // should succeed (size_in_bytes should not be greater than a page's
-  // object area size).
-  Page* current_page = TopPageOf(allocation_info_);
-  if (current_page->next_page()->is_valid()) {
-    return AllocateInNextPage(current_page, size_in_bytes);
+bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
+  if (IsSweepingComplete()) return true;
+
+  intptr_t freed_bytes = 0;
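+  // Conservatively sweep unswept pages one at a time until at least
+  // bytes_to_sweep bytes have been freed or every page has been visited.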
+  Page* p = first_unswept_page_;
+  do {
+    Page* next_page = p->next_page();
+    if (ShouldBeSweptLazily(p)) {
+      if (FLAG_gc_verbose) {
+        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
+               reinterpret_cast<intptr_t>(p));
+      }
+      DecreaseUnsweptFreeBytes(p);
+      freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+    }
+    p = next_page;
+  } while (p != anchor() && freed_bytes < bytes_to_sweep);
+
+  if (p == anchor()) {
+    first_unswept_page_ = Page::FromAddress(NULL);
+  } else {
+    first_unswept_page_ = p;
   }
 
-  // There is no next page in this space.  Try free list allocation unless that
-  // is currently forbidden.
-  if (!heap()->linear_allocation()) {
-    int wasted_bytes;
-    Object* result;
-    MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
-    accounting_stats_.WasteBytes(wasted_bytes);
-    if (maybe->ToObject(&result)) {
-      accounting_stats_.AllocateBytes(size_in_bytes);
+  heap()->LowerOldGenLimits(freed_bytes);
 
-      HeapObject* obj = HeapObject::cast(result);
-      Page* p = Page::FromAddress(obj->address());
+  heap()->FreeQueuedChunks();
 
-      if (obj->address() >= p->AllocationWatermark()) {
-        // There should be no hole between the allocation watermark
-        // and allocated object address.
-        // Memory above the allocation watermark was not swept and
-        // might contain garbage pointers to new space.
-        ASSERT(obj->address() == p->AllocationWatermark());
-        p->SetAllocationWatermark(obj->address() + size_in_bytes);
-      }
+  return IsSweepingComplete();
+}
 
-      return obj;
-    }
+
+void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
+  if (allocation_info_.top >= allocation_info_.limit) return;
+
+  if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
+    // Create a filler object to keep the page iterable if it was iterable.
+    int remaining =
+        static_cast<int>(allocation_info_.limit - allocation_info_.top);
+    heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
+
+    allocation_info_.top = NULL;
+    allocation_info_.limit = NULL;
+  }
+}
+
+
+HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+  // Allocation in this space has failed.
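+  // Try, in order: (1) advance the lazy sweeper and retry the free list,
+  // (2) expand the space with a fresh page (unless the old generation
+  // allocation limit forces a GC first), (3) finish sweeping everything and
+  // retry the free list one last time before failing.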
+
+  // If there are unswept pages, advance the lazy sweeper, then retry the
+  // free list before allocating a new page.
+  if (first_unswept_page_->is_valid()) {
+    AdvanceSweeper(size_in_bytes);
+
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
   }
 
   // Free list allocation failed and there is no next page.  Fail if we have
@@ -2254,9 +2295,18 @@
   }
 
   // Try to expand the space and allocate in the new next page.
-  ASSERT(!current_page->next_page()->is_valid());
-  if (Expand(current_page)) {
-    return AllocateInNextPage(current_page, size_in_bytes);
+  if (Expand()) {
+    return free_list_.Allocate(size_in_bytes);
+  }
+
+  // As a last ditch effort, sweep all the remaining pages to try to find
+  // space.  This may cause a pause.
+  if (!IsSweepingComplete()) {
+    AdvanceSweeper(kMaxInt);
+
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
   }
 
   // Finally, fail.
@@ -2264,53 +2314,6 @@
 }
 
 
-void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
-  current_page->SetAllocationWatermark(allocation_info_.top);
-  int free_size =
-      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
-  if (free_size > 0) {
-    int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
-    accounting_stats_.WasteBytes(wasted_bytes);
-  }
-}
-
-
-void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
-  current_page->SetAllocationWatermark(allocation_info_.top);
-  int free_size =
-      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
-  // In the fixed space free list all the free list items have the right size.
-  // We use up the rest of the page while preserving this invariant.
-  while (free_size >= object_size_in_bytes_) {
-    free_list_.Free(allocation_info_.top);
-    allocation_info_.top += object_size_in_bytes_;
-    free_size -= object_size_in_bytes_;
-    accounting_stats_.WasteBytes(object_size_in_bytes_);
-  }
-}
-
-
-// Add the block at the top of the page to the space's free list, set the
-// allocation info to the next page (assumed to be one), and allocate
-// linearly there.
-HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
-                                         int size_in_bytes) {
-  ASSERT(current_page->next_page()->is_valid());
-  Page* next_page = current_page->next_page();
-  next_page->ClearGCFields();
-  PutRestOfCurrentPageOnFreeList(current_page);
-  SetAllocationInfo(&allocation_info_, next_page);
-  return AllocateLinearly(&allocation_info_, size_in_bytes);
-}
-
-
-void OldSpace::DeallocateBlock(Address start,
-                                 int size_in_bytes,
-                                 bool add_to_freelist) {
-  Free(start, size_in_bytes, add_to_freelist);
-}
-
-
 #ifdef DEBUG
 void PagedSpace::ReportCodeStatistics() {
   Isolate* isolate = Isolate::Current();
@@ -2413,7 +2416,7 @@
 void PagedSpace::CollectCodeStatistics() {
   Isolate* isolate = heap()->isolate();
   HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
@@ -2438,16 +2441,17 @@
 }
 
 
-void OldSpace::ReportStatistics() {
+void PagedSpace::ReportStatistics() {
   int pct = static_cast<int>(Available() * 100 / Capacity());
   PrintF("  capacity: %" V8_PTR_PREFIX "d"
              ", waste: %" V8_PTR_PREFIX "d"
              ", available: %" V8_PTR_PREFIX "d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
+  if (was_swept_conservatively_) return;
   ClearHistograms();
   HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
     CollectHistogramInfo(obj);
   ReportHistogram(true);
 }
@@ -2456,192 +2460,28 @@
 // -----------------------------------------------------------------------------
 // FixedSpace implementation
 
-void FixedSpace::PrepareForMarkCompact(bool will_compact) {
+void FixedSpace::PrepareForMarkCompact() {
   // Call prepare of the super class.
-  PagedSpace::PrepareForMarkCompact(will_compact);
+  PagedSpace::PrepareForMarkCompact();
 
-  if (will_compact) {
-    // Reset relocation info.
-    MCResetRelocationInfo();
-
-    // During a compacting collection, everything in the space is considered
-    // 'available' (set by the call to MCResetRelocationInfo) and we will
-    // rediscover live and wasted bytes during the collection.
-    ASSERT(Available() == Capacity());
-  } else {
-    // During a non-compacting collection, everything below the linear
-    // allocation pointer except wasted top-of-page blocks is considered
-    // allocated and we will rediscover available bytes during the
-    // collection.
-    accounting_stats_.AllocateBytes(free_list_.available());
-  }
+  // During a non-compacting collection, everything below the linear
+  // allocation pointer except wasted top-of-page blocks is considered
+  // allocated and we will rediscover available bytes during the
+  // collection.
+  accounting_stats_.AllocateBytes(free_list_.available());
 
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }
 
 
-void FixedSpace::MCCommitRelocationInfo() {
-  // Update fast allocation info.
-  allocation_info_.top = mc_forwarding_info_.top;
-  allocation_info_.limit = mc_forwarding_info_.limit;
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-
-  // The space is compacted and we haven't yet wasted any space.
-  ASSERT(Waste() == 0);
-
-  // Update allocation_top of each page in use and compute waste.
-  int computed_size = 0;
-  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
-  while (it.has_next()) {
-    Page* page = it.next();
-    Address page_top = page->AllocationTop();
-    computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
-    if (it.has_next()) {
-      accounting_stats_.WasteBytes(
-          static_cast<int>(page->ObjectAreaEnd() - page_top));
-      page->SetAllocationWatermark(page_top);
-    }
-  }
-
-  // Make sure the computed size - based on the used portion of the
-  // pages in use - matches the size we adjust during allocation.
-  ASSERT(computed_size == Size());
-}
-
-
-// Slow case for normal allocation. Try in order: (1) allocate in the next
-// page in the space, (2) allocate off the space's free list, (3) expand the
-// space, (4) fail.
-HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
-  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
-  // Linear allocation in this space has failed.  If there is another page
-  // in the space, move to that page and allocate there.  This allocation
-  // should succeed.
-  Page* current_page = TopPageOf(allocation_info_);
-  if (current_page->next_page()->is_valid()) {
-    return AllocateInNextPage(current_page, size_in_bytes);
-  }
-
-  // There is no next page in this space.  Try free list allocation unless
-  // that is currently forbidden.  The fixed space free list implicitly assumes
-  // that all free blocks are of the fixed size.
-  if (!heap()->linear_allocation()) {
-    Object* result;
-    MaybeObject* maybe = free_list_.Allocate();
-    if (maybe->ToObject(&result)) {
-      accounting_stats_.AllocateBytes(size_in_bytes);
-      HeapObject* obj = HeapObject::cast(result);
-      Page* p = Page::FromAddress(obj->address());
-
-      if (obj->address() >= p->AllocationWatermark()) {
-        // There should be no hole between the allocation watermark
-        // and allocated object address.
-        // Memory above the allocation watermark was not swept and
-        // might contain garbage pointers to new space.
-        ASSERT(obj->address() == p->AllocationWatermark());
-        p->SetAllocationWatermark(obj->address() + size_in_bytes);
-      }
-
-      return obj;
-    }
-  }
-
-  // Free list allocation failed and there is no next page.  Fail if we have
-  // hit the old generation size limit that should cause a garbage
-  // collection.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    return NULL;
-  }
-
-  // Try to expand the space and allocate in the new next page.
-  ASSERT(!current_page->next_page()->is_valid());
-  if (Expand(current_page)) {
-    return AllocateInNextPage(current_page, size_in_bytes);
-  }
-
-  // Finally, fail.
-  return NULL;
-}
-
-
-// Move to the next page (there is assumed to be one) and allocate there.
-// The top of page block is always wasted, because it is too small to hold a
-// map.
-HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
-                                           int size_in_bytes) {
-  ASSERT(current_page->next_page()->is_valid());
-  ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
-  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
-  Page* next_page = current_page->next_page();
-  next_page->ClearGCFields();
-  current_page->SetAllocationWatermark(allocation_info_.top);
-  accounting_stats_.WasteBytes(page_extra_);
-  SetAllocationInfo(&allocation_info_, next_page);
-  return AllocateLinearly(&allocation_info_, size_in_bytes);
-}
-
-
-void FixedSpace::DeallocateBlock(Address start,
-                                 int size_in_bytes,
-                                 bool add_to_freelist) {
-  // Free-list elements in fixed space are assumed to have a fixed size.
-  // We break the free block into chunks and add them to the free list
-  // individually.
-  int size = object_size_in_bytes();
-  ASSERT(size_in_bytes % size == 0);
-  Address end = start + size_in_bytes;
-  for (Address a = start; a < end; a += size) {
-    Free(a, add_to_freelist);
-  }
-}
-
-
-#ifdef DEBUG
-void FixedSpace::ReportStatistics() {
-  int pct = static_cast<int>(Available() * 100 / Capacity());
-  PrintF("  capacity: %" V8_PTR_PREFIX "d"
-             ", waste: %" V8_PTR_PREFIX "d"
-             ", available: %" V8_PTR_PREFIX "d, %%%d\n",
-         Capacity(), Waste(), Available(), pct);
-
-  ClearHistograms();
-  HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
-    CollectHistogramInfo(obj);
-  ReportHistogram(false);
-}
-#endif
-
-
 // -----------------------------------------------------------------------------
 // MapSpace implementation
 
-void MapSpace::PrepareForMarkCompact(bool will_compact) {
-  // Call prepare of the super class.
-  FixedSpace::PrepareForMarkCompact(will_compact);
-
-  if (will_compact) {
-    // Initialize map index entry.
-    int page_count = 0;
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    while (it.has_next()) {
-      ASSERT_MAP_PAGE_INDEX(page_count);
-
-      Page* p = it.next();
-      ASSERT(p->mc_page_index == page_count);
-
-      page_addresses_[page_count++] = p->address();
-    }
-  }
-}
-
-
 #ifdef DEBUG
 void MapSpace::VerifyObject(HeapObject* object) {
   // The object should be a map or a free-list node.
-  ASSERT(object->IsMap() || object->IsByteArray());
+  ASSERT(object->IsMap() || object->IsFreeSpace());
 }
 #endif
 
@@ -2662,129 +2502,73 @@
 // LargeObjectIterator
 
 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
-  current_ = space->first_chunk_;
+  current_ = space->first_page_;
   size_func_ = NULL;
 }
 
 
 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                          HeapObjectCallback size_func) {
-  current_ = space->first_chunk_;
+  current_ = space->first_page_;
   size_func_ = size_func;
 }
 
 
-HeapObject* LargeObjectIterator::next() {
+HeapObject* LargeObjectIterator::Next() {
   if (current_ == NULL) return NULL;
 
   HeapObject* object = current_->GetObject();
-  current_ = current_->next();
+  current_ = current_->next_page();
   return object;
 }
 
 
 // -----------------------------------------------------------------------------
-// LargeObjectChunk
-
-LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
-                                        Executability executable) {
-  size_t requested = ChunkSizeFor(size_in_bytes);
-  size_t size;
-  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
-  Isolate* isolate = Isolate::Current();
-  void* mem = isolate->memory_allocator()->AllocateRawMemory(
-      requested + guard_size, &size, executable);
-  if (mem == NULL) return NULL;
-
-  // The start of the chunk may be overlayed with a page so we have to
-  // make sure that the page flags fit in the size field.
-  ASSERT((size & Page::kPageFlagMask) == 0);
-
-  LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
-  if (size < requested + guard_size) {
-    isolate->memory_allocator()->FreeRawMemory(
-        mem, size, executable);
-    LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
-    return NULL;
-  }
-
-  if (guard_size != 0) {
-    OS::Guard(mem, guard_size);
-    size -= guard_size;
-    mem = static_cast<Address>(mem) + guard_size;
-  }
-
-  ObjectSpace space = (executable == EXECUTABLE)
-      ? kObjectSpaceCodeSpace
-      : kObjectSpaceLoSpace;
-  isolate->memory_allocator()->PerformAllocationCallback(
-      space, kAllocationActionAllocate, size);
-
-  LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
-  chunk->size_ = size;
-  chunk->GetPage()->heap_ = isolate->heap();
-  return chunk;
-}
-
-
-void LargeObjectChunk::Free(Executability executable) {
-  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
-  ObjectSpace space =
-      (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
-  // Do not access instance fields after FreeRawMemory!
-  Address my_address = address();
-  size_t my_size = size();
-  Isolate* isolate = GetPage()->heap_->isolate();
-  MemoryAllocator* a = isolate->memory_allocator();
-  a->FreeRawMemory(my_address - guard_size, my_size + guard_size, executable);
-  a->PerformAllocationCallback(space, kAllocationActionFree, my_size);
-  LOG(isolate, DeleteEvent("LargeObjectChunk", my_address));
-}
-
-
-int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
-  int os_alignment = static_cast<int>(OS::AllocateAlignment());
-  if (os_alignment < Page::kPageSize) {
-    size_in_bytes += (Page::kPageSize - os_alignment);
-  }
-  return size_in_bytes + Page::kObjectStartOffset;
-}
-
-// -----------------------------------------------------------------------------
 // LargeObjectSpace
+static bool ComparePointers(void* key1, void* key2) {
+  return key1 == key2;
+}
 
-LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
+
+LargeObjectSpace::LargeObjectSpace(Heap* heap,
+                                   intptr_t max_capacity,
+                                   AllocationSpace id)
     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
-      first_chunk_(NULL),
+      max_capacity_(max_capacity),
+      first_page_(NULL),
       size_(0),
       page_count_(0),
-      objects_size_(0) {}
+      objects_size_(0),
+      chunk_map_(ComparePointers, 1024) {}
 
 
-bool LargeObjectSpace::Setup() {
-  first_chunk_ = NULL;
+bool LargeObjectSpace::SetUp() {
+  first_page_ = NULL;
   size_ = 0;
   page_count_ = 0;
   objects_size_ = 0;
+  chunk_map_.Clear();
   return true;
 }
 
 
 void LargeObjectSpace::TearDown() {
-  while (first_chunk_ != NULL) {
-    LargeObjectChunk* chunk = first_chunk_;
-    first_chunk_ = first_chunk_->next();
-    chunk->Free(chunk->GetPage()->PageExecutability());
+  while (first_page_ != NULL) {
+    LargePage* page = first_page_;
+    first_page_ = first_page_->next_page();
+    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
+
+    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
+    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
+        space, kAllocationActionFree, page->size());
+    heap()->isolate()->memory_allocator()->Free(page);
   }
-  Setup();
+  SetUp();
 }
 
 
-MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
-                                                   int object_size,
-                                                   Executability executable) {
-  ASSERT(0 < object_size && object_size <= requested_size);
-
+MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
+                                           Executability executable) {
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->always_allocate() &&
@@ -2792,190 +2576,136 @@
     return Failure::RetryAfterGC(identity());
   }
 
-  LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
-  if (chunk == NULL) {
+  if (Size() + object_size > max_capacity_) {
     return Failure::RetryAfterGC(identity());
   }
 
-  size_ += static_cast<int>(chunk->size());
-  objects_size_ += requested_size;
+  LargePage* page = heap()->isolate()->memory_allocator()->
+      AllocateLargePage(object_size, executable, this);
+  if (page == NULL) return Failure::RetryAfterGC(identity());
+  ASSERT(page->area_size() >= object_size);
+
+  size_ += static_cast<int>(page->size());
+  objects_size_ += object_size;
   page_count_++;
-  chunk->set_next(first_chunk_);
-  first_chunk_ = chunk;
+  page->set_next_page(first_page_);
+  first_page_ = page;
 
-  // Initialize page header.
-  Page* page = chunk->GetPage();
-  Address object_address = page->ObjectAreaStart();
+  // Register all MemoryChunk::kAlignment-aligned chunks covered by
+  // this large page in the chunk map.
+  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
+  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
+  for (uintptr_t key = base; key <= limit; key++) {
+    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                              static_cast<uint32_t>(key),
+                                              true);
+    ASSERT(entry != NULL);
+    entry->value = page;
+  }
 
-  // Clear the low order bit of the second word in the page to flag it as a
-  // large object page.  If the chunk_size happened to be written there, its
-  // low order bit should already be clear.
-  page->SetIsLargeObjectPage(true);
-  page->SetPageExecutability(executable);
-  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
-  return HeapObject::FromAddress(object_address);
-}
+  HeapObject* object = page->GetObject();
 
+#ifdef DEBUG
+  // Make the object consistent so the heap can be verified in OldSpaceStep.
+  reinterpret_cast<Object**>(object->address())[0] =
+      heap()->fixed_array_map();
+  reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+#endif
 
-MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
-  ASSERT(0 < size_in_bytes);
-  return AllocateRawInternal(size_in_bytes,
-                             size_in_bytes,
-                             EXECUTABLE);
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
-  ASSERT(0 < size_in_bytes);
-  return AllocateRawInternal(size_in_bytes,
-                             size_in_bytes,
-                             NOT_EXECUTABLE);
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
-  ASSERT(0 < size_in_bytes);
-  return AllocateRawInternal(size_in_bytes,
-                             size_in_bytes,
-                             NOT_EXECUTABLE);
+  heap()->incremental_marking()->OldSpaceStep(object_size);
+  return object;
 }
 
 
 // GC support
 MaybeObject* LargeObjectSpace::FindObject(Address a) {
-  for (LargeObjectChunk* chunk = first_chunk_;
-       chunk != NULL;
-       chunk = chunk->next()) {
-    Address chunk_address = chunk->address();
-    if (chunk_address <= a && a < chunk_address + chunk->size()) {
-      return chunk->GetObject();
-    }
+  LargePage* page = FindPage(a);
+  if (page != NULL) {
+    return page->GetObject();
   }
   return Failure::Exception();
 }
 
 
-LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
-  // TODO(853): Change this implementation to only find executable
-  // chunks and use some kind of hash-based approach to speed it up.
-  for (LargeObjectChunk* chunk = first_chunk_;
-       chunk != NULL;
-       chunk = chunk->next()) {
-    Address chunk_address = chunk->address();
-    if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
-      return chunk;
+LargePage* LargeObjectSpace::FindPage(Address a) {
+  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
+  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                        static_cast<uint32_t>(key),
+                                        false);
+  if (e != NULL) {
+    ASSERT(e->value != NULL);
+    LargePage* page = reinterpret_cast<LargePage*>(e->value);
+    ASSERT(page->is_valid());
+    if (page->Contains(a)) {
+      return page;
     }
   }
   return NULL;
 }
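FindPage() replaces the old linear FindChunkContainingPc() walk: every chunk base is MemoryChunk::kAlignment-aligned, so each large page is registered under every aligned slot it covers and any interior address maps back to its page with one hash lookup. A standalone sketch of that bookkeeping, assuming std::unordered_map in place of V8's HashMap and a 1MB alignment:

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    const uintptr_t kAlignment = 1 << 20;  // 1MB chunk alignment (assumed)

    std::unordered_map<uintptr_t, void*> chunk_map;

    // Register every kAlignment-aligned slot covered by [page, page + size).
    void RegisterLargePage(void* page, size_t size) {
      uintptr_t base = reinterpret_cast<uintptr_t>(page) / kAlignment;
      uintptr_t limit = base + (size - 1) / kAlignment;
      for (uintptr_t key = base; key <= limit; key++) chunk_map[key] = page;
    }

    // O(1) lookup for any interior address, including derived pointers.
    void* FindLargePage(void* addr) {
      uintptr_t key = reinterpret_cast<uintptr_t>(addr) / kAlignment;
      auto it = chunk_map.find(key);
      return it == chunk_map.end() ? nullptr : it->second;
    }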
 
 
-void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
-  LargeObjectIterator it(this);
-  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
-    // We only have code, sequential strings, or fixed arrays in large
-    // object space, and only fixed arrays can possibly contain pointers to
-    // the young generation.
-    if (object->IsFixedArray()) {
-      Page* page = Page::FromAddress(object->address());
-      uint32_t marks = page->GetRegionMarks();
-      uint32_t newmarks = Page::kAllRegionsCleanMarks;
-
-      if (marks != Page::kAllRegionsCleanMarks) {
-        // For a large page a single dirty mark corresponds to several
-        // regions (modulo 32). So we treat a large page as a sequence of
-        // normal pages of size Page::kPageSize having same dirty marks
-        // and subsequently iterate dirty regions on each of these pages.
-        Address start = object->address();
-        Address end = page->ObjectAreaEnd();
-        Address object_end = start + object->Size();
-
-        // Iterate regions of the first normal page covering object.
-        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
-        newmarks |=
-            heap()->IterateDirtyRegions(marks >> first_region_number,
-                                        start,
-                                        end,
-                                        &Heap::IteratePointersInDirtyRegion,
-                                        copy_object) << first_region_number;
-
-        start = end;
-        end = start + Page::kPageSize;
-        while (end <= object_end) {
-          // Iterate next 32 regions.
-          newmarks |=
-              heap()->IterateDirtyRegions(marks,
-                                          start,
-                                          end,
-                                          &Heap::IteratePointersInDirtyRegion,
-                                          copy_object);
-          start = end;
-          end = start + Page::kPageSize;
-        }
-
-        if (start != object_end) {
-          // Iterate the last piece of an object which is less than
-          // Page::kPageSize.
-          newmarks |=
-              heap()->IterateDirtyRegions(marks,
-                                          start,
-                                          object_end,
-                                          &Heap::IteratePointersInDirtyRegion,
-                                          copy_object);
-        }
-
-        page->SetRegionMarks(newmarks);
-      }
-    }
-  }
-}
-
-
 void LargeObjectSpace::FreeUnmarkedObjects() {
-  LargeObjectChunk* previous = NULL;
-  LargeObjectChunk* current = first_chunk_;
+  LargePage* previous = NULL;
+  LargePage* current = first_page_;
   while (current != NULL) {
     HeapObject* object = current->GetObject();
-    if (object->IsMarked()) {
-      object->ClearMark();
-      heap()->mark_compact_collector()->tracer()->decrement_marked_count();
+    // Can this large page contain pointers to non-trivial objects?  No other
+    // pointer object is this big.
+    bool is_pointer_object = object->IsFixedArray();
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) {
+      mark_bit.Clear();
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
       previous = current;
-      current = current->next();
+      current = current->next_page();
     } else {
+      LargePage* page = current;
       // Cut the chunk out from the chunk list.
-      LargeObjectChunk* current_chunk = current;
-      current = current->next();
+      current = current->next_page();
       if (previous == NULL) {
-        first_chunk_ = current;
+        first_page_ = current;
       } else {
-        previous->set_next(current);
+        previous->set_next_page(current);
       }
 
       // Free the chunk.
       heap()->mark_compact_collector()->ReportDeleteIfNeeded(
           object, heap()->isolate());
-      LiveObjectList::ProcessNonLive(object);
-
-      size_ -= static_cast<int>(current_chunk->size());
+      size_ -= static_cast<int>(page->size());
       objects_size_ -= object->Size();
       page_count_--;
-      current_chunk->Free(current_chunk->GetPage()->PageExecutability());
+
+      // Remove entries belonging to this page.
+      // Use a local alignment variable to keep the lines below within the
+      // 80-character limit checked by tools/presubmit.py.
+      const intptr_t alignment = MemoryChunk::kAlignment;
+      uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
+      uintptr_t limit = base + (page->size()-1)/alignment;
+      for (uintptr_t key = base; key <= limit; key++) {
+        chunk_map_.Remove(reinterpret_cast<void*>(key),
+                          static_cast<uint32_t>(key));
+      }
+
+      if (is_pointer_object) {
+        heap()->QueueMemoryChunkForFree(page);
+      } else {
+        heap()->isolate()->memory_allocator()->Free(page);
+      }
     }
   }
+  heap()->FreeQueuedChunks();
 }
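FreeUnmarkedObjects() is the textbook sweep over a singly linked page list: a trailing previous pointer lets an unmarked page be unlinked in O(1) mid-iteration. A generic sketch of the pattern (Node and is_marked are illustrative names, not V8 types):

    // Sweep a singly linked list, freeing unmarked nodes in place.
    struct Node { bool is_marked; Node* next; };

    void Sweep(Node** first) {
      Node* previous = nullptr;
      Node* current = *first;
      while (current != nullptr) {
        if (current->is_marked) {
          current->is_marked = false;  // clear the mark for the next cycle
          previous = current;
          current = current->next;
        } else {
          Node* dead = current;
          current = current->next;
          if (previous == nullptr) {
            *first = current;          // unlink the list head
          } else {
            previous->next = current;  // unlink an interior node
          }
          delete dead;                 // the real code frees or queues the page
        }
      }
    }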
 
 
 bool LargeObjectSpace::Contains(HeapObject* object) {
   Address address = object->address();
-  if (heap()->new_space()->Contains(address)) {
-    return false;
-  }
-  Page* page = Page::FromAddress(address);
+  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
 
-  SLOW_ASSERT(!page->IsLargeObjectPage()
-              || !FindObject(address)->IsFailure());
+  bool owned = (chunk->owner() == this);
 
-  return page->IsLargeObjectPage();
+  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
+
+  return owned;
 }
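Contains() now masks the address down to the covering MemoryChunk header and compares owners, instead of consulting page flags. A sketch of the mask-to-header trick under the same 1MB-alignment assumption; note it is only valid within the first aligned region of a chunk, which is why arbitrary interior pointers into large pages go through FindPage() above:

    #include <cstdint>

    struct Chunk { void* owner; /* ...rest of the header... */ };

    const uintptr_t kAlignmentMask = (1 << 20) - 1;  // kAlignment - 1 (assumed)

    // Mask an interior address down to the chunk header at the 1MB boundary.
    Chunk* ChunkFromAddress(void* addr) {
      return reinterpret_cast<Chunk*>(
          reinterpret_cast<uintptr_t>(addr) & ~kAlignmentMask);
    }

    // Ownership test in the spirit of LargeObjectSpace::Contains().
    bool SpaceContains(void* space, void* addr) {
      return ChunkFromAddress(addr)->owner == space;
    }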
 
 
@@ -2983,14 +2713,14 @@
 // We do not assume that the large object iterator works, because it depends
 // on the invariants we are checking during verification.
 void LargeObjectSpace::Verify() {
-  for (LargeObjectChunk* chunk = first_chunk_;
+  for (LargePage* chunk = first_page_;
        chunk != NULL;
-       chunk = chunk->next()) {
+       chunk = chunk->next_page()) {
     // Each chunk contains an object that starts at the large object page's
     // object area start.
     HeapObject* object = chunk->GetObject();
     Page* page = Page::FromAddress(object->address());
-    ASSERT(object->address() == page->ObjectAreaStart());
+    ASSERT(object->address() == page->area_start());
 
     // The first word should be a map, and we expect all map pointers to be
     // in map space.
@@ -3015,9 +2745,6 @@
                           object->Size(),
                           &code_visitor);
     } else if (object->IsFixedArray()) {
-      // We loop over fixed arrays ourselves, rather then using the visitor,
-      // because the visitor doesn't support the start/offset iteration
-      // needed for IsRegionDirty.
       FixedArray* array = FixedArray::cast(object);
       for (int j = 0; j < array->length(); j++) {
         Object* element = array->get(j);
@@ -3025,13 +2752,6 @@
           HeapObject* element_object = HeapObject::cast(element);
           ASSERT(heap()->Contains(element_object));
           ASSERT(element_object->map()->IsMap());
-          if (heap()->InNewSpace(element_object)) {
-            Address array_addr = object->address();
-            Address element_addr = array_addr + FixedArray::kHeaderSize +
-                j * kPointerSize;
-
-            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
-          }
         }
       }
     }
@@ -3041,7 +2761,7 @@
 
 void LargeObjectSpace::Print() {
   LargeObjectIterator it(this);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     obj->Print();
   }
 }
@@ -3052,7 +2772,7 @@
   int num_objects = 0;
   ClearHistograms();
   LargeObjectIterator it(this);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     num_objects++;
     CollectHistogramInfo(obj);
   }
@@ -3066,13 +2786,38 @@
 void LargeObjectSpace::CollectCodeStatistics() {
   Isolate* isolate = heap()->isolate();
   LargeObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
     }
   }
 }
+
+
+void Page::Print() {
+  // Make a best-effort attempt to print the objects in the page.
+  PrintF("Page@%p in %s\n",
+         this->address(),
+         AllocationSpaceName(this->owner()->identity()));
+  printf(" --------------------------------------\n");
+  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
+  unsigned mark_size = 0;
+  for (HeapObject* object = objects.Next();
+       object != NULL;
+       object = objects.Next()) {
+    bool is_marked = Marking::MarkBitFrom(object).Get();
+    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
+    if (is_marked) {
+      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
+    }
+    object->ShortPrint();
+    PrintF("\n");
+  }
+  printf(" --------------------------------------\n");
+  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
+}
+
 #endif  // DEBUG
 
 } }  // namespace v8::internal
diff --git a/src/spaces.h b/src/spaces.h
index f156496..b614c3b 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -29,6 +29,7 @@
 #define V8_SPACES_H_
 
 #include "allocation.h"
+#include "hashmap.h"
 #include "list.h"
 #include "log.h"
 
@@ -49,45 +50,47 @@
 //
 // The semispaces of the young generation are contiguous.  The old and map
 // spaces consists of a list of pages. A page has a page header and an object
-// area. A page size is deliberately chosen as 8K bytes.
-// The first word of a page is an opaque page header that has the
-// address of the next page and its ownership information. The second word may
-// have the allocation top address of this page. Heap objects are aligned to the
-// pointer size.
+// area.
 //
 // There is a separate large object space for objects larger than
 // Page::kMaxHeapObjectSize, so that they do not have to move during
 // collection. The large object space is paged. Pages in large object space
-// may be larger than 8K.
+// may be larger than the page size.
 //
-// A card marking write barrier is used to keep track of intergenerational
-// references. Old space pages are divided into regions of Page::kRegionSize
-// size. Each region has a corresponding dirty bit in the page header which is
-// set if the region might contain pointers to new space. For details about
-// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
-// method body.
+// A store-buffer based write barrier is used to keep track of intergenerational
+// references.  See store-buffer.h.
 //
-// During scavenges and mark-sweep collections we iterate intergenerational
-// pointers without decoding heap object maps so if the page belongs to old
-// pointer space or large object space it is essential to guarantee that
-// the page does not contain any garbage pointers to new space: every pointer
-// aligned word which satisfies the Heap::InNewSpace() predicate must be a
-// pointer to a live heap object in new space. Thus objects in old pointer
-// and large object spaces should have a special layout (e.g. no bare integer
-// fields). This requirement does not apply to map space which is iterated in
-// a special fashion. However we still require pointer fields of dead maps to
-// be cleaned.
+// During scavenges and mark-sweep collections we sometimes (after a store
+// buffer overflow) iterate intergenerational pointers without decoding heap
+// object maps so if the page belongs to old pointer space or large object
+// space it is essential to guarantee that the page does not contain any
+// garbage pointers to new space: every pointer aligned word which satisfies
+// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
+// new space. Thus objects in old pointer and large object spaces should have a
+// special layout (e.g. no bare integer fields). This requirement does not
+// apply to map space which is iterated in a special fashion. However we still
+// require pointer fields of dead maps to be cleaned.
 //
-// To enable lazy cleaning of old space pages we use a notion of allocation
-// watermark. Every pointer under watermark is considered to be well formed.
-// Page allocation watermark is not necessarily equal to page allocation top but
-// all alive objects on page should reside under allocation watermark.
-// During scavenge allocation watermark might be bumped and invalid pointers
-// might appear below it. To avoid following them we store a valid watermark
-// into special field in the page header and set a page WATERMARK_INVALIDATED
-// flag. For details see comments in the Page::SetAllocationWatermark() method
-// body.
+// To enable lazy cleaning of old space pages we can mark chunks of the page
+// as being garbage.  Garbage sections are marked with a special map.  These
+// sections are skipped when scanning the page, even if we are otherwise
+// scanning without regard for object boundaries.  Garbage sections are chained
+// together to form a free list after a GC.  Garbage sections created outside
+// of GCs by object truncation etc. may not be in the free list chain.  Very
+// small free spaces are ignored, they need only be cleaned of bogus pointers
+// into new space.
 //
+// Each page may have up to one special garbage section.  The start of this
+// section is denoted by the top field in the space.  The end of the section
+// is denoted by the limit field in the space.  This special garbage section
+// is not marked with a free space map in the data.  The point of this section
+// is to enable linear allocation without having to constantly update the byte
+// array every time the top field is updated and a new object is created.  The
+// special garbage section is not in the chain of garbage sections.
+//
+// Since the top and limit fields are in the space, not the page, only one page
+// has a special garbage section, and if the top and limit are equal then there
+// is no special garbage section.
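The top/limit garbage section described above is what keeps linear allocation cheap: the fast path only bumps top, and the free list is consulted only when top reaches limit. A minimal bump-pointer sketch of that fast path (names hypothetical):

    #include <cstddef>
    #include <cstdint>

    // The linear allocation area of a space (hypothetical stand-in).
    struct LinearArea {
      uintptr_t top;    // start of the special garbage section
      uintptr_t limit;  // end of the special garbage section
    };

    // Fast path: bump top. Returns 0 when the section is exhausted and the
    // caller must refill top/limit from the free list.
    uintptr_t AllocateLinearly(LinearArea* area, size_t size_in_bytes) {
      if (area->limit - area->top < size_in_bytes) return 0;
      uintptr_t result = area->top;
      area->top += size_in_bytes;  // no per-object bookkeeping on this path
      return result;
    }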
 
 // Some assertion macros used in the debugging mode.
 
@@ -101,7 +104,7 @@
   ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
 
 #define ASSERT_OBJECT_SIZE(size)                                               \
-  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
+  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
 
 #define ASSERT_PAGE_OFFSET(offset)                                             \
   ASSERT((Page::kObjectStartOffset <= offset)                                  \
@@ -114,30 +117,540 @@
 class PagedSpace;
 class MemoryAllocator;
 class AllocationInfo;
+class Space;
+class FreeList;
+class MemoryChunk;
+
+class MarkBit {
+ public:
+  typedef uint32_t CellType;
+
+  inline MarkBit(CellType* cell, CellType mask, bool data_only)
+      : cell_(cell), mask_(mask), data_only_(data_only) { }
+
+  inline CellType* cell() { return cell_; }
+  inline CellType mask() { return mask_; }
+
+#ifdef DEBUG
+  bool operator==(const MarkBit& other) {
+    return cell_ == other.cell_ && mask_ == other.mask_;
+  }
+#endif
+
+  inline void Set() { *cell_ |= mask_; }
+  inline bool Get() { return (*cell_ & mask_) != 0; }
+  inline void Clear() { *cell_ &= ~mask_; }
+
+  inline bool data_only() { return data_only_; }
+
+  inline MarkBit Next() {
+    CellType new_mask = mask_ << 1;
+    if (new_mask == 0) {
+      return MarkBit(cell_ + 1, 1, data_only_);
+    } else {
+      return MarkBit(cell_, new_mask, data_only_);
+    }
+  }
+
+ private:
+  CellType* cell_;
+  CellType mask_;
+  // This boolean indicates that the object is in a data-only space with no
+  // pointers.  This enables some optimizations when marking.
+  // It is expected that this field is inlined and turned into control flow
+  // at the place where the MarkBit object is created.
+  bool data_only_;
+};
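A MarkBit is just a (cell pointer, bit mask) pair, so Set/Get/Clear are single logical operations and Next() touches a new cell only when the shifted mask overflows. A self-contained sketch of the same representation:

    #include <cstdint>

    // Cell-and-mask mark bit (sketch of the MarkBit representation).
    struct Bit {
      uint32_t* cell;
      uint32_t mask;
      void Set() { *cell |= mask; }
      bool Get() { return (*cell & mask) != 0; }
      void Clear() { *cell &= ~mask; }
      // Advance one bit, spilling into the next 32-bit cell on overflow.
      Bit Next() {
        uint32_t next_mask = mask << 1;
        if (next_mask == 0) return Bit{cell + 1, 1u};
        return Bit{cell, next_mask};
      }
    };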
+
+
+// Bitmap is a sequence of cells each containing fixed number of bits.
+class Bitmap {
+ public:
+  static const uint32_t kBitsPerCell = 32;
+  static const uint32_t kBitsPerCellLog2 = 5;
+  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
+  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
+  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
+
+  static const size_t kLength =
+    (1 << kPageSizeBits) >> (kPointerSizeLog2);
+
+  static const size_t kSize =
+    (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
+
+
+  static int CellsForLength(int length) {
+    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
+  }
+
+  int CellsCount() {
+    return CellsForLength(kLength);
+  }
+
+  static int SizeFor(int cells_count) {
+    return sizeof(MarkBit::CellType) * cells_count;
+  }
+
+  INLINE(static uint32_t IndexToCell(uint32_t index)) {
+    return index >> kBitsPerCellLog2;
+  }
+
+  INLINE(static uint32_t CellToIndex(uint32_t index)) {
+    return index << kBitsPerCellLog2;
+  }
+
+  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
+    return (index + kBitIndexMask) & ~kBitIndexMask;
+  }
+
+  INLINE(MarkBit::CellType* cells()) {
+    return reinterpret_cast<MarkBit::CellType*>(this);
+  }
+
+  INLINE(Address address()) {
+    return reinterpret_cast<Address>(this);
+  }
+
+  INLINE(static Bitmap* FromAddress(Address addr)) {
+    return reinterpret_cast<Bitmap*>(addr);
+  }
+
+  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
+    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
+    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
+    return MarkBit(cell, mask, data_only);
+  }
+
+  static inline void Clear(MemoryChunk* chunk);
+
+  static void PrintWord(uint32_t word, uint32_t himask = 0) {
+    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
+      if ((mask & himask) != 0) PrintF("[");
+      PrintF((mask & word) ? "1" : "0");
+      if ((mask & himask) != 0) PrintF("]");
+    }
+  }
+
+  class CellPrinter {
+   public:
+    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
+
+    void Print(uint32_t pos, uint32_t cell) {
+      if (cell == seq_type) {
+        seq_length++;
+        return;
+      }
+
+      Flush();
+
+      if (IsSeq(cell)) {
+        seq_start = pos;
+        seq_length = 0;
+        seq_type = cell;
+        return;
+      }
+
+      PrintF("%d: ", pos);
+      PrintWord(cell);
+      PrintF("\n");
+    }
+
+    void Flush() {
+      if (seq_length > 0) {
+        PrintF("%d: %dx%d\n",
+               seq_start,
+               seq_type == 0 ? 0 : 1,
+               seq_length * kBitsPerCell);
+        seq_length = 0;
+      }
+    }
+
+    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
+
+   private:
+    uint32_t seq_start;
+    uint32_t seq_type;
+    uint32_t seq_length;
+  };
+
+  void Print() {
+    CellPrinter printer;
+    for (int i = 0; i < CellsCount(); i++) {
+      printer.Print(i, cells()[i]);
+    }
+    printer.Flush();
+    PrintF("\n");
+  }
+
+  bool IsClean() {
+    for (int i = 0; i < CellsCount(); i++) {
+      if (cells()[i] != 0) return false;
+    }
+    return true;
+  }
+};
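The bitmap budgets one mark bit per pointer-sized word of the chunk, which is where kLength and kSize come from, and an index splits into a cell number (index >> 5) and a bit position (index & 31). A sketch of the arithmetic, assuming 1MB chunks and a 64-bit build:

    #include <cstddef>
    #include <cstdint>

    const uint32_t kPageSizeBits = 20;     // assumed: 1MB chunks
    const uint32_t kPointerSizeLog2 = 3;   // assumed: 64-bit build

    // One mark bit per pointer-sized word: 2^20 / 8 = 131072 bits...
    const size_t kLength = (1 << kPageSizeBits) >> kPointerSizeLog2;
    // ...stored in kLength / 8 = 16384 bytes of bitmap.
    const size_t kSize = kLength >> 3;

    // An index splits into a 32-bit cell and a bit position inside it.
    inline uint32_t CellFor(uint32_t index) { return index >> 5; }
    inline uint32_t MaskFor(uint32_t index) { return 1u << (index & 31); }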
+
+
+class SkipList;
+class SlotsBuffer;
+
+// MemoryChunk represents a memory region owned by a specific space.
+// It is divided into the header and the body. Chunk start is always
+// 1MB aligned. Start of the body is aligned so it can accommodate
+// any heap object.
+class MemoryChunk {
+ public:
+  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
+  static MemoryChunk* FromAddress(Address a) {
+    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
+  }
+
+  // Only works for addresses in pointer spaces, not data or code spaces.
+  static inline MemoryChunk* FromAnyPointerAddress(Address addr);
+
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  bool is_valid() { return address() != NULL; }
+
+  MemoryChunk* next_chunk() const { return next_chunk_; }
+  MemoryChunk* prev_chunk() const { return prev_chunk_; }
+
+  void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
+  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
+
+  Space* owner() const {
+    if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
+        kFailureTag) {
+      return reinterpret_cast<Space*>(owner_ - kFailureTag);
+    } else {
+      return NULL;
+    }
+  }
+
+  void set_owner(Space* space) {
+    ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
+    owner_ = reinterpret_cast<Address>(space) + kFailureTag;
+    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
+           kFailureTag);
+  }
+
+  VirtualMemory* reserved_memory() {
+    return &reservation_;
+  }
+
+  void InitializeReservedMemory() {
+    reservation_.Reset();
+  }
+
+  void set_reserved_memory(VirtualMemory* reservation) {
+    ASSERT_NOT_NULL(reservation);
+    reservation_.TakeControl(reservation);
+  }
+
+  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
+  void initialize_scan_on_scavenge(bool scan) {
+    if (scan) {
+      SetFlag(SCAN_ON_SCAVENGE);
+    } else {
+      ClearFlag(SCAN_ON_SCAVENGE);
+    }
+  }
+  inline void set_scan_on_scavenge(bool scan);
+
+  int store_buffer_counter() { return store_buffer_counter_; }
+  void set_store_buffer_counter(int counter) {
+    store_buffer_counter_ = counter;
+  }
+
+  bool Contains(Address addr) {
+    return addr >= area_start() && addr < area_end();
+  }
+
+  // Checks whether addr can be a limit of addresses in this page.
+  // It's a limit if it's in the page, or if it's just after the
+  // last byte of the page.
+  bool ContainsLimit(Address addr) {
+    return addr >= area_start() && addr <= area_end();
+  }
+
+  enum MemoryChunkFlags {
+    IS_EXECUTABLE,
+    ABOUT_TO_BE_FREED,
+    POINTERS_TO_HERE_ARE_INTERESTING,
+    POINTERS_FROM_HERE_ARE_INTERESTING,
+    SCAN_ON_SCAVENGE,
+    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
+    IN_TO_SPACE,    // All pages in new space have one of these two set.
+    NEW_SPACE_BELOW_AGE_MARK,
+    CONTAINS_ONLY_DATA,
+    EVACUATION_CANDIDATE,
+    RESCAN_ON_EVACUATION,
+
+    // Pages swept precisely can be iterated, hitting only the live objects.
+    // Whereas those swept conservatively cannot be iterated over. Both flags
+    // indicate that marking bits have been cleared by the sweeper, otherwise
+    // marking bits are still intact.
+    WAS_SWEPT_PRECISELY,
+    WAS_SWEPT_CONSERVATIVELY,
+
+    // Last flag, keep at bottom.
+    NUM_MEMORY_CHUNK_FLAGS
+  };
+
+
+  static const int kPointersToHereAreInterestingMask =
+      1 << POINTERS_TO_HERE_ARE_INTERESTING;
+
+  static const int kPointersFromHereAreInterestingMask =
+      1 << POINTERS_FROM_HERE_ARE_INTERESTING;
+
+  static const int kEvacuationCandidateMask =
+      1 << EVACUATION_CANDIDATE;
+
+  static const int kSkipEvacuationSlotsRecordingMask =
+      (1 << EVACUATION_CANDIDATE) |
+      (1 << RESCAN_ON_EVACUATION) |
+      (1 << IN_FROM_SPACE) |
+      (1 << IN_TO_SPACE);
+
+
+  void SetFlag(int flag) {
+    flags_ |= static_cast<uintptr_t>(1) << flag;
+  }
+
+  void ClearFlag(int flag) {
+    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
+  }
+
+  void SetFlagTo(int flag, bool value) {
+    if (value) {
+      SetFlag(flag);
+    } else {
+      ClearFlag(flag);
+    }
+  }
+
+  bool IsFlagSet(int flag) {
+    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
+  }
+
+  // Set or clear multiple flags at a time. The flags in the mask
+  // are set to the value in "flags", the rest retain the current value
+  // in flags_.
+  void SetFlags(intptr_t flags, intptr_t mask) {
+    flags_ = (flags_ & ~mask) | (flags & mask);
+  }
+
+  // Return all current flags.
+  intptr_t GetFlags() { return flags_; }
+
+  // Manage live byte count (count of bytes known to be live,
+  // because they are marked black).
+  void ResetLiveBytes() {
+    if (FLAG_gc_verbose) {
+      PrintF("ResetLiveBytes:%p:%x->0\n",
+             static_cast<void*>(this), live_byte_count_);
+    }
+    live_byte_count_ = 0;
+  }
+  void IncrementLiveBytes(int by) {
+    if (FLAG_gc_verbose) {
+      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
+             static_cast<void*>(this), live_byte_count_,
+             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
+             live_byte_count_ + by);
+    }
+    live_byte_count_ += by;
+    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
+  }
+  int LiveBytes() {
+    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
+    return live_byte_count_;
+  }
+
+  static void IncrementLiveBytesFromGC(Address address, int by) {
+    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
+  }
+
+  static void IncrementLiveBytesFromMutator(Address address, int by);
+
+  static const intptr_t kAlignment =
+      (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+  static const intptr_t kAlignmentMask = kAlignment - 1;
+
+  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
+
+  static const intptr_t kLiveBytesOffset =
+     kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
+     kPointerSize + kPointerSize +
+     kPointerSize + kPointerSize + kPointerSize + kIntSize;
+
+  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
+
+  static const size_t kHeaderSize =
+      kSlotsBufferOffset + kPointerSize + kPointerSize;
+
+  static const int kBodyOffset =
+    CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
+
+  // The start offset of the object area in a page. Aligned to both maps and
+  // code alignment to be suitable for both.  Also aligned to 32 words because
+  // the marking bitmap is arranged in 32 bit chunks.
+  static const int kObjectStartAlignment = 32 * kPointerSize;
+  static const int kObjectStartOffset = kBodyOffset - 1 +
+      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
+
+  size_t size() const { return size_; }
+
+  void set_size(size_t size) {
+    size_ = size;
+  }
+
+  void SetArea(Address area_start, Address area_end) {
+    area_start_ = area_start;
+    area_end_ = area_end;
+  }
+
+  Executability executable() {
+    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+  }
+
+  bool ContainsOnlyData() {
+    return IsFlagSet(CONTAINS_ONLY_DATA);
+  }
+
+  bool InNewSpace() {
+    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
+  }
+
+  bool InToSpace() {
+    return IsFlagSet(IN_TO_SPACE);
+  }
+
+  bool InFromSpace() {
+    return IsFlagSet(IN_FROM_SPACE);
+  }
+
+  // ---------------------------------------------------------------------
+  // Markbits support
+
+  inline Bitmap* markbits() {
+    return Bitmap::FromAddress(address() + kHeaderSize);
+  }
+
+  void PrintMarkbits() { markbits()->Print(); }
+
+  inline uint32_t AddressToMarkbitIndex(Address addr) {
+    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
+  }
+
+  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
+    const intptr_t offset =
+        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
+
+    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
+  }
+
+  inline Address MarkbitIndexToAddress(uint32_t index) {
+    return this->address() + (index << kPointerSizeLog2);
+  }
+
+  void InsertAfter(MemoryChunk* other);
+  void Unlink();
+
+  inline Heap* heap() { return heap_; }
+
+  static const int kFlagsOffset = kPointerSize * 3;
+
+  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
+
+  bool ShouldSkipEvacuationSlotRecording() {
+    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
+  }
+
+  inline SkipList* skip_list() {
+    return skip_list_;
+  }
+
+  inline void set_skip_list(SkipList* skip_list) {
+    skip_list_ = skip_list;
+  }
+
+  inline SlotsBuffer* slots_buffer() {
+    return slots_buffer_;
+  }
+
+  inline SlotsBuffer** slots_buffer_address() {
+    return &slots_buffer_;
+  }
+
+  void MarkEvacuationCandidate() {
+    ASSERT(slots_buffer_ == NULL);
+    SetFlag(EVACUATION_CANDIDATE);
+  }
+
+  void ClearEvacuationCandidate() {
+    ASSERT(slots_buffer_ == NULL);
+    ClearFlag(EVACUATION_CANDIDATE);
+  }
+
+  Address area_start() { return area_start_; }
+  Address area_end() { return area_end_; }
+  int area_size() {
+    return static_cast<int>(area_end() - area_start());
+  }
+
+ protected:
+  MemoryChunk* next_chunk_;
+  MemoryChunk* prev_chunk_;
+  size_t size_;
+  intptr_t flags_;
+
+  // Start and end of allocatable memory on this chunk.
+  Address area_start_;
+  Address area_end_;
+
+  // If the chunk needs to remember its memory reservation, it is stored here.
+  VirtualMemory reservation_;
+  // The identity of the owning space.  This is tagged as a failure pointer, but
+  // no failure can be in an object, so this can be distinguished from any entry
+  // in a fixed array.
+  Address owner_;
+  Heap* heap_;
+  // Used by the store buffer to keep track of which pages to mark scan-on-
+  // scavenge.
+  int store_buffer_counter_;
+  // Count of bytes marked black on page.
+  int live_byte_count_;
+  SlotsBuffer* slots_buffer_;
+  SkipList* skip_list_;
+
+  static MemoryChunk* Initialize(Heap* heap,
+                                 Address base,
+                                 size_t size,
+                                 Address area_start,
+                                 Address area_end,
+                                 Executability executable,
+                                 Space* owner);
+
+  friend class MemoryAllocator;
+};
+
+STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
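The owner_ field stores the space pointer with kFailureTag added, so this header word can never be confused with a real heap value; owner() verifies and strips the tag. A sketch of the encode/decode, assuming V8's two low tag bits (kFailureTag == kFailureTagMask == 3):

    #include <cstdint>

    const intptr_t kFailureTag = 3;      // assumed: V8's failure tag
    const intptr_t kFailureTagMask = 3;  // two low tag bits

    // Space pointers are word-aligned, so the low bits are free for the tag.
    inline intptr_t EncodeOwner(void* space) {
      return reinterpret_cast<intptr_t>(space) + kFailureTag;
    }

    // Returns the space, or nullptr if the word does not carry the tag.
    inline void* DecodeOwner(intptr_t owner) {
      if ((owner & kFailureTagMask) != kFailureTag) return nullptr;
      return reinterpret_cast<void*>(owner - kFailureTag);
    }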
 
 // -----------------------------------------------------------------------------
-// A page normally has 8K bytes. Large object pages may be larger.  A page
-// address is always aligned to the 8K page size.
-//
-// Each page starts with a header of Page::kPageHeaderSize size which contains
-// bookkeeping data.
-//
-// The mark-compact collector transforms a map pointer into a page index and a
-// page offset. The exact encoding is described in the comments for
-// class MapWord in objects.h.
+// A page is a memory chunk of size 1MB. Large object pages may be larger.
 //
 // The only way to get a page pointer is by calling factory methods:
 //   Page* p = Page::FromAddress(addr); or
 //   Page* p = Page::FromAllocationTop(top);
-class Page {
+class Page : public MemoryChunk {
  public:
   // Returns the page containing a given address. The address ranges
   // from [page_addr .. page_addr + kPageSize[
-  //
-  // Note that this function only works for addresses in normal paged
-  // spaces and addresses in the first 8K of large object pages (i.e.,
-  // the start of large objects but not necessarily derived pointers
-  // within them).
+  // This only works if the object is in fact in a page.  See also
+  // MemoryChunk::FromAddress() and MemoryChunk::FromAnyPointerAddress().
   INLINE(static Page* FromAddress(Address a)) {
     return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
   }
@@ -148,66 +661,23 @@
   // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
   INLINE(static Page* FromAllocationTop(Address top)) {
     Page* p = FromAddress(top - kPointerSize);
-    ASSERT_PAGE_OFFSET(p->Offset(top));
     return p;
   }
 
-  // Returns the start address of this page.
-  Address address() { return reinterpret_cast<Address>(this); }
-
-  // Checks whether this is a valid page address.
-  bool is_valid() { return address() != NULL; }
-
-  // Returns the next page of this page.
+  // Returns the next page in the chain of pages owned by a space.
   inline Page* next_page();
-
-  // Return the end of allocation in this page. Undefined for unused pages.
-  inline Address AllocationTop();
-
-  // Return the allocation watermark for the page.
-  // For old space pages it is guaranteed that the area under the watermark
-  // does not contain any garbage pointers to new space.
-  inline Address AllocationWatermark();
-
-  // Return the allocation watermark offset from the beginning of the page.
-  inline uint32_t AllocationWatermarkOffset();
-
-  inline void SetAllocationWatermark(Address allocation_watermark);
-
-  inline void SetCachedAllocationWatermark(Address allocation_watermark);
-  inline Address CachedAllocationWatermark();
-
-  // Returns the start address of the object area in this page.
-  Address ObjectAreaStart() { return address() + kObjectStartOffset; }
-
-  // Returns the end address (exclusive) of the object area in this page.
-  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
+  inline Page* prev_page();
+  inline void set_next_page(Page* page);
+  inline void set_prev_page(Page* page);
 
   // Checks whether an address is page aligned.
   static bool IsAlignedToPageSize(Address a) {
     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
   }
 
-  // True if this page was in use before current compaction started.
-  // Result is valid only for pages owned by paged spaces and
-  // only after PagedSpace::PrepareForMarkCompact was called.
-  inline bool WasInUseBeforeMC();
-
-  inline void SetWasInUseBeforeMC(bool was_in_use);
-
-  // True if this page is a large object page.
-  inline bool IsLargeObjectPage();
-
-  inline void SetIsLargeObjectPage(bool is_large_object_page);
-
-  inline Executability PageExecutability();
-
-  inline void SetPageExecutability(Executability executable);
-
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
     int offset = static_cast<int>(a - address());
-    ASSERT_PAGE_OFFSET(offset);
     return offset;
   }
 
@@ -218,144 +688,70 @@
   }
 
   // ---------------------------------------------------------------------
-  // Card marking support
-
-  static const uint32_t kAllRegionsCleanMarks = 0x0;
-  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
-
-  inline uint32_t GetRegionMarks();
-  inline void SetRegionMarks(uint32_t dirty);
-
-  inline uint32_t GetRegionMaskForAddress(Address addr);
-  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
-  inline int GetRegionNumberForAddress(Address addr);
-
-  inline void MarkRegionDirty(Address addr);
-  inline bool IsRegionDirty(Address addr);
-
-  inline void ClearRegionMarks(Address start,
-                               Address end,
-                               bool reaches_limit);
 
   // Page size in bytes.  This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
 
+  // Object area size in bytes.
+  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
+
+  // Maximum object size that fits in a page.
+  static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
+
   // Page size mask.
   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
 
-  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
-    kIntSize + kPointerSize + kPointerSize;
-
-  // The start offset of the object area in a page. Aligned to both maps and
-  // code alignment to be suitable for both.
-  static const int kObjectStartOffset =
-      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
-
-  // Object area size in bytes.
-  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
-
-  // Maximum object size that fits in a page.
-  static const int kMaxHeapObjectSize = kObjectAreaSize;
-
-  static const int kDirtyFlagOffset = 2 * kPointerSize;
-  static const int kRegionSizeLog2 = 8;
-  static const int kRegionSize = 1 << kRegionSizeLog2;
-  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
-
-  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
-
-  enum PageFlag {
-    IS_NORMAL_PAGE = 0,
-    WAS_IN_USE_BEFORE_MC,
-
-    // Page allocation watermark was bumped by preallocation during scavenge.
-    // Correct watermark can be retrieved by CachedAllocationWatermark() method
-    WATERMARK_INVALIDATED,
-    IS_EXECUTABLE,
-    NUM_PAGE_FLAGS  // Must be last
-  };
-  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
-
-  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
-  // scavenge we just invalidate the watermark on each old space page after
-  // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED
-  // flag at the beginning of the next scavenge and each page becomes marked as
-  // having a valid watermark.
-  //
-  // The following invariant must hold for pages in old pointer and map spaces:
-  //     If page is in use then page is marked as having invalid watermark at
-  //     the beginning and at the end of any GC.
-  //
-  // This invariant guarantees that after flipping flag meaning at the
-  // beginning of scavenge all pages in use will be marked as having valid
-  // watermark.
-  static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
-
-  // Returns true if the page allocation watermark was not altered during
-  // scavenge.
-  inline bool IsWatermarkValid();
-
-  inline void InvalidateWatermark(bool value);
-
-  inline bool GetPageFlag(PageFlag flag);
-  inline void SetPageFlag(PageFlag flag, bool value);
-  inline void ClearPageFlags();
-
   inline void ClearGCFields();
 
-  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
-  static const int kAllocationWatermarkOffsetBits  = kPageSizeBits + 1;
-  static const uint32_t kAllocationWatermarkOffsetMask =
-      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
-      kAllocationWatermarkOffsetShift;
+  static inline Page* Initialize(Heap* heap,
+                                 MemoryChunk* chunk,
+                                 Executability executable,
+                                 PagedSpace* owner);
 
-  static const uint32_t kFlagsMask =
-    ((1 << kAllocationWatermarkOffsetShift) - 1);
+  void InitializeAsAnchor(PagedSpace* owner);
 
-  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
-               kAllocationWatermarkOffsetBits);
+  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
+  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
+  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
 
-  //---------------------------------------------------------------------------
-  // Page header description.
-  //
-  // If a page is not in the large object space, the first word,
-  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
-  // and the chunk number (0 ~ 8K-1).  Only MemoryAllocator should use
-  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
-  // or [next_page_start, next_page_end[. It cannot point to a valid address
-  // in the current page.  If a page is in the large object space, the first
-  // word *may* (if the page start and large object chunk start are the
-  // same) contain the address of the next large object chunk.
-  intptr_t opaque_header;
+  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
+  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
 
-  // If the page is not in the large object space, the low-order bit of the
-  // second word is set. If the page is in the large object space, the
-  // second word *may* (if the page start and large object chunk start are
-  // the same) contain the large object chunk size.  In either case, the
-  // low-order bit for large object pages will be cleared.
-  // For normal pages this word is used to store page flags and
-  // offset of allocation top.
-  intptr_t flags_;
+  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
+  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
 
-  // This field contains dirty marks for regions covering the page. Only dirty
-  // regions might contain intergenerational references.
-  // Only 32 dirty marks are supported so for large object pages several regions
-  // might be mapped to a single dirty mark.
-  uint32_t dirty_regions_;
+#ifdef DEBUG
+  void Print();
+#endif  // DEBUG
 
-  // The index of the page in its owner space.
-  int mc_page_index;
-
-  // During mark-compact collections this field contains the forwarding address
-  // of the first live object in this page.
-  // During scavenge collection this field is used to store allocation watermark
-  // if it is altered during scavenge.
-  Address mc_first_forwarded;
-
-  Heap* heap_;
+  friend class MemoryAllocator;
 };
 
 
+STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
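Both STATIC_CHECKs pin a layout invariant: the C++ object must fit inside the reserved kHeaderSize bytes because the mark bitmap and object area sit at fixed offsets behind the header. A sketch of a pre-C++11 compile-time assertion of the kind STATIC_CHECK expands to (macro and names hypothetical):

    // A negative array length is a compile error, so the build breaks as
    // soon as the condition is false.
    #define MY_STATIC_CHECK(cond) typedef char StaticCheck[(cond) ? 1 : -1]

    struct Header { void* prev; void* next; };  // stand-in for a chunk header
    MY_STATIC_CHECK(sizeof(Header) <= 64);      // must fit the reserved space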
+
+
+class LargePage : public MemoryChunk {
+ public:
+  HeapObject* GetObject() {
+    return HeapObject::FromAddress(area_start());
+  }
+
+  inline LargePage* next_page() const {
+    return static_cast<LargePage*>(next_chunk());
+  }
+
+  inline void set_next_page(LargePage* page) {
+    set_next_chunk(page);
+  }
+ private:
+  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
+
+  friend class MemoryAllocator;
+};
+
+STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+
 // ----------------------------------------------------------------------------
 // Space is the abstract superclass for all allocation spaces.
 class Space : public Malloced {
@@ -380,6 +776,14 @@
   // (e.g. see LargeObjectSpace).
   virtual intptr_t SizeOfObjects() { return Size(); }
 
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (id_ == CODE_SPACE) {
+      return RoundDown(size, kCodeAlignment);
+    } else {
+      return RoundDown(size, kPointerSize);
+    }
+  }
+
 #ifdef DEBUG
   virtual void Print() = 0;
 #endif
@@ -414,7 +818,7 @@
   // Reserves a range of virtual memory, but does not commit any of it.
   // Can only be called once, at heap initialization time.
   // Returns false on failure.
-  bool Setup(const size_t requested_size);
+  bool SetUp(const size_t requested_size);
 
   // Frees the range of virtual memory, and frees the data structures used to
   // manage it.
@@ -430,9 +834,9 @@
   // Allocates a chunk of memory from the large-object portion of
   // the code range.  On platforms with no separate code range, should
   // not be called.
-  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
-                                          size_t* allocated);
-  void FreeRawMemory(void* buf, size_t length);
+  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
+                                            size_t* allocated);
+  void FreeRawMemory(Address buf, size_t length);
 
  private:
   Isolate* isolate_;
@@ -443,9 +847,15 @@
   class FreeBlock {
    public:
     FreeBlock(Address start_arg, size_t size_arg)
-        : start(start_arg), size(size_arg) {}
+        : start(start_arg), size(size_arg) {
+      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
+    }
     FreeBlock(void* start_arg, size_t size_arg)
-        : start(static_cast<Address>(start_arg)), size(size_arg) {}
+        : start(static_cast<Address>(start_arg)), size(size_arg) {
+      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
+    }
 
     Address start;
     size_t size;
@@ -473,123 +883,80 @@
 };
 
 
+class SkipList {
+ public:
+  SkipList() {
+    Clear();
+  }
+
+  void Clear() {
+    for (int idx = 0; idx < kSize; idx++) {
+      starts_[idx] = reinterpret_cast<Address>(-1);
+    }
+  }
+
+  Address StartFor(Address addr) {
+    return starts_[RegionNumber(addr)];
+  }
+
+  void AddObject(Address addr, int size) {
+    int start_region = RegionNumber(addr);
+    int end_region = RegionNumber(addr + size - kPointerSize);
+    for (int idx = start_region; idx <= end_region; idx++) {
+      if (starts_[idx] > addr) starts_[idx] = addr;
+    }
+  }
+
+  static inline int RegionNumber(Address addr) {
+    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
+  }
+
+  static void Update(Address addr, int size) {
+    Page* page = Page::FromAddress(addr);
+    SkipList* list = page->skip_list();
+    if (list == NULL) {
+      list = new SkipList();
+      page->set_skip_list(list);
+    }
+
+    list->AddObject(addr, size);
+  }
+
+ private:
+  static const int kRegionSizeLog2 = 13;
+  static const int kRegionSize = 1 << kRegionSizeLog2;
+  static const int kSize = Page::kPageSize / kRegionSize;
+
+  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
+
+  Address starts_[kSize];
+};
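SkipList answers "where does the first object touching this 8KB region start?" in O(1), giving conservative scanners a safe place to begin iterating inside a page. A standalone sketch of the same bucketing (names hypothetical; the real list hangs off a Page):

    #include <cstddef>
    #include <cstdint>

    const int kRegionSizeLog2 = 13;               // 8KB regions, as above
    const int kPageSize = 1 << 20;                // assumed 1MB pages
    const int kRegions = kPageSize >> kRegionSizeLog2;

    struct MiniSkipList {
      uintptr_t starts[kRegions];
      void Clear() {
        for (int i = 0; i < kRegions; i++) starts[i] = UINTPTR_MAX;
      }
      static int RegionNumber(uintptr_t offset) {
        return static_cast<int>(offset >> kRegionSizeLog2);
      }
      // Record an object at page offset `offset` spanning `size` bytes.
      void AddObject(uintptr_t offset, size_t size) {
        int last = RegionNumber(offset + size - sizeof(void*));
        for (int i = RegionNumber(offset); i <= last; i++) {
          if (starts[i] > offset) starts[i] = offset;
        }
      }
      // A safe offset at which to start scanning for the object covering
      // `offset`; UINTPTR_MAX means the region is untouched.
      uintptr_t StartFor(uintptr_t offset) {
        return starts[RegionNumber(offset)];
      }
    };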
+
+
 // ----------------------------------------------------------------------------
 // A space acquires chunks of memory from the operating system. The memory
-// allocator manages chunks for the paged heap spaces (old space and map
-// space).  A paged chunk consists of pages. Pages in a chunk have contiguous
-// addresses and are linked as a list.
+// allocator allocates and deallocates pages for the paged heap spaces and large
+// pages for large object space.
 //
-// The allocator keeps an initial chunk which is used for the new space.  The
-// leftover regions of the initial chunk are used for the initial chunks of
-// old space and map space if they are big enough to hold at least one page.
-// The allocator assumes that there is one old space and one map space, each
-// expands the space by allocating kPagesPerChunk pages except the last
-// expansion (before running out of space).  The first chunk may contain fewer
-// than kPagesPerChunk pages as well.
+// Each space has to manage its own pages.
 //
-// The memory allocator also allocates chunks for the large object space, but
-// they are managed by the space itself.  The new space does not expand.
-//
-// The fact that pages for paged spaces are allocated and deallocated in chunks
-// induces a constraint on the order of pages in a linked lists. We say that
-// pages are linked in the chunk-order if and only if every two consecutive
-// pages from the same chunk are consecutive in the linked list.
-//
-
-
 class MemoryAllocator {
  public:
   explicit MemoryAllocator(Isolate* isolate);
 
   // Initializes its internal bookkeeping structures.
   // Max capacity of the total space and executable memory limit.
-  bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
+  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
 
-  // Deletes valid chunks.
   void TearDown();
 
-  // Reserves an initial address range of virtual memory to be split between
-  // the two new space semispaces, the old space, and the map space.  The
-  // memory is not yet committed or assigned to spaces and split into pages.
-  // The initial chunk is unmapped when the memory allocator is torn down.
-  // This function should only be called when there is not already a reserved
-  // initial chunk (initial_chunk_ should be NULL).  It returns the start
-  // address of the initial chunk if successful, with the side effect of
-  // setting the initial chunk, or else NULL if unsuccessful and leaves the
-  // initial chunk NULL.
-  void* ReserveInitialChunk(const size_t requested);
+  Page* AllocatePage(PagedSpace* owner, Executability executable);
 
-  // Commits pages from an as-yet-unmanaged block of virtual memory into a
-  // paged space.  The block should be part of the initial chunk reserved via
-  // a call to ReserveInitialChunk.  The number of pages is always returned in
-  // the output parameter num_pages.  This function assumes that the start
-  // address is non-null and that it is big enough to hold at least one
-  // page-aligned page.  The call always succeeds, and num_pages is always
-  // greater than zero.
-  Page* CommitPages(Address start, size_t size, PagedSpace* owner,
-                    int* num_pages);
+  LargePage* AllocateLargePage(intptr_t object_size,
+                               Executability executable,
+                               Space* owner);
 
-  // Commit a contiguous block of memory from the initial chunk.  Assumes that
-  // the address is not NULL, the size is greater than zero, and that the
-  // block is contained in the initial chunk.  Returns true if it succeeded
-  // and false otherwise.
-  bool CommitBlock(Address start, size_t size, Executability executable);
-
-  // Uncommit a contiguous block of memory [start..(start+size)[.
-  // start is not NULL, the size is greater than zero, and the
-  // block is contained in the initial chunk.  Returns true if it succeeded
-  // and false otherwise.
-  bool UncommitBlock(Address start, size_t size);
-
-  // Zaps a contiguous block of memory [start..(start+size)[ thus
-  // filling it up with a recognizable non-NULL bit pattern.
-  void ZapBlock(Address start, size_t size);
-
-  // Attempts to allocate the requested (non-zero) number of pages from the
-  // OS.  Fewer pages might be allocated than requested. If it fails to
-  // allocate memory for the OS or cannot allocate a single page, this
-  // function returns an invalid page pointer (NULL). The caller must check
-  // whether the returned page is valid (by calling Page::is_valid()).  It is
-  // guaranteed that allocated pages have contiguous addresses.  The actual
-  // number of allocated pages is returned in the output parameter
-  // allocated_pages.  If the PagedSpace owner is executable and there is
-  // a code range, the pages are allocated from the code range.
-  Page* AllocatePages(int requested_pages, int* allocated_pages,
-                      PagedSpace* owner);
-
-  // Frees pages from a given page and after. Requires pages to be
-  // linked in chunk-order (see comment for class).
-  // If 'p' is the first page of a chunk, pages from 'p' are freed
-  // and this function returns an invalid page pointer.
-  // Otherwise, the function searches a page after 'p' that is
-  // the first page of a chunk. Pages after the found page
-  // are freed and the function returns 'p'.
-  Page* FreePages(Page* p);
-
-  // Frees all pages owned by given space.
-  void FreeAllPages(PagedSpace* space);
-
-  // Allocates and frees raw memory of certain size.
-  // These are just thin wrappers around OS::Allocate and OS::Free,
-  // but keep track of allocated bytes as part of heap.
-  // If the flag is EXECUTABLE and a code range exists, the requested
-  // memory is allocated from the code range.  If a code range exists
-  // and the freed memory is in it, the code range manages the freed memory.
-  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
-                                          size_t* allocated,
-                                          Executability executable);
-  void FreeRawMemory(void* buf,
-                     size_t length,
-                     Executability executable);
-  void PerformAllocationCallback(ObjectSpace space,
-                                 AllocationAction action,
-                                 size_t size);
-
-  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
-                                   ObjectSpace space,
-                                   AllocationAction action);
-  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
-  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
+  void Free(MemoryChunk* chunk);
 
   // Returns the maximum available bytes of heaps.
   intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
@@ -608,70 +975,85 @@
 
   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
-    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
+    return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
   }
 
-  // Links two pages.
-  inline void SetNextPage(Page* prev, Page* next);
-
-  // Returns the next page of a given page.
-  inline Page* GetNextPage(Page* p);
-
-  // Checks whether a page belongs to a space.
-  inline bool IsPageInSpace(Page* p, PagedSpace* space);
-
-  // Returns the space that owns the given page.
-  inline PagedSpace* PageOwner(Page* page);
-
-  // Finds the first/last page in the same chunk as a given page.
-  Page* FindFirstPageInSameChunk(Page* p);
-  Page* FindLastPageInSameChunk(Page* p);
-
-  // Relinks list of pages owned by space to make it chunk-ordered.
-  // Returns new first and last pages of space.
-  // Also returns last page in relinked list which has WasInUsedBeforeMC
-  // flag set.
-  void RelinkPageListInChunkOrder(PagedSpace* space,
-                                  Page** first_page,
-                                  Page** last_page,
-                                  Page** last_page_in_use);
-
 #ifdef DEBUG
   // Reports statistic info of the space.
   void ReportStatistics();
 #endif
 
-  // Due to encoding limitation, we can only have 8K chunks.
-  static const int kMaxNofChunks = 1 << kPageSizeBits;
-  // If a chunk has at least 16 pages, the maximum heap size is about
-  // 8K * 8K * 16 = 1G bytes.
-#ifdef V8_TARGET_ARCH_X64
-  static const int kPagesPerChunk = 32;
-  // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
-  static const int kChunkTableLevels = 4;
-  static const int kChunkTableBitsPerLevel = 12;
-#else
-  static const int kPagesPerChunk = 16;
-  // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
-  static const int kChunkTableLevels = 2;
-  static const int kChunkTableBitsPerLevel = 8;
-#endif
+  MemoryChunk* AllocateChunk(intptr_t body_size,
+                             Executability executable,
+                             Space* space);
+
+  Address ReserveAlignedMemory(size_t requested,
+                               size_t alignment,
+                               VirtualMemory* controller);
+  Address AllocateAlignedMemory(size_t requested,
+                                size_t alignment,
+                                Executability executable,
+                                VirtualMemory* controller);
+
+  void FreeMemory(VirtualMemory* reservation, Executability executable);
+  void FreeMemory(Address addr, size_t size, Executability executable);
+
+  // Commit a contiguous block of memory from the initial chunk.  Assumes that
+  // the address is not NULL, the size is greater than zero, and that the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  bool CommitBlock(Address start, size_t size, Executability executable);
+
+  // Uncommit a contiguous block of memory [start..(start+size)[.
+  // Assumes that start is not NULL, the size is greater than zero, and the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  bool UncommitBlock(Address start, size_t size);
+
+  // Zaps a contiguous block of memory [start..(start+size)[ thus
+  // filling it up with a recognizable non-NULL bit pattern.
+  void ZapBlock(Address start, size_t size);
+
+  void PerformAllocationCallback(ObjectSpace space,
+                                 AllocationAction action,
+                                 size_t size);
+
+  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+                                   ObjectSpace space,
+                                   AllocationAction action);
+
+  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+
+  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
+
+  static int CodePageGuardStartOffset();
+
+  static int CodePageGuardSize();
+
+  static int CodePageAreaStartOffset();
+
+  static int CodePageAreaEndOffset();
+
+  static int CodePageAreaSize() {
+    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
+  }
+
+  static bool CommitCodePage(VirtualMemory* vm, Address start, size_t size);
 
  private:
-  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
-
   Isolate* isolate_;
 
   // Maximum space size in bytes.
-  intptr_t capacity_;
+  size_t capacity_;
   // Maximum subset of capacity_ that can be executable
-  intptr_t capacity_executable_;
+  size_t capacity_executable_;
 
   // Allocated space size in bytes.
-  intptr_t size_;
-
+  size_t size_;
   // Allocated executable space size in bytes.
-  intptr_t size_executable_;
+  size_t size_executable_;
 
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -683,64 +1065,11 @@
     ObjectSpace space;
     AllocationAction action;
   };
+
   // A list of callbacks that are triggered when memory is allocated or freed
   List<MemoryAllocationCallbackRegistration>
       memory_allocation_callbacks_;
 
-  // The initial chunk of virtual memory.
-  VirtualMemory* initial_chunk_;
-
-  // Allocated chunk info: chunk start address, chunk size, and owning space.
-  class ChunkInfo BASE_EMBEDDED {
-   public:
-    ChunkInfo() : address_(NULL),
-                  size_(0),
-                  owner_(NULL),
-                  executable_(NOT_EXECUTABLE),
-                  owner_identity_(FIRST_SPACE) {}
-    inline void init(Address a, size_t s, PagedSpace* o);
-    Address address() { return address_; }
-    size_t size() { return size_; }
-    PagedSpace* owner() { return owner_; }
-    // We save executability of the owner to allow using it
-    // when collecting stats after the owner has been destroyed.
-    Executability executable() const { return executable_; }
-    AllocationSpace owner_identity() const { return owner_identity_; }
-
-   private:
-    Address address_;
-    size_t size_;
-    PagedSpace* owner_;
-    Executability executable_;
-    AllocationSpace owner_identity_;
-  };
-
-  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
-  List<ChunkInfo> chunks_;
-  List<int> free_chunk_ids_;
-  int max_nof_chunks_;
-  int top_;
-
-  // Push/pop a free chunk id onto/from the stack.
-  void Push(int free_chunk_id);
-  int Pop();
-  bool OutOfChunkIds() { return top_ == 0; }
-
-  // Frees a chunk.
-  void DeleteChunk(int chunk_id);
-
-  // Basic check whether a chunk id is in the valid range.
-  inline bool IsValidChunkId(int chunk_id);
-
-  // Checks whether a chunk id identifies an allocated chunk.
-  inline bool IsValidChunk(int chunk_id);
-
-  // Returns the chunk id that a page belongs to.
-  inline int GetChunkId(Page* p);
-
-  // True if the address lies in the initial chunk.
-  inline bool InInitialChunk(Address address);
-
   // Initializes pages in a chunk. Returns the first page address.
   // This function and GetChunkId() are provided for the mark-compact
   // collector to rebuild page headers in the from space, which is
@@ -748,13 +1077,7 @@
   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                PagedSpace* owner);
 
-  Page* RelinkPagesInChunk(int chunk_id,
-                           Address chunk_start,
-                           size_t chunk_size,
-                           Page* prev,
-                           Page** last_page_in_use);
-
-  DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };
 
 
@@ -777,111 +1100,67 @@
 // -----------------------------------------------------------------------------
 // Heap object iterator in new/old/map spaces.
 //
-// A HeapObjectIterator iterates objects from a given address to the
-// top of a space. The given address must be below the current
-// allocation pointer (space top). There are some caveats.
+// A HeapObjectIterator iterates objects from the bottom of the given space
+// to its top or from the bottom of the given page to its top.
 //
-// (1) If the space top changes upward during iteration (because of
-//     allocating new objects), the iterator does not iterate objects
-//     above the original space top. The caller must create a new
-//     iterator starting from the old top in order to visit these new
-//     objects.
-//
-// (2) If new objects are allocated below the original allocation top
-//     (e.g., free-list allocation in paged spaces), the new objects
-//     may or may not be iterated depending on their position with
-//     respect to the current point of iteration.
-//
-// (3) The space top should not change downward during iteration,
-//     otherwise the iterator will return not-necessarily-valid
-//     objects.
-
+// If objects are allocated in the page during iteration the iterator may
+// or may not iterate over those objects.  The caller must create a new
+// iterator in order to be sure to visit these new objects.
 class HeapObjectIterator: public ObjectIterator {
  public:
-  // Creates a new object iterator in a given space. If a start
-  // address is not given, the iterator starts from the space bottom.
+  // Creates a new object iterator in a given space.
   // If the size function is not given, the iterator calls the default
   // Object::Size().
   explicit HeapObjectIterator(PagedSpace* space);
   HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
-  HeapObjectIterator(PagedSpace* space, Address start);
-  HeapObjectIterator(PagedSpace* space,
-                     Address start,
-                     HeapObjectCallback size_func);
   HeapObjectIterator(Page* page, HeapObjectCallback size_func);
 
-  inline HeapObject* next() {
-    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
+  // Advance to the next object, skipping free spaces and other fillers and
+  // skipping the special garbage section of which there is one per space.
+  // Returns NULL when the iteration has ended.
+  inline HeapObject* Next() {
+    do {
+      HeapObject* next_obj = FromCurrentPage();
+      if (next_obj != NULL) return next_obj;
+    } while (AdvanceToNextPage());
+    return NULL;
   }
 
-  // implementation of ObjectIterator.
-  virtual HeapObject* next_object() { return next(); }
+  virtual HeapObject* next_object() {
+    return Next();
+  }
 
  private:
-  Address cur_addr_;  // current iteration point
-  Address end_addr_;  // end iteration point
-  Address cur_limit_;  // current page limit
-  HeapObjectCallback size_func_;  // size function
-  Page* end_page_;  // caches the page of the end address
+  enum PageMode { kOnePageOnly, kAllPagesInSpace };
 
-  HeapObject* FromCurrentPage() {
-    ASSERT(cur_addr_ < cur_limit_);
+  Address cur_addr_;  // Current iteration point.
+  Address cur_end_;   // End iteration point.
+  HeapObjectCallback size_func_;  // Size function or NULL.
+  PagedSpace* space_;
+  PageMode page_mode_;
 
-    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
-    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
-    ASSERT_OBJECT_SIZE(obj_size);
+  // Fast (inlined) path of next().
+  inline HeapObject* FromCurrentPage();
 
-    cur_addr_ += obj_size;
-    ASSERT(cur_addr_ <= cur_limit_);
-
-    return obj;
-  }
-
-  // Slow path of next, goes into the next page.
-  HeapObject* FromNextPage();
+  // Slow path of next(), goes into the next page.  Returns false if the
+  // iteration has ended.
+  bool AdvanceToNextPage();
 
   // Initializes fields.
-  void Initialize(Address start, Address end, HeapObjectCallback size_func);
-
-#ifdef DEBUG
-  // Verifies whether fields have valid values.
-  void Verify();
-#endif
+  inline void Initialize(PagedSpace* owner,
+                         Address start,
+                         Address end,
+                         PageMode mode,
+                         HeapObjectCallback size_func);
 };
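// Editor's note: Next()/AdvanceToNextPage() above follow a common fast-path/
// slow-path iterator shape: an inlined per-page scan plus an out-of-line page
// advance.  A generic, hypothetical sketch of that shape over a vector of
// "pages" (not V8 code):
#include <cstddef>
#include <vector>

template <typename Item>
class TwoLevelIterator {
 public:
  explicit TwoLevelIterator(const std::vector<std::vector<Item> >* pages)
      : pages_(pages), page_(0), index_(0) {}

  // Returns NULL when the iteration has ended, like HeapObjectIterator.
  const Item* Next() {
    do {
      const Item* item = FromCurrentPage();  // Fast path: stay on the page.
      if (item != NULL) return item;
    } while (AdvanceToNextPage());           // Slow path: move to next page.
    return NULL;
  }

 private:
  const Item* FromCurrentPage() {
    if (page_ >= pages_->size()) return NULL;
    const std::vector<Item>& page = (*pages_)[page_];
    return index_ < page.size() ? &page[index_++] : NULL;
  }

  bool AdvanceToNextPage() {  // False once all pages are exhausted.
    if (page_ >= pages_->size()) return false;
    page_++;
    index_ = 0;
    return page_ < pages_->size();
  }

  const std::vector<std::vector<Item> >* pages_;
  size_t page_;
  size_t index_;
};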
 
 
 // -----------------------------------------------------------------------------
 // A PageIterator iterates the pages in a paged space.
-//
-// The PageIterator class provides three modes for iterating pages in a space:
-//   PAGES_IN_USE iterates pages containing allocated objects.
-//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
-//                    mark-compact collection.
-//   ALL_PAGES iterates all pages in the space.
-//
-// There are some caveats.
-//
-// (1) If the space expands during iteration, new pages will not be
-//     returned by the iterator in any mode.
-//
-// (2) If new objects are allocated during iteration, they will appear
-//     in pages returned by the iterator.  Allocation may cause the
-//     allocation pointer or MC allocation pointer in the last page to
-//     change between constructing the iterator and iterating the last
-//     page.
-//
-// (3) The space should not shrink during iteration, otherwise the
-//     iterator will return deallocated pages.
 
 class PageIterator BASE_EMBEDDED {
  public:
-  enum Mode {
-    PAGES_IN_USE,
-    PAGES_USED_BY_MC,
-    ALL_PAGES
-  };
-
-  PageIterator(PagedSpace* space, Mode mode);
+  explicit inline PageIterator(PagedSpace* space);
 
   inline bool has_next();
   inline Page* next();
@@ -889,21 +1168,25 @@
  private:
   PagedSpace* space_;
   Page* prev_page_;  // Previous page returned.
-  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
+  // Next page that will be returned.  Cached here so that we can use this
+  // iterator for operations that deallocate pages.
+  Page* next_page_;
 };
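// Editor's note: caching next_page_ (instead of recomputing it from the
// current page) is what makes this iterator safe for "visit and deallocate"
// loops.  The general pattern, as a hypothetical sketch:
#include <cstddef>

struct Node { Node* next; };

template <typename Visitor>
static void VisitAndMaybeFree(Node* head, Visitor visit) {
  Node* current = head;
  while (current != NULL) {
    Node* next = current->next;  // Cache before visit() may free 'current'.
    visit(current);              // May deallocate the node.
    current = next;              // Still valid: read before the visit.
  }
}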
 
 
 // -----------------------------------------------------------------------------
-// A space has a list of pages. The next page can be accessed via
-// Page::next_page() call. The next page of the last page is an
-// invalid page pointer. A space can expand and shrink dynamically.
+// A space has a circular list of pages. The next page can be accessed via
+// Page::next_page() call.
 
 // An abstraction of allocation and relocation pointers in a page-structured
 // space.
 class AllocationInfo {
  public:
-  Address top;  // current allocation top
-  Address limit;  // current allocation limit
+  AllocationInfo() : top(NULL), limit(NULL) {
+  }
+
+  Address top;  // Current allocation top.
+  Address limit;  // Current allocation limit.
 
 #ifdef DEBUG
   bool VerifyPagedAllocation() {
@@ -915,11 +1198,11 @@
 
 
 // An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (ie, not
+// The 'capacity' of a space is the number of object-area bytes (i.e., not
 // including page bookkeeping structures) currently in the space. The 'size'
 // of a space is the number of allocated bytes, the 'waste' in the space is
 // the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (eg, small blocks due
+// allocation without reorganizing the space via a GC (e.g. small blocks due
 // to internal fragmentation, top of page areas in map space), and the bytes
 // 'available' is the number of unallocated bytes that are not waste.  The
 // capacity is the sum of size, waste, and available.
@@ -932,73 +1215,213 @@
  public:
   AllocationStats() { Clear(); }
 
-  // Zero out all the allocation statistics (ie, no capacity).
+  // Zero out all the allocation statistics (i.e., no capacity).
   void Clear() {
     capacity_ = 0;
-    available_ = 0;
     size_ = 0;
     waste_ = 0;
   }
 
-  // Reset the allocation statistics (ie, available = capacity with no
+  void ClearSizeWaste() {
+    size_ = capacity_;
+    waste_ = 0;
+  }
+
+  // Reset the allocation statistics (i.e., available = capacity with no
   // wasted or allocated bytes).
   void Reset() {
-    available_ = capacity_;
     size_ = 0;
     waste_ = 0;
   }
 
   // Accessors for the allocation statistics.
   intptr_t Capacity() { return capacity_; }
-  intptr_t Available() { return available_; }
   intptr_t Size() { return size_; }
   intptr_t Waste() { return waste_; }
 
-  // Grow the space by adding available bytes.
+  // Grow the space by adding available bytes.  They are initially marked as
+  // being in use (part of the size), but will normally be immediately freed,
+  // putting them on the free list and removing them from size_.
   void ExpandSpace(int size_in_bytes) {
     capacity_ += size_in_bytes;
-    available_ += size_in_bytes;
+    size_ += size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
-  // Shrink the space by removing available bytes.
+  // Shrink the space by removing available bytes.  Since shrinking is done
+  // during sweeping, bytes have been marked as being in use (part of the size)
+  // and are hereby freed.
   void ShrinkSpace(int size_in_bytes) {
     capacity_ -= size_in_bytes;
-    available_ -= size_in_bytes;
+    size_ -= size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
   // Allocate from available bytes (available -> size).
   void AllocateBytes(intptr_t size_in_bytes) {
-    available_ -= size_in_bytes;
     size_ += size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
   // Free allocated bytes, making them available (size -> available).
   void DeallocateBytes(intptr_t size_in_bytes) {
     size_ -= size_in_bytes;
-    available_ += size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
   // Waste free bytes (available -> waste).
   void WasteBytes(int size_in_bytes) {
-    available_ -= size_in_bytes;
+    size_ -= size_in_bytes;
     waste_ += size_in_bytes;
-  }
-
-  // Consider the wasted bytes to be allocated, as they contain filler
-  // objects (waste -> size).
-  void FillWastedBytes(intptr_t size_in_bytes) {
-    waste_ -= size_in_bytes;
-    size_ += size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
  private:
   intptr_t capacity_;
-  intptr_t available_;
   intptr_t size_;
   intptr_t waste_;
 };
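// Editor's note: a worked example (using the AllocationStats class above) of
// the revised accounting, where a fresh page enters as "in use" and its body
// is immediately freed onto the free list; Available() is then derived from
// the free list instead of a stored field.  The numbers are illustrative.
static void AllocationStatsExample() {
  AllocationStats stats;
  stats.ExpandSpace(4096);      // capacity_ = 4096, size_ = 4096.
  stats.DeallocateBytes(4000);  // Page body goes to the free list: size_ = 96.
  stats.WasteBytes(96);         // Unusable remainder: size_ = 0, waste_ = 96.
  // Invariant: capacity_ == size_ + waste_ + free-list bytes
  //            (4096 == 0 + 96 + 4000).
}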
 
 
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces
+//
+// Free-list nodes are free blocks in the heap.  They look like heap objects
+// (free-list node pointers have the heap object tag, and they have a map like
+// a heap object).  They have a size and a next pointer.  The next pointer is
+// the raw address of the next free list node (or NULL).
+class FreeListNode: public HeapObject {
+ public:
+  // Obtain a free-list node from a raw address.  This is not a cast because
+  // it does not check nor require that the first word at the address is a map
+  // pointer.
+  static FreeListNode* FromAddress(Address address) {
+    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
+  }
+
+  static inline bool IsFreeListNode(HeapObject* object);
+
+  // Set the size in bytes, which can be read with HeapObject::Size().  This
+  // function also writes a map to the first word of the block so that it
+  // looks like a heap object to the garbage collector and heap iteration
+  // functions.
+  void set_size(Heap* heap, int size_in_bytes);
+
+  // Accessors for the next field.
+  inline FreeListNode* next();
+  inline FreeListNode** next_address();
+  inline void set_next(FreeListNode* next);
+
+  inline void Zap();
+
+ private:
+  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
+};
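// Editor's note: hypothetical standalone sketch of the "free block poses as
// a heap object" trick described above: the first word is a map-like tag,
// followed by the block's size and a raw next pointer, so heap iteration can
// step over free blocks like any other object.  Not V8's actual layout.
#include <cstddef>
#include <cstdint>

struct FakeFreeNode {
  uintptr_t map_word;     // Free-space map, as FreeListNode::set_size writes.
  int32_t size_in_bytes;  // Readable as the "object" size.
  FakeFreeNode* next;     // Raw address of the next free node (or NULL).
};

// Carve a node out of a raw block of memory.
static FakeFreeNode* InitFreeNode(void* block, int size_in_bytes,
                                  uintptr_t free_space_map) {
  FakeFreeNode* node = static_cast<FakeFreeNode*>(block);
  node->map_word = free_space_map;
  node->size_in_bytes = size_in_bytes;
  node->next = NULL;
  return node;
}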
+
+
+// The free list for the old space.  The free list is organized in such a way
+// as to encourage objects allocated around the same time to be near each
+// other.  The normal way to allocate is intended to be by bumping a 'top'
+// pointer until it hits a 'limit' pointer.  When the limit is hit we need to
+// find a new space to allocate from.  This is done with the free list, which
+// is divided up into rough categories to cut down on waste.  Having finer
+// categories would scatter allocation more.
+
+// The old space free list is organized in categories.
+// 1-31 words:  Such small free areas are discarded for efficiency reasons.
+//     They can be reclaimed by the compactor.  However the distance between top
+//     and limit may be this small.
+// 32-255 words: There is a list of spaces this large.  It is used for top and
+//     limit when the object we need to allocate is 1-31 words in size.  These
+//     spaces are called small.
+// 256-2047 words: There is a list of spaces this large.  It is used for top and
+//     limit when the object we need to allocate is 32-255 words in size.  These
+//     spaces are called medium.
+// 2048-16383 words: There is a list of spaces this large.  It is used for top
+//     and limit when the object we need to allocate is 256-2047 words in size.
+//     These spaces are called large.
+// At least 16384 words.  This list is for objects of 2048 words or larger.
+//     Empty pages are added to this list.  These spaces are called huge.
+class FreeList BASE_EMBEDDED {
+ public:
+  explicit FreeList(PagedSpace* owner);
+
+  // Clear the free list.
+  void Reset();
+
+  // Return the number of bytes available on the free list.
+  intptr_t available() { return available_; }
+
+  // Place a node on the free list.  The block of size 'size_in_bytes'
+  // starting at 'start' is placed on the free list.  The return value is the
+  // number of bytes that have been lost due to internal fragmentation by
+  // freeing the block.  Bookkeeping information will be written to the block,
+  // i.e., its contents will be destroyed.  The start address should be word
+  // aligned, and the size should be a non-zero multiple of the word size.
+  int Free(Address start, int size_in_bytes);
+
+  // Allocate a block of size 'size_in_bytes' from the free list.  The block
+  // is uninitialized.  A failure is returned if no block is available.  The
+  // size should be a non-zero multiple of the word size.
+  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+
+#ifdef DEBUG
+  void Zap();
+  static intptr_t SumFreeList(FreeListNode* node);
+  static int FreeListLength(FreeListNode* cur);
+  intptr_t SumFreeLists();
+  bool IsVeryLong();
+#endif
+
+  struct SizeStats {
+    intptr_t Total() {
+      return small_size_ + medium_size_ + large_size_ + huge_size_;
+    }
+
+    intptr_t small_size_;
+    intptr_t medium_size_;
+    intptr_t large_size_;
+    intptr_t huge_size_;
+  };
+
+  void CountFreeListItems(Page* p, SizeStats* sizes);
+
+  intptr_t EvictFreeListItems(Page* p);
+
+ private:
+  // The size range of blocks, in bytes.
+  static const int kMinBlockSize = 3 * kPointerSize;
+  static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
+
+  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
+
+  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
+
+  PagedSpace* owner_;
+  Heap* heap_;
+
+  // Total available bytes in all blocks on this free list.
+  int available_;
+
+  static const int kSmallListMin = 0x20 * kPointerSize;
+  static const int kSmallListMax = 0xff * kPointerSize;
+  static const int kMediumListMax = 0x7ff * kPointerSize;
+  static const int kLargeListMax = 0x3fff * kPointerSize;
+  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
+  static const int kMediumAllocationMax = kSmallListMax;
+  static const int kLargeAllocationMax = kMediumListMax;
+  FreeListNode* small_list_;
+  FreeListNode* medium_list_;
+  FreeListNode* large_list_;
+  FreeListNode* huge_list_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
+};
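// Editor's note: a standalone sketch of the size-class selection implied by
// the thresholds above (kSmallListMin etc.).  kPointerSize is assumed to be
// 4 here; in the real header these constants come from the class itself.
enum FreeListCategory { kDiscarded, kSmallList, kMediumList, kLargeList,
                        kHugeList };

static FreeListCategory CategoryFor(int size_in_bytes) {
  const int kPtr = 4;                       // Assumed 32-bit pointers.
  const int kSmallListMin = 0x20 * kPtr;    // 32 words.
  const int kSmallListMax = 0xff * kPtr;    // 255 words.
  const int kMediumListMax = 0x7ff * kPtr;  // 2047 words.
  const int kLargeListMax = 0x3fff * kPtr;  // 16383 words.
  if (size_in_bytes < kSmallListMin) return kDiscarded;  // Too small to keep.
  if (size_in_bytes <= kSmallListMax) return kSmallList;
  if (size_in_bytes <= kMediumListMax) return kMediumList;
  if (size_in_bytes <= kLargeListMax) return kLargeList;
  return kHugeList;
}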
+
+
 class PagedSpace : public Space {
  public:
   // Creates a space with a maximum capacity, and an id.
@@ -1013,11 +1436,11 @@
   // the memory allocator's initial chunk) if possible.  If the block of
   // addresses is not big enough to contain a single page-aligned page, a
   // fresh chunk will be allocated.
-  bool Setup(Address start, size_t size);
+  bool SetUp();
 
   // Returns true if the space has been successfully set up and not
   // subsequently torn down.
-  bool HasBeenSetup();
+  bool HasBeenSetUp();
 
   // Cleans up the space, frees all pages in this space except those belonging
   // to the initial chunk, uncommits addresses in the initial chunk.
@@ -1026,8 +1449,6 @@
   // Checks whether an object/address is in this space.
   inline bool Contains(Address a);
   bool Contains(HeapObject* o) { return Contains(o->address()); }
-  // Never crashes even if a is not a valid pointer.
-  inline bool SafeContains(Address a);
 
   // Given an address occupied by a live object, return that object if it is
   // in this space, or Failure::Exception() if it is not. The implementation
@@ -1035,104 +1456,92 @@
   // linear in the number of objects in the page. It may be slow.
   MUST_USE_RESULT MaybeObject* FindObject(Address addr);
 
-  // Checks whether page is currently in use by this space.
-  bool IsUsed(Page* page);
-
-  void MarkAllPagesClean();
-
   // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact(bool will_compact);
+  virtual void PrepareForMarkCompact();
 
-  // The top of allocation in a page in this space. Undefined if page is unused.
-  Address PageAllocationTop(Page* page) {
-    return page == TopPageOf(allocation_info_) ? top()
-        : PageAllocationLimit(page);
-  }
-
-  // The limit of allocation for a page in this space.
-  virtual Address PageAllocationLimit(Page* page) = 0;
-
-  void FlushTopPageWatermark() {
-    AllocationTopPage()->SetCachedAllocationWatermark(top());
-    AllocationTopPage()->InvalidateWatermark(true);
-  }
-
-  // Current capacity without growing (Size() + Available() + Waste()).
+  // Current capacity without growing (Size() + Available()).
   intptr_t Capacity() { return accounting_stats_.Capacity(); }
 
   // Total amount of memory committed for this space.  For paged
   // spaces this equals the capacity.
   intptr_t CommittedMemory() { return Capacity(); }
 
-  // Available bytes without growing.
-  intptr_t Available() { return accounting_stats_.Available(); }
+  // Sets the capacity, the available space and the wasted space to zero.
+  // The stats are rebuilt during sweeping by adding each page to the
+  // capacity and the size when it is encountered.  As free spaces are
+  // discovered during the sweeping they are subtracted from the size and added
+  // to the available and wasted totals.
+  void ClearStats() {
+    accounting_stats_.ClearSizeWaste();
+  }
 
-  // Allocated bytes in this space.
+  // Available bytes without growing.  These are the bytes on the free list.
+  // The bytes in the linear allocation area are not included in this total
+  // because updating the stats would slow down allocation.  New pages are
+  // immediately added to the free list so they show up here.
+  intptr_t Available() { return free_list_.available(); }
+
+  // Allocated bytes in this space.  Garbage bytes that were not found due to
+  // lazy sweeping are counted as being allocated!  The bytes in the current
+  // linear allocation area (between top and limit) are also counted here.
   virtual intptr_t Size() { return accounting_stats_.Size(); }
 
-  // Wasted bytes due to fragmentation and not recoverable until the
-  // next GC of this space.
-  intptr_t Waste() { return accounting_stats_.Waste(); }
+  // As size, but the bytes in lazily swept pages are estimated and the bytes
+  // in the current linear allocation area are not included.
+  virtual intptr_t SizeOfObjects() {
+    ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
+    return Size() - unswept_free_bytes_ - (limit() - top());
+  }
 
-  // Returns the address of the first object in this space.
-  Address bottom() { return first_page_->ObjectAreaStart(); }
+  // Wasted bytes in this space.  These are just the bytes that were thrown away
+  // due to being too small to use for allocation.  They do not include the
+  // free bytes that were not found at all due to lazy sweeping.
+  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
 
   // Returns the allocation pointer in this space.
   Address top() { return allocation_info_.top; }
+  Address limit() { return allocation_info_.limit; }
 
   // Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
   MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
 
-  // Allocate the requested number of bytes for relocation during mark-compact
-  // collection.
-  MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
-
   virtual bool ReserveSpace(int bytes);
 
-  // Used by ReserveSpace.
-  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
-
-  // Free all pages in range from prev (exclusive) to last (inclusive).
-  // Freed pages are moved to the end of page list.
-  void FreePages(Page* prev, Page* last);
-
-  // Deallocates a block.
-  virtual void DeallocateBlock(Address start,
-                               int size_in_bytes,
-                               bool add_to_freelist) = 0;
+  // Give a block of memory to the space's free list.  It might be added to
+  // the free list or accounted as waste.  Returns the number of bytes that
+  // were added to the free list.
+  int Free(Address start, int size_in_bytes) {
+    int wasted = free_list_.Free(start, size_in_bytes);
+    accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
+    return size_in_bytes - wasted;
+  }
 
   // Set space allocation info.
-  void SetTop(Address top) {
+  void SetTop(Address top, Address limit) {
+    ASSERT(top == limit ||
+           Page::FromAddress(top) == Page::FromAddress(limit - 1));
     allocation_info_.top = top;
-    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
+    allocation_info_.limit = limit;
   }
 
-  // ---------------------------------------------------------------------------
-  // Mark-compact collection support functions
-
-  // Set the relocation point to the beginning of the space.
-  void MCResetRelocationInfo();
-
-  // Writes relocation info to the top page.
-  void MCWriteRelocationInfoToPage() {
-    TopPageOf(mc_forwarding_info_)->
-        SetAllocationWatermark(mc_forwarding_info_.top);
+  void Allocate(int bytes) {
+    accounting_stats_.AllocateBytes(bytes);
   }
 
-  // Computes the offset of a given address in this space to the beginning
-  // of the space.
-  int MCSpaceOffsetForAddress(Address addr);
+  void IncreaseCapacity(int size) {
+    accounting_stats_.ExpandSpace(size);
+  }
 
-  // Updates the allocation pointer to the relocation top after a mark-compact
-  // collection.
-  virtual void MCCommitRelocationInfo() = 0;
+  // Releases an unused page and shrinks the space.
+  void ReleasePage(Page* page);
 
-  // Releases half of unused pages.
-  void Shrink();
+  // Releases all of the unused pages.
+  void ReleaseAllUnusedPages();
 
-  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
-  bool EnsureCapacity(int capacity);
+  // The dummy page that anchors the linked list of pages.
+  Page* anchor() { return &anchor_; }
 
 #ifdef DEBUG
   // Print meta info and objects in this space.
@@ -1141,6 +1550,9 @@
   // Verify integrity of this space.
   virtual void Verify(ObjectVisitor* visitor);
 
+  // Reports statistics for the space
+  void ReportStatistics();
+
   // Overridden by subclasses to verify space-specific object
   // properties (e.g., only maps or free-list nodes are in map space).
   virtual void VerifyObject(HeapObject* obj) {}
@@ -1151,91 +1563,108 @@
   static void ResetCodeStatistics();
 #endif
 
-  // Returns the page of the allocation pointer.
-  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+  bool was_swept_conservatively() { return was_swept_conservatively_; }
+  void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
 
-  void RelinkPageListInChunkOrder(bool deallocate_blocks);
+  // Evacuation candidates are swept by evacuator.  Needs to return a valid
+  // result before _and_ after evacuation has finished.
+  static bool ShouldBeSweptLazily(Page* p) {
+    return !p->IsEvacuationCandidate() &&
+           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
+           !p->WasSweptPrecisely();
+  }
+
+  void SetPagesToSweep(Page* first) {
+    ASSERT(unswept_free_bytes_ == 0);
+    if (first == &anchor_) first = NULL;
+    first_unswept_page_ = first;
+  }
+
+  void IncrementUnsweptFreeBytes(int by) {
+    unswept_free_bytes_ += by;
+  }
+
+  void IncreaseUnsweptFreeBytes(Page* p) {
+    ASSERT(ShouldBeSweptLazily(p));
+    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
+  }
+
+  void DecreaseUnsweptFreeBytes(Page* p) {
+    ASSERT(ShouldBeSweptLazily(p));
+    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
+  }
+
+  bool AdvanceSweeper(intptr_t bytes_to_sweep);
+
+  bool IsSweepingComplete() {
+    return !first_unswept_page_->is_valid();
+  }
+
+  Page* FirstPage() { return anchor_.next_page(); }
+  Page* LastPage() { return anchor_.prev_page(); }
+
+  void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
+    free_list_.CountFreeListItems(p, sizes);
+  }
+
+  void EvictEvacuationCandidatesFromFreeLists();
+
+  bool CanExpand();
+
+  // Returns the number of total pages in this space.
+  int CountTotalPages();
+
+  // Return size of allocatable area on a page in this space.
+  inline int AreaSize() {
+    return area_size_;
+  }
 
  protected:
+  int area_size_;
+
   // Maximum capacity of this space.
   intptr_t max_capacity_;
 
   // Accounting information for this space.
   AllocationStats accounting_stats_;
 
-  // The first page in this space.
-  Page* first_page_;
+  // The dummy page that anchors the double linked list of pages.
+  Page anchor_;
 
-  // The last page in this space.  Initially set in Setup, updated in
-  // Expand and Shrink.
-  Page* last_page_;
-
-  // True if pages owned by this space are linked in chunk-order.
-  // See comment for class MemoryAllocator for definition of chunk-order.
-  bool page_list_is_chunk_ordered_;
+  // The space's free list.
+  FreeList free_list_;
 
   // Normal allocation information.
   AllocationInfo allocation_info_;
 
-  // Relocation information during mark-compact collections.
-  AllocationInfo mc_forwarding_info_;
-
   // Bytes of each page that cannot be allocated.  Possibly non-zero
   // for pages in spaces with only fixed-size objects.  Always zero
   // for pages in spaces with variable sized objects (those pages are
   // padded with free-list nodes).
   int page_extra_;
 
-  // Sets allocation pointer to a page bottom.
-  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
+  bool was_swept_conservatively_;
 
-  // Returns the top page specified by an allocation info structure.
-  static Page* TopPageOf(AllocationInfo alloc_info) {
-    return Page::FromAllocationTop(alloc_info.limit);
-  }
+  // The first page to be swept when the lazy sweeper advances. Is set
+  // to NULL when all pages have been swept.
+  Page* first_unswept_page_;
 
-  int CountPagesToTop() {
-    Page* p = Page::FromAllocationTop(allocation_info_.top);
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    int counter = 1;
-    while (it.has_next()) {
-      if (it.next() == p) return counter;
-      counter++;
-    }
-    UNREACHABLE();
-    return -1;
-  }
+  // The number of free bytes which could be reclaimed by advancing the
+  // lazy sweeper.  This is only an estimation because lazy sweeping is
+  // done conservatively.
+  intptr_t unswept_free_bytes_;
 
   // Expands the space by allocating a fixed number of pages. Returns false if
-  // it cannot allocate requested number of pages from OS. Newly allocated
-  // pages are append to the last_page;
-  bool Expand(Page* last_page);
+  // it cannot allocate requested number of pages from OS, or if the hard heap
+  // size limit has been hit.
+  bool Expand();
 
-  // Generic fast case allocation function that tries linear allocation in
-  // the top page of 'alloc_info'.  Returns NULL on failure.
-  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
-                                      int size_in_bytes);
-
-  // During normal allocation or deserialization, roll to the next page in
-  // the space (there is assumed to be one) and allocate there.  This
-  // function is space-dependent.
-  virtual HeapObject* AllocateInNextPage(Page* current_page,
-                                         int size_in_bytes) = 0;
+  // Generic fast case allocation function that tries linear allocation at the
+  // address denoted by top in allocation_info_.
+  inline HeapObject* AllocateLinearly(int size_in_bytes);
 
   // Slow path of AllocateRaw.  This function is space-dependent.
-  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
-
-  // Slow path of MCAllocateRaw.
-  MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
-
-#ifdef DEBUG
-  // Returns the number of total pages in this space.
-  int CountTotalPages();
-#endif
-
- private:
-  // Returns a pointer to the page of the relocation pointer.
-  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
+  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
 
   friend class PageIterator;
 };
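// Editor's note: hypothetical sketch of the allocation shape PagedSpace
// describes: bump 'top' toward 'limit' on the fast path (AllocateLinearly)
// and fall back to a slow path (the free list) when the linear area is
// exhausted.  Simplified; no alignment handling or failure objects.
#include <cstddef>

class BumpAllocator {
 public:
  BumpAllocator(char* top, char* limit) : top_(top), limit_(limit) {}

  void* Allocate(size_t size_in_bytes) {
    if (static_cast<size_t>(limit_ - top_) < size_in_bytes) {
      return SlowAllocate(size_in_bytes);  // E.g., consult a free list.
    }
    void* result = top_;    // Fast path: linear allocation.
    top_ += size_in_bytes;
    return result;
  }

 private:
  void* SlowAllocate(size_t /* size_in_bytes */) { return NULL; }
  char* top_;
  char* limit_;
};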
@@ -1276,39 +1705,128 @@
 };
 
 
+enum SemiSpaceId {
+  kFromSpace = 0,
+  kToSpace = 1
+};
+
+
+class SemiSpace;
+
+
+class NewSpacePage : public MemoryChunk {
+ public:
+  // GC related flags copied from from-space to to-space when
+  // flipping semispaces.
+  static const intptr_t kCopyOnFlipFlagsMask =
+    (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+    (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+    (1 << MemoryChunk::SCAN_ON_SCAVENGE);
+
+  static const int kAreaSize = Page::kNonCodeObjectAreaSize;
+
+  inline NewSpacePage* next_page() const {
+    return static_cast<NewSpacePage*>(next_chunk());
+  }
+
+  inline void set_next_page(NewSpacePage* page) {
+    set_next_chunk(page);
+  }
+
+  inline NewSpacePage* prev_page() const {
+    return static_cast<NewSpacePage*>(prev_chunk());
+  }
+
+  inline void set_prev_page(NewSpacePage* page) {
+    set_prev_chunk(page);
+  }
+
+  SemiSpace* semi_space() {
+    return reinterpret_cast<SemiSpace*>(owner());
+  }
+
+  bool is_anchor() { return !this->InNewSpace(); }
+
+  static bool IsAtStart(Address addr) {
+    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
+        == kObjectStartOffset;
+  }
+
+  static bool IsAtEnd(Address addr) {
+    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
+  }
+
+  Address address() {
+    return reinterpret_cast<Address>(this);
+  }
+
+  // Finds the NewSpacePage containing the given address.
+  static inline NewSpacePage* FromAddress(Address address_in_page) {
+    Address page_start =
+        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
+                                  ~Page::kPageAlignmentMask);
+    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
+    return page;
+  }
+
+  // Find the page for a limit address. A limit address is either an address
+  // inside a page, or the address right after the last byte of a page.
+  static inline NewSpacePage* FromLimit(Address address_limit) {
+    return NewSpacePage::FromAddress(address_limit - 1);
+  }
+
+ private:
+  // Create a NewSpacePage object that is only used as anchor
+  // for the doubly-linked list of real pages.
+  explicit NewSpacePage(SemiSpace* owner) {
+    InitializeAsAnchor(owner);
+  }
+
+  static NewSpacePage* Initialize(Heap* heap,
+                                  Address start,
+                                  SemiSpace* semi_space);
+
+  // Initialize a fake NewSpacePage used as a sentinel at the ends
+  // of a doubly-linked list of real NewSpacePages.
+  // Only uses the prev/next links, and sets flags to not be in new-space.
+  void InitializeAsAnchor(SemiSpace* owner);
+
+  friend class SemiSpace;
+  friend class SemiSpaceIterator;
+};
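// Editor's note: FromAddress/IsAtStart/IsAtEnd above are mask arithmetic on
// a power-of-two page size.  A standalone sketch with assumed values (1MB
// pages, a 256-byte header); the real constants live on Page/MemoryChunk.
#include <cstdint>

static const uintptr_t kPageSizeSketch = 1 << 20;  // Assumed 1MB pages.
static const uintptr_t kAlignmentMaskSketch = kPageSizeSketch - 1;
static const uintptr_t kObjectStartOffsetSketch = 256;  // Assumed header.

static uintptr_t PageStartFor(uintptr_t addr) {
  return addr & ~kAlignmentMaskSketch;  // Clear the low bits.
}

static bool IsAtPageObjectStart(uintptr_t addr) {
  return (addr & kAlignmentMaskSketch) == kObjectStartOffsetSketch;
}

static bool IsAtPageEnd(uintptr_t addr) {
  // One past the last byte of a page is exactly the next page boundary.
  return (addr & kAlignmentMaskSketch) == 0;
}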
+
+
 // -----------------------------------------------------------------------------
 // SemiSpace in young generation
 //
-// A semispace is a contiguous chunk of memory. The mark-compact collector
-// uses the memory in the from space as a marking stack when tracing live
-// objects.
+// A semispace is a contiguous chunk of memory holding page-like memory
+// chunks. The mark-compact collector uses the memory of the first page in
+// the from space as a marking stack when tracing live objects.
 
 class SemiSpace : public Space {
  public:
   // Constructor.
-  explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
-    start_ = NULL;
-    age_mark_ = NULL;
-  }
+  SemiSpace(Heap* heap, SemiSpaceId semispace)
+    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+      start_(NULL),
+      age_mark_(NULL),
+      id_(semispace),
+      anchor_(this),
+      current_page_(NULL) { }
 
   // Sets up the semispace using the given chunk.
-  bool Setup(Address start, int initial_capacity, int maximum_capacity);
+  void SetUp(Address start, int initial_capacity, int maximum_capacity);
 
   // Tear down the space.  Heap memory was not allocated by the space, so it
   // is not deallocated here.
   void TearDown();
 
   // True if the space has been set up but not torn down.
-  bool HasBeenSetup() { return start_ != NULL; }
-
-  // Grow the size of the semispace by committing extra virtual memory.
-  // Assumes that the caller has checked that the semispace has not reached
-  // its maximum capacity (and thus there is space available in the reserved
-  // address range to grow).
-  bool Grow();
+  bool HasBeenSetUp() { return start_ != NULL; }
 
   // Grow the semispace to the new capacity.  The new capacity
-  // requested must be larger than the current capacity.
+  // requested must be larger than the current capacity and less than
+  // the maximum capacity.
   bool GrowTo(int new_capacity);
 
   // Shrinks the semispace to the new capacity.  The new capacity
@@ -1316,14 +1834,40 @@
   // semispace and less than the current capacity.
   bool ShrinkTo(int new_capacity);
 
-  // Returns the start address of the space.
-  Address low() { return start_; }
+  // Returns the start address of the first page of the space.
+  Address space_start() {
+    ASSERT(anchor_.next_page() != &anchor_);
+    return anchor_.next_page()->area_start();
+  }
+
+  // Returns the start address of the current page of the space.
+  Address page_low() {
+    return current_page_->area_start();
+  }
+
   // Returns one past the end address of the space.
-  Address high() { return low() + capacity_; }
+  Address space_end() {
+    return anchor_.prev_page()->area_end();
+  }
+
+  // Returns one past the end address of the current page of the space.
+  Address page_high() {
+    return current_page_->area_end();
+  }
+
+  bool AdvancePage() {
+    NewSpacePage* next_page = current_page_->next_page();
+    if (next_page == anchor()) return false;
+    current_page_ = next_page;
+    return true;
+  }
+
+  // Resets the space to using the first page.
+  void Reset();
 
   // Age mark accessors.
   Address age_mark() { return age_mark_; }
-  void set_age_mark(Address mark) { age_mark_ = mark; }
+  void set_age_mark(Address mark);
 
   // True if the address is in the address range of this semispace (not
   // necessarily below the allocation pointer).
@@ -1338,11 +1882,6 @@
     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
   }
 
-  // The offset of an address from the beginning of the space.
-  int SpaceOffsetForAddress(Address addr) {
-    return static_cast<int>(addr - low());
-  }
-
   // If we don't have these here then SemiSpace will be abstract.  However
   // they should never be called.
   virtual intptr_t Size() {
@@ -1359,9 +1898,19 @@
   bool Commit();
   bool Uncommit();
 
+  NewSpacePage* first_page() { return anchor_.next_page(); }
+  NewSpacePage* current_page() { return current_page_; }
+
 #ifdef DEBUG
   virtual void Print();
   virtual void Verify();
+  // Validate a range of addresses in a SemiSpace.
+  // The "from" address must be on a page prior to the "to" address,
+  // in the linked page order, or it must be earlier on the same page.
+  static void AssertValidRange(Address from, Address to);
+#else
+  // Do nothing.
+  inline static void AssertValidRange(Address from, Address to) {}
 #endif
 
   // Returns the current capacity of the semi space.
@@ -1373,7 +1922,17 @@
   // Returns the initial capacity of the semi space.
   int InitialCapacity() { return initial_capacity_; }
 
+  SemiSpaceId id() { return id_; }
+
+  static void Swap(SemiSpace* from, SemiSpace* to);
+
  private:
+  // Flips the semispace between being from-space and to-space.
+  // Copies the flags into the masked positions on all pages in the space.
+  void FlipPages(intptr_t flags, intptr_t flag_mask);
+
+  NewSpacePage* anchor() { return &anchor_; }
+
   // The current and maximum capacity of the space.
   int capacity_;
   int maximum_capacity_;
@@ -1390,7 +1949,13 @@
   uintptr_t object_expected_;
 
   bool committed_;
+  SemiSpaceId id_;
 
+  NewSpacePage anchor_;
+  NewSpacePage* current_page_;
+
+  friend class SemiSpaceIterator;
+  friend class NewSpacePageIterator;
  public:
   TRACK_MEMORY("SemiSpace")
 };
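// Editor's note: hypothetical sketch of the anchor-based circular page list
// that SemiSpace (and PagedSpace) use: a dummy node linked to itself when
// the list is empty, so insertion and traversal need no NULL checks.
struct PageNode {
  PageNode* next;
  PageNode* prev;
};

static void InitAnchor(PageNode* anchor) {
  anchor->next = anchor;  // Empty list: the anchor points at itself.
  anchor->prev = anchor;
}

static void InsertAfter(PageNode* pos, PageNode* page) {
  page->next = pos->next;
  page->prev = pos;
  pos->next->prev = page;
  pos->next = page;
}

// Traversal stops when it comes back around to the anchor:
//   for (PageNode* p = anchor.next; p != &anchor; p = p->next) { ... }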
@@ -1406,12 +1971,26 @@
   // Create an iterator over the objects in the given space.  If no start
   // address is given, the iterator starts from the bottom of the space.  If
   // no size function is given, the iterator calls Object::Size().
-  explicit SemiSpaceIterator(NewSpace* space);
-  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
-  SemiSpaceIterator(NewSpace* space, Address start);
 
-  HeapObject* next() {
+  // Iterate over all of allocated to-space.
+  explicit SemiSpaceIterator(NewSpace* space);
+  // Iterate over all of allocated to-space, with a custom size function.
+  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
+  // Iterate over part of allocated to-space, from start to the end
+  // of allocation.
+  SemiSpaceIterator(NewSpace* space, Address start);
+  // Iterate from one address to another in the same semi-space.
+  SemiSpaceIterator(Address from, Address to);
+
+  HeapObject* Next() {
     if (current_ == limit_) return NULL;
+    if (NewSpacePage::IsAtEnd(current_)) {
+      NewSpacePage* page = NewSpacePage::FromLimit(current_);
+      page = page->next_page();
+      ASSERT(!page->is_anchor());
+      current_ = page->area_start();
+      if (current_ == limit_) return NULL;
+    }
 
     HeapObject* object = HeapObject::FromAddress(current_);
     int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
@@ -1421,14 +2000,13 @@
   }
 
   // Implementation of the ObjectIterator functions.
-  virtual HeapObject* next_object() { return next(); }
+  virtual HeapObject* next_object() { return Next(); }
 
  private:
-  void Initialize(NewSpace* space, Address start, Address end,
+  void Initialize(Address start,
+                  Address end,
                   HeapObjectCallback size_func);
 
-  // The semispace.
-  SemiSpace* space_;
   // The current iteration point.
   Address current_;
   // The end of iteration.
@@ -1439,6 +2017,34 @@
 
 
 // -----------------------------------------------------------------------------
+// A PageIterator iterates the pages in a semi-space.
+class NewSpacePageIterator BASE_EMBEDDED {
+ public:
+  // Make an iterator that runs over all pages in to-space.
+  explicit inline NewSpacePageIterator(NewSpace* space);
+
+  // Make an iterator that runs over all pages in the given semispace,
+  // even those not used in allocation.
+  explicit inline NewSpacePageIterator(SemiSpace* space);
+
+  // Make iterator that iterates from the page containing start
+  // to the page that contains limit in the same semispace.
+  inline NewSpacePageIterator(Address start, Address limit);
+
+  inline bool has_next();
+  inline NewSpacePage* next();
+
+ private:
+  NewSpacePage* prev_page_;  // Previous page returned.
+  // Next page that will be returned.  Cached here so that we can use this
+  // iterator for operations that deallocate pages.
+  NewSpacePage* next_page_;
+  // Last page returned.
+  NewSpacePage* last_page_;
+};
+
+
+// -----------------------------------------------------------------------------
 // The young generation space.
 //
 // The new space consists of a contiguous pair of semispaces.  It simply
@@ -1449,19 +2055,21 @@
   // Constructor.
   explicit NewSpace(Heap* heap)
     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
-      to_space_(heap),
-      from_space_(heap) {}
+      to_space_(heap, kToSpace),
+      from_space_(heap, kFromSpace),
+      reservation_(),
+      inline_allocation_limit_step_(0) {}
 
   // Sets up the new space using the given chunk.
-  bool Setup(Address start, int size);
+  bool SetUp(int reserved_semispace_size_, int max_semispace_size);
 
   // Tears down the space.  Heap memory was not allocated by the space, so it
   // is not deallocated here.
   void TearDown();
 
   // True if the space has been set up but not torn down.
-  bool HasBeenSetup() {
-    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
+  bool HasBeenSetUp() {
+    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
   }
 
   // Flip the pair of spaces.
@@ -1480,18 +2088,30 @@
     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
         == reinterpret_cast<uintptr_t>(start_);
   }
+
   bool Contains(Object* o) {
-    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
+    Address a = reinterpret_cast<Address>(o);
+    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
   }
 
   // Return the allocated bytes in the active semispace.
-  virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
+  virtual intptr_t Size() {
+    return pages_used_ * NewSpacePage::kAreaSize +
+        static_cast<int>(top() - to_space_.page_low());
+  }
+
   // The same, but returning an int.  We have to have the one that returns
   // intptr_t because it is inherited, but if we know we are dealing with the
   // new space, which can't get as big as the other spaces then this is useful:
   int SizeAsInt() { return static_cast<int>(Size()); }
 
   // Return the current capacity of a semispace.
+  intptr_t EffectiveCapacity() {
+    SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
+    return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
+  }
+
+  // Return the current capacity of a semispace.
   intptr_t Capacity() {
     ASSERT(to_space_.Capacity() == from_space_.Capacity());
     return to_space_.Capacity();
@@ -1503,8 +2123,10 @@
     return Capacity();
   }
 
-  // Return the available bytes without growing in the active semispace.
-  intptr_t Available() { return Capacity() - Size(); }
+  // Return the available bytes without growing.
+  intptr_t Available() {
+    return Capacity() - Size();
+  }
 
   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
@@ -1519,9 +2141,12 @@
   }
 
   // Return the address of the allocation pointer in the active semispace.
-  Address top() { return allocation_info_.top; }
+  Address top() {
+    ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
+    return allocation_info_.top;
+  }
   // Return the address of the first object in the active semispace.
-  Address bottom() { return to_space_.low(); }
+  Address bottom() { return to_space_.space_start(); }
 
   // Get the age mark of the inactive semispace.
   Address age_mark() { return from_space_.age_mark(); }
@@ -1533,54 +2158,68 @@
   Address start() { return start_; }
   uintptr_t mask() { return address_mask_; }
 
+  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
+    ASSERT(Contains(addr));
+    ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
+           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
+    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
+  }
+
+  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
+    return reinterpret_cast<Address>(index << kPointerSizeLog2);
+  }
+
   // The allocation top and limit addresses.
   Address* allocation_top_address() { return &allocation_info_.top; }
   Address* allocation_limit_address() { return &allocation_info_.limit; }
 
-  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
-    return AllocateRawInternal(size_in_bytes, &allocation_info_);
-  }
-
-  // Allocate the requested number of bytes for relocation during mark-compact
-  // collection.
-  MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
-    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
-  }
+  MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
 
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetAllocationInfo();
-  // Reset the reloction pointer to the bottom of the inactive semispace in
-  // preparation for mark-compact collection.
-  void MCResetRelocationInfo();
-  // Update the allocation pointer in the active semispace after a
-  // mark-compact collection.
-  void MCCommitRelocationInfo();
 
-  // Get the extent of the inactive semispace (for use as a marking stack).
-  Address FromSpaceLow() { return from_space_.low(); }
-  Address FromSpaceHigh() { return from_space_.high(); }
-
-  // Get the extent of the active semispace (to sweep newly copied objects
-  // during a scavenge collection).
-  Address ToSpaceLow() { return to_space_.low(); }
-  Address ToSpaceHigh() { return to_space_.high(); }
-
-  // Offsets from the beginning of the semispaces.
-  int ToSpaceOffsetForAddress(Address a) {
-    return to_space_.SpaceOffsetForAddress(a);
+  void LowerInlineAllocationLimit(intptr_t step) {
+    inline_allocation_limit_step_ = step;
+    if (step == 0) {
+      allocation_info_.limit = to_space_.page_high();
+    } else {
+      allocation_info_.limit = Min(
+          allocation_info_.top + inline_allocation_limit_step_,
+          allocation_info_.limit);
+    }
+    top_on_previous_step_ = allocation_info_.top;
   }
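// Editor's note: a standalone sketch of the limit-lowering idea above:
// publish an artificially low limit so that inline allocation traps every
// 'step' bytes, giving the collector a hook for an incremental-marking step.
// Names are hypothetical.
#include <cstddef>

struct LinearArea {
  char* top;
  char* limit;       // Published limit, possibly lowered.
  char* real_limit;  // Actual end of the allocatable area.

  void LowerLimit(ptrdiff_t step) {
    if (step == 0) {
      limit = real_limit;             // No stepping: expose the whole area.
    } else if (top + step < limit) {
      limit = top + step;             // Trap after at most 'step' bytes.
    }
  }
};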
-  int FromSpaceOffsetForAddress(Address a) {
-    return from_space_.SpaceOffsetForAddress(a);
+
+  // Get the extent of the inactive semispace (for use as a marking stack,
+  // or to zap it). Notice: space-addresses are not necessarily on the
+  // same page, so FromSpaceStart() might be above FromSpaceEnd().
+  Address FromSpacePageLow() { return from_space_.page_low(); }
+  Address FromSpacePageHigh() { return from_space_.page_high(); }
+  Address FromSpaceStart() { return from_space_.space_start(); }
+  Address FromSpaceEnd() { return from_space_.space_end(); }
+
+  // Get the extent of the active semispace's pages' memory.
+  Address ToSpaceStart() { return to_space_.space_start(); }
+  Address ToSpaceEnd() { return to_space_.space_end(); }
+
+  inline bool ToSpaceContains(Address address) {
+    return to_space_.Contains(address);
+  }
+  inline bool FromSpaceContains(Address address) {
+    return from_space_.Contains(address);
   }
 
   // True if the object is a heap object in the address range of the
   // respective semispace (not necessarily below the allocation pointer of the
   // semispace).
-  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
-  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
 
-  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
-  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
+  // Try to switch the active semispace to a new, empty page.
+  // Returns false if this isn't possible or reasonable (i.e., there
+  // are no pages, or the current page is already empty), or true
+  // if successful.
+  bool AddFreshPage();
 
   virtual bool ReserveSpace(int bytes);
 
@@ -1620,10 +2259,24 @@
     return from_space_.Uncommit();
   }
 
+  inline intptr_t inline_allocation_limit_step() {
+    return inline_allocation_limit_step_;
+  }
+
+  SemiSpace* active_space() { return &to_space_; }
+
  private:
+  // Update allocation info to match the current to-space page.
+  void UpdateAllocationInfo();
+
+  Address chunk_base_;
+  uintptr_t chunk_size_;
+
   // The semispaces.
   SemiSpace to_space_;
   SemiSpace from_space_;
+  VirtualMemory reservation_;
+  int pages_used_;
 
   // Start address and bit mask for containment testing.
   Address start_;
@@ -1634,15 +2287,19 @@
   // Allocation pointer and limit for normal allocation and allocation during
   // mark-compact collection.
   AllocationInfo allocation_info_;
-  AllocationInfo mc_forwarding_info_;
+
+  // When incremental marking is active we will set allocation_info_.limit
+  // to be lower than actual limit and then will gradually increase it
+  // in steps to guarantee that we do incremental marking steps even
+  // when all allocation is performed from inlined generated code.
+  intptr_t inline_allocation_limit_step_;
+
+  Address top_on_previous_step_;
 
   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;
 
-  // Implementation of AllocateRaw and MCAllocateRaw.
-  MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
-      int size_in_bytes,
-      AllocationInfo* alloc_info);
+  MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
 
   friend class SemiSpaceIterator;
 
@@ -1652,193 +2309,6 @@
 
 
 // -----------------------------------------------------------------------------
-// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap.  They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object).  They have a size and a next pointer.  The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode: public HeapObject {
- public:
-  // Obtain a free-list node from a raw address.  This is not a cast because
-  // it does not check nor require that the first word at the address is a map
-  // pointer.
-  static FreeListNode* FromAddress(Address address) {
-    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
-  }
-
-  static inline bool IsFreeListNode(HeapObject* object);
-
-  // Set the size in bytes, which can be read with HeapObject::Size().  This
-  // function also writes a map to the first word of the block so that it
-  // looks like a heap object to the garbage collector and heap iteration
-  // functions.
-  void set_size(Heap* heap, int size_in_bytes);
-
-  // Accessors for the next field.
-  inline Address next(Heap* heap);
-  inline void set_next(Heap* heap, Address next);
-
- private:
-  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
-
-// The free list for the old space.
-class OldSpaceFreeList BASE_EMBEDDED {
- public:
-  OldSpaceFreeList(Heap* heap, AllocationSpace owner);
-
-  // Clear the free list.
-  void Reset();
-
-  // Return the number of bytes available on the free list.
-  intptr_t available() { return available_; }
-
-  // Place a node on the free list.  The block of size 'size_in_bytes'
-  // starting at 'start' is placed on the free list.  The return value is the
-  // number of bytes that have been lost due to internal fragmentation by
-  // freeing the block.  Bookkeeping information will be written to the block,
-  // ie, its contents will be destroyed.  The start address should be word
-  // aligned, and the size should be a non-zero multiple of the word size.
-  int Free(Address start, int size_in_bytes);
-
-  // Allocate a block of size 'size_in_bytes' from the free list.  The block
-  // is unitialized.  A failure is returned if no block is available.  The
-  // number of bytes lost to fragmentation is returned in the output parameter
-  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
-  MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
-
-  void MarkNodes();
-
- private:
-  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
-  // will always result in waste.)
-  static const int kMinBlockSize = 2 * kPointerSize;
-  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
-
-  Heap* heap_;
-
-  // The identity of the owning space, for building allocation Failure
-  // objects.
-  AllocationSpace owner_;
-
-  // Total available bytes in all blocks on this free list.
-  int available_;
-
-  // Blocks are put on exact free lists in an array, indexed by size in words.
-  // The available sizes are kept in an increasingly ordered list. Entries
-  // corresponding to sizes < kMinBlockSize always have an empty free list
-  // (but index kHead is used for the head of the size list).
-  struct SizeNode {
-    // Address of the head FreeListNode of the implied block size or NULL.
-    Address head_node_;
-    // Size (words) of the next larger available size if head_node_ != NULL.
-    int next_size_;
-  };
-  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
-  SizeNode free_[kFreeListsLength];
-
-  // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
-  static const int kHead = kMinBlockSize / kPointerSize - 1;
-  static const int kEnd = kMaxInt;
-
-  // We keep a "finger" in the size list to speed up a common pattern:
-  // repeated requests for the same or increasing sizes.
-  int finger_;
-
-  // Starting from *prev, find and return the smallest size >= index (words),
-  // or kEnd. Update *prev to be the largest size < index, or kHead.
-  int FindSize(int index, int* prev) {
-    int cur = free_[*prev].next_size_;
-    while (cur < index) {
-      *prev = cur;
-      cur = free_[cur].next_size_;
-    }
-    return cur;
-  }
-
-  // Remove an existing element from the size list.
-  void RemoveSize(int index) {
-    int prev = kHead;
-    int cur = FindSize(index, &prev);
-    ASSERT(cur == index);
-    free_[prev].next_size_ = free_[cur].next_size_;
-    finger_ = prev;
-  }
-
-  // Insert a new element into the size list.
-  void InsertSize(int index) {
-    int prev = kHead;
-    int cur = FindSize(index, &prev);
-    ASSERT(cur != index);
-    free_[prev].next_size_ = index;
-    free_[index].next_size_ = cur;
-  }
-
-  // The size list is not updated during a sequence of calls to Free, but is
-  // rebuilt before the next allocation.
-  void RebuildSizeList();
-  bool needs_rebuild_;
-
-#ifdef DEBUG
-  // Does this free list contain a free block located at the address of 'node'?
-  bool Contains(FreeListNode* node);
-#endif
-
-  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
-};
-
-
-// The free list for the map space.
-class FixedSizeFreeList BASE_EMBEDDED {
- public:
-  FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
-
-  // Clear the free list.
-  void Reset();
-
-  // Return the number of bytes available on the free list.
-  intptr_t available() { return available_; }
-
-  // Place a node on the free list.  The block starting at 'start' (assumed to
-  // have size object_size_) is placed on the free list.  Bookkeeping
-  // information will be written to the block, ie, its contents will be
-  // destroyed.  The start address should be word aligned.
-  void Free(Address start);
-
-  // Allocate a fixed sized block from the free list.  The block is unitialized.
-  // A failure is returned if no block is available.
-  MUST_USE_RESULT MaybeObject* Allocate();
-
-  void MarkNodes();
-
- private:
-  Heap* heap_;
-
-  // Available bytes on the free list.
-  intptr_t available_;
-
-  // The head of the free list.
-  Address head_;
-
-  // The tail of the free list.
-  Address tail_;
-
-  // The identity of the owning space, for building allocation Failure
-  // objects.
-  AllocationSpace owner_;
-
-  // The size of the objects in this space.
-  int object_size_;
-
-  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
-};
-
-
-// -----------------------------------------------------------------------------
 // Old object space (excluding map objects)
 
 class OldSpace : public PagedSpace {
@@ -1849,71 +2319,28 @@
            intptr_t max_capacity,
            AllocationSpace id,
            Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable),
-        free_list_(heap, id) {
+      : PagedSpace(heap, max_capacity, id, executable) {
     page_extra_ = 0;
   }
 
-  // The bytes available on the free list (ie, not above the linear allocation
-  // pointer).
-  intptr_t AvailableFree() { return free_list_.available(); }
-
   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd();
+    return page->area_end();
   }
 
-  // Give a block of memory to the space's free list.  It might be added to
-  // the free list or accounted as waste.
-  // If add_to_freelist is false then just accounting stats are updated and
-  // no attempt to add area to free list is made.
-  void Free(Address start, int size_in_bytes, bool add_to_freelist) {
-    accounting_stats_.DeallocateBytes(size_in_bytes);
-
-    if (add_to_freelist) {
-      int wasted_bytes = free_list_.Free(start, size_in_bytes);
-      accounting_stats_.WasteBytes(wasted_bytes);
-    }
-  }
-
-  virtual void DeallocateBlock(Address start,
-                               int size_in_bytes,
-                               bool add_to_freelist);
-
-  // Prepare for full garbage collection.  Resets the relocation pointer and
-  // clears the free list.
-  virtual void PrepareForMarkCompact(bool will_compact);
-
-  // Updates the allocation pointer to the relocation top after a mark-compact
-  // collection.
-  virtual void MCCommitRelocationInfo();
-
-  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
-
-  void MarkFreeListNodes() { free_list_.MarkNodes(); }
-
-#ifdef DEBUG
-  // Reports statistics for the space
-  void ReportStatistics();
-#endif
-
- protected:
-  // Virtual function in the superclass.  Slow path of AllocateRaw.
-  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
-  // Virtual function in the superclass.  Allocate linearly at the start of
-  // the page after current_page (there is assumed to be one).
-  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
-
- private:
-  // The space's free list.
-  OldSpaceFreeList free_list_;
-
  public:
   TRACK_MEMORY("OldSpace")
 };
 
 
+// For contiguous spaces, top should be in the space (or at the end) and limit
+// should be the end of the space.
+#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
+  SLOW_ASSERT((space).page_low() <= (info).top             \
+              && (info).top <= (space).page_high()         \
+              && (info).limit <= (space).page_high())
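+//
+// For example, NewSpace code can assert
+//   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+// to check that the current allocation window lies on the active
+// semispace's current page.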
+
+
 // -----------------------------------------------------------------------------
 // Old space for objects of a fixed size
 
@@ -1926,56 +2353,21 @@
              const char* name)
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
-        name_(name),
-        free_list_(heap, id, object_size_in_bytes) {
-    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
+        name_(name) {
+    page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
   }
 
   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd() - page_extra_;
+    return page->area_end() - page_extra_;
   }
 
   int object_size_in_bytes() { return object_size_in_bytes_; }
 
-  // Give a fixed sized block of memory to the space's free list.
-  // If add_to_freelist is false then just accounting stats are updated and
-  // no attempt to add area to free list is made.
-  void Free(Address start, bool add_to_freelist) {
-    if (add_to_freelist) {
-      free_list_.Free(start);
-    }
-    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
-  }
-
   // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact(bool will_compact);
-
-  // Updates the allocation pointer to the relocation top after a mark-compact
-  // collection.
-  virtual void MCCommitRelocationInfo();
-
-  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
-
-  virtual void DeallocateBlock(Address start,
-                               int size_in_bytes,
-                               bool add_to_freelist);
-
-  void MarkFreeListNodes() { free_list_.MarkNodes(); }
-
-#ifdef DEBUG
-  // Reports statistic info of the space
-  void ReportStatistics();
-#endif
+  virtual void PrepareForMarkCompact();
 
  protected:
-  // Virtual function in the superclass.  Slow path of AllocateRaw.
-  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
-  // Virtual function in the superclass.  Allocate linearly at the start of
-  // the page after current_page (there is assumed to be one).
-  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
-
   void ResetFreeList() {
     free_list_.Reset();
   }
@@ -1986,9 +2378,6 @@
 
   // The name of this space.
   const char* name_;
-
-  // The space's free list.
-  FixedSizeFreeList free_list_;
 };
 
 
@@ -1998,89 +2387,21 @@
 class MapSpace : public FixedSpace {
  public:
   // Creates a map space object with a maximum capacity.
-  MapSpace(Heap* heap,
-           intptr_t max_capacity,
-           int max_map_space_pages,
-           AllocationSpace id)
+  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
       : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
-        max_map_space_pages_(max_map_space_pages) {
-    ASSERT(max_map_space_pages < kMaxMapPageIndex);
+        max_map_space_pages_(kMaxMapPageIndex - 1) {
   }
 
-  // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact(bool will_compact);
-
   // Given an index, returns the page address.
-  Address PageAddress(int page_index) { return page_addresses_[page_index]; }
+  // TODO(1600): this limit is artificial, just to keep the code compilable.
+  static const int kMaxMapPageIndex = 1 << 16;
 
-  static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
-
-  // Are map pointers encodable into map word?
-  bool MapPointersEncodable() {
-    if (!FLAG_use_big_map_space) {
-      ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
-      return true;
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (IsPowerOf2(Map::kSize)) {
+      return RoundDown(size, Map::kSize);
+    } else {
+      return (size / Map::kSize) * Map::kSize;
     }
-    return CountPagesToTop() <= max_map_space_pages_;
-  }
-
-  // Should be called after forced sweep to find out if map space needs
-  // compaction.
-  bool NeedsCompaction(int live_maps) {
-    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
-  }
-
-  Address TopAfterCompaction(int live_maps) {
-    ASSERT(NeedsCompaction(live_maps));
-
-    int pages_left = live_maps / kMapsPerPage;
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    while (pages_left-- > 0) {
-      ASSERT(it.has_next());
-      it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    }
-    ASSERT(it.has_next());
-    Page* top_page = it.next();
-    top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    ASSERT(top_page->is_valid());
-
-    int offset = live_maps % kMapsPerPage * Map::kSize;
-    Address top = top_page->ObjectAreaStart() + offset;
-    ASSERT(top < top_page->ObjectAreaEnd());
-    ASSERT(Contains(top));
-
-    return top;
-  }
-
-  void FinishCompaction(Address new_top, int live_maps) {
-    Page* top_page = Page::FromAddress(new_top);
-    ASSERT(top_page->is_valid());
-
-    SetAllocationInfo(&allocation_info_, top_page);
-    allocation_info_.top = new_top;
-
-    int new_size = live_maps * Map::kSize;
-    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
-    accounting_stats_.AllocateBytes(new_size);
-
-    // Flush allocation watermarks.
-    for (Page* p = first_page_; p != top_page; p = p->next_page()) {
-      p->SetAllocationWatermark(p->AllocationTop());
-    }
-    top_page->SetAllocationWatermark(new_top);
-
-#ifdef DEBUG
-    if (FLAG_enable_slow_asserts) {
-      intptr_t actual_size = 0;
-      for (Page* p = first_page_; p != top_page; p = p->next_page())
-        actual_size += kMapsPerPage * Map::kSize;
-      actual_size += (new_top - top_page->ObjectAreaStart());
-      ASSERT(accounting_stats_.Size() == actual_size);
-    }
-#endif
-
-    Shrink();
-    ResetFreeList();
   }
 
  protected:
@@ -2089,7 +2410,7 @@
 #endif
 
  private:
-  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
+  static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
 
   // Do map space compaction if there is a page gap.
   int CompactionThreshold() {
@@ -2098,9 +2419,6 @@
 
   const int max_map_space_pages_;
 
-  // An array of page start address in a map space.
-  Address page_addresses_[kMaxMapPageIndex];
-
  public:
   TRACK_MEMORY("MapSpace")
 };
@@ -2116,6 +2434,14 @@
       : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
   {}
 
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
+      return RoundDown(size, JSGlobalPropertyCell::kSize);
+    } else {
+      return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
+    }
+  }
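+
+  // For illustration (hypothetical sizes): if the cell size were 40 bytes,
+  // RoundSizeDownToObjectAlignment(100) would return (100 / 40) * 40 == 80,
+  // while for a power-of-two size such as 32 the cheaper
+  // RoundDown(100, 32) == 96 path is taken (IsPowerOf2 is applied to a
+  // compile-time constant, so the branch folds away).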
+
  protected:
 #ifdef DEBUG
   virtual void VerifyObject(HeapObject* obj);
@@ -2133,81 +2459,26 @@
 // A large object always starts at offset Page::kObjectStartOffset within a page.
 // Large objects do not move during garbage collections.
 
-// A LargeObjectChunk holds exactly one large object page with exactly one
-// large object.
-class LargeObjectChunk {
+class LargeObjectSpace : public Space {
  public:
-  // Allocates a new LargeObjectChunk that contains a large object page
-  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
-  // object) bytes after the object area start of that page.
-  static LargeObjectChunk* New(int size_in_bytes, Executability executable);
+  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
+  virtual ~LargeObjectSpace() {}
 
-  // Free the memory associated with the chunk.
-  void Free(Executability executable);
+  // Initializes internal data structures.
+  bool SetUp();
 
-  // Interpret a raw address as a large object chunk.
-  static LargeObjectChunk* FromAddress(Address address) {
-    return reinterpret_cast<LargeObjectChunk*>(address);
-  }
+  // Releases internal resources, frees objects in this space.
+  void TearDown();
 
-  // Returns the address of this chunk.
-  Address address() { return reinterpret_cast<Address>(this); }
-
-  Page* GetPage() {
-    return Page::FromAddress(RoundUp(address(), Page::kPageSize));
-  }
-
-  // Accessors for the fields of the chunk.
-  LargeObjectChunk* next() { return next_; }
-  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
-  size_t size() { return size_ & ~Page::kPageFlagMask; }
-
-  // Compute the start address in the chunk.
-  Address GetStartAddress() { return GetPage()->ObjectAreaStart(); }
-
-  // Returns the object in this chunk.
-  HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
-
-  // Given a requested size returns the physical size of a chunk to be
-  // allocated.
-  static int ChunkSizeFor(int size_in_bytes);
-
-  // Given a chunk size, returns the object size it can accommodate.  Used by
-  // LargeObjectSpace::Available.
   static intptr_t ObjectSizeFor(intptr_t chunk_size) {
     if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
   }
 
- private:
-  // A pointer to the next large object chunk in the space or NULL.
-  LargeObjectChunk* next_;
-
-  // The total size of this chunk.
-  size_t size_;
-
- public:
-  TRACK_MEMORY("LargeObjectChunk")
-};
-
-
-class LargeObjectSpace : public Space {
- public:
-  LargeObjectSpace(Heap* heap, AllocationSpace id);
-  virtual ~LargeObjectSpace() {}
-
-  // Initializes internal data structures.
-  bool Setup();
-
-  // Releases internal resources, frees objects in this space.
-  void TearDown();
-
-  // Allocates a (non-FixedArray, non-Code) large object.
-  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
-  // Allocates a large Code object.
-  MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
-  // Allocates a large FixedArray.
-  MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
+  // Shared implementation of AllocateRaw, AllocateRawCode and
+  // AllocateRawFixedArray.
+  MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
+                                           Executability executable);
 
   // Available bytes for objects in this space.
   inline intptr_t Available();
@@ -2229,12 +2500,9 @@
   // space, may be slow.
   MaybeObject* FindObject(Address a);
 
-  // Finds a large object page containing the given pc, returns NULL
+  // Finds a large object page containing the given address; returns NULL
   // if such a page doesn't exist.
-  LargeObjectChunk* FindChunkContainingPc(Address pc);
-
-  // Iterates objects covered by dirty regions.
-  void IterateDirtyRegions(ObjectSlotCallback func);
+  LargePage* FindPage(Address a);
 
   // Frees unmarked objects.
   void FreeUnmarkedObjects();
@@ -2243,13 +2511,15 @@
   bool Contains(HeapObject* obj);
 
   // Checks whether the space is empty.
-  bool IsEmpty() { return first_chunk_ == NULL; }
+  bool IsEmpty() { return first_page_ == NULL; }
 
   // See the comments for ReserveSpace in the Space class.  This has to be
   // called after ReserveSpace has been called on the paged spaces, since they
   // may use some memory, leaving less for large objects.
   virtual bool ReserveSpace(int bytes);
 
+  LargePage* first_page() { return first_page_; }
+
 #ifdef DEBUG
   virtual void Verify();
   virtual void Print();
@@ -2261,17 +2531,14 @@
   bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
 
  private:
+  intptr_t max_capacity_;
   // The head of the linked list of large object chunks.
-  LargeObjectChunk* first_chunk_;
+  LargePage* first_page_;
   intptr_t size_;  // allocated bytes
   int page_count_;  // number of chunks
   intptr_t objects_size_;  // size of objects
-
-  // Shared implementation of AllocateRaw, AllocateRawCode and
-  // AllocateRawFixedArray.
-  MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
-                                                   int object_size,
-                                                   Executability executable);
+  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them.
+  HashMap chunk_map_;
 
   friend class LargeObjectIterator;
 
@@ -2285,17 +2552,78 @@
   explicit LargeObjectIterator(LargeObjectSpace* space);
   LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
 
-  HeapObject* next();
+  HeapObject* Next();
 
   // implementation of ObjectIterator.
-  virtual HeapObject* next_object() { return next(); }
+  virtual HeapObject* next_object() { return Next(); }
 
  private:
-  LargeObjectChunk* current_;
+  LargePage* current_;
   HeapObjectCallback size_func_;
 };
 
 
+// Iterates over the chunks (pages and large object pages) that can contain
+// pointers to new space.
+class PointerChunkIterator BASE_EMBEDDED {
+ public:
+  inline explicit PointerChunkIterator(Heap* heap);
+
+  // Return NULL when the iterator is done.
+  MemoryChunk* next() {
+    switch (state_) {
+      case kOldPointerState: {
+        if (old_pointer_iterator_.has_next()) {
+          return old_pointer_iterator_.next();
+        }
+        state_ = kMapState;
+        // Fall through.
+      }
+      case kMapState: {
+        if (map_iterator_.has_next()) {
+          return map_iterator_.next();
+        }
+        state_ = kLargeObjectState;
+        // Fall through.
+      }
+      case kLargeObjectState: {
+        HeapObject* heap_object;
+        do {
+          heap_object = lo_iterator_.Next();
+          if (heap_object == NULL) {
+            state_ = kFinishedState;
+            return NULL;
+          }
+          // Fixed arrays are the only pointer-containing objects in large
+          // object space.
+        } while (!heap_object->IsFixedArray());
+        MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
+        return answer;
+      }
+      case kFinishedState:
+        return NULL;
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+ private:
+  enum State {
+    kOldPointerState,
+    kMapState,
+    kLargeObjectState,
+    kFinishedState
+  };
+  State state_;
+  PageIterator old_pointer_iterator_;
+  PageIterator map_iterator_;
+  LargeObjectIterator lo_iterator_;
+};
+
+
 #ifdef DEBUG
 struct CommentStatistic {
   const char* comment;
diff --git a/src/splay-tree-inl.h b/src/splay-tree-inl.h
index 9c2287e..4640ed5 100644
--- a/src/splay-tree-inl.h
+++ b/src/splay-tree-inl.h
@@ -45,7 +45,7 @@
 bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
   if (is_empty()) {
     // If the tree is empty, insert the new node.
-    root_ = new Node(key, Config::kNoValue);
+    root_ = new Node(key, Config::NoValue());
   } else {
     // Splay on the key to move the last node on the search path
     // for the key to the root of the tree.
@@ -57,7 +57,7 @@
       return false;
     }
     // Insert the new node.
-    Node* node = new Node(key, Config::kNoValue);
+    Node* node = new Node(key, Config::NoValue());
     InsertInternal(cmp, node);
   }
   locator->bind(root_);
@@ -226,7 +226,7 @@
 void SplayTree<Config, Allocator>::Splay(const Key& key) {
   if (is_empty())
     return;
-  Node dummy_node(Config::kNoKey, Config::kNoValue);
+  Node dummy_node(Config::kNoKey, Config::NoValue());
   // Create a dummy node.  The use of the dummy node is a bit
   // counter-intuitive: The right child of the dummy node will hold
   // the L tree of the algorithm.  The left child of the dummy node
diff --git a/src/store-buffer-inl.h b/src/store-buffer-inl.h
new file mode 100644
index 0000000..dd65cbc
--- /dev/null
+++ b/src/store-buffer-inl.h
@@ -0,0 +1,79 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_STORE_BUFFER_INL_H_
+#define V8_STORE_BUFFER_INL_H_
+
+#include "store-buffer.h"
+
+namespace v8 {
+namespace internal {
+
+Address StoreBuffer::TopAddress() {
+  return reinterpret_cast<Address>(heap_->store_buffer_top_address());
+}
+
+
+void StoreBuffer::Mark(Address addr) {
+  ASSERT(!heap_->cell_space()->Contains(addr));
+  ASSERT(!heap_->code_space()->Contains(addr));
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+  *top++ = addr;
+  heap_->public_set_store_buffer_top(top);
+  if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
+    ASSERT(top == limit_);
+    Compact();
+  } else {
+    ASSERT(top < limit_);
+  }
+}
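+
+// Why the overflow check above is a single bit test (sketch): SetUp()
+// aligns start_ to 2 * kStoreBufferSize, and kStoreBufferSize equals
+// kStoreBufferOverflowBit, so every in-bounds slot address has the bit
+// clear and limit_ is the first address with it set.  On a 32-bit target
+// (kStoreBufferOverflowBit == 1 << 16) the layout is, for example,
+//   start_ == 0x...40000, limit_ == 0x...50000, bit mask == 0x10000.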
+
+
+void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
+  if (store_buffer_rebuilding_enabled_) {
+    SLOW_ASSERT(!heap_->cell_space()->Contains(addr) &&
+                !heap_->code_space()->Contains(addr) &&
+                !heap_->old_data_space()->Contains(addr) &&
+                !heap_->new_space()->Contains(addr));
+    Address* top = old_top_;
+    *top++ = addr;
+    old_top_ = top;
+    old_buffer_is_sorted_ = false;
+    old_buffer_is_filtered_ = false;
+    if (top >= old_limit_) {
+      ASSERT(callback_ != NULL);
+      (*callback_)(heap_,
+                   MemoryChunk::FromAnyPointerAddress(addr),
+                   kStoreBufferFullEvent);
+    }
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_STORE_BUFFER_INL_H_
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
new file mode 100644
index 0000000..3852155
--- /dev/null
+++ b/src/store-buffer.cc
@@ -0,0 +1,719 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "store-buffer.h"
+#include "store-buffer-inl.h"
+#include "v8-counters.h"
+
+namespace v8 {
+namespace internal {
+
+StoreBuffer::StoreBuffer(Heap* heap)
+    : heap_(heap),
+      start_(NULL),
+      limit_(NULL),
+      old_start_(NULL),
+      old_limit_(NULL),
+      old_top_(NULL),
+      old_reserved_limit_(NULL),
+      old_buffer_is_sorted_(false),
+      old_buffer_is_filtered_(false),
+      during_gc_(false),
+      store_buffer_rebuilding_enabled_(false),
+      callback_(NULL),
+      may_move_store_buffer_entries_(true),
+      virtual_memory_(NULL),
+      hash_set_1_(NULL),
+      hash_set_2_(NULL),
+      hash_sets_are_empty_(true) {
+}
+
+
+void StoreBuffer::SetUp() {
+  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
+  uintptr_t start_as_int =
+      reinterpret_cast<uintptr_t>(virtual_memory_->address());
+  start_ =
+      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
+  limit_ = start_ + (kStoreBufferSize / kPointerSize);
+
+  old_virtual_memory_ =
+      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
+  old_top_ = old_start_ =
+      reinterpret_cast<Address*>(old_virtual_memory_->address());
+  // We don't know the OS's allocation alignment, but it is certainly at
+  // least 4K, so the low 12 bits of the start address must be zero.
+  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
+  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
+  ASSERT(initial_length > 0);
+  ASSERT(initial_length <= kOldStoreBufferLength);
+  old_limit_ = old_start_ + initial_length;
+  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
+
+  CHECK(old_virtual_memory_->Commit(
+            reinterpret_cast<void*>(old_start_),
+            (old_limit_ - old_start_) * kPointerSize,
+            false));
+
+  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
+  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
+  Address* vm_limit = reinterpret_cast<Address*>(
+      reinterpret_cast<char*>(virtual_memory_->address()) +
+          virtual_memory_->size());
+  ASSERT(start_ <= vm_limit);
+  ASSERT(limit_ <= vm_limit);
+  USE(vm_limit);
+  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
+  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
+         0);
+
+  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+                                kStoreBufferSize,
+                                false));  // Not executable.
+  heap_->public_set_store_buffer_top(start_);
+
+  hash_set_1_ = new uintptr_t[kHashSetLength];
+  hash_set_2_ = new uintptr_t[kHashSetLength];
+  hash_sets_are_empty_ = false;
+
+  ClearFilteringHashSets();
+}
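+
+// A note on the reservation arithmetic above (illustrative numbers for a
+// 32-bit target, where kStoreBufferSize == 64KB): 3 * 64KB is reserved so
+// that a 64KB buffer aligned on a 128KB boundary always fits.  If the OS
+// returned 0x12350000, rounding up gives start_ == 0x12360000 and
+// limit_ == start_ + 16K entries == 0x12370000, comfortably inside the
+// reservation, which ends at 0x12380000.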
+
+
+void StoreBuffer::TearDown() {
+  delete virtual_memory_;
+  delete old_virtual_memory_;
+  delete[] hash_set_1_;
+  delete[] hash_set_2_;
+  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
+  start_ = limit_ = NULL;
+  heap_->public_set_store_buffer_top(start_);
+}
+
+
+void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
+  isolate->heap()->store_buffer()->Compact();
+}
+
+
+#if V8_TARGET_ARCH_X64
+static int CompareAddresses(const void* void_a, const void* void_b) {
+  intptr_t a =
+      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
+  intptr_t b =
+      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
+  // Unfortunately if int is smaller than intptr_t there is no branch-free
+  // way to return a number with the same sign as the difference between the
+  // pointers.
+  if (a == b) return 0;
+  if (a < b) return -1;
+  ASSERT(a > b);
+  return 1;
+}
+#else
+static int CompareAddresses(const void* void_a, const void* void_b) {
+  intptr_t a =
+      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
+  intptr_t b =
+      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
+  ASSERT(sizeof(1) == sizeof(a));
+  // Shift down to avoid wraparound.
+  return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
+}
+#endif
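+
+// Example of why a truncating subtraction would be wrong on x64: two
+// addresses 2^32 apart give static_cast<int>(a - b) == 0 even though
+// a != b, and a difference of 2^31 flips the sign after truncation.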
+
+
+void StoreBuffer::Uniq() {
+  // Remove adjacent duplicates and cells that do not point at new space.
+  Address previous = NULL;
+  Address* write = old_start_;
+  ASSERT(may_move_store_buffer_entries_);
+  for (Address* read = old_start_; read < old_top_; read++) {
+    Address current = *read;
+    if (current != previous) {
+      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
+        *write++ = current;
+      }
+    }
+    previous = current;
+  }
+  old_top_ = write;
+}
+
+
+void StoreBuffer::EnsureSpace(intptr_t space_needed) {
+  while (old_limit_ - old_top_ < space_needed &&
+         old_limit_ < old_reserved_limit_) {
+    size_t grow = old_limit_ - old_start_;  // Double size.
+    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
+                                      grow * kPointerSize,
+                                      false));
+    old_limit_ += grow;
+  }
+
+  if (old_limit_ - old_top_ >= space_needed) return;
+
+  if (old_buffer_is_filtered_) return;
+  ASSERT(may_move_store_buffer_entries_);
+  Compact();
+
+  old_buffer_is_filtered_ = true;
+  bool page_has_scan_on_scavenge_flag = false;
+
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  while ((chunk = it.next()) != NULL) {
+    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+  }
+
+  if (page_has_scan_on_scavenge_flag) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+
+  // If filtering out the entries from scan_on_scavenge pages got us down to
+  // less than half full, then we are satisfied with that.
+  if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+
+  // Sample 1 entry in 97 and filter out the pages where we estimate that more
+  // than 1 in 8 pointers are to new space.
+  static const int kSampleFinenesses = 5;
+  static const struct Samples {
+    int prime_sample_step;
+    int threshold;
+  } samples[kSampleFinenesses] = {
+    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
+    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
+    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
+    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
+    { 1, 0}
+  };
+  for (int i = kSampleFinenesses - 1; i >= 0; i--) {
+    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+    // As a last resort we mark all pages as being exempt from the store buffer.
+    ASSERT(i != 0 || old_top_ == old_start_);
+    if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+  }
+  UNREACHABLE();
+}
+
+
+// Sample the store buffer to see if some pages are taking up a lot of its
+// space.
+void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  while ((chunk = it.next()) != NULL) {
+    chunk->set_store_buffer_counter(0);
+  }
+  bool created_new_scan_on_scavenge_pages = false;
+  MemoryChunk* previous_chunk = NULL;
+  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
+    Address addr = *p;
+    MemoryChunk* containing_chunk = NULL;
+    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
+      containing_chunk = previous_chunk;
+    } else {
+      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+    }
+    int old_counter = containing_chunk->store_buffer_counter();
+    if (old_counter == threshold) {
+      containing_chunk->set_scan_on_scavenge(true);
+      created_new_scan_on_scavenge_pages = true;
+    }
+    containing_chunk->set_store_buffer_counter(old_counter + 1);
+    previous_chunk = containing_chunk;
+  }
+  if (created_new_scan_on_scavenge_pages) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+  old_buffer_is_filtered_ = true;
+}
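+
+// For a feel of the parameters passed in from EnsureSpace() (assuming 1MB
+// pages and 32-bit pointers, i.e. 256K slots per page): the coarsest
+// sampling visits every 97th entry and exempts a page once about
+// (256K / 97) / 8 == 337 of its sampled entries have been seen, i.e. when
+// an estimated one in eight of the page's slots holds a new-space pointer.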
+
+
+void StoreBuffer::Filter(int flag) {
+  Address* new_top = old_start_;
+  MemoryChunk* previous_chunk = NULL;
+  for (Address* p = old_start_; p < old_top_; p++) {
+    Address addr = *p;
+    MemoryChunk* containing_chunk = NULL;
+    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
+      containing_chunk = previous_chunk;
+    } else {
+      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+      previous_chunk = containing_chunk;
+    }
+    if (!containing_chunk->IsFlagSet(flag)) {
+      *new_top++ = addr;
+    }
+  }
+  old_top_ = new_top;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
+}
+
+
+void StoreBuffer::SortUniq() {
+  Compact();
+  if (old_buffer_is_sorted_) return;
+  qsort(reinterpret_cast<void*>(old_start_),
+        old_top_ - old_start_,
+        sizeof(*old_top_),
+        &CompareAddresses);
+  Uniq();
+
+  old_buffer_is_sorted_ = true;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
+}
+
+
+bool StoreBuffer::PrepareForIteration() {
+  Compact();
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  bool page_has_scan_on_scavenge_flag = false;
+  while ((chunk = it.next()) != NULL) {
+    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+  }
+
+  if (page_has_scan_on_scavenge_flag) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+
+  // Filtering hash sets are inconsistent with the store buffer after
+  // iteration.
+  ClearFilteringHashSets();
+
+  return page_has_scan_on_scavenge_flag;
+}
+
+
+#ifdef DEBUG
+void StoreBuffer::Clean() {
+  ClearFilteringHashSets();
+  Uniq();  // Also removes things that no longer point to new space.
+  CheckForFullBuffer();
+}
+
+
+static Address* in_store_buffer_1_element_cache = NULL;
+
+
+bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
+  if (!FLAG_enable_slow_asserts) return true;
+  if (in_store_buffer_1_element_cache != NULL &&
+      *in_store_buffer_1_element_cache == cell_address) {
+    return true;
+  }
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+  for (Address* current = top - 1; current >= start_; current--) {
+    if (*current == cell_address) {
+      in_store_buffer_1_element_cache = current;
+      return true;
+    }
+  }
+  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
+    if (*current == cell_address) {
+      in_store_buffer_1_element_cache = current;
+      return true;
+    }
+  }
+  return false;
+}
+#endif
+
+
+void StoreBuffer::ClearFilteringHashSets() {
+  if (!hash_sets_are_empty_) {
+    memset(reinterpret_cast<void*>(hash_set_1_),
+           0,
+           sizeof(uintptr_t) * kHashSetLength);
+    memset(reinterpret_cast<void*>(hash_set_2_),
+           0,
+           sizeof(uintptr_t) * kHashSetLength);
+    hash_sets_are_empty_ = true;
+  }
+}
+
+
+void StoreBuffer::GCPrologue() {
+  ClearFilteringHashSets();
+  during_gc_ = true;
+}
+
+
+#ifdef DEBUG
+static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
+  // Do nothing.
+}
+
+
+void StoreBuffer::VerifyPointers(PagedSpace* space,
+                                 RegionCallback region_callback) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* page = it.next();
+    FindPointersToNewSpaceOnPage(
+        reinterpret_cast<PagedSpace*>(page->owner()),
+        page,
+        region_callback,
+        &DummyScavengePointer);
+  }
+}
+
+
+void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
+  LargeObjectIterator it(space);
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    if (object->IsFixedArray()) {
+      Address slot_address = object->address();
+      Address end = object->address() + object->Size();
+
+      while (slot_address < end) {
+        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+        // When we are not in GC the Heap::InNewSpace() predicate
+        // checks that pointers which satisfy the predicate point into
+        // the active semispace.
+        heap_->InNewSpace(*slot);
+        slot_address += kPointerSize;
+      }
+    }
+  }
+}
+#endif
+
+
+void StoreBuffer::Verify() {
+#ifdef DEBUG
+  VerifyPointers(heap_->old_pointer_space(),
+                 &StoreBuffer::FindPointersToNewSpaceInRegion);
+  VerifyPointers(heap_->map_space(),
+                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
+  VerifyPointers(heap_->lo_space());
+#endif
+}
+
+
+void StoreBuffer::GCEpilogue() {
+  during_gc_ = false;
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+}
+
+
+void StoreBuffer::FindPointersToNewSpaceInRegion(
+    Address start, Address end, ObjectSlotCallback slot_callback) {
+  for (Address slot_address = start;
+       slot_address < end;
+       slot_address += kPointerSize) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    if (heap_->InNewSpace(*slot)) {
+      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
+      ASSERT(object->IsHeapObject());
+      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
+      if (heap_->InNewSpace(*slot)) {
+        EnterDirectlyIntoStoreBuffer(slot_address);
+      }
+    }
+  }
+}
+
+
+// Compute the start address of the first map at or following the given addr.
+static inline Address MapStartAlign(Address addr) {
+  Address page = Page::FromAddress(addr)->area_start();
+  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
+}
+
+
+// Compute the end address of the last map ending at or before the given addr.
+static inline Address MapEndAlign(Address addr) {
+  Address page = Page::FromAllocationTop(addr)->area_start();
+  return page + ((addr - page) / Map::kSize * Map::kSize);
+}
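+
+// Illustration (hypothetical Map::kSize of 40): for an addr 100 bytes into
+// the page's object area, MapStartAlign yields area_start() + 120, the
+// first map boundary at or after addr, while MapEndAlign yields
+// area_start() + 80, the last map boundary at or before addr.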
+
+
+void StoreBuffer::FindPointersToNewSpaceInMaps(
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback) {
+  ASSERT(MapStartAlign(start) == start);
+  ASSERT(MapEndAlign(end) == end);
+
+  Address map_address = start;
+  while (map_address < end) {
+    ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
+    ASSERT(Memory::Object_at(map_address)->IsMap());
+
+    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
+    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
+
+    FindPointersToNewSpaceInRegion(pointer_fields_start,
+                                   pointer_fields_end,
+                                   slot_callback);
+    map_address += Map::kSize;
+  }
+}
+
+
+void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback) {
+  Address map_aligned_start = MapStartAlign(start);
+  Address map_aligned_end   = MapEndAlign(end);
+
+  ASSERT(map_aligned_start == start);
+  ASSERT(map_aligned_end == end);
+
+  FindPointersToNewSpaceInMaps(map_aligned_start,
+                               map_aligned_end,
+                               slot_callback);
+}
+
+
+// This function iterates over all the pointers in a paged space in the heap,
+// looking for pointers into new space.  Within the pages there may be dead
+// objects that have not been overwritten by free spaces or fillers because of
+// lazy sweeping.  These dead objects may not contain pointers to new space.
+// The garbage areas that have been swept properly (these will normally be the
+// large ones) will be marked with free space and filler map words.  In
+// addition any area that has never been used at all for object allocation must
+// be marked with a free space or filler.  Because the free space and filler
+// maps do not move we can always recognize these even after a compaction.
+// Normal objects like FixedArrays and JSObjects should not contain references
+// to these maps.  The special garbage section (see comment in spaces.h) is
+// skipped since it can contain absolutely anything.  Any objects that are
+// allocated during iteration may or may not be visited by the iteration, but
+// they will not be partially visited.
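+//
+// A sketch of a page as seen by this function:
+//
+//   | objects | free-space filler | objects | special garbage section |
+//
+// where the special garbage section is the region between space->top() and
+// space->limit(), which is skipped rather than scanned.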
+void StoreBuffer::FindPointersToNewSpaceOnPage(
+    PagedSpace* space,
+    Page* page,
+    RegionCallback region_callback,
+    ObjectSlotCallback slot_callback) {
+  Address visitable_start = page->area_start();
+  Address end_of_page = page->area_end();
+
+  Address visitable_end = visitable_start;
+
+  Object* free_space_map = heap_->free_space_map();
+  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
+
+  while (visitable_end < end_of_page) {
+    Object* o = *reinterpret_cast<Object**>(visitable_end);
+    // Skip fillers but not things that look like fillers in the special
+    // garbage section which can contain anything.
+    if (o == free_space_map ||
+        o == two_pointer_filler_map ||
+        (visitable_end == space->top() && visitable_end != space->limit())) {
+      if (visitable_start != visitable_end) {
+        // After calling this the special garbage section may have moved.
+        (this->*region_callback)(visitable_start,
+                                 visitable_end,
+                                 slot_callback);
+        if (visitable_end >= space->top() && visitable_end < space->limit()) {
+          visitable_end = space->limit();
+          visitable_start = visitable_end;
+          continue;
+        }
+      }
+      if (visitable_end == space->top() && visitable_end != space->limit()) {
+        visitable_start = visitable_end = space->limit();
+      } else {
+        // At this point we are either at the start of a filler or we are at
+        // the point where the space->top() used to be before the
+        // visit_pointer_region call above.  Either way we can skip the
+        // object at the current spot:  We don't promise to visit objects
+        // allocated during heap traversal, and if space->top() moved then it
+        // must be because an object was allocated at this point.
+        visitable_start =
+            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
+        visitable_end = visitable_start;
+      }
+    } else {
+      ASSERT(o != free_space_map);
+      ASSERT(o != two_pointer_filler_map);
+      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
+      visitable_end += kPointerSize;
+    }
+  }
+  ASSERT(visitable_end == end_of_page);
+  if (visitable_start != visitable_end) {
+    (this->*region_callback)(visitable_start,
+                             visitable_end,
+                             slot_callback);
+  }
+}
+
+
+void StoreBuffer::IteratePointersInStoreBuffer(
+    ObjectSlotCallback slot_callback) {
+  Address* limit = old_top_;
+  old_top_ = old_start_;
+  {
+    DontMoveStoreBufferEntriesScope scope(this);
+    for (Address* current = old_start_; current < limit; current++) {
+#ifdef DEBUG
+      Address* saved_top = old_top_;
+#endif
+      Object** slot = reinterpret_cast<Object**>(*current);
+      Object* object = *slot;
+      if (heap_->InFromSpace(object)) {
+        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+        if (heap_->InNewSpace(*slot)) {
+          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
+        }
+      }
+      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
+    }
+  }
+}
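+
+// Note the interplay with EnterDirectlyIntoStoreBuffer(): old_top_ is reset
+// to old_start_ before the loop above, so the slot callback rebuilds the
+// old buffer in place.  The write position can never overtake the read
+// position because each processed entry re-enters at most one address (see
+// the ASSERT at the bottom of the loop).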
+
+
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
+  // We do not sort or remove duplicated entries from the store buffer
+  // because we expect the callback to rebuild the store buffer, thus
+  // removing all duplicates and pointers to old space.
+  bool some_pages_to_scan = PrepareForIteration();
+
+  // TODO(gc): we want to skip slots on evacuation candidates
+  // but we can't simply figure that out from slot address
+  // because slot can belong to a large object.
+  IteratePointersInStoreBuffer(slot_callback);
+
+  // We are done scanning all the pointers that were in the store buffer, but
+  // there may be some pages marked scan_on_scavenge that have pointers to new
+  // space that are not in the store buffer.  We must scan them now.  As we
+  // scan, the surviving pointers to new space will be added to the store
+  // buffer.  If there are still a lot of pointers to new space then we will
+  // keep the scan_on_scavenge flag on the page and discard the pointers that
+  // were added to the store buffer.  If there are not many pointers to new
+  // space left on the page we will keep the pointers in the store buffer and
+  // remove the flag from the page.
+  if (some_pages_to_scan) {
+    if (callback_ != NULL) {
+      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
+    }
+    PointerChunkIterator it(heap_);
+    MemoryChunk* chunk;
+    while ((chunk = it.next()) != NULL) {
+      if (chunk->scan_on_scavenge()) {
+        chunk->set_scan_on_scavenge(false);
+        if (callback_ != NULL) {
+          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
+        }
+        if (chunk->owner() == heap_->lo_space()) {
+          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
+          HeapObject* array = large_page->GetObject();
+          ASSERT(array->IsFixedArray());
+          Address start = array->address();
+          Address end = start + array->Size();
+          FindPointersToNewSpaceInRegion(start, end, slot_callback);
+        } else {
+          Page* page = reinterpret_cast<Page*>(chunk);
+          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+          FindPointersToNewSpaceOnPage(
+              owner,
+              page,
+              (owner == heap_->map_space() ?
+                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
+                 &StoreBuffer::FindPointersToNewSpaceInRegion),
+              slot_callback);
+        }
+      }
+    }
+    if (callback_ != NULL) {
+      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
+    }
+  }
+}
+
+
+void StoreBuffer::Compact() {
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+
+  if (top == start_) return;
+
+  // There's no check of the limit in the loop below so we check here for
+  // the worst case (compaction doesn't eliminate any pointers).
+  ASSERT(top <= limit_);
+  heap_->public_set_store_buffer_top(start_);
+  EnsureSpace(top - start_);
+  ASSERT(may_move_store_buffer_entries_);
+  // Goes through the addresses in the store buffer attempting to remove
+  // duplicates.  In the interest of speed this is a lossy operation.  Some
+  // duplicates will remain.  We have two hash sets with different hash
+  // functions to reduce the number of unnecessary clashes.
+  hash_sets_are_empty_ = false;  // Hash sets are in use.
+  for (Address* current = start_; current < top; current++) {
+    ASSERT(!heap_->cell_space()->Contains(*current));
+    ASSERT(!heap_->code_space()->Contains(*current));
+    ASSERT(!heap_->old_data_space()->Contains(*current));
+    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
+    // Shift out the last bits including any tags.
+    int_addr >>= kPointerSizeLog2;
+    int hash1 =
+        ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+    if (hash_set_1_[hash1] == int_addr) continue;
+    uintptr_t hash2 = (int_addr - (int_addr >> kHashSetLengthLog2));
+    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
+    hash2 &= (kHashSetLength - 1);
+    if (hash_set_2_[hash2] == int_addr) continue;
+    if (hash_set_1_[hash1] == 0) {
+      hash_set_1_[hash1] = int_addr;
+    } else if (hash_set_2_[hash2] == 0) {
+      hash_set_2_[hash2] = int_addr;
+    } else {
+      // Rather than slowing down we just throw away some entries.  This will
+      // cause some duplicates to remain undetected.
+      hash_set_1_[hash1] = int_addr;
+      hash_set_2_[hash2] = 0;
+    }
+    old_buffer_is_sorted_ = false;
+    old_buffer_is_filtered_ = false;
+    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
+    ASSERT(old_top_ <= old_limit_);
+  }
+  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
+  CheckForFullBuffer();
+}
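+
+// Dedup sketch for Compact(): let a be the pointer value shifted down by
+// kPointerSizeLog2.  With kHashSetLengthLog2 == 12 the two probes are
+//   hash1 = (a ^ (a >> 12)) & 4095
+//   hash2 = ((a - (a >> 12)) ^ ((a - (a >> 12)) >> 24)) & 4095
+// and an entry is skipped if either set already holds a.  A clash in both
+// sets merely lets a duplicate survive, so correctness does not depend on
+// the sets being exact.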
+
+
+void StoreBuffer::CheckForFullBuffer() {
+  EnsureSpace(kStoreBufferSize * 2);
+}
+
+} }  // namespace v8::internal
diff --git a/src/store-buffer.h b/src/store-buffer.h
new file mode 100644
index 0000000..951a9ca
--- /dev/null
+++ b/src/store-buffer.h
@@ -0,0 +1,255 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_STORE_BUFFER_H_
+#define V8_STORE_BUFFER_H_
+
+#include "allocation.h"
+#include "checks.h"
+#include "globals.h"
+#include "platform.h"
+#include "v8globals.h"
+
+namespace v8 {
+namespace internal {
+
+class StoreBuffer;
+
+typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
+
+typedef void (StoreBuffer::*RegionCallback)(
+    Address start, Address end, ObjectSlotCallback slot_callback);
+
+// Used to implement the write barrier by collecting addresses of pointers
+// between spaces.
+class StoreBuffer {
+ public:
+  explicit StoreBuffer(Heap* heap);
+
+  static void StoreBufferOverflow(Isolate* isolate);
+
+  inline Address TopAddress();
+
+  void SetUp();
+  void TearDown();
+
+  // This is used by the mutator to enter addresses into the store buffer.
+  inline void Mark(Address addr);
+
+  // This is used by the heap traversal to enter the addresses into the store
+  // buffer that should still be in the store buffer after GC.  It enters
+  // addresses directly into the old buffer because the GC starts by wiping the
+  // old buffer and thereafter only visits each cell once so there is no need
+  // to attempt to remove any duplicates.  During the first part of a GC we
+  // are using the store buffer to access the old spaces and at the same time
+  // we are rebuilding the store buffer using this function.  There is, however,
+  // no issue of overwriting the buffer we are iterating over, because this
+  // stage of the scavenge can only reduce the number of addresses in the store
+  // buffer (some objects are promoted so pointers to them do not need to be in
+  // the store buffer).  The later parts of the GC scan the pages that are
+  // exempt from the store buffer and process the promotion queue.  These steps
+  // can overflow this buffer.  We check for this and on overflow we call the
+  // callback set up with the StoreBufferRebuildScope object.
+  inline void EnterDirectlyIntoStoreBuffer(Address addr);
+
+  // Iterates over all pointers that go from old space to new space.  It will
+  // delete the store buffer as it starts so the callback should reenter
+  // surviving old-to-new pointers into the store buffer to rebuild it.
+  void IteratePointersToNewSpace(ObjectSlotCallback callback);
+
+  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
+  static const int kStoreBufferSize = kStoreBufferOverflowBit;
+  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
+  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
+  static const int kHashSetLengthLog2 = 12;
+  static const int kHashSetLength = 1 << kHashSetLengthLog2;
+
+  void Compact();
+
+  void GCPrologue();
+  void GCEpilogue();
+
+  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
+  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
+  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
+  void SetTop(Object*** top) {
+    ASSERT(top >= Start());
+    ASSERT(top <= Limit());
+    old_top_ = reinterpret_cast<Address*>(top);
+  }
+
+  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
+  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
+
+  // Goes through the store buffer removing pointers to things that have
+  // been promoted.  Rebuilds the store buffer completely if it overflowed.
+  void SortUniq();
+
+  void EnsureSpace(intptr_t space_needed);
+  void Verify();
+
+  bool PrepareForIteration();
+
+#ifdef DEBUG
+  void Clean();
+  // Slow, for asserts only.
+  bool CellIsInStoreBuffer(Address cell);
+#endif
+
+  void Filter(int flag);
+
+ private:
+  Heap* heap_;
+
+  // The store buffer is divided up into a new buffer that is constantly being
+  // filled by mutator activity and an old buffer that is filled with the data
+  // from the new buffer after compression.
+  Address* start_;
+  Address* limit_;
+
+  Address* old_start_;
+  Address* old_limit_;
+  Address* old_top_;
+  Address* old_reserved_limit_;
+  VirtualMemory* old_virtual_memory_;
+
+  bool old_buffer_is_sorted_;
+  bool old_buffer_is_filtered_;
+  bool during_gc_;
+  // The garbage collector iterates over many pointers to new space that are not
+  // handled by the store buffer.  This flag indicates whether the pointers
+  // found by the callbacks should be added to the store buffer or not.
+  bool store_buffer_rebuilding_enabled_;
+  StoreBufferCallback callback_;
+  bool may_move_store_buffer_entries_;
+
+  VirtualMemory* virtual_memory_;
+
+  // Two hash sets used for filtering.
+  // If an address is in the hash set, then it is guaranteed to be in the
+  // old part of the store buffer.
+  uintptr_t* hash_set_1_;
+  uintptr_t* hash_set_2_;
+  bool hash_sets_are_empty_;
+
+  void ClearFilteringHashSets();
+
+  void CheckForFullBuffer();
+  void Uniq();
+  void ExemptPopularPages(int prime_sample_step, int threshold);
+
+  void FindPointersToNewSpaceInRegion(Address start,
+                                      Address end,
+                                      ObjectSlotCallback slot_callback);
+
+  // For each region of pointers on a page in use from an old space, call
+  // the visit_pointer_region callback.
+  // If either visit_pointer_region or callback can cause an allocation in
+  // old space (and thus a change in the allocation watermark), then
+  // can_preallocate_during_iteration should be set to true.
+  void IteratePointersOnPage(
+      PagedSpace* space,
+      Page* page,
+      RegionCallback region_callback,
+      ObjectSlotCallback slot_callback);
+
+  void FindPointersToNewSpaceInMaps(
+      Address start,
+      Address end,
+      ObjectSlotCallback slot_callback);
+
+  void FindPointersToNewSpaceInMapsRegion(
+      Address start,
+      Address end,
+      ObjectSlotCallback slot_callback);
+
+  void FindPointersToNewSpaceOnPage(
+      PagedSpace* space,
+      Page* page,
+      RegionCallback region_callback,
+      ObjectSlotCallback slot_callback);
+
+  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
+
+#ifdef DEBUG
+  void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
+  void VerifyPointers(LargeObjectSpace* space);
+#endif
+
+  friend class StoreBufferRebuildScope;
+  friend class DontMoveStoreBufferEntriesScope;
+};
+
+
+class StoreBufferRebuildScope {
+ public:
+  explicit StoreBufferRebuildScope(Heap* heap,
+                                   StoreBuffer* store_buffer,
+                                   StoreBufferCallback callback)
+      : heap_(heap),
+        store_buffer_(store_buffer),
+        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
+        stored_callback_(store_buffer->callback_) {
+    store_buffer_->store_buffer_rebuilding_enabled_ = true;
+    store_buffer_->callback_ = callback;
+    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
+  }
+
+  ~StoreBufferRebuildScope() {
+    store_buffer_->callback_ = stored_callback_;
+    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
+    store_buffer_->CheckForFullBuffer();
+  }
+
+ private:
+  Heap* heap_;
+  StoreBuffer* store_buffer_;
+  bool stored_state_;
+  StoreBufferCallback stored_callback_;
+};
+
+
+class DontMoveStoreBufferEntriesScope {
+ public:
+  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer),
+        stored_state_(store_buffer->may_move_store_buffer_entries_) {
+    store_buffer_->may_move_store_buffer_entries_ = false;
+  }
+
+  ~DontMoveStoreBufferEntriesScope() {
+    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
+  }
+
+ private:
+  StoreBuffer* store_buffer_;
+  bool stored_state_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_STORE_BUFFER_H_
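
A note on the constants above: kStoreBufferSize is defined to equal
kStoreBufferOverflowBit, which suggests the usual trick of placing the buffer
in a region aligned to twice its size, so the write-barrier fast path can
detect a full buffer with one bit test on the top pointer instead of a limit
comparison. A minimal sketch of that technique, under the stated alignment
assumption (illustrative names and sizes, not V8's actual SetUp code):

    #include <cstdint>

    static const uintptr_t kBufferSize = 1 << 17;  // power of two

    struct MarkBuffer {
      // Assumed to point into a 2 * kBufferSize region aligned to
      // 2 * kBufferSize, so (start address & kBufferSize) == 0.
      uintptr_t* start;
      uintptr_t* top;

      void Mark(uintptr_t addr) {
        *top++ = addr;
        // Crossing into the second half of the aligned region sets this
        // bit; no explicit comparison against a limit is needed.
        if (reinterpret_cast<uintptr_t>(top) & kBufferSize) Overflow();
      }

      void Overflow() {
        // Stand-in for compaction: a real implementation would drain the
        // entries (cf. StoreBufferOverflow/Compact) before resetting.
        top = start;
      }
    };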
diff --git a/src/string-search.h b/src/string-search.h
index 1223db0..8c3456a 100644
--- a/src/string-search.h
+++ b/src/string-search.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -242,9 +242,9 @@
 
 
 template <typename PatternChar, typename SubjectChar>
-static inline bool CharCompare(const PatternChar* pattern,
-                               const SubjectChar* subject,
-                               int length) {
+inline bool CharCompare(const PatternChar* pattern,
+                        const SubjectChar* subject,
+                        int length) {
   ASSERT(length > 0);
   int pos = 0;
   do {
@@ -369,6 +369,10 @@
   shift_table[pattern_length] = 1;
   suffix_table[pattern_length] = pattern_length + 1;
 
+  if (pattern_length <= start) {
+    return;
+  }
+
   // Find suffixes.
   PatternChar last_char = pattern[pattern_length - 1];
   int suffix = pattern_length + 1;
@@ -555,10 +559,10 @@
 // object should be constructed once and the Search function then called
 // for each search.
 template <typename SubjectChar, typename PatternChar>
-static int SearchString(Isolate* isolate,
-                        Vector<const SubjectChar> subject,
-                        Vector<const PatternChar> pattern,
-                        int start_index) {
+int SearchString(Isolate* isolate,
+                 Vector<const SubjectChar> subject,
+                 Vector<const PatternChar> pattern,
+                 int start_index) {
   StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
   return search.Search(subject, start_index);
 }
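
For orientation, the suffix and shift tables populated in the hunk above
drive a Boyer-Moore variant. A minimal sketch of its simpler single-table
relative, Boyer-Moore-Horspool, shows the role a bad-character shift table
plays (standalone illustrative code, not the templated StringSearch class):

    #include <string>

    int HorspoolSearch(const std::string& subject, const std::string& pattern) {
      const int m = static_cast<int>(pattern.size());
      const int n = static_cast<int>(subject.size());
      if (m == 0) return 0;
      if (m > n) return -1;

      // Bad-character table: how far to shift when the subject character
      // aligned with the last pattern position is c.
      int shift[256];
      for (int i = 0; i < 256; ++i) shift[i] = m;
      for (int i = 0; i < m - 1; ++i) {
        shift[static_cast<unsigned char>(pattern[i])] = m - 1 - i;
      }

      int pos = 0;
      while (pos <= n - m) {
        int j = m - 1;
        while (j >= 0 && subject[pos + j] == pattern[j]) --j;
        if (j < 0) return pos;  // match at pos
        pos += shift[static_cast<unsigned char>(subject[pos + m - 1])];
      }
      return -1;
    }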
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 8086cf9..35f7be5 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -350,29 +350,24 @@
   }
   DescriptorArray* descs = map->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    switch (descs->GetType(i)) {
-      case FIELD: {
-        Object* key = descs->GetKey(i);
-        if (key->IsString() || key->IsNumber()) {
-          int len = 3;
-          if (key->IsString()) {
-            len = String::cast(key)->length();
-          }
-          for (; len < 18; len++)
-            Put(' ');
-          if (key->IsString()) {
-            Put(String::cast(key));
-          } else {
-            key->ShortPrint();
-          }
-          Add(": ");
-          Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
-          Add("%o\n", value);
+    if (descs->GetType(i) == FIELD) {
+      Object* key = descs->GetKey(i);
+      if (key->IsString() || key->IsNumber()) {
+        int len = 3;
+        if (key->IsString()) {
+          len = String::cast(key)->length();
         }
+        for (; len < 18; len++)
+          Put(' ');
+        if (key->IsString()) {
+          Put(String::cast(key));
+        } else {
+          key->ShortPrint();
+        }
+        Add(": ");
+        Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
+        Add("%o\n", value);
       }
-      break;
-      default:
-      break;
     }
   }
 }
diff --git a/src/string.js b/src/string.js
index 297105d..84dde3d 100644
--- a/src/string.js
+++ b/src/string.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,8 +28,8 @@
 
 // This file relies on the fact that the following declaration has been made
 // in runtime.js:
-// const $String = global.String;
-// const $NaN = 0/0;
+// var $String = global.String;
+// var $NaN = 0/0;
 
 
 // Set the String function and constructor.
@@ -46,16 +46,18 @@
 
 // ECMA-262 section 15.5.4.2
 function StringToString() {
-  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
+  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
     throw new $TypeError('String.prototype.toString is not generic');
+  }
   return %_ValueOf(this);
 }
 
 
 // ECMA-262 section 15.5.4.3
 function StringValueOf() {
-  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
+  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
     throw new $TypeError('String.prototype.valueOf is not generic');
+  }
   return %_ValueOf(this);
 }
 
@@ -91,7 +93,8 @@
 // ECMA-262, section 15.5.4.6
 function StringConcat() {
   if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined", ["String.prototype.concat"]);
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.concat"]);
   }
   var len = %_ArgumentsLength();
   var this_as_string = TO_STRING_INLINE(this);
@@ -241,6 +244,15 @@
 
   // Convert the search argument to a string and search for it.
   search = TO_STRING_INLINE(search);
+  if (search.length == 1 &&
+      subject.length > 0xFF &&
+      IS_STRING(replace) &&
+      %StringIndexOf(replace, '$', 0) < 0) {
+    // Searching by traversing a cons string tree and replacing with a cons
+    // of slices works only when a single character is being replaced by a
+    // simple string, and it only pays off for long subject strings.
+    return %StringReplaceOneCharWithString(subject, search, replace);
+  }
   var start = %StringIndexOf(subject, search, 0);
   if (start < 0) return subject;
   var end = start + search.length;
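
The fast path added above fires only when the search string is a single
character, the subject is long, and the replacement is a plain string with no
'$' substitution patterns. A sketch of what %StringReplaceOneCharWithString
computes in that case (hypothetical eager helper; the real runtime function
traverses a cons-string tree lazily and builds a cons of slices):

    #include <string>

    std::string ReplaceFirstChar(const std::string& subject, char search,
                                 const std::string& replace) {
      const std::string::size_type pos = subject.find(search);
      if (pos == std::string::npos) return subject;
      // Conceptually a cons of three slices:
      //   subject[0, pos) + replace + subject[pos + 1, end)
      return subject.substr(0, pos) + replace + subject.substr(pos + 1);
    }

For example, ReplaceFirstChar("a,b,c", ',', " - ") yields "a - b,c", matching
the first-occurrence semantics of non-global String.prototype.replace.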
@@ -358,7 +370,7 @@
       builder_elements.push(SubString(string, position, next));
     }
   }
-};
+}
 
 
 // Compute the string of a given regular expression capture.
@@ -371,7 +383,7 @@
   if (start < 0) return;
   var end = lastCaptureInfo[CAPTURE(scaled + 1)];
   return SubString(string, start, end);
-};
+}
 
 
 // Add the string of a given regular expression capture to the
@@ -384,7 +396,7 @@
   if (start < 0) return;
   var end = matchInfo[CAPTURE(scaled + 1)];
   builder.addSpecialSlice(start, end);
-};
+}
 
 // TODO(lrn): This array will survive indefinitely if replace is never
 // called again. However, it will be empty, since the contents are cleared
@@ -531,32 +543,37 @@
   var s_len = s.length;
   var start_i = TO_INTEGER(start);
   var end_i = s_len;
-  if (end !== void 0)
+  if (end !== void 0) {
     end_i = TO_INTEGER(end);
+  }
 
   if (start_i < 0) {
     start_i += s_len;
-    if (start_i < 0)
+    if (start_i < 0) {
       start_i = 0;
+    }
   } else {
-    if (start_i > s_len)
-      start_i = s_len;
+    if (start_i > s_len) {
+      return '';
+    }
   }
 
   if (end_i < 0) {
     end_i += s_len;
-    if (end_i < 0)
-      end_i = 0;
+    if (end_i < 0) {
+      return '';
+    }
   } else {
-    if (end_i > s_len)
+    if (end_i > s_len) {
       end_i = s_len;
+    }
   }
 
-  var num_c = end_i - start_i;
-  if (num_c < 0)
-    num_c = 0;
+  if (end_i <= start_i) {
+    return '';
+  }
 
-  return SubString(s, start_i, start_i + num_c);
+  return SubString(s, start_i, end_i);
 }
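
The rewritten clamping above is behavior-preserving: the old code clamped
start_i to s_len and let SubString produce the empty result, while the new
code returns '' as soon as an empty slice is inevitable. A compact model of
the full index normalization (hypothetical helper mirroring ECMA-262
String.prototype.slice):

    #include <string>

    std::string Slice(const std::string& s, long start, long end) {
      const long len = static_cast<long>(s.size());
      if (start < 0) {
        start += len;
        if (start < 0) start = 0;  // clamp to the front
      } else if (start > len) {
        return "";                 // early exit added by this patch
      }
      if (end < 0) {
        end += len;
        if (end < 0) return "";    // slice ends before the string starts
      } else if (end > len) {
        end = len;
      }
      if (end <= start) return "";
      return s.substr(start, end - start);
    }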
 
 
@@ -568,20 +585,19 @@
   }
   var subject = TO_STRING_INLINE(this);
   limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
-  if (limit === 0) return [];
 
   // ECMA-262 says that if separator is undefined, the result should
-  // be an array of size 1 containing the entire string.  SpiderMonkey
-  // and KJS have this behavior only when no separator is given.  If
-  // undefined is explicitly given, they convert it to a string and
-  // use that.  We do as SpiderMonkey and KJS.
-  if (%_ArgumentsLength() === 0) {
+  // be an array of size 1 containing the entire string.
+  if (IS_UNDEFINED(separator)) {
     return [subject];
   }
 
   var length = subject.length;
   if (!IS_REGEXP(separator)) {
     separator = TO_STRING_INLINE(separator);
+
+    if (limit === 0) return [];
+
     var separator_length = separator.length;
 
     // If the separator string is empty then return the elements in the subject.
@@ -592,6 +608,14 @@
     return result;
   }
 
+  if (limit === 0) return [];
+
+  // Separator is a regular expression.
+  return StringSplitOnRegExp(subject, separator, limit, length);
+}
+
+
+function StringSplitOnRegExp(subject, separator, limit, length) {
   %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
 
   if (length === 0) {
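
The reordered checks above are a behavior change: an explicitly passed
undefined separator now yields the whole subject as a one-element array (per
ECMA-262), where the old code keyed off the argument count and would
otherwise stringify undefined and split on the string "undefined". A
schematic of the new dispatch order (hypothetical C++ model of the JS logic;
std::optional stands in for undefined):

    #include <cstdint>
    #include <optional>
    #include <string>
    #include <vector>

    std::vector<std::string> Split(const std::string& subject,
                                   const std::optional<std::string>& separator,
                                   uint32_t limit) {
      // An undefined separator returns [subject], even when undefined (or
      // a limit) was passed explicitly.
      if (!separator.has_value()) return {subject};
      // The limit is consulted only once the separator is known.
      if (limit == 0) return {};
      // ... the string and regexp splitting paths follow here ...
      return {subject};
    }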
@@ -688,7 +712,7 @@
     }
   }
 
-  return (start_i + 1 == end_i
+  return ((start_i + 1 == end_i)
           ? %_StringCharAt(s, start_i)
           : %_SubString(s, start_i, end_i));
 }
@@ -732,7 +756,7 @@
   var end = start + len;
   if (end > s.length) end = s.length;
 
-  return (start + 1 == end
+  return ((start + 1 == end)
           ? %_StringCharAt(s, start)
           : %_SubString(s, start, end));
 }
@@ -832,7 +856,7 @@
                               .replace(/>/g, "&gt;")
                               .replace(/"/g, "&quot;")
                               .replace(/'/g, "&#039;");
-};
+}
 
 
 // Compatibility support for KJS.
@@ -953,7 +977,7 @@
 
 
   // Set up the non-enumerable functions on the String prototype object.
-  InstallFunctionsOnHiddenPrototype($String.prototype, DONT_ENUM, $Array(
+  InstallFunctions($String.prototype, DONT_ENUM, $Array(
     "valueOf", StringValueOf,
     "toString", StringToString,
     "charAt", StringCharAt,
diff --git a/src/strtod.cc b/src/strtod.cc
index c89c8f3..0dc618a 100644
--- a/src/strtod.cc
+++ b/src/strtod.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,7 +27,6 @@
 
 #include <stdarg.h>
 #include <math.h>
-#include <limits>
 
 #include "globals.h"
 #include "utils.h"
@@ -176,13 +175,15 @@
 static bool DoubleStrtod(Vector<const char> trimmed,
                          int exponent,
                          double* result) {
-#if (defined(V8_TARGET_ARCH_IA32) || defined(USE_SIMULATOR)) && !defined(WIN32)
+#if (defined(V8_TARGET_ARCH_IA32) || defined(USE_SIMULATOR)) \
+    && !defined(_MSC_VER)
   // On x86 the floating-point stack can be 64 or 80 bits wide. If it is
   // 80 bits wide (as is the case on Linux) then double-rounding occurs and the
   // result is not accurate.
-  // We know that Windows32 uses 64 bits and is therefore accurate.
-  // Note that the ARM simulator is compiled for 32bits. It therefore exhibits
-  // the same problem.
+  // We know that Windows32 with MSVC, unlike with MinGW32, uses 64 bits and is
+  // therefore accurate.
+  // Note that the ARM and MIPS simulators are compiled for 32 bits. They
+  // therefore exhibit the same problem.
   return false;
 #endif
   if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
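
For context, the code below the #if is the classic exact-double fast path:
when the digit string fits in a double's 53-bit significand (at most 15
decimal digits, the role of kMaxExactDoubleIntegerDecimalDigits) and the
exponent stays within the exactly representable powers of ten, a single
multiply or divide gives a correctly rounded result. A sketch under those
assumptions (standalone code, not V8's DoubleStrtod):

    #include <cstdint>

    bool FastStrtod(uint64_t digits, int exponent, double* result) {
      // Integers up to 2^53 (which covers all 15-digit decimals) convert
      // to double without rounding.
      if (digits > (uint64_t{1} << 53)) return false;
      // Powers of ten up to 10^22 are exactly representable as doubles.
      if (exponent < -22 || exponent > 22) return false;
      double power = 1.0;
      for (int i = 0; i < (exponent < 0 ? -exponent : exponent); ++i) {
        power *= 10.0;  // exact at every step for |exponent| <= 22
      }
      const double d = static_cast<double>(digits);
      *result = (exponent >= 0) ? d * power : d / power;  // one rounding step
      return true;
    }

The single IEEE operation at the end is what an 80-bit x87 stack breaks: the
product may be rounded first to 80 bits and then to 64, occasionally landing
one ulp away from the correctly rounded value, hence the MinGW32 exclusion
added above.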
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index cdb4874..3371b1b 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -45,18 +45,13 @@
 
 StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {
   ASSERT(isolate == Isolate::Current());
-  memset(primary_, 0, sizeof(primary_[0]) * StubCache::kPrimaryTableSize);
-  memset(secondary_, 0, sizeof(secondary_[0]) * StubCache::kSecondaryTableSize);
 }
 
 
-void StubCache::Initialize(bool create_heap_objects) {
+void StubCache::Initialize() {
   ASSERT(IsPowerOf2(kPrimaryTableSize));
   ASSERT(IsPowerOf2(kSecondaryTableSize));
-  if (create_heap_objects) {
-    HandleScope scope;
-    Clear();
-  }
+  Clear();
 }
 
 
@@ -82,14 +77,15 @@
   // Compute the primary entry.
   int primary_offset = PrimaryOffset(name, flags, map);
   Entry* primary = entry(primary_, primary_offset);
-  Code* hit = primary->value;
+  Code* old_code = primary->value;
 
   // If the primary entry has useful data in it, we retire it to the
   // secondary cache before overwriting it.
-  if (hit != isolate_->builtins()->builtin(Builtins::kIllegal)) {
-    Code::Flags primary_flags = Code::RemoveTypeFromFlags(hit->flags());
-    int secondary_offset =
-        SecondaryOffset(primary->key, primary_flags, primary_offset);
+  if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
+    Map* old_map = primary->map;
+    Code::Flags old_flags = Code::RemoveTypeFromFlags(old_code->flags());
+    int seed = PrimaryOffset(primary->key, old_flags, old_map);
+    int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
     Entry* secondary = entry(secondary_, secondary_offset);
     *secondary = *primary;
   }
@@ -97,12 +93,14 @@
   // Update primary cache.
   primary->key = name;
   primary->value = code;
+  primary->map = map;
+  isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
   return code;
 }
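
The retire logic above is the heart of the two-way megamorphic stub cache: a
new entry always claims its primary slot, and the entry it displaces gets one
second chance in the secondary table, whose slot is now re-derived from the
displaced entry's own key, flags, and map rather than from the incoming one.
A standalone sketch of the scheme (hypothetical hash functions, integral
keys, power-of-two sizes; the real offsets also mix in Code::Flags and the
map):

    #include <cstddef>

    template <typename Key, typename Value, size_t kPrimarySize,
              size_t kSecondarySize>
    class TwoLevelCache {
     public:
      void Put(Key key, Value value) {
        Entry* primary = &primary_[Hash1(key) & (kPrimarySize - 1)];
        if (primary->valid) {
          // Retire the displaced entry, recomputing its secondary slot
          // from its own key (cf. the seed recomputation above).
          secondary_[Hash2(primary->key) & (kSecondarySize - 1)] = *primary;
        }
        *primary = Entry{key, value, true};
      }

      bool Get(Key key, Value* out) const {
        const Entry& p = primary_[Hash1(key) & (kPrimarySize - 1)];
        if (p.valid && p.key == key) { *out = p.value; return true; }
        const Entry& s = secondary_[Hash2(key) & (kSecondarySize - 1)];
        if (s.valid && s.key == key) { *out = s.value; return true; }
        return false;  // miss: the caller falls back to the slow path
      }

     private:
      struct Entry { Key key; Value value; bool valid; };
      static size_t Hash1(Key k) {
        return static_cast<size_t>(k) * 2654435761u;
      }
      static size_t Hash2(Key k) {
        size_t h = static_cast<size_t>(k);
        return h ^ (h >> 7);
      }
      Entry primary_[kPrimarySize] = {};
      Entry secondary_[kSecondarySize] = {};
    };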
 
 
-MaybeObject* StubCache::ComputeLoadNonexistent(String* name,
-                                               JSObject* receiver) {
+Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
+                                               Handle<JSObject> receiver) {
   ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties());
   // If no global objects are present in the prototype chain, the load
   // nonexistent IC stub can be shared for all names for a given map
@@ -110,558 +108,446 @@
   // there are global objects involved, we need to check global
   // property cells in the stub and therefore the stub will be
   // specific to the name.
-  String* cache_name = heap()->empty_string();
+  Handle<String> cache_name = factory()->empty_string();
   if (receiver->IsGlobalObject()) cache_name = name;
-  JSObject* last = receiver;
+  Handle<JSObject> last = receiver;
   while (last->GetPrototype() != heap()->null_value()) {
-    last = JSObject::cast(last->GetPrototype());
+    last = Handle<JSObject>(JSObject::cast(last->GetPrototype()));
     if (last->IsGlobalObject()) cache_name = name;
   }
   // Compile the stub that is either shared for all names or
   // name specific if there are global objects involved.
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, NONEXISTENT);
-  Object* code = receiver->map()->FindInCodeCache(cache_name, flags);
-  if (code->IsUndefined()) {
-    LoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadNonexistent(cache_name, receiver, last);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, cache_name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(cache_name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadNonexistent(cache_name, receiver, last);
+  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *cache_name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *cache_name, *code));
+  JSObject::UpdateMapCodeCache(receiver, cache_name, code);
   return code;
 }
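
ComputeLoadNonexistent above, like every handlified Compute* function after
it, now reduces to the same compute-if-absent shape: probe the map's code
cache, compile a stub on a miss, log it, and record it for next time. A
schematic of that shape (hypothetical types; the real key pairs the property
name with Code::Flags, and the Handle-based API drops the old MaybeObject
failure plumbing):

    #include <map>
    #include <memory>
    #include <string>
    #include <utility>

    struct Code {};
    using CodeHandle = std::shared_ptr<Code>;

    class ComputeIfAbsentCache {
     public:
      CodeHandle Compute(const std::string& name, int flags) {
        auto it = cache_.find({name, flags});
        if (it != cache_.end()) return it->second;  // probe hit

        CodeHandle code = Compile(name, flags);     // compile on miss ...
        cache_[{name, flags}] = code;               // ... and remember it
        return code;
      }

     private:
      CodeHandle Compile(const std::string&, int) {
        return std::make_shared<Code>();
      }
      std::map<std::pair<std::string, int>, CodeHandle> cache_;
    };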
 
 
-MaybeObject* StubCache::ComputeLoadField(String* name,
-                                         JSObject* receiver,
-                                         JSObject* holder,
+Handle<Code> StubCache::ComputeLoadField(Handle<String> name,
+                                         Handle<JSObject> receiver,
+                                         Handle<JSObject> holder,
                                          int field_index) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    LoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadField(receiver, holder, field_index, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadField(receiver, holder, field_index, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeLoadCallback(String* name,
-                                            JSObject* receiver,
-                                            JSObject* holder,
-                                            AccessorInfo* callback) {
+Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
+                                            Handle<JSObject> receiver,
+                                            Handle<JSObject> holder,
+                                            Handle<AccessorInfo> callback) {
   ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    LoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadCallback(name, receiver, holder, callback);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadCallback(name, receiver, holder, callback);
+  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeLoadConstant(String* name,
-                                            JSObject* receiver,
-                                            JSObject* holder,
-                                            Object* value) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
+                                            Handle<JSObject> receiver,
+                                            Handle<JSObject> holder,
+                                            Handle<JSFunction> value) {
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    LoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadConstant(receiver, holder, value, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadConstant(receiver, holder, value, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeLoadInterceptor(String* name,
-                                               JSObject* receiver,
-                                               JSObject* holder) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
+                                               Handle<JSObject> receiver,
+                                               Handle<JSObject> holder) {
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    LoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadInterceptor(receiver, holder, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadInterceptor(receiver, holder, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeLoadNormal() {
-  return isolate_->builtins()->builtin(Builtins::kLoadIC_Normal);
+Handle<Code> StubCache::ComputeLoadNormal() {
+  return isolate_->builtins()->LoadIC_Normal();
 }
 
 
-MaybeObject* StubCache::ComputeLoadGlobal(String* name,
-                                          JSObject* receiver,
-                                          GlobalObject* holder,
-                                          JSGlobalPropertyCell* cell,
+Handle<Code> StubCache::ComputeLoadGlobal(Handle<String> name,
+                                          Handle<JSObject> receiver,
+                                          Handle<GlobalObject> holder,
+                                          Handle<JSGlobalPropertyCell> cell,
                                           bool is_dont_delete) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    LoadStubCompiler compiler;
-    { MaybeObject* maybe_code = compiler.CompileLoadGlobal(receiver,
-                                                           holder,
-                                                           cell,
-                                                           name,
-                                                           is_dont_delete);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
+  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadField(String* name,
-                                              JSObject* receiver,
-                                              JSObject* holder,
+Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name,
+                                              Handle<JSObject> receiver,
+                                              Handle<JSObject> holder,
                                               int field_index) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadField(name, receiver, holder, field_index);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadField(name, receiver, holder, field_index);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadConstant(String* name,
-                                                 JSObject* receiver,
-                                                 JSObject* holder,
-                                                 Object* value) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
+                                                 Handle<JSObject> receiver,
+                                                 Handle<JSObject> holder,
+                                                 Handle<JSFunction> value) {
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadConstant(name, receiver, holder, value);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadConstant(name, receiver, holder, value);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadInterceptor(String* name,
-                                                    JSObject* receiver,
-                                                    JSObject* holder) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
+                                                    Handle<JSObject> receiver,
+                                                    Handle<JSObject> holder) {
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadInterceptor(receiver, holder, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileLoadInterceptor(receiver, holder, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadCallback(String* name,
-                                                 JSObject* receiver,
-                                                 JSObject* holder,
-                                                 AccessorInfo* callback) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeKeyedLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadCallback(name, receiver, holder, callback);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadCallback(name, receiver, holder, callback);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-
-MaybeObject* StubCache::ComputeKeyedLoadArrayLength(String* name,
-                                                    JSArray* receiver) {
+Handle<Code> StubCache::ComputeKeyedLoadArrayLength(Handle<String> name,
+                                                    Handle<JSArray> receiver) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  ASSERT(receiver->IsJSObject());
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code = compiler.CompileLoadArrayLength(name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileLoadArrayLength(name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadStringLength(String* name,
-                                                     String* receiver) {
+Handle<Code> StubCache::ComputeKeyedLoadStringLength(Handle<String> name,
+                                                     Handle<String> receiver) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Map* map = receiver->map();
-  Object* code = map->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code = compiler.CompileLoadStringLength(name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result = map->UpdateCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Map> map(receiver->map());
+  Handle<Object> probe(map->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileLoadStringLength(name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  Map::UpdateCodeCache(map, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadFunctionPrototype(
-    String* name,
-    JSFunction* receiver) {
+Handle<Code> StubCache::ComputeKeyedLoadFunctionPrototype(
+    Handle<String> name,
+    Handle<JSFunction> receiver) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code = compiler.CompileLoadFunctionPrototype(name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileLoadFunctionPrototype(name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeStoreField(String* name,
-                                          JSObject* receiver,
+Handle<Code> StubCache::ComputeStoreField(Handle<String> name,
+                                          Handle<JSObject> receiver,
                                           int field_index,
-                                          Map* transition,
+                                          Handle<Map> transition,
                                           StrictModeFlag strict_mode) {
-  PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
+  PropertyType type = (transition.is_null()) ? FIELD : MAP_TRANSITION;
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, type, strict_mode);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    StoreStubCompiler compiler(strict_mode);
-    { MaybeObject* maybe_code =
-          compiler.CompileStoreField(receiver, field_index, transition, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  StoreStubCompiler compiler(isolate_, strict_mode);
+  Handle<Code> code =
+      compiler.CompileStoreField(receiver, field_index, transition, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadOrStoreElement(
-    JSObject* receiver,
-    bool is_store,
+Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
+    Handle<JSObject> receiver,
+    KeyedIC::StubKind stub_kind,
     StrictModeFlag strict_mode) {
+  KeyedAccessGrowMode grow_mode =
+      KeyedIC::GetGrowModeFromStubKind(stub_kind);
+  Code::ExtraICState extra_state =
+      Code::ComputeExtraICState(grow_mode, strict_mode);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(
-          is_store ? Code::KEYED_STORE_IC :
-                     Code::KEYED_LOAD_IC,
+          stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC
+                                     : Code::KEYED_STORE_IC,
           NORMAL,
-          strict_mode);
-  String* name = is_store
-      ? isolate()->heap()->KeyedStoreElementMonomorphic_symbol()
-      : isolate()->heap()->KeyedLoadElementMonomorphic_symbol();
-  Object* maybe_code = receiver->map()->FindInCodeCache(name, flags);
-  if (!maybe_code->IsUndefined()) return Code::cast(maybe_code);
+          extra_state);
+  Handle<String> name;
+  switch (stub_kind) {
+    case KeyedIC::LOAD:
+      name = isolate()->factory()->KeyedLoadElementMonomorphic_symbol();
+      break;
+    case KeyedIC::STORE_NO_TRANSITION:
+      name = isolate()->factory()->KeyedStoreElementMonomorphic_symbol();
+      break;
+    case KeyedIC::STORE_AND_GROW_NO_TRANSITION:
+      name = isolate()->factory()->KeyedStoreAndGrowElementMonomorphic_symbol();
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  Handle<Map> receiver_map(receiver->map());
+  Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
 
-  MaybeObject* maybe_new_code = NULL;
-  Map* receiver_map = receiver->map();
-  if (is_store) {
-    KeyedStoreStubCompiler compiler(strict_mode);
-    maybe_new_code = compiler.CompileStoreElement(receiver_map);
+  Handle<Code> code;
+  switch (stub_kind) {
+    case KeyedIC::LOAD: {
+      KeyedLoadStubCompiler compiler(isolate_);
+      code = compiler.CompileLoadElement(receiver_map);
+      break;
+    }
+    case KeyedIC::STORE_AND_GROW_NO_TRANSITION: {
+      KeyedStoreStubCompiler compiler(isolate_, strict_mode,
+                                      ALLOW_JSARRAY_GROWTH);
+      code = compiler.CompileStoreElement(receiver_map);
+      break;
+    }
+    case KeyedIC::STORE_NO_TRANSITION: {
+      KeyedStoreStubCompiler compiler(isolate_, strict_mode,
+                                      DO_NOT_ALLOW_JSARRAY_GROWTH);
+      code = compiler.CompileStoreElement(receiver_map);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  ASSERT(!code.is_null());
+
+  if (stub_kind == KeyedIC::LOAD) {
+    PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, 0));
   } else {
-    KeyedLoadStubCompiler compiler;
-    maybe_new_code = compiler.CompileLoadElement(receiver_map);
+    PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, 0));
   }
-  Code* code;
-  if (!maybe_new_code->To(&code)) return maybe_new_code;
-  if (is_store) {
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
-                            Code::cast(code), 0));
-  } else {
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
-                            Code::cast(code), 0));
-  }
-  ASSERT(code->IsCode());
-  Object* result;
-  { MaybeObject* maybe_result =
-        receiver->UpdateMapCodeCache(name, Code::cast(code));
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
-  return isolate_->builtins()->builtin((strict_mode == kStrictMode)
-                            ? Builtins::kStoreIC_Normal_Strict
-                            : Builtins::kStoreIC_Normal);
+Handle<Code> StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
+  return (strict_mode == kStrictMode)
+      ? isolate_->builtins()->StoreIC_Normal_Strict()
+      : isolate_->builtins()->StoreIC_Normal();
 }
 
 
-MaybeObject* StubCache::ComputeStoreGlobal(String* name,
-                                           GlobalObject* receiver,
-                                           JSGlobalPropertyCell* cell,
+Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name,
+                                           Handle<GlobalObject> receiver,
+                                           Handle<JSGlobalPropertyCell> cell,
                                            StrictModeFlag strict_mode) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, NORMAL, strict_mode);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    StoreStubCompiler compiler(strict_mode);
-    { MaybeObject* maybe_code =
-          compiler.CompileStoreGlobal(receiver, cell, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  StoreStubCompiler compiler(isolate_, strict_mode);
+  Handle<Code> code = compiler.CompileStoreGlobal(receiver, cell, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeStoreCallback(
-    String* name,
-    JSObject* receiver,
-    AccessorInfo* callback,
-    StrictModeFlag strict_mode) {
+Handle<Code> StubCache::ComputeStoreCallback(Handle<String> name,
+                                             Handle<JSObject> receiver,
+                                             Handle<AccessorInfo> callback,
+                                             StrictModeFlag strict_mode) {
   ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, CALLBACKS, strict_mode);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    StoreStubCompiler compiler(strict_mode);
-    { MaybeObject* maybe_code =
-          compiler.CompileStoreCallback(receiver, callback, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  StoreStubCompiler compiler(isolate_, strict_mode);
+  Handle<Code> code = compiler.CompileStoreCallback(receiver, callback, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeStoreInterceptor(
-    String* name,
-    JSObject* receiver,
-    StrictModeFlag strict_mode) {
+Handle<Code> StubCache::ComputeStoreInterceptor(Handle<String> name,
+                                                Handle<JSObject> receiver,
+                                                StrictModeFlag strict_mode) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, INTERCEPTOR, strict_mode);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    StoreStubCompiler compiler(strict_mode);
-    { MaybeObject* maybe_code =
-          compiler.CompileStoreInterceptor(receiver, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  StoreStubCompiler compiler(isolate_, strict_mode);
+  Handle<Code> code = compiler.CompileStoreInterceptor(receiver, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
-
-MaybeObject* StubCache::ComputeKeyedStoreField(String* name,
-                                               JSObject* receiver,
+Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name,
+                                               Handle<JSObject> receiver,
                                                int field_index,
-                                               Map* transition,
+                                               Handle<Map> transition,
                                                StrictModeFlag strict_mode) {
-  PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
+  PropertyType type = (transition.is_null()) ? FIELD : MAP_TRANSITION;
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::KEYED_STORE_IC, type, strict_mode);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedStoreStubCompiler compiler(strict_mode);
-    { MaybeObject* maybe_code =
-          compiler.CompileStoreField(receiver, field_index, transition, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate(),
-            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
-                            Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedStoreStubCompiler compiler(isolate(), strict_mode,
+                                  DO_NOT_ALLOW_JSARRAY_GROWTH);
+  Handle<Code> code =
+      compiler.CompileStoreField(receiver, field_index, transition, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
+
 #define CALL_LOGGER_TAG(kind, type) \
     (kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
 
-MaybeObject* StubCache::ComputeCallConstant(int argc,
+Handle<Code> StubCache::ComputeCallConstant(int argc,
                                             Code::Kind kind,
-                                            Code::ExtraICState extra_ic_state,
-                                            String* name,
-                                            Object* object,
-                                            JSObject* holder,
-                                            JSFunction* function) {
+                                            Code::ExtraICState extra_state,
+                                            Handle<String> name,
+                                            Handle<Object> object,
+                                            Handle<JSObject> holder,
+                                            Handle<JSFunction> function) {
   // Compute the check type and the map.
   InlineCacheHolderFlag cache_holder =
-      IC::GetCodeCacheForObject(object, holder);
-  JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
+      IC::GetCodeCacheForObject(*object, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
 
   // Compute check type based on receiver/holder.
   CheckType check = RECEIVER_MAP_CHECK;
@@ -673,51 +559,36 @@
     check = BOOLEAN_CHECK;
   }
 
-  Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
-                                                    CONSTANT_FUNCTION,
-                                                    extra_ic_state,
-                                                    cache_holder,
-                                                    argc);
-  Object* code = map_holder->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    // If the function hasn't been compiled yet, we cannot do it now
-    // because it may cause GC. To avoid this issue, we return an
-    // internal error which will make sure we do not update any
-    // caches.
-    if (!function->is_compiled()) return Failure::InternalError();
-    // Compile the stub - only create stubs for fully compiled functions.
-    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
-    { MaybeObject* maybe_code =
-          compiler.CompileCallConstant(object, holder, function, name, check);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    Code::cast(code)->set_check_type(check);
-    ASSERT_EQ(flags, Code::cast(code)->flags());
-    PROFILE(isolate_,
-            CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
-                            Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          map_holder->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(kind, CONSTANT_FUNCTION, extra_state,
+                                    cache_holder, argc);
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
+  Handle<Code> code =
+      compiler.CompileCallConstant(object, holder, function, name, check);
+  code->set_check_type(check);
+  ASSERT_EQ(flags, code->flags());
+  PROFILE(isolate_,
+          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallField(int argc,
+Handle<Code> StubCache::ComputeCallField(int argc,
                                          Code::Kind kind,
-                                         Code::ExtraICState extra_ic_state,
-                                         String* name,
-                                         Object* object,
-                                         JSObject* holder,
+                                         Code::ExtraICState extra_state,
+                                         Handle<String> name,
+                                         Handle<Object> object,
+                                         Handle<JSObject> holder,
                                          int index) {
   // Compute the check type and the map.
   InlineCacheHolderFlag cache_holder =
-      IC::GetCodeCacheForObject(object, holder);
-  JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
+      IC::GetCodeCacheForObject(*object, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
 
   // TODO(1233596): We cannot do receiver map check for non-JS objects
   // because they may be represented as immediates without a
@@ -726,47 +597,35 @@
     object = holder;
   }
 
-  Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
-                                                    FIELD,
-                                                    extra_ic_state,
-                                                    cache_holder,
-                                                    argc);
-  Object* code = map_holder->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
-    { MaybeObject* maybe_code =
-          compiler.CompileCallField(JSObject::cast(object),
-                                    holder,
-                                    index,
-                                    name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    ASSERT_EQ(flags, Code::cast(code)->flags());
-    PROFILE(isolate_,
-            CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
-                            Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          map_holder->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(kind, FIELD, extra_state,
+                                    cache_holder, argc);
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
+  Handle<Code> code =
+      compiler.CompileCallField(Handle<JSObject>::cast(object),
+                                holder, index, name);
+  ASSERT_EQ(flags, code->flags());
+  PROFILE(isolate_,
+          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallInterceptor(
-    int argc,
-    Code::Kind kind,
-    Code::ExtraICState extra_ic_state,
-    String* name,
-    Object* object,
-    JSObject* holder) {
+Handle<Code> StubCache::ComputeCallInterceptor(int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState extra_state,
+                                               Handle<String> name,
+                                               Handle<Object> object,
+                                               Handle<JSObject> holder) {
   // Compute the check type and the map.
   InlineCacheHolderFlag cache_holder =
-      IC::GetCodeCacheForObject(object, holder);
-  JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
+      IC::GetCodeCacheForObject(*object, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
 
   // TODO(1233596): We cannot do receiver map check for non-JS objects
   // because they may be represented as immediates without a
@@ -775,135 +634,60 @@
     object = holder;
   }
 
-  Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
-                                                    INTERCEPTOR,
-                                                    extra_ic_state,
-                                                    cache_holder,
-                                                    argc);
-  Object* code = map_holder->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
-    { MaybeObject* maybe_code =
-          compiler.CompileCallInterceptor(JSObject::cast(object), holder, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    ASSERT_EQ(flags, Code::cast(code)->flags());
-    PROFILE(isolate(),
-            CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
-                            Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          map_holder->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(kind, INTERCEPTOR, extra_state,
+                                    cache_holder, argc);
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
+  Handle<Code> code =
+      compiler.CompileCallInterceptor(Handle<JSObject>::cast(object),
+                                      holder, name);
+  ASSERT_EQ(flags, code->flags());
+  PROFILE(isolate(),
+          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallNormal(int argc,
+Handle<Code> StubCache::ComputeCallGlobal(int argc,
                                           Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state,
-                                          String* name,
-                                          JSObject* receiver) {
-  Object* code;
-  { MaybeObject* maybe_code = ComputeCallNormal(argc, kind, extra_ic_state);
-    if (!maybe_code->ToObject(&code)) return maybe_code;
-  }
-  return code;
-}
-
-
-MaybeObject* StubCache::ComputeCallGlobal(int argc,
-                                          Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state,
-                                          String* name,
-                                          JSObject* receiver,
-                                          GlobalObject* holder,
-                                          JSGlobalPropertyCell* cell,
-                                          JSFunction* function) {
+                                          Code::ExtraICState extra_state,
+                                          Handle<String> name,
+                                          Handle<JSObject> receiver,
+                                          Handle<GlobalObject> holder,
+                                          Handle<JSGlobalPropertyCell> cell,
+                                          Handle<JSFunction> function) {
   InlineCacheHolderFlag cache_holder =
-      IC::GetCodeCacheForObject(receiver, holder);
-  JSObject* map_holder = IC::GetCodeCacheHolder(receiver, cache_holder);
-  Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
-                                                    NORMAL,
-                                                    extra_ic_state,
-                                                    cache_holder,
-                                                    argc);
-  Object* code = map_holder->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    // If the function hasn't been compiled yet, we cannot do it now
-    // because it may cause GC. To avoid this issue, we return an
-    // internal error which will make sure we do not update any
-    // caches.
-    if (!function->is_compiled()) return Failure::InternalError();
-    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
-    { MaybeObject* maybe_code =
-          compiler.CompileCallGlobal(receiver, holder, cell, function, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    ASSERT_EQ(flags, Code::cast(code)->flags());
-    PROFILE(isolate(),
-            CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
-                            Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          map_holder->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+      IC::GetCodeCacheForObject(*receiver, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(kind, NORMAL, extra_state,
+                                    cache_holder, argc);
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
+  Handle<Code> code =
+      compiler.CompileCallGlobal(receiver, holder, cell, function, name);
+  ASSERT_EQ(flags, code->flags());
+  PROFILE(isolate(),
+          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
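
Every monomorphic Compute* function above now reduces to the same three steps: probe the holder map's code cache, return on a hit, otherwise compile and record the stub via JSObject::UpdateMapCodeCache. A toy version of that shape, with a std::map standing in for the per-map code cache (names are illustrative, not V8's):

#include <cstdio>
#include <map>
#include <memory>
#include <string>
#include <utility>

struct Code { int flags; };

// Stand-in for a map's code cache, keyed by (property name, code flags).
using CodeCache =
    std::map<std::pair<std::string, int>, std::shared_ptr<Code>>;

std::shared_ptr<Code> Compile(const std::string& name, int flags) {
  std::printf("compiling stub for %s\n", name.c_str());
  return std::make_shared<Code>(Code{flags});
}

std::shared_ptr<Code> ComputeStub(CodeCache& cache,
                                  const std::string& name, int flags) {
  auto it = cache.find(std::make_pair(name, flags));
  if (it != cache.end()) return it->second;  // probe hit: reuse the stub

  std::shared_ptr<Code> code = Compile(name, flags);
  cache[std::make_pair(name, flags)] = code;  // UpdateMapCodeCache analogue
  return code;
}

int main() {
  CodeCache cache;
  ComputeStub(cache, "x", 1);  // misses and compiles
  ComputeStub(cache, "x", 1);  // hits; nothing is compiled
  return 0;
}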
 
 
-static Object* GetProbeValue(Isolate* isolate, Code::Flags flags) {
-  // Use raw_unchecked... so we don't get assert failures during GC.
-  UnseededNumberDictionary* dictionary =
-      isolate->heap()->raw_unchecked_non_monomorphic_cache();
-  int entry = dictionary->FindEntry(isolate, flags);
-  if (entry != -1) return dictionary->ValueAt(entry);
-  return isolate->heap()->raw_unchecked_undefined_value();
-}
-
-
-MUST_USE_RESULT static MaybeObject* ProbeCache(Isolate* isolate,
-                                               Code::Flags flags) {
-  Heap* heap = isolate->heap();
-  Object* probe = GetProbeValue(isolate, flags);
-  if (probe != heap->undefined_value()) return probe;
-  // Seed the cache with an undefined value to make sure that any
-  // generated code object can always be inserted into the cache
-  // without causing  allocation failures.
-  Object* result;
-  { MaybeObject* maybe_result =
-        heap->non_monomorphic_cache()->AtNumberPut(flags,
-                                                   heap->undefined_value());
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  heap->public_set_non_monomorphic_cache(
-      UnseededNumberDictionary::cast(result));
-  return probe;
-}
-
-
-static MaybeObject* FillCache(Isolate* isolate, MaybeObject* maybe_code) {
-  Object* code;
-  if (maybe_code->ToObject(&code)) {
-    if (code->IsCode()) {
-      Heap* heap = isolate->heap();
-      int entry = heap->non_monomorphic_cache()->FindEntry(
-          Code::cast(code)->flags());
-      // The entry must be present see comment in ProbeCache.
-      ASSERT(entry != -1);
-      ASSERT(heap->non_monomorphic_cache()->ValueAt(entry) ==
-             heap->undefined_value());
-      heap->non_monomorphic_cache()->ValueAtPut(entry, code);
-      CHECK(GetProbeValue(isolate, Code::cast(code)->flags()) == code);
-    }
-  }
-  return maybe_code;
+static void FillCache(Isolate* isolate, Handle<Code> code) {
+  Handle<UnseededNumberDictionary> dictionary =
+      UnseededNumberDictionary::Set(isolate->factory()->non_monomorphic_cache(),
+                                    code->flags(),
+                                    code);
+  isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
 }
 
 
@@ -913,209 +697,200 @@
   Code::ExtraICState extra_state =
       CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
       CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         UNINITIALIZED,
-                                         extra_state,
-                                         NORMAL,
-                                         argc);
-  Object* result = ProbeCache(isolate(), flags)->ToObjectUnchecked();
-  ASSERT(result != heap()->undefined_value());
+  Code::Flags flags =
+      Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
+
+  // Use raw_unchecked... so we don't get assert failures during GC.
+  UnseededNumberDictionary* dictionary =
+      isolate()->heap()->raw_unchecked_non_monomorphic_cache();
+  int entry = dictionary->FindEntry(isolate(), flags);
+  ASSERT(entry != -1);
+  Object* code = dictionary->ValueAt(entry);
   // This might be called during the marking phase of the collector
   // hence the unchecked cast.
-  return reinterpret_cast<Code*>(result);
+  return reinterpret_cast<Code*>(code);
 }
 
 
-MaybeObject* StubCache::ComputeCallInitialize(int argc,
+Handle<Code> StubCache::ComputeCallInitialize(int argc,
                                               RelocInfo::Mode mode,
                                               Code::Kind kind) {
   Code::ExtraICState extra_state =
       CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
       CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         UNINITIALIZED,
-                                         extra_state,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallInitialize(flags));
+  Code::Flags flags =
+      Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
+  Handle<UnseededNumberDictionary> cache =
+      isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallInitialize(flags);
+  FillCache(isolate_, code);
+  return code;
 }
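
The non-monomorphic Compute* functions that follow all share a second caching pattern: probe the flags-keyed dictionary with FindEntry, and on a miss compile the stub and publish it through FillCache so the next probe hits. A toy equivalent, assuming an unordered_map in place of UnseededNumberDictionary:

#include <cstdio>
#include <memory>
#include <unordered_map>

struct Code { int flags; };

// Flags-keyed cache standing in for the non-monomorphic dictionary.
using NonMonomorphicCache = std::unordered_map<int, std::shared_ptr<Code>>;

// FillCache analogue: publish the freshly compiled stub under its flags.
void FillCache(NonMonomorphicCache& cache, std::shared_ptr<Code> code) {
  cache[code->flags] = code;
}

std::shared_ptr<Code> CompileStub(int flags) {
  std::printf("compiled stub with flags %d\n", flags);
  return std::make_shared<Code>(Code{flags});
}

std::shared_ptr<Code> ComputeStub(NonMonomorphicCache& cache, int flags) {
  auto entry = cache.find(flags);  // FindEntry analogue
  if (entry != cache.end()) return entry->second;

  std::shared_ptr<Code> code = CompileStub(flags);
  FillCache(cache, code);  // so the next probe for these flags hits
  return code;
}

int main() {
  NonMonomorphicCache cache;
  ComputeStub(cache, 7);  // compiles and fills
  ComputeStub(cache, 7);  // served from the cache
  return 0;
}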
 
 
-Handle<Code> StubCache::ComputeCallInitialize(int argc,
-                                              RelocInfo::Mode mode) {
-  CALL_HEAP_FUNCTION(isolate_,
-                     ComputeCallInitialize(argc, mode, Code::CALL_IC),
-                     Code);
+Handle<Code> StubCache::ComputeCallInitialize(int argc, RelocInfo::Mode mode) {
+  return ComputeCallInitialize(argc, mode, Code::CALL_IC);
 }
 
 
 Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc) {
-  CALL_HEAP_FUNCTION(
-      isolate_,
-      ComputeCallInitialize(argc, RelocInfo::CODE_TARGET, Code::KEYED_CALL_IC),
-      Code);
+  return ComputeCallInitialize(argc, RelocInfo::CODE_TARGET,
+                               Code::KEYED_CALL_IC);
 }
 
 
-MaybeObject* StubCache::ComputeCallPreMonomorphic(
+Handle<Code> StubCache::ComputeCallPreMonomorphic(
     int argc,
     Code::Kind kind,
-    Code::ExtraICState extra_ic_state) {
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         PREMONOMORPHIC,
-                                         extra_ic_state,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallPreMonomorphic(flags));
+    Code::ExtraICState extra_state) {
+  Code::Flags flags =
+      Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, NORMAL, argc);
+  Handle<UnseededNumberDictionary> cache =
+      isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallPreMonomorphic(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallNormal(int argc,
+Handle<Code> StubCache::ComputeCallNormal(int argc,
                                           Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state) {
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         MONOMORPHIC,
-                                         extra_ic_state,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallNormal(flags));
+                                          Code::ExtraICState extra_state) {
+  Code::Flags flags =
+      Code::ComputeFlags(kind, MONOMORPHIC, extra_state, NORMAL, argc);
+  Handle<UnseededNumberDictionary> cache =
+      isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallNormal(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
+Handle<Code> StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
   ASSERT(kind == Code::KEYED_CALL_IC);
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         MEGAMORPHIC,
-                                         Code::kNoExtraICState,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallArguments(flags));
+  Code::Flags flags =
+      Code::ComputeFlags(kind, MEGAMORPHIC, Code::kNoExtraICState,
+                         NORMAL, argc);
+  Handle<UnseededNumberDictionary> cache =
+      isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallArguments(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallMegamorphic(
+Handle<Code> StubCache::ComputeCallMegamorphic(
     int argc,
     Code::Kind kind,
-    Code::ExtraICState extra_ic_state) {
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         MEGAMORPHIC,
-                                         extra_ic_state,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallMegamorphic(flags));
+    Code::ExtraICState extra_state) {
+  Code::Flags flags =
+      Code::ComputeFlags(kind, MEGAMORPHIC, extra_state,
+                         NORMAL, argc);
+  Handle<UnseededNumberDictionary> cache =
+      isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallMegamorphic(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallMiss(int argc,
+Handle<Code> StubCache::ComputeCallMiss(int argc,
                                         Code::Kind kind,
-                                        Code::ExtraICState extra_ic_state) {
+                                        Code::ExtraICState extra_state) {
   // MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
   // and monomorphic stubs are not mixed up together in the stub cache.
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         MONOMORPHIC_PROTOTYPE_FAILURE,
-                                         extra_ic_state,
-                                         NORMAL,
-                                         argc,
-                                         OWN_MAP);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallMiss(flags));
+  Code::Flags flags =
+      Code::ComputeFlags(kind, MONOMORPHIC_PROTOTYPE_FAILURE, extra_state,
+                         NORMAL, argc, OWN_MAP);
+  Handle<UnseededNumberDictionary> cache =
+      isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallMiss(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-MaybeObject* StubCache::ComputeCallDebugBreak(
-    int argc,
-    Code::Kind kind) {
+Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
+                                              Code::Kind kind) {
   // Extra IC state is irrelevant for debug break ICs. They jump to
   // the actual call ic to carry out the work.
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         DEBUG_BREAK,
-                                         Code::kNoExtraICState,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallDebugBreak(flags));
+  Code::Flags flags =
+      Code::ComputeFlags(kind, DEBUG_BREAK, Code::kNoExtraICState,
+                         NORMAL, argc);
+  Handle<UnseededNumberDictionary> cache =
+      isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallDebugBreak(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(
-    int argc,
-    Code::Kind kind) {
+Handle<Code> StubCache::ComputeCallDebugPrepareStepIn(int argc,
+                                                      Code::Kind kind) {
   // Extra IC state is irrelevant for debug break ICs. They jump to
   // the actual call ic to carry out the work.
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         DEBUG_PREPARE_STEP_IN,
-                                         Code::kNoExtraICState,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallDebugPrepareStepIn(flags));
+  Code::Flags flags =
+      Code::ComputeFlags(kind, DEBUG_PREPARE_STEP_IN, Code::kNoExtraICState,
+                         NORMAL, argc);
+  Handle<UnseededNumberDictionary> cache =
+      isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallDebugPrepareStepIn(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 #endif
 
 
 void StubCache::Clear() {
+  Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
   for (int i = 0; i < kPrimaryTableSize; i++) {
     primary_[i].key = heap()->empty_string();
-    primary_[i].value = isolate_->builtins()->builtin(
-        Builtins::kIllegal);
+    primary_[i].value = empty;
   }
   for (int j = 0; j < kSecondaryTableSize; j++) {
     secondary_[j].key = heap()->empty_string();
-    secondary_[j].value = isolate_->builtins()->builtin(
-        Builtins::kIllegal);
+    secondary_[j].value = empty;
   }
 }
 
 
 void StubCache::CollectMatchingMaps(SmallMapList* types,
                                     String* name,
-                                    Code::Flags flags) {
+                                    Code::Flags flags,
+                                    Handle<Context> global_context) {
   for (int i = 0; i < kPrimaryTableSize; i++) {
     if (primary_[i].key == name) {
       Map* map = primary_[i].value->FindFirstMap();
@@ -1124,7 +899,8 @@
       if (map == NULL) continue;
 
       int offset = PrimaryOffset(name, flags, map);
-      if (entry(primary_, offset) == &primary_[i]) {
+      if (entry(primary_, offset) == &primary_[i] &&
+          !TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
         types->Add(Handle<Map>(map));
       }
     }
@@ -1147,7 +923,8 @@
 
       // Lookup in secondary table and add matches.
       int offset = SecondaryOffset(name, flags, primary_offset);
-      if (entry(secondary_, offset) == &secondary_[i]) {
+      if (entry(secondary_, offset) == &secondary_[i] &&
+          !TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
         types->Add(Handle<Map>(map));
       }
     }
@@ -1342,8 +1119,8 @@
   JSObject* recv = JSObject::cast(args[0]);
   String* name = String::cast(args[1]);
   Object* value = args[2];
+  ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode);
   StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
-  ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
   ASSERT(recv->HasNamedInterceptor());
   PropertyAttributes attr = NONE;
   MaybeObject* result = recv->SetPropertyWithInterceptor(
@@ -1360,62 +1137,47 @@
 }
 
 
-MaybeObject* StubCompiler::CompileCallInitialize(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallInitialize(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
+  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
+    CallIC::GenerateInitialize(masm(), argc, extra_state);
   } else {
     KeyedCallIC::GenerateInitialize(masm(), argc);
   }
-  Object* result;
-  { MaybeObject* maybe_result =
-        GetCodeWithFlags(flags, "CompileCallInitialize");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallInitialize");
   isolate()->counters()->call_initialize_stubs()->Increment();
-  Code* code = Code::cast(result);
-  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, Code::cast(code)));
-  return result;
+                          *code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, *code));
+  return code;
 }
 
 
-MaybeObject* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   // The code of the PreMonomorphic stub is the same as the code
   // of the Initialized stub.  They just differ on the code object flags.
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
+  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
+    CallIC::GenerateInitialize(masm(), argc, extra_state);
   } else {
     KeyedCallIC::GenerateInitialize(masm(), argc);
   }
-  Object* result;
-  { MaybeObject* maybe_result =
-        GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
   isolate()->counters()->call_premonomorphic_stubs()->Increment();
-  Code* code = Code::cast(result);
-  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, Code::cast(code)));
-  return result;
+                          *code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, *code));
+  return code;
 }
 
 
-MaybeObject* StubCompiler::CompileCallNormal(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallNormal(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
   if (kind == Code::CALL_IC) {
@@ -1426,116 +1188,82 @@
   } else {
     KeyedCallIC::GenerateNormal(masm(), argc);
   }
-  Object* result;
-  { MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallNormal");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallNormal");
   isolate()->counters()->call_normal_stubs()->Increment();
-  Code* code = Code::cast(result);
-  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, Code::cast(code)));
-  return result;
+                          *code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, *code));
+  return code;
 }
 
 
-MaybeObject* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
+  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateMegamorphic(masm(), argc, extra_ic_state);
+    CallIC::GenerateMegamorphic(masm(), argc, extra_state);
   } else {
     KeyedCallIC::GenerateMegamorphic(masm(), argc);
   }
-  Object* result;
-  { MaybeObject* maybe_result =
-        GetCodeWithFlags(flags, "CompileCallMegamorphic");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMegamorphic");
   isolate()->counters()->call_megamorphic_stubs()->Increment();
-  Code* code = Code::cast(result);
-  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
-  return result;
+                          *code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
+  return code;
 }
 
 
-MaybeObject* StubCompiler::CompileCallArguments(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallArguments(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   KeyedCallIC::GenerateNonStrictArguments(masm(), argc);
-  Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Object* result;
-  { MaybeObject* maybe_result =
-        GetCodeWithFlags(flags, "CompileCallArguments");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Code* code = Code::cast(result);
-  USE(code);
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallArguments");
   PROFILE(isolate(),
-          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
-  return result;
+          CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
+                                          CALL_MEGAMORPHIC_TAG),
+                          *code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
+  return code;
 }
 
 
-MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallMiss(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
+  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateMiss(masm(), argc, extra_ic_state);
+    CallIC::GenerateMiss(masm(), argc, extra_state);
   } else {
     KeyedCallIC::GenerateMiss(masm(), argc);
   }
-  Object* result;
-  { MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallMiss");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMiss");
   isolate()->counters()->call_megamorphic_stubs()->Increment();
-  Code* code = Code::cast(result);
-  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_MISS, Code::cast(code)));
-  return result;
+                          *code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_MISS, *code));
+  return code;
 }
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-MaybeObject* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
   Debug::GenerateCallICDebugBreak(masm());
-  Object* result;
-  { MaybeObject* maybe_result =
-        GetCodeWithFlags(flags, "CompileCallDebugBreak");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Code* code = Code::cast(result);
-  USE(code);
-  Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  USE(kind);
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugBreak");
   PROFILE(isolate(),
-          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
-                          code, code->arguments_count()));
-  return result;
+          CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
+                                          CALL_DEBUG_BREAK_TAG),
+                          *code, code->arguments_count()));
+  return code;
 }
 
 
-MaybeObject* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
-  HandleScope scope(isolate());
-  // Use the same code for the the step in preparations as we do for
-  // the miss case.
+Handle<Code> StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
+  // Use the same code for the step-in preparations as we do for the
+  // miss case.
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
   if (kind == Code::CALL_IC) {
@@ -1544,133 +1272,96 @@
   } else {
     KeyedCallIC::GenerateMiss(masm(), argc);
   }
-  Object* result;
-  { MaybeObject* maybe_result =
-        GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Code* code = Code::cast(result);
-  USE(code);
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
   PROFILE(isolate(),
           CodeCreateEvent(
               CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
-              code,
+              *code,
               code->arguments_count()));
-  return result;
+  return code;
 }
-#endif
+#endif  // ENABLE_DEBUGGER_SUPPORT
 
 #undef CALL_LOGGER_TAG
 
-MaybeObject* StubCompiler::GetCodeWithFlags(Code::Flags flags,
-                                            const char* name) {
-  // Check for allocation failures during stub compilation.
-  if (failure_->IsFailure()) return failure_;
 
+Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
+                                            const char* name) {
   // Create code object in the heap.
   CodeDesc desc;
   masm_.GetCode(&desc);
-  MaybeObject* result = heap()->CreateCode(desc, flags, masm_.CodeObject());
+  Handle<Code> code = factory()->NewCode(desc, flags, masm_.CodeObject());
 #ifdef ENABLE_DISASSEMBLER
-  if (FLAG_print_code_stubs && !result->IsFailure()) {
-    Code::cast(result->ToObjectUnchecked())->Disassemble(name);
-  }
+  if (FLAG_print_code_stubs) code->Disassemble(name);
 #endif
-  return result;
+  return code;
 }
 
 
-MaybeObject* StubCompiler::GetCodeWithFlags(Code::Flags flags, String* name) {
-  if (FLAG_print_code_stubs && (name != NULL)) {
-    return GetCodeWithFlags(flags, *name->ToCString());
-  }
-  return GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
+Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
+                                            Handle<String> name) {
+  return (FLAG_print_code_stubs && !name.is_null())
+      ? GetCodeWithFlags(flags, *name->ToCString())
+      : GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
 }
 
 
-void StubCompiler::LookupPostInterceptor(JSObject* holder,
-                                         String* name,
+void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
+                                         Handle<String> name,
                                          LookupResult* lookup) {
-  holder->LocalLookupRealNamedProperty(name, lookup);
-  if (!lookup->IsProperty()) {
-    lookup->NotFound();
-    Object* proto = holder->GetPrototype();
-    if (!proto->IsNull()) {
-      proto->Lookup(name, lookup);
-    }
-  }
+  holder->LocalLookupRealNamedProperty(*name, lookup);
+  if (lookup->IsProperty()) return;
+
+  lookup->NotFound();
+  if (holder->GetPrototype()->IsNull()) return;
+
+  holder->GetPrototype()->Lookup(*name, lookup);
 }
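
LookupPostInterceptor above is a behavior-preserving flattening of nested conditionals into guard clauses; the same logic, shown both ways in a self-contained sketch (hypothetical helpers, not the V8 lookup machinery):

#include <cstdio>

struct LookupResult { bool found; };

void LocalLookup(LookupResult* r) { r->found = false; }
bool HasPrototype() { return true; }
void PrototypeLookup(LookupResult* r) { r->found = true; }

// Nested form: each condition adds a level of indentation.
void LookupNested(LookupResult* r) {
  LocalLookup(r);
  if (!r->found) {
    if (HasPrototype()) {
      PrototypeLookup(r);
    }
  }
}

// Guard-clause form: identical behavior, read straight down.
void LookupFlat(LookupResult* r) {
  LocalLookup(r);
  if (r->found) return;
  if (!HasPrototype()) return;
  PrototypeLookup(r);
}

int main() {
  LookupResult a{false}, b{false};
  LookupNested(&a);
  LookupFlat(&b);
  std::printf("%d %d\n", a.found, b.found);  // both print 1
  return 0;
}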
 
 
-
-MaybeObject* LoadStubCompiler::GetCode(PropertyType type, String* name) {
+Handle<Code> LoadStubCompiler::GetCode(PropertyType type, Handle<String> name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
-  MaybeObject* result = GetCodeWithFlags(flags, name);
-  if (!result->IsFailure()) {
-    PROFILE(isolate(),
-            CodeCreateEvent(Logger::LOAD_IC_TAG,
-                            Code::cast(result->ToObjectUnchecked()),
-                            name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
-                   name,
-                   Code::cast(result->ToObjectUnchecked())));
-  }
-  return result;
+  Handle<Code> code = GetCodeWithFlags(flags, name);
+  PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  return code;
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type,
-                                            String* name,
+Handle<Code> KeyedLoadStubCompiler::GetCode(PropertyType type,
+                                            Handle<String> name,
                                             InlineCacheState state) {
   Code::Flags flags = Code::ComputeFlags(
       Code::KEYED_LOAD_IC, state, Code::kNoExtraICState, type);
-  MaybeObject* result = GetCodeWithFlags(flags, name);
-  if (!result->IsFailure()) {
-    PROFILE(isolate(),
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
-                            Code::cast(result->ToObjectUnchecked()),
-                            name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
-                   name,
-                   Code::cast(result->ToObjectUnchecked())));
-  }
-  return result;
+  Handle<Code> code = GetCodeWithFlags(flags, name);
+  PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  return code;
 }
 
 
-MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
+Handle<Code> StoreStubCompiler::GetCode(PropertyType type,
+                                        Handle<String> name) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::STORE_IC, type, strict_mode_);
-  MaybeObject* result = GetCodeWithFlags(flags, name);
-  if (!result->IsFailure()) {
-    PROFILE(isolate(),
-            CodeCreateEvent(Logger::STORE_IC_TAG,
-                            Code::cast(result->ToObjectUnchecked()),
-                            name));
-    GDBJIT(AddCode(GDBJITInterface::STORE_IC,
-                   name,
-                   Code::cast(result->ToObjectUnchecked())));
-  }
-  return result;
+  Handle<Code> code = GetCodeWithFlags(flags, name);
+  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+  return code;
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type,
-                                             String* name,
+Handle<Code> KeyedStoreStubCompiler::GetCode(PropertyType type,
+                                             Handle<String> name,
                                              InlineCacheState state) {
+  Code::ExtraICState extra_state =
+      Code::ComputeExtraICState(grow_mode_, strict_mode_);
   Code::Flags flags =
-      Code::ComputeFlags(Code::KEYED_STORE_IC, state, strict_mode_, type);
-  MaybeObject* result = GetCodeWithFlags(flags, name);
-  if (!result->IsFailure()) {
-    PROFILE(isolate(),
-            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
-                            Code::cast(result->ToObjectUnchecked()),
-                            name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC,
-                   name,
-                   Code::cast(result->ToObjectUnchecked())));
-  }
-  return result;
+      Code::ComputeFlags(Code::KEYED_STORE_IC, state, extra_state, type);
+  Handle<Code> code = GetCodeWithFlags(flags, name);
+  PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
+  return code;
 }
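
The keyed-store change above folds two independent properties, strict mode and the new JSArray growth mode, into one extra-state word via Code::ComputeExtraICState. A generic bit-packing sketch of that idea (the bit layout here is invented for illustration, not V8's actual encoding):

#include <cassert>

enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };
enum GrowMode { DO_NOT_ALLOW_JSARRAY_GROWTH = 0, ALLOW_JSARRAY_GROWTH = 1 };

using ExtraICState = unsigned;

// Pack each enum into its own bit of a single state word.
constexpr ExtraICState ComputeExtraICState(GrowMode grow,
                                           StrictModeFlag strict) {
  return static_cast<ExtraICState>(grow) |
         (static_cast<ExtraICState>(strict) << 1);
}

constexpr GrowMode ExtractGrowMode(ExtraICState s) {
  return static_cast<GrowMode>(s & 1);
}

constexpr StrictModeFlag ExtractStrictMode(ExtraICState s) {
  return static_cast<StrictModeFlag>((s >> 1) & 1);
}

int main() {
  ExtraICState s = ComputeExtraICState(ALLOW_JSARRAY_GROWTH, kStrictMode);
  assert(ExtractGrowMode(s) == ALLOW_JSARRAY_GROWTH);
  assert(ExtractStrictMode(s) == kStrictMode);
  return 0;
}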
 
 
@@ -1680,50 +1371,49 @@
 }
 
 
-CallStubCompiler::CallStubCompiler(int argc,
+CallStubCompiler::CallStubCompiler(Isolate* isolate,
+                                   int argc,
                                    Code::Kind kind,
-                                   Code::ExtraICState extra_ic_state,
+                                   Code::ExtraICState extra_state,
                                    InlineCacheHolderFlag cache_holder)
-    : arguments_(argc),
+    : StubCompiler(isolate),
+      arguments_(argc),
       kind_(kind),
-      extra_ic_state_(extra_ic_state),
+      extra_state_(extra_state),
       cache_holder_(cache_holder) {
 }
 
 
-bool CallStubCompiler::HasCustomCallGenerator(JSFunction* function) {
-  SharedFunctionInfo* info = function->shared();
-  if (info->HasBuiltinFunctionId()) {
-    BuiltinFunctionId id = info->builtin_function_id();
+bool CallStubCompiler::HasCustomCallGenerator(Handle<JSFunction> function) {
+  if (function->shared()->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function->shared()->builtin_function_id();
 #define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
     CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
 #undef CALL_GENERATOR_CASE
   }
+
   CallOptimization optimization(function);
-  if (optimization.is_simple_api_call()) {
-    return true;
-  }
-  return false;
+  return optimization.is_simple_api_call();
 }
 
 
-MaybeObject* CallStubCompiler::CompileCustomCall(Object* object,
-                                                 JSObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* fname) {
+Handle<Code> CallStubCompiler::CompileCustomCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> fname) {
   ASSERT(HasCustomCallGenerator(function));
 
-  SharedFunctionInfo* info = function->shared();
-  if (info->HasBuiltinFunctionId()) {
-    BuiltinFunctionId id = info->builtin_function_id();
-#define CALL_GENERATOR_CASE(name)                           \
-    if (id == k##name) {                                    \
-      return CallStubCompiler::Compile##name##Call(object,  \
-                                                  holder,   \
-                                                  cell,     \
-                                                  function, \
-                                                  fname);   \
+  if (function->shared()->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function->shared()->builtin_function_id();
+#define CALL_GENERATOR_CASE(name)                               \
+    if (id == k##name) {                                        \
+      return CallStubCompiler::Compile##name##Call(object,      \
+                                                   holder,      \
+                                                   cell,        \
+                                                   function,    \
+                                                   fname);      \
     }
     CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
 #undef CALL_GENERATOR_CASE
@@ -1739,100 +1429,99 @@
 }
 
 
-MaybeObject* CallStubCompiler::GetCode(PropertyType type, String* name) {
+Handle<Code> CallStubCompiler::GetCode(PropertyType type, Handle<String> name) {
   int argc = arguments_.immediate();
   Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
                                                     type,
-                                                    extra_ic_state_,
+                                                    extra_state_,
                                                     cache_holder_,
                                                     argc);
   return GetCodeWithFlags(flags, name);
 }
 
 
-MaybeObject* CallStubCompiler::GetCode(JSFunction* function) {
-  String* function_name = NULL;
+Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
+  Handle<String> function_name;
   if (function->shared()->name()->IsString()) {
-    function_name = String::cast(function->shared()->name());
+    function_name = Handle<String>(String::cast(function->shared()->name()));
   }
   return GetCode(CONSTANT_FUNCTION, function_name);
 }
 
 
-MaybeObject* ConstructStubCompiler::GetCode() {
+Handle<Code> ConstructStubCompiler::GetCode() {
   Code::Flags flags = Code::ComputeFlags(Code::STUB);
-  Object* result;
-  { MaybeObject* maybe_result = GetCodeWithFlags(flags, "ConstructStub");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Code* code = Code::cast(result);
-  USE(code);
-  PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
-  GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", Code::cast(code)));
-  return result;
+  Handle<Code> code = GetCodeWithFlags(flags, "ConstructStub");
+  PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, "ConstructStub"));
+  GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", *code));
+  return code;
 }
 
 
 CallOptimization::CallOptimization(LookupResult* lookup) {
-  if (!lookup->IsProperty() || !lookup->IsCacheable() ||
-      lookup->type() != CONSTANT_FUNCTION) {
-    Initialize(NULL);
-  } else {
+  if (lookup->IsFound() &&
+      lookup->IsCacheable() &&
+      lookup->type() == CONSTANT_FUNCTION) {
     // We only optimize constant function calls.
-    Initialize(lookup->GetConstantFunction());
+    Initialize(Handle<JSFunction>(lookup->GetConstantFunction()));
+  } else {
+    Initialize(Handle<JSFunction>::null());
   }
 }
 
-CallOptimization::CallOptimization(JSFunction* function) {
+CallOptimization::CallOptimization(Handle<JSFunction> function) {
   Initialize(function);
 }
 
 
-int CallOptimization::GetPrototypeDepthOfExpectedType(JSObject* object,
-                                                      JSObject* holder) const {
-  ASSERT(is_simple_api_call_);
-  if (expected_receiver_type_ == NULL) return 0;
+int CallOptimization::GetPrototypeDepthOfExpectedType(
+    Handle<JSObject> object,
+    Handle<JSObject> holder) const {
+  ASSERT(is_simple_api_call());
+  if (expected_receiver_type_.is_null()) return 0;
   int depth = 0;
-  while (object != holder) {
-    if (object->IsInstanceOf(expected_receiver_type_)) return depth;
-    object = JSObject::cast(object->GetPrototype());
+  while (!object.is_identical_to(holder)) {
+    if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
+    object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
     ++depth;
   }
-  if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
+  if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
   return kInvalidProtoDepth;
 }
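
GetPrototypeDepthOfExpectedType counts hops along the prototype chain from the receiver toward the holder, stopping at the first object that satisfies the expected receiver type. A standalone model over a plain linked chain (again, illustrative names only):

#include <cstdio>

constexpr int kInvalidProtoDepth = -1;

struct Obj {
  bool matches_expected_type;
  Obj* prototype;  // nullptr at the end of the chain
};

// Count hops from object to holder, returning early at the first
// object that passes the expected-type check.
int PrototypeDepth(Obj* object, Obj* holder) {
  int depth = 0;
  while (object != holder) {
    if (object->matches_expected_type) return depth;
    object = object->prototype;
    ++depth;
  }
  if (holder->matches_expected_type) return depth;
  return kInvalidProtoDepth;
}

int main() {
  Obj holder{true, nullptr};
  Obj mid{false, &holder};
  Obj receiver{false, &mid};
  std::printf("depth = %d\n", PrototypeDepth(&receiver, &holder));  // 2
  return 0;
}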
 
 
-void CallOptimization::Initialize(JSFunction* function) {
-  constant_function_ = NULL;
+void CallOptimization::Initialize(Handle<JSFunction> function) {
+  constant_function_ = Handle<JSFunction>::null();
   is_simple_api_call_ = false;
-  expected_receiver_type_ = NULL;
-  api_call_info_ = NULL;
+  expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
+  api_call_info_ = Handle<CallHandlerInfo>::null();
 
-  if (function == NULL || !function->is_compiled()) return;
+  if (function.is_null() || !function->is_compiled()) return;
 
   constant_function_ = function;
   AnalyzePossibleApiFunction(function);
 }
 
 
-void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) {
-  SharedFunctionInfo* sfi = function->shared();
-  if (!sfi->IsApiFunction()) return;
-  FunctionTemplateInfo* info = sfi->get_api_func_data();
+void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
+  if (!function->shared()->IsApiFunction()) return;
+  Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
 
   // Require a C++ callback.
   if (info->call_code()->IsUndefined()) return;
-  api_call_info_ = CallHandlerInfo::cast(info->call_code());
+  api_call_info_ =
+      Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
 
   // Accept signatures that either have no restrictions at all or
   // only have restrictions on the receiver.
   if (!info->signature()->IsUndefined()) {
-    SignatureInfo* signature = SignatureInfo::cast(info->signature());
+    Handle<SignatureInfo> signature =
+        Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
     if (!signature->args()->IsUndefined()) return;
     if (!signature->receiver()->IsUndefined()) {
       expected_receiver_type_ =
-          FunctionTemplateInfo::cast(signature->receiver());
+          Handle<FunctionTemplateInfo>(
+              FunctionTemplateInfo::cast(signature->receiver()));
     }
   }
 
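Taken together, AnalyzePossibleApiFunction treats a function as a simple API call only when three conditions hold: it is an API function, it has a C++ call handler, and its signature either imposes no restrictions or restricts only the receiver. Restated as a hedged stand-alone predicate (a sketch, not code from this patch):

// Sketch of the conditions under which is_simple_api_call_ becomes true.
static bool IsSimpleApiFunction(Handle<JSFunction> function) {
  if (!function->shared()->IsApiFunction()) return false;
  Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
  if (info->call_code()->IsUndefined()) return false;  // needs a C++ callback
  if (info->signature()->IsUndefined()) return true;   // no restrictions
  Handle<SignatureInfo> signature(SignatureInfo::cast(info->signature()));
  return signature->args()->IsUndefined();  // receiver-only restriction is OK
}
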
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 18c157b..29bdb61 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 
 #include "allocation.h"
 #include "arguments.h"
+#include "ic-inl.h"
 #include "macro-assembler.h"
 #include "objects.h"
 #include "zone-inl.h"
@@ -68,214 +69,175 @@
   struct Entry {
     String* key;
     Code* value;
+    Map* map;
   };
 
-  void Initialize(bool create_heap_objects);
+  void Initialize();
 
 
   // Computes the stub that matches the given name and flags.  Inserts the
   // result in the cache before returning.  This might compile a stub if
   // needed.
-  MUST_USE_RESULT MaybeObject* ComputeLoadNonexistent(
-      String* name,
-      JSObject* receiver);
+  Handle<Code> ComputeLoadNonexistent(Handle<String> name,
+                                      Handle<JSObject> receiver);
 
-  MUST_USE_RESULT MaybeObject* ComputeLoadField(String* name,
-                                                JSObject* receiver,
-                                                JSObject* holder,
-                                                int field_index);
+  Handle<Code> ComputeLoadField(Handle<String> name,
+                                Handle<JSObject> receiver,
+                                Handle<JSObject> holder,
+                                int field_index);
 
-  MUST_USE_RESULT MaybeObject* ComputeLoadCallback(
-      String* name,
-      JSObject* receiver,
-      JSObject* holder,
-      AccessorInfo* callback);
+  Handle<Code> ComputeLoadCallback(Handle<String> name,
+                                   Handle<JSObject> receiver,
+                                   Handle<JSObject> holder,
+                                   Handle<AccessorInfo> callback);
 
-  MUST_USE_RESULT MaybeObject* ComputeLoadConstant(String* name,
-                                                   JSObject* receiver,
-                                                   JSObject* holder,
-                                                   Object* value);
+  Handle<Code> ComputeLoadConstant(Handle<String> name,
+                                   Handle<JSObject> receiver,
+                                   Handle<JSObject> holder,
+                                   Handle<JSFunction> value);
 
-  MUST_USE_RESULT MaybeObject* ComputeLoadInterceptor(
-      String* name,
-      JSObject* receiver,
-      JSObject* holder);
+  Handle<Code> ComputeLoadInterceptor(Handle<String> name,
+                                      Handle<JSObject> receiver,
+                                      Handle<JSObject> holder);
 
-  MUST_USE_RESULT MaybeObject* ComputeLoadNormal();
+  Handle<Code> ComputeLoadNormal();
 
-
-  MUST_USE_RESULT MaybeObject* ComputeLoadGlobal(
-      String* name,
-      JSObject* receiver,
-      GlobalObject* holder,
-      JSGlobalPropertyCell* cell,
-      bool is_dont_delete);
-
+  Handle<Code> ComputeLoadGlobal(Handle<String> name,
+                                 Handle<JSObject> receiver,
+                                 Handle<GlobalObject> holder,
+                                 Handle<JSGlobalPropertyCell> cell,
+                                 bool is_dont_delete);
 
   // ---
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadField(String* name,
-                                                     JSObject* receiver,
-                                                     JSObject* holder,
-                                                     int field_index);
+  Handle<Code> ComputeKeyedLoadField(Handle<String> name,
+                                     Handle<JSObject> receiver,
+                                     Handle<JSObject> holder,
+                                     int field_index);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadCallback(
-      String* name,
-      JSObject* receiver,
-      JSObject* holder,
-      AccessorInfo* callback);
+  Handle<Code> ComputeKeyedLoadCallback(Handle<String> name,
+                                        Handle<JSObject> receiver,
+                                        Handle<JSObject> holder,
+                                        Handle<AccessorInfo> callback);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadConstant(
-      String* name,
-      JSObject* receiver,
-      JSObject* holder,
-      Object* value);
+  Handle<Code> ComputeKeyedLoadConstant(Handle<String> name,
+                                        Handle<JSObject> receiver,
+                                        Handle<JSObject> holder,
+                                        Handle<JSFunction> value);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadInterceptor(
-      String* name,
-      JSObject* receiver,
-      JSObject* holder);
+  Handle<Code> ComputeKeyedLoadInterceptor(Handle<String> name,
+                                           Handle<JSObject> receiver,
+                                           Handle<JSObject> holder);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadArrayLength(
-      String* name,
-      JSArray* receiver);
+  Handle<Code> ComputeKeyedLoadArrayLength(Handle<String> name,
+                                           Handle<JSArray> receiver);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadStringLength(
-      String* name,
-      String* receiver);
+  Handle<Code> ComputeKeyedLoadStringLength(Handle<String> name,
+                                            Handle<String> receiver);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadFunctionPrototype(
-      String* name,
-      JSFunction* receiver);
+  Handle<Code> ComputeKeyedLoadFunctionPrototype(Handle<String> name,
+                                                 Handle<JSFunction> receiver);
 
   // ---
 
-  MUST_USE_RESULT MaybeObject* ComputeStoreField(
-      String* name,
-      JSObject* receiver,
-      int field_index,
-      Map* transition,
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeStoreField(Handle<String> name,
+                                 Handle<JSObject> receiver,
+                                 int field_index,
+                                 Handle<Map> transition,
+                                 StrictModeFlag strict_mode);
 
-  MUST_USE_RESULT MaybeObject* ComputeStoreNormal(
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeStoreNormal(StrictModeFlag strict_mode);
 
-  MUST_USE_RESULT MaybeObject* ComputeStoreGlobal(
-      String* name,
-      GlobalObject* receiver,
-      JSGlobalPropertyCell* cell,
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeStoreGlobal(Handle<String> name,
+                                  Handle<GlobalObject> receiver,
+                                  Handle<JSGlobalPropertyCell> cell,
+                                  StrictModeFlag strict_mode);
 
-  MUST_USE_RESULT MaybeObject* ComputeStoreCallback(
-      String* name,
-      JSObject* receiver,
-      AccessorInfo* callback,
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeStoreCallback(Handle<String> name,
+                                    Handle<JSObject> receiver,
+                                    Handle<AccessorInfo> callback,
+                                    StrictModeFlag strict_mode);
 
-  MUST_USE_RESULT MaybeObject* ComputeStoreInterceptor(
-      String* name,
-      JSObject* receiver,
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeStoreInterceptor(Handle<String> name,
+                                       Handle<JSObject> receiver,
+                                       StrictModeFlag strict_mode);
 
   // ---
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedStoreField(
-      String* name,
-      JSObject* receiver,
-      int field_index,
-      Map* transition,
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeKeyedStoreField(Handle<String> name,
+                                      Handle<JSObject> receiver,
+                                      int field_index,
+                                      Handle<Map> transition,
+                                      StrictModeFlag strict_mode);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreElement(
-      JSObject* receiver,
-      bool is_store,
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeKeyedLoadOrStoreElement(Handle<JSObject> receiver,
+                                              KeyedIC::StubKind stub_kind,
+                                              StrictModeFlag strict_mode);
 
   // ---
 
-  MUST_USE_RESULT MaybeObject* ComputeCallField(
-      int argc,
-      Code::Kind,
-      Code::ExtraICState extra_ic_state,
-      String* name,
-      Object* object,
-      JSObject* holder,
-      int index);
+  Handle<Code> ComputeCallField(int argc,
+                                Code::Kind,
+                                Code::ExtraICState extra_state,
+                                Handle<String> name,
+                                Handle<Object> object,
+                                Handle<JSObject> holder,
+                                int index);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallConstant(
-      int argc,
-      Code::Kind,
-      Code::ExtraICState extra_ic_state,
-      String* name,
-      Object* object,
-      JSObject* holder,
-      JSFunction* function);
+  Handle<Code> ComputeCallConstant(int argc,
+                                   Code::Kind,
+                                   Code::ExtraICState extra_state,
+                                   Handle<String> name,
+                                   Handle<Object> object,
+                                   Handle<JSObject> holder,
+                                   Handle<JSFunction> function);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallNormal(
-      int argc,
-      Code::Kind,
-      Code::ExtraICState extra_ic_state,
-      String* name,
-      JSObject* receiver);
+  Handle<Code> ComputeCallInterceptor(int argc,
+                                      Code::Kind,
+                                      Code::ExtraICState extra_state,
+                                      Handle<String> name,
+                                      Handle<Object> object,
+                                      Handle<JSObject> holder);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallInterceptor(
-      int argc,
-      Code::Kind,
-      Code::ExtraICState extra_ic_state,
-      String* name,
-      Object* object,
-      JSObject* holder);
-
-  MUST_USE_RESULT MaybeObject* ComputeCallGlobal(
-      int argc,
-      Code::Kind,
-      Code::ExtraICState extra_ic_state,
-      String* name,
-      JSObject* receiver,
-      GlobalObject* holder,
-      JSGlobalPropertyCell* cell,
-      JSFunction* function);
+  Handle<Code> ComputeCallGlobal(int argc,
+                                 Code::Kind,
+                                 Code::ExtraICState extra_state,
+                                 Handle<String> name,
+                                 Handle<JSObject> receiver,
+                                 Handle<GlobalObject> holder,
+                                 Handle<JSGlobalPropertyCell> cell,
+                                 Handle<JSFunction> function);
 
   // ---
 
-  MUST_USE_RESULT MaybeObject* ComputeCallInitialize(int argc,
-                                                     RelocInfo::Mode mode,
-                                                     Code::Kind kind);
-
-  Handle<Code> ComputeCallInitialize(int argc,
-                                     RelocInfo::Mode mode);
+  Handle<Code> ComputeCallInitialize(int argc, RelocInfo::Mode mode);
 
   Handle<Code> ComputeKeyedCallInitialize(int argc);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallPreMonomorphic(
-      int argc,
-      Code::Kind kind,
-      Code::ExtraICState extra_ic_state);
+  Handle<Code> ComputeCallPreMonomorphic(int argc,
+                                         Code::Kind kind,
+                                         Code::ExtraICState extra_state);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
-                                                 Code::Kind kind,
-                                                 Code::ExtraICState state);
+  Handle<Code> ComputeCallNormal(int argc,
+                                 Code::Kind kind,
+                                 Code::ExtraICState state);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallArguments(int argc,
-                                                    Code::Kind kind);
+  Handle<Code> ComputeCallArguments(int argc, Code::Kind kind);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallMegamorphic(int argc,
-                                                      Code::Kind kind,
-                                                      Code::ExtraICState state);
+  Handle<Code> ComputeCallMegamorphic(int argc,
+                                      Code::Kind kind,
+                                      Code::ExtraICState state);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallMiss(int argc,
-                                               Code::Kind kind,
-                                               Code::ExtraICState state);
+  Handle<Code> ComputeCallMiss(int argc,
+                               Code::Kind kind,
+                               Code::ExtraICState state);
 
   // Finds the Code object stored in the Heap::non_monomorphic_cache().
-  MUST_USE_RESULT Code* FindCallInitialize(int argc,
-                                           RelocInfo::Mode mode,
-                                           Code::Kind kind);
+  Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  MUST_USE_RESULT MaybeObject* ComputeCallDebugBreak(int argc, Code::Kind kind);
+  Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallDebugPrepareStepIn(int argc,
-                                                             Code::Kind kind);
+  Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
 #endif
 
   // Update cache for entry hash(name, map).
@@ -287,10 +249,11 @@
   // Collect all maps that match the name and flags.
   void CollectMatchingMaps(SmallMapList* types,
                            String* name,
-                           Code::Flags flags);
+                           Code::Flags flags,
+                           Handle<Context> global_context);
 
   // Generate code for probing the stub cache table.
-  // Arguments extra and extra2 may be used to pass additional scratch
+  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
   // registers. Set to no_reg if not needed.
   void GenerateProbe(MacroAssembler* masm,
                      Code::Flags flags,
@@ -298,7 +261,8 @@
                      Register name,
                      Register scratch,
                      Register extra,
-                     Register extra2 = no_reg);
+                     Register extra2 = no_reg,
+                     Register extra3 = no_reg);
 
   enum Table {
     kPrimary,
@@ -312,6 +276,12 @@
   }
 
 
+  SCTableReference map_reference(StubCache::Table table) {
+    return SCTableReference(
+        reinterpret_cast<Address>(&first_entry(table)->map));
+  }
+
+
   SCTableReference value_reference(StubCache::Table table) {
     return SCTableReference(
         reinterpret_cast<Address>(&first_entry(table)->value));
@@ -329,18 +299,25 @@
 
   Isolate* isolate() { return isolate_; }
   Heap* heap() { return isolate()->heap(); }
+  Factory* factory() { return isolate()->factory(); }
 
  private:
   explicit StubCache(Isolate* isolate);
 
-  friend class Isolate;
-  friend class SCTableReference;
-  static const int kPrimaryTableSize = 2048;
-  static const int kSecondaryTableSize = 512;
-  Entry primary_[kPrimaryTableSize];
-  Entry secondary_[kSecondaryTableSize];
+  Handle<Code> ComputeCallInitialize(int argc,
+                                     RelocInfo::Mode mode,
+                                     Code::Kind kind);
 
-  // Computes the hashed offsets for primary and secondary caches.
+  // The stub cache has a primary and a secondary level.  The two levels use
+  // different hashing algorithms in order to avoid simultaneous collisions
+  // in both caches.  Unlike a probing strategy (quadratic or otherwise), the
+  // update strategy is simple:  any existing entry in the primary cache is
+  // moved to the secondary cache, and secondary cache entries are
+  // overwritten.
+
+  // Hash algorithm for the primary table.  This algorithm is replicated in
+  // assembler for every architecture.  Returns an index into the table that
+  // is scaled by 1 << kHeapObjectTagSize.
   static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
     // This works well because the heap object tag size and the hash
     // shift are equal.  Shifting down the length field to get the
@@ -364,27 +341,44 @@
     return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
   }
 
+  // Hash algorithm for the secondary table.  This algorithm is replicated in
+  // assembler for every architecture.  Returns an index into the table that
+  // is scaled by 1 << kHeapObjectTagSize.
   static int SecondaryOffset(String* name, Code::Flags flags, int seed) {
     // Use the seed from the primary cache in the secondary cache.
     uint32_t string_low32bits =
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
-    uint32_t key = seed - string_low32bits + flags;
+    // The generated lookup code always masks out the flags that are not
+    // used in the lookup (such as the in_loop bit), so mask them here as
+    // well to keep the hash codes in sync.
+    uint32_t iflags =
+        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
+    uint32_t key = (seed - string_low32bits) + iflags;
     return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
   }
 
   // Compute the entry for a given offset in exactly the same way as
   // we do in generated code.  We generate a hash code that already
-  // ends in String::kHashShift 0s.  Then we shift it so it is a multiple
+  // ends in String::kHashShift 0s.  Then we multiply it so it is a multiple
   // of sizeof(Entry).  This makes it easier to avoid making mistakes
   // in the hashed offset computations.
   static Entry* entry(Entry* table, int offset) {
-    const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
+    const int multiplier = sizeof(*table) >> String::kHashShift;
     return reinterpret_cast<Entry*>(
-        reinterpret_cast<Address>(table) + (offset << shift_amount));
+        reinterpret_cast<Address>(table) + offset * multiplier);
   }
 
+  static const int kPrimaryTableBits = 11;
+  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
+  static const int kSecondaryTableBits = 9;
+  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
+
+  Entry primary_[kPrimaryTableSize];
+  Entry secondary_[kSecondaryTableSize];
   Isolate* isolate_;
 
+  friend class Isolate;
+  friend class SCTableReference;
+
   DISALLOW_COPY_AND_ASSIGN(StubCache);
 };
 
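The comments above pin down the probing discipline; the update path they describe can be summarized in a small hedged sketch (free-standing types, with indices assumed to come from PrimaryOffset and SecondaryOffset):

// Sketch of the two-level update strategy.  Note that the secondary slot
// is chosen by hashing the *displaced* primary entry, not the new one.
#include <cstddef>
class String; class Code; class Map;
struct CacheEntry { String* key; Code* value; Map* map; };

void UpdateCaches(CacheEntry* primary_slot, CacheEntry* secondary_table,
                  int displaced_secondary_index,
                  String* name, Map* map, Code* code) {
  if (primary_slot->key != NULL) {
    // Any existing primary entry is moved to the secondary cache,
    // overwriting whatever entry was there before.
    secondary_table[displaced_secondary_index] = *primary_slot;
  }
  primary_slot->key = name;
  primary_slot->map = map;
  primary_slot->value = code;
}
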
@@ -406,21 +400,24 @@
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
 
 
-// The stub compiler compiles stubs for the stub cache.
+// The stub compilers compile stubs for the stub cache.
 class StubCompiler BASE_EMBEDDED {
  public:
-  StubCompiler()
-      : scope_(), masm_(Isolate::Current(), NULL, 256), failure_(NULL) { }
+  explicit StubCompiler(Isolate* isolate)
+      : isolate_(isolate), masm_(isolate, NULL, 256), failure_(NULL) { }
 
-  MUST_USE_RESULT MaybeObject* CompileCallInitialize(Code::Flags flags);
-  MUST_USE_RESULT MaybeObject* CompileCallPreMonomorphic(Code::Flags flags);
-  MUST_USE_RESULT MaybeObject* CompileCallNormal(Code::Flags flags);
-  MUST_USE_RESULT MaybeObject* CompileCallMegamorphic(Code::Flags flags);
-  MUST_USE_RESULT MaybeObject* CompileCallArguments(Code::Flags flags);
-  MUST_USE_RESULT MaybeObject* CompileCallMiss(Code::Flags flags);
+  // Functions to compile either CallIC or KeyedCallIC.  The specific kind
+  // is extracted from the code flags.
+  Handle<Code> CompileCallInitialize(Code::Flags flags);
+  Handle<Code> CompileCallPreMonomorphic(Code::Flags flags);
+  Handle<Code> CompileCallNormal(Code::Flags flags);
+  Handle<Code> CompileCallMegamorphic(Code::Flags flags);
+  Handle<Code> CompileCallArguments(Code::Flags flags);
+  Handle<Code> CompileCallMiss(Code::Flags flags);
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  MUST_USE_RESULT MaybeObject* CompileCallDebugBreak(Code::Flags flags);
-  MUST_USE_RESULT MaybeObject* CompileCallDebugPrepareStepIn(Code::Flags flags);
+  Handle<Code> CompileCallDebugBreak(Code::Flags flags);
+  Handle<Code> CompileCallDebugPrepareStepIn(Code::Flags flags);
 #endif
 
   // Static functions for generating parts of stubs.
@@ -440,8 +437,10 @@
                                                         Label* miss);
 
   static void GenerateFastPropertyLoad(MacroAssembler* masm,
-                                       Register dst, Register src,
-                                       JSObject* holder, int index);
+                                       Register dst,
+                                       Register src,
+                                       Handle<JSObject> holder,
+                                       int index);
 
   static void GenerateLoadArrayLength(MacroAssembler* masm,
                                       Register receiver,
@@ -462,9 +461,9 @@
                                             Label* miss_label);
 
   static void GenerateStoreField(MacroAssembler* masm,
-                                 JSObject* object,
+                                 Handle<JSObject> object,
                                  int index,
-                                 Map* transition,
+                                 Handle<Map> transition,
                                  Register receiver_reg,
                                  Register name_reg,
                                  Register scratch,
@@ -490,88 +489,87 @@
   // The function can optionally (when save_at_depth !=
   // kInvalidProtoDepth) save the object at the given depth by moving
   // it to [esp + kPointerSize].
-
-  Register CheckPrototypes(JSObject* object,
+  Register CheckPrototypes(Handle<JSObject> object,
                            Register object_reg,
-                           JSObject* holder,
+                           Handle<JSObject> holder,
                            Register holder_reg,
                            Register scratch1,
                            Register scratch2,
-                           String* name,
+                           Handle<String> name,
                            Label* miss) {
     return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
                            scratch2, name, kInvalidProtoDepth, miss);
   }
 
-  Register CheckPrototypes(JSObject* object,
+  Register CheckPrototypes(Handle<JSObject> object,
                            Register object_reg,
-                           JSObject* holder,
+                           Handle<JSObject> holder,
                            Register holder_reg,
                            Register scratch1,
                            Register scratch2,
-                           String* name,
+                           Handle<String> name,
                            int save_at_depth,
                            Label* miss);
 
  protected:
-  MaybeObject* GetCodeWithFlags(Code::Flags flags, const char* name);
-  MaybeObject* GetCodeWithFlags(Code::Flags flags, String* name);
+  Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
+  Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<String> name);
 
   MacroAssembler* masm() { return &masm_; }
   void set_failure(Failure* failure) { failure_ = failure; }
 
-  void GenerateLoadField(JSObject* object,
-                         JSObject* holder,
+  void GenerateLoadField(Handle<JSObject> object,
+                         Handle<JSObject> holder,
                          Register receiver,
                          Register scratch1,
                          Register scratch2,
                          Register scratch3,
                          int index,
-                         String* name,
+                         Handle<String> name,
                          Label* miss);
 
-  MaybeObject* GenerateLoadCallback(JSObject* object,
-                                    JSObject* holder,
-                                    Register receiver,
-                                    Register name_reg,
-                                    Register scratch1,
-                                    Register scratch2,
-                                    Register scratch3,
-                                    AccessorInfo* callback,
-                                    String* name,
-                                    Label* miss);
+  void GenerateLoadCallback(Handle<JSObject> object,
+                            Handle<JSObject> holder,
+                            Register receiver,
+                            Register name_reg,
+                            Register scratch1,
+                            Register scratch2,
+                            Register scratch3,
+                            Handle<AccessorInfo> callback,
+                            Handle<String> name,
+                            Label* miss);
 
-  void GenerateLoadConstant(JSObject* object,
-                            JSObject* holder,
+  void GenerateLoadConstant(Handle<JSObject> object,
+                            Handle<JSObject> holder,
                             Register receiver,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
-                            Object* value,
-                            String* name,
+                            Handle<JSFunction> value,
+                            Handle<String> name,
                             Label* miss);
 
-  void GenerateLoadInterceptor(JSObject* object,
-                               JSObject* holder,
+  void GenerateLoadInterceptor(Handle<JSObject> object,
+                               Handle<JSObject> holder,
                                LookupResult* lookup,
                                Register receiver,
                                Register name_reg,
                                Register scratch1,
                                Register scratch2,
                                Register scratch3,
-                               String* name,
+                               Handle<String> name,
                                Label* miss);
 
-  static void LookupPostInterceptor(JSObject* holder,
-                                    String* name,
+  static void LookupPostInterceptor(Handle<JSObject> holder,
+                                    Handle<String> name,
                                     LookupResult* lookup);
 
-  Isolate* isolate() { return scope_.isolate(); }
+  Isolate* isolate() { return isolate_; }
   Heap* heap() { return isolate()->heap(); }
   Factory* factory() { return isolate()->factory(); }
 
  private:
-  HandleScope scope_;
+  Isolate* isolate_;
   MacroAssembler masm_;
   Failure* failure_;
 };
@@ -579,70 +577,75 @@
 
 class LoadStubCompiler: public StubCompiler {
  public:
-  MUST_USE_RESULT MaybeObject* CompileLoadNonexistent(String* name,
-                                                      JSObject* object,
-                                                      JSObject* last);
+  explicit LoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
 
-  MUST_USE_RESULT MaybeObject* CompileLoadField(JSObject* object,
-                                                JSObject* holder,
-                                                int index,
-                                                String* name);
+  Handle<Code> CompileLoadNonexistent(Handle<String> name,
+                                      Handle<JSObject> object,
+                                      Handle<JSObject> last);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback);
+  Handle<Code> CompileLoadField(Handle<JSObject> object,
+                                Handle<JSObject> holder,
+                                int index,
+                                Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadConstant(JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* value,
-                                                   String* name);
+  Handle<Code> CompileLoadCallback(Handle<String> name,
+                                   Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   Handle<AccessorInfo> callback);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name);
+  Handle<Code> CompileLoadConstant(Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   Handle<JSFunction> value,
+                                   Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 String* name,
-                                                 bool is_dont_delete);
+  Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
+                                      Handle<JSObject> holder,
+                                      Handle<String> name);
+
+  Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
+                                 Handle<GlobalObject> holder,
+                                 Handle<JSGlobalPropertyCell> cell,
+                                 Handle<String> name,
+                                 bool is_dont_delete);
 
  private:
-  MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
+  Handle<Code> GetCode(PropertyType type, Handle<String> name);
 };
 
 
 class KeyedLoadStubCompiler: public StubCompiler {
  public:
-  MUST_USE_RESULT MaybeObject* CompileLoadField(String* name,
-                                                JSObject* object,
-                                                JSObject* holder,
-                                                int index);
+  explicit KeyedLoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
 
-  MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback);
+  Handle<Code> CompileLoadField(Handle<String> name,
+                                Handle<JSObject> object,
+                                Handle<JSObject> holder,
+                                int index);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadConstant(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* value);
+  Handle<Code> CompileLoadCallback(Handle<String> name,
+                                   Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   Handle<AccessorInfo> callback);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name);
+  Handle<Code> CompileLoadConstant(Handle<String> name,
+                                   Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   Handle<JSFunction> value);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadArrayLength(String* name);
-  MUST_USE_RESULT MaybeObject* CompileLoadStringLength(String* name);
-  MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
+  Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
+                                      Handle<JSObject> holder,
+                                      Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadElement(Map* receiver_map);
+  Handle<Code> CompileLoadArrayLength(Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadMegamorphic(
-      MapList* receiver_maps,
-      CodeList* handler_ics);
+  Handle<Code> CompileLoadStringLength(Handle<String> name);
+
+  Handle<Code> CompileLoadFunctionPrototype(Handle<String> name);
+
+  Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
+
+  Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps,
+                                      CodeHandleList* handler_ics);
 
   static void GenerateLoadExternalArray(MacroAssembler* masm,
                                         ElementsKind elements_kind);
@@ -654,34 +657,36 @@
   static void GenerateLoadDictionaryElement(MacroAssembler* masm);
 
  private:
-  MaybeObject* GetCode(PropertyType type,
-                       String* name,
+  Handle<Code> GetCode(PropertyType type,
+                       Handle<String> name,
                        InlineCacheState state = MONOMORPHIC);
 };
 
 
 class StoreStubCompiler: public StubCompiler {
  public:
-  explicit StoreStubCompiler(StrictModeFlag strict_mode)
-    : strict_mode_(strict_mode) { }
+  StoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode)
+    : StubCompiler(isolate), strict_mode_(strict_mode) { }
 
-  MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
-                                                 int index,
-                                                 Map* transition,
-                                                 String* name);
 
-  MUST_USE_RESULT MaybeObject* CompileStoreCallback(JSObject* object,
-                                                    AccessorInfo* callbacks,
-                                                    String* name);
-  MUST_USE_RESULT MaybeObject* CompileStoreInterceptor(JSObject* object,
-                                                       String* name);
-  MUST_USE_RESULT MaybeObject* CompileStoreGlobal(GlobalObject* object,
-                                                  JSGlobalPropertyCell* holder,
-                                                  String* name);
+  Handle<Code> CompileStoreField(Handle<JSObject> object,
+                                 int index,
+                                 Handle<Map> transition,
+                                 Handle<String> name);
 
+  Handle<Code> CompileStoreCallback(Handle<JSObject> object,
+                                    Handle<AccessorInfo> callback,
+                                    Handle<String> name);
+
+  Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
+                                       Handle<String> name);
+
+  Handle<Code> CompileStoreGlobal(Handle<GlobalObject> object,
+                                  Handle<JSGlobalPropertyCell> holder,
+                                  Handle<String> name);
 
  private:
-  MaybeObject* GetCode(PropertyType type, String* name);
+  Handle<Code> GetCode(PropertyType type, Handle<String> name);
 
   StrictModeFlag strict_mode_;
 };
@@ -689,25 +694,32 @@
 
 class KeyedStoreStubCompiler: public StubCompiler {
  public:
-  explicit KeyedStoreStubCompiler(StrictModeFlag strict_mode)
-    : strict_mode_(strict_mode) { }
+  KeyedStoreStubCompiler(Isolate* isolate,
+                         StrictModeFlag strict_mode,
+                         KeyedAccessGrowMode grow_mode)
+    : StubCompiler(isolate),
+      strict_mode_(strict_mode),
+      grow_mode_(grow_mode) { }
 
-  MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
-                                                 int index,
-                                                 Map* transition,
-                                                 String* name);
+  Handle<Code> CompileStoreField(Handle<JSObject> object,
+                                 int index,
+                                 Handle<Map> transition,
+                                 Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileStoreElement(Map* receiver_map);
+  Handle<Code> CompileStoreElement(Handle<Map> receiver_map);
 
-  MUST_USE_RESULT MaybeObject* CompileStoreMegamorphic(
-      MapList* receiver_maps,
-      CodeList* handler_ics);
+  Handle<Code> CompileStorePolymorphic(MapHandleList* receiver_maps,
+                                       CodeHandleList* handler_stubs,
+                                       MapHandleList* transitioned_maps);
 
   static void GenerateStoreFastElement(MacroAssembler* masm,
-                                       bool is_js_array);
+                                       bool is_js_array,
+                                       ElementsKind element_kind,
+                                       KeyedAccessGrowMode grow_mode);
 
   static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
-                                             bool is_js_array);
+                                             bool is_js_array,
+                                             KeyedAccessGrowMode grow_mode);
 
   static void GenerateStoreExternalArray(MacroAssembler* masm,
                                          ElementsKind elements_kind);
@@ -715,11 +727,12 @@
   static void GenerateStoreDictionaryElement(MacroAssembler* masm);
 
  private:
-  MaybeObject* GetCode(PropertyType type,
-                       String* name,
+  Handle<Code> GetCode(PropertyType type,
+                       Handle<String> name,
                        InlineCacheState state = MONOMORPHIC);
 
   StrictModeFlag strict_mode_;
+  KeyedAccessGrowMode grow_mode_;
 };
 
 
@@ -739,105 +752,97 @@
 
 class CallStubCompiler: public StubCompiler {
  public:
-  CallStubCompiler(int argc,
+  CallStubCompiler(Isolate* isolate,
+                   int argc,
                    Code::Kind kind,
-                   Code::ExtraICState extra_ic_state,
+                   Code::ExtraICState extra_state,
                    InlineCacheHolderFlag cache_holder);
 
-  MUST_USE_RESULT MaybeObject* CompileCallField(
-      JSObject* object,
-      JSObject* holder,
-      int index,
-      String* name);
+  Handle<Code> CompileCallField(Handle<JSObject> object,
+                                Handle<JSObject> holder,
+                                int index,
+                                Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileCallConstant(
-      Object* object,
-      JSObject* holder,
-      JSFunction* function,
-      String* name,
-      CheckType check);
+  Handle<Code> CompileCallConstant(Handle<Object> object,
+                                   Handle<JSObject> holder,
+                                   Handle<JSFunction> function,
+                                   Handle<String> name,
+                                   CheckType check);
 
-  MUST_USE_RESULT MaybeObject* CompileCallInterceptor(
-      JSObject* object,
-      JSObject* holder,
-      String* name);
+  Handle<Code> CompileCallInterceptor(Handle<JSObject> object,
+                                      Handle<JSObject> holder,
+                                      Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileCallGlobal(
-      JSObject* object,
-      GlobalObject* holder,
-      JSGlobalPropertyCell* cell,
-      JSFunction* function,
-      String* name);
+  Handle<Code> CompileCallGlobal(Handle<JSObject> object,
+                                 Handle<GlobalObject> holder,
+                                 Handle<JSGlobalPropertyCell> cell,
+                                 Handle<JSFunction> function,
+                                 Handle<String> name);
 
-  static bool HasCustomCallGenerator(JSFunction* function);
+  static bool HasCustomCallGenerator(Handle<JSFunction> function);
 
  private:
-  // Compiles a custom call constant/global IC. For constant calls
-  // cell is NULL. Returns undefined if there is no custom call code
-  // for the given function or it can't be generated.
-  MUST_USE_RESULT MaybeObject* CompileCustomCall(Object* object,
-                                                 JSObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name);
+  // Compiles a custom call constant/global IC.  For constant calls, cell is
+  // the null handle.  Returns an empty handle if there is no custom call
+  // code for the given function.
+  Handle<Code> CompileCustomCall(Handle<Object> object,
+                                 Handle<JSObject> holder,
+                                 Handle<JSGlobalPropertyCell> cell,
+                                 Handle<JSFunction> function,
+                                 Handle<String> name);
 
-#define DECLARE_CALL_GENERATOR(name)                                           \
-  MUST_USE_RESULT MaybeObject* Compile##name##Call(Object* object,             \
-                                                   JSObject* holder,           \
-                                                   JSGlobalPropertyCell* cell, \
-                                                   JSFunction* function,       \
-                                                   String* fname);
+#define DECLARE_CALL_GENERATOR(name)                                    \
+  Handle<Code> Compile##name##Call(Handle<Object> object,               \
+                                   Handle<JSObject> holder,             \
+                                   Handle<JSGlobalPropertyCell> cell,   \
+                                   Handle<JSFunction> function,         \
+                                   Handle<String> fname);
   CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
 #undef DECLARE_CALL_GENERATOR
 
-  MUST_USE_RESULT MaybeObject* CompileFastApiCall(
-      const CallOptimization& optimization,
-      Object* object,
-      JSObject* holder,
-      JSGlobalPropertyCell* cell,
-      JSFunction* function,
-      String* name);
+  Handle<Code> CompileFastApiCall(const CallOptimization& optimization,
+                                  Handle<Object> object,
+                                  Handle<JSObject> holder,
+                                  Handle<JSGlobalPropertyCell> cell,
+                                  Handle<JSFunction> function,
+                                  Handle<String> name);
 
-  const ParameterCount arguments_;
-  const Code::Kind kind_;
-  const Code::ExtraICState extra_ic_state_;
-  const InlineCacheHolderFlag cache_holder_;
+  Handle<Code> GetCode(PropertyType type, Handle<String> name);
+  Handle<Code> GetCode(Handle<JSFunction> function);
 
   const ParameterCount& arguments() { return arguments_; }
 
-  MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
+  void GenerateNameCheck(Handle<String> name, Label* miss);
 
-  // Convenience function. Calls GetCode above passing
-  // CONSTANT_FUNCTION type and the name of the given function.
-  MUST_USE_RESULT MaybeObject* GetCode(JSFunction* function);
-
-  void GenerateNameCheck(String* name, Label* miss);
-
-  void GenerateGlobalReceiverCheck(JSObject* object,
-                                   JSObject* holder,
-                                   String* name,
+  void GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   Handle<String> name,
                                    Label* miss);
 
   // Generates code to load the function from the cell, checking that
   // it still contains the same function.
-  void GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                    JSFunction* function,
+  void GenerateLoadFunctionFromCell(Handle<JSGlobalPropertyCell> cell,
+                                    Handle<JSFunction> function,
                                     Label* miss);
 
-  // Generates a jump to CallIC miss stub. Returns Failure if the jump cannot
-  // be generated.
-  MUST_USE_RESULT MaybeObject* GenerateMissBranch();
+  // Generates a jump to CallIC miss stub.
+  void GenerateMissBranch();
+
+  const ParameterCount arguments_;
+  const Code::Kind kind_;
+  const Code::ExtraICState extra_state_;
+  const InlineCacheHolderFlag cache_holder_;
 };
 
 
 class ConstructStubCompiler: public StubCompiler {
  public:
-  explicit ConstructStubCompiler() {}
+  explicit ConstructStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
 
-  MUST_USE_RESULT MaybeObject* CompileConstructStub(JSFunction* function);
+  Handle<Code> CompileConstructStub(Handle<JSFunction> function);
 
  private:
-  MaybeObject* GetCode();
+  Handle<Code> GetCode();
 };
 
 
@@ -846,14 +851,14 @@
  public:
   explicit CallOptimization(LookupResult* lookup);
 
-  explicit CallOptimization(JSFunction* function);
+  explicit CallOptimization(Handle<JSFunction> function);
 
   bool is_constant_call() const {
-    return constant_function_ != NULL;
+    return !constant_function_.is_null();
   }
 
-  JSFunction* constant_function() const {
-    ASSERT(constant_function_ != NULL);
+  Handle<JSFunction> constant_function() const {
+    ASSERT(is_constant_call());
     return constant_function_;
   }
 
@@ -861,32 +866,32 @@
     return is_simple_api_call_;
   }
 
-  FunctionTemplateInfo* expected_receiver_type() const {
-    ASSERT(is_simple_api_call_);
+  Handle<FunctionTemplateInfo> expected_receiver_type() const {
+    ASSERT(is_simple_api_call());
     return expected_receiver_type_;
   }
 
-  CallHandlerInfo* api_call_info() const {
-    ASSERT(is_simple_api_call_);
+  Handle<CallHandlerInfo> api_call_info() const {
+    ASSERT(is_simple_api_call());
     return api_call_info_;
   }
 
   // Returns the depth, within the prototype chain between the two
   // arguments, of the object that has the expected receiver type.
-  int GetPrototypeDepthOfExpectedType(JSObject* object,
-                                      JSObject* holder) const;
+  int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
+                                      Handle<JSObject> holder) const;
 
  private:
-  void Initialize(JSFunction* function);
+  void Initialize(Handle<JSFunction> function);
 
   // Determines whether the given function can be called using the
   // fast api call builtin.
-  void AnalyzePossibleApiFunction(JSFunction* function);
+  void AnalyzePossibleApiFunction(Handle<JSFunction> function);
 
-  JSFunction* constant_function_;
+  Handle<JSFunction> constant_function_;
   bool is_simple_api_call_;
-  FunctionTemplateInfo* expected_receiver_type_;
-  CallHandlerInfo* api_call_info_;
+  Handle<FunctionTemplateInfo> expected_receiver_type_;
+  Handle<CallHandlerInfo> api_call_info_;
 };
 
 
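A second cross-cutting change in this header: each stub compiler now receives its Isolate explicitly at construction instead of reaching for a thread-local one via a HandleScope member, and StrictModeFlag/KeyedAccessGrowMode travel with it. A hedged sketch of the new call-site shape, using the constructors declared above (the wrapper function itself is illustrative):

// Illustrative wrapper: constructing a handlified store stub compiler.
Handle<Code> CompileFieldStore(Isolate* isolate,
                               Handle<JSObject> object,
                               int index,
                               Handle<Map> transition,
                               Handle<String> name) {
  // The isolate and the strict-mode flag are passed in explicitly; the
  // compiler no longer owns a HandleScope of its own.
  StoreStubCompiler compiler(isolate, kNonStrictMode);
  return compiler.CompileStoreField(object, index, transition, name);
}
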
diff --git a/src/token.h b/src/token.h
index eb825c1..3036e55 100644
--- a/src/token.h
+++ b/src/token.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -73,6 +73,7 @@
   T(INIT_VAR, "=init_var", 2)  /* AST-use only. */                      \
   T(INIT_LET, "=init_let", 2)  /* AST-use only. */                      \
   T(INIT_CONST, "=init_const", 2)  /* AST-use only. */                  \
+  T(INIT_CONST_HARMONY, "=init_const_harmony", 2)  /* AST-use only. */  \
   T(ASSIGN, "=", 2)                                                     \
   T(ASSIGN_BIT_OR, "|=", 2)                                             \
   T(ASSIGN_BIT_XOR, "^=", 2)                                            \
@@ -169,6 +170,8 @@
   T(FUTURE_RESERVED_WORD, NULL, 0)                                      \
   T(FUTURE_STRICT_RESERVED_WORD, NULL, 0)                               \
   K(CONST, "const", 0)                                                  \
+  K(EXPORT, "export", 0)                                                \
+  K(IMPORT, "import", 0)                                                \
   K(LET, "let", 0)                                                      \
                                                                         \
   /* Illegal token - not able to scan. */                               \
@@ -212,10 +215,14 @@
     return EQ <= op && op <= IN;
   }
 
-  static bool IsOrderedCompareOp(Value op) {
+  static bool IsOrderedRelationalCompareOp(Value op) {
     return op == LT || op == LTE || op == GT || op == GTE;
   }
 
+  static bool IsEqualityOp(Value op) {
+    return op == EQ || op == EQ_STRICT;
+  }
+
   static Value NegateCompareOp(Value op) {
     ASSERT(IsCompareOp(op));
     switch (op) {
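
The renamed IsOrderedRelationalCompareOp and the new IsEqualityOp split the comparison tokens along the axis the compiler cares about. A small hedged usage sketch:

// Sketch: classifying a comparison token with the predicates above.
Token::Value op = Token::LT;
if (Token::IsEqualityOp(op)) {
  // Covers == and ===.
} else if (Token::IsOrderedRelationalCompareOp(op)) {
  // Covers <, <=, > and >=.
}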
diff --git a/src/type-info.cc b/src/type-info.cc
index 4df7ece..159be6a 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -60,8 +60,10 @@
 
 
 TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
-                                       Handle<Context> global_context) {
+                                       Handle<Context> global_context,
+                                       Isolate* isolate) {
   global_context_ = global_context;
+  isolate_ = isolate;
   BuildDictionary(code);
   ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
 }
@@ -71,29 +73,41 @@
   int entry = dictionary_->FindEntry(ast_id);
   return entry != UnseededNumberDictionary::kNotFound
       ? Handle<Object>(dictionary_->ValueAt(entry))
-      : Isolate::Current()->factory()->undefined_value();
+      : Handle<Object>::cast(isolate_->factory()->undefined_value());
+}
+
+
+bool TypeFeedbackOracle::LoadIsUninitialized(Property* expr) {
+  Handle<Object> map_or_code = GetInfo(expr->id());
+  if (map_or_code->IsMap()) return false;
+  if (map_or_code->IsCode()) {
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
+    return code->is_inline_cache_stub() && code->ic_state() == UNINITIALIZED;
+  }
+  return false;
 }
 
 
 bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
     return code->is_keyed_load_stub() &&
         code->ic_state() == MONOMORPHIC &&
         Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
-        code->FindFirstMap() != NULL;
+        code->FindFirstMap() != NULL &&
+        !CanRetainOtherContext(code->FindFirstMap(), *global_context_);
   }
   return false;
 }
 
 
 bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
-    Builtins* builtins = Isolate::Current()->builtins();
+    Builtins* builtins = isolate_->builtins();
     return code->is_keyed_load_stub() &&
         *code != builtins->builtin(Builtins::kKeyedLoadIC_Generic) &&
         code->ic_state() == MEGAMORPHIC;
@@ -103,24 +117,34 @@
 
 
 bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
+    bool allow_growth =
+        Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
+        ALLOW_JSARRAY_GROWTH;
     return code->is_keyed_store_stub() &&
+        !allow_growth &&
         code->ic_state() == MONOMORPHIC &&
-        Code::ExtractTypeFromFlags(code->flags()) == NORMAL;
+        Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
+        code->FindFirstMap() != NULL &&
+        !CanRetainOtherContext(code->FindFirstMap(), *global_context_);
   }
   return false;
 }
 
 
 bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
-    Builtins* builtins = Isolate::Current()->builtins();
+    Builtins* builtins = isolate_->builtins();
+    bool allow_growth =
+        Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
+        ALLOW_JSARRAY_GROWTH;
     return code->is_keyed_store_stub() &&
+        !allow_growth &&
         *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
         *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) &&
         code->ic_state() == MEGAMORPHIC;
@@ -131,18 +155,40 @@
 
 bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
   Handle<Object> value = GetInfo(expr->id());
-  return value->IsMap() || value->IsSmi();
+  return value->IsMap() || value->IsSmi() || value->IsJSFunction();
+}
+
+
+bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
+  Handle<Object> value = GetInfo(expr->id());
+  return value->IsJSFunction();
+}
+
+
+bool TypeFeedbackOracle::ObjectLiteralStoreIsMonomorphic(
+    ObjectLiteral::Property* prop) {
+  Handle<Object> map_or_code = GetInfo(prop->key()->id());
+  return map_or_code->IsMap();
+}
+
+
+bool TypeFeedbackOracle::IsForInFastCase(ForInStatement* stmt) {
+  Handle<Object> value = GetInfo(stmt->PrepareId());
+  return value->IsSmi() &&
+      Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker;
 }
 
 
 Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
   ASSERT(LoadIsMonomorphicNormal(expr));
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
     Map* first_map = code->FindFirstMap();
     ASSERT(first_map != NULL);
-    return Handle<Map>(first_map);
+    return CanRetainOtherContext(first_map, *global_context_)
+        ? Handle<Map>::null()
+        : Handle<Map>(first_map);
   }
   return Handle<Map>::cast(map_or_code);
 }
@@ -150,10 +196,14 @@
 
 Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
   ASSERT(StoreIsMonomorphicNormal(expr));
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
-    return Handle<Map>(code->FindFirstMap());
+    Map* first_map = code->FindFirstMap();
+    ASSERT(first_map != NULL);
+    return CanRetainOtherContext(first_map, *global_context_)
+        ? Handle<Map>::null()
+        : Handle<Map>(first_map);
   }
   return Handle<Map>::cast(map_or_code);
 }
@@ -203,6 +253,7 @@
   return check;
 }
 
+
 Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
     CheckType check) {
   JSFunction* function = NULL;
@@ -225,9 +276,26 @@
 }
 
 
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
+  return Handle<JSFunction>::cast(GetInfo(expr->id()));
+}
+
+
+Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
+  return Handle<JSFunction>::cast(GetInfo(expr->id()));
+}
+
+
+Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap(
+    ObjectLiteral::Property* prop) {
+  ASSERT(ObjectLiteralStoreIsMonomorphic(prop));
+  return Handle<Map>::cast(GetInfo(prop->key()->id()));
+}
+
+
 bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
   return *GetInfo(expr->id()) ==
-      Isolate::Current()->builtins()->builtin(id);
+      isolate_->builtins()->builtin(id);
 }
 
 
@@ -251,6 +319,7 @@
     case CompareIC::STRINGS:
       return TypeInfo::String();
     case CompareIC::OBJECTS:
+    case CompareIC::KNOWN_OBJECTS:
       // TODO(kasperl): We really need a type for JS objects here.
       return TypeInfo::NonPrimitive();
     case CompareIC::GENERIC:
@@ -270,6 +339,23 @@
 }
 
 
+Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
+  Handle<Object> object = GetInfo(expr->id());
+  if (!object->IsCode()) return Handle<Map>::null();
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (!code->is_compare_ic_stub()) return Handle<Map>::null();
+  CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+  if (state != CompareIC::KNOWN_OBJECTS) {
+    return Handle<Map>::null();
+  }
+  Map* first_map = code->FindFirstMap();
+  ASSERT(first_map != NULL);
+  return CanRetainOtherContext(first_map, *global_context_)
+      ? Handle<Map>::null()
+      : Handle<Map>(first_map);
+}
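+
+// A hedged note on the accessor above: GetCompareMap deliberately yields a
+// map only for the new KNOWN_OBJECTS compare state, with the same
+// cross-context filter as the load/store paths; every other state degrades
+// to a null handle. A sketch of a compiler-side caller (oracle and expr
+// assumed in scope; not verbatim V8 code):
+//
+//   Handle<Map> map = oracle->GetCompareMap(expr);
+//   if (!map.is_null()) {
+//     // Both operands were only ever JS objects with this map, so a map
+//     // check plus a raw pointer comparison can replace the generic stub.
+//   } else {
+//     // Fall back to the CompareIC / generic comparison path.
+//   }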
+
+
 TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
   Handle<Object> object = GetInfo(expr->id());
   TypeInfo unknown = TypeInfo::Unknown();
@@ -307,6 +393,10 @@
       case BinaryOpIC::SMI:
         switch (result_type) {
           case BinaryOpIC::UNINITIALIZED:
+            if (expr->op() == Token::DIV) {
+              return TypeInfo::Double();
+            }
+            return TypeInfo::Smi();
           case BinaryOpIC::SMI:
             return TypeInfo::Smi();
           case BinaryOpIC::INT32:
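
A hedged reading of the DIV special case above: when the operands were Smis
but no result type has been recorded yet, Smi division commonly leaves the
integer range (3 / 2 == 1.5), so the estimate is widened to Double up front
while the other operators keep the Smi estimate. The same decision, factored
out as a sketch (not actual V8 code):

    static TypeInfo ResultForUninitializedSmiInput(Token::Value op) {
      // Division is the one operator pessimized to Double before it runs.
      return op == Token::DIV ? TypeInfo::Double() : TypeInfo::Smi();
    }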
@@ -352,9 +442,14 @@
       return unknown;
     case CompareIC::SMIS:
       return TypeInfo::Smi();
+    case CompareIC::STRINGS:
+      return TypeInfo::String();
+    case CompareIC::SYMBOLS:
+      return TypeInfo::Symbol();
     case CompareIC::HEAP_NUMBERS:
       return TypeInfo::Number();
     case CompareIC::OBJECTS:
+    case CompareIC::KNOWN_OBJECTS:
       // TODO(kasperl): We really need a type for JS objects here.
       return TypeInfo::NonPrimitive();
     case CompareIC::GENERIC:
@@ -397,24 +492,70 @@
                                               Handle<String> name,
                                               Code::Flags flags,
                                               SmallMapList* types) {
-  Isolate* isolate = Isolate::Current();
   Handle<Object> object = GetInfo(ast_id);
   if (object->IsUndefined() || object->IsSmi()) return;
 
-  if (*object == isolate->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
+  if (*object ==
+      isolate_->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
     // TODO(fschneider): We could collect the maps and signal that
     // we need a generic store (or load) here.
     ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
   } else if (object->IsMap()) {
     types->Add(Handle<Map>::cast(object));
-  } else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
+  } else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
+      Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
     types->Reserve(4);
     ASSERT(object->IsCode());
-    isolate->stub_cache()->CollectMatchingMaps(types, *name, flags);
+    isolate_->stub_cache()->CollectMatchingMaps(types,
+                                                *name,
+                                                flags,
+                                                global_context_);
   }
 }
 
 
+// Check if a map originates from a given global context. We use this
+// information to filter out maps from different contexts to avoid
+// retaining objects from different tabs in Chrome via optimized code.
+bool TypeFeedbackOracle::CanRetainOtherContext(Map* map,
+                                               Context* global_context) {
+  Object* constructor = NULL;
+  while (!map->prototype()->IsNull()) {
+    constructor = map->constructor();
+    if (!constructor->IsNull()) {
+      // If the constructor is neither null nor a JSFunction, we have to
+      // conservatively assume that it may retain a foreign global context.
+      if (!constructor->IsJSFunction()) return true;
+      // Check if the constructor directly references a foreign context.
+      if (CanRetainOtherContext(JSFunction::cast(constructor),
+                                global_context)) {
+        return true;
+      }
+    }
+    map = HeapObject::cast(map->prototype())->map();
+  }
+  constructor = map->constructor();
+  if (constructor->IsNull()) return false;
+  JSFunction* function = JSFunction::cast(constructor);
+  return CanRetainOtherContext(function, global_context);
+}
+
+
+bool TypeFeedbackOracle::CanRetainOtherContext(JSFunction* function,
+                                               Context* global_context) {
+  return function->context()->global() != global_context->global()
+      && function->context()->global() != global_context->builtins();
+}
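+
+// What the two CanRetainOtherContext overloads guard against, sketched with
+// hypothetical stand-in types (not the V8 API): optimized code compiled for
+// one page must not embed a map or function whose constructor chain belongs
+// to another page's global context, or closing that page could not release
+// its heap. Simplified model; the real walk additionally treats any
+// non-JSFunction constructor as potentially retaining:
+//
+//   struct GlobalObj {};
+//   struct Ctx { GlobalObj* global; GlobalObj* builtins; };  // ~ Context
+//   struct Fn  { Ctx* ctx; };                                // ~ JSFunction
+//   struct Mp  { Mp* prototype_map; Fn* constructor; };      // ~ Map
+//
+//   static bool MayRetainOther(const Mp* map, const Ctx* home) {
+//     for (const Mp* m = map; m != NULL; m = m->prototype_map) {
+//       const Fn* ctor = m->constructor;
+//       if (ctor != NULL &&
+//           ctor->ctx->global != home->global &&
+//           ctor->ctx->global != home->builtins) {
+//         return true;  // constructor lives in a foreign context
+//       }
+//     }
+//     return false;
+//   }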
+
+
+static void AddMapIfMissing(Handle<Map> map, SmallMapList* list) {
+  for (int i = 0; i < list->length(); ++i) {
+    if (list->at(i).is_identical_to(map)) return;
+  }
+  list->Add(map);
+}
+
+
 void TypeFeedbackOracle::CollectKeyedReceiverTypes(unsigned ast_id,
                                                    SmallMapList* types) {
   Handle<Object> object = GetInfo(ast_id);
@@ -428,7 +569,10 @@
       RelocInfo* info = it.rinfo();
       Object* object = info->target_object();
       if (object->IsMap()) {
-        types->Add(Handle<Map>(Map::cast(object)));
+        Map* map = Map::cast(object);
+        if (!CanRetainOtherContext(map, *global_context_)) {
+          AddMapIfMissing(Handle<Map>(map), types);
+        }
       }
     }
   }
@@ -452,6 +596,7 @@
   GetRelocInfos(code, &infos);
   CreateDictionary(code, &infos);
   ProcessRelocInfos(&infos);
+  ProcessTypeFeedbackCells(code);
   // Allocate handle in the parent scope.
   dictionary_ = scope.CloseAndEscape(dictionary_);
 }
@@ -469,8 +614,13 @@
 void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
                                           ZoneList<RelocInfo>* infos) {
   DisableAssertNoAllocation allocation_allowed;
+  int cell_count = code->type_feedback_info()->IsTypeFeedbackInfo()
+      ? TypeFeedbackInfo::cast(code->type_feedback_info())->
+          type_feedback_cells()->CellCount()
+      : 0;
+  int length = infos->length() + cell_count;
   byte* old_start = code->instruction_start();
-  dictionary_ = FACTORY->NewUnseededNumberDictionary(infos->length());
+  dictionary_ = FACTORY->NewUnseededNumberDictionary(length);
   byte* new_start = code->instruction_start();
   RelocateRelocInfos(infos, old_start, new_start);
 }
@@ -488,49 +638,69 @@
 
 void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
   for (int i = 0; i < infos->length(); i++) {
+    RelocInfo reloc_entry = (*infos)[i];
+    Address target_address = reloc_entry.target_address();
     unsigned ast_id = static_cast<unsigned>((*infos)[i].data());
-    Code* target = Code::GetCodeFromTargetAddress((*infos)[i].target_address());
-    ProcessTarget(ast_id, target);
+    Code* target = Code::GetCodeFromTargetAddress(target_address);
+    switch (target->kind()) {
+      case Code::LOAD_IC:
+      case Code::STORE_IC:
+      case Code::CALL_IC:
+      case Code::KEYED_CALL_IC:
+        if (target->ic_state() == MONOMORPHIC) {
+          if (target->kind() == Code::CALL_IC &&
+              target->check_type() != RECEIVER_MAP_CHECK) {
+            SetInfo(ast_id, Smi::FromInt(target->check_type()));
+          } else {
+            Object* map = target->FindFirstMap();
+            if (map == NULL) {
+              SetInfo(ast_id, static_cast<Object*>(target));
+            } else if (!CanRetainOtherContext(Map::cast(map),
+                                              *global_context_)) {
+              SetInfo(ast_id, map);
+            }
+          }
+        } else {
+          SetInfo(ast_id, target);
+        }
+        break;
+
+      case Code::KEYED_LOAD_IC:
+      case Code::KEYED_STORE_IC:
+        if (target->ic_state() == MONOMORPHIC ||
+            target->ic_state() == MEGAMORPHIC) {
+          SetInfo(ast_id, target);
+        }
+        break;
+
+      case Code::UNARY_OP_IC:
+      case Code::BINARY_OP_IC:
+      case Code::COMPARE_IC:
+      case Code::TO_BOOLEAN_IC:
+        SetInfo(ast_id, target);
+        break;
+
+      default:
+        break;
+    }
   }
 }
 
 
-void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) {
-  switch (target->kind()) {
-    case Code::LOAD_IC:
-    case Code::STORE_IC:
-    case Code::CALL_IC:
-    case Code::KEYED_CALL_IC:
-      if (target->ic_state() == MONOMORPHIC) {
-        if (target->kind() == Code::CALL_IC &&
-            target->check_type() != RECEIVER_MAP_CHECK) {
-          SetInfo(ast_id,  Smi::FromInt(target->check_type()));
-        } else {
-          Object* map = target->FindFirstMap();
-          SetInfo(ast_id, map == NULL ? static_cast<Object*>(target) : map);
-        }
-      } else if (target->ic_state() == MEGAMORPHIC) {
-        SetInfo(ast_id, target);
-      }
-      break;
-
-    case Code::KEYED_LOAD_IC:
-    case Code::KEYED_STORE_IC:
-      if (target->ic_state() == MONOMORPHIC ||
-          target->ic_state() == MEGAMORPHIC) {
-        SetInfo(ast_id, target);
-      }
-      break;
-
-    case Code::UNARY_OP_IC:
-    case Code::BINARY_OP_IC:
-    case Code::COMPARE_IC:
-    case Code::TO_BOOLEAN_IC:
-      SetInfo(ast_id, target);
-      break;
-
-    default:
-      break;
+void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
+  Object* raw_info = code->type_feedback_info();
+  if (!raw_info->IsTypeFeedbackInfo()) return;
+  Handle<TypeFeedbackCells> cache(
+      TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
+  for (int i = 0; i < cache->CellCount(); i++) {
+    unsigned ast_id = cache->AstId(i)->value();
+    Object* value = cache->Cell(i)->value();
+    if (value->IsSmi() ||
+        (value->IsJSFunction() &&
+         !CanRetainOtherContext(JSFunction::cast(value),
+                                *global_context_))) {
+      SetInfo(ast_id, value);
+    }
   }
 }
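
Together with the reloc-info pass above, this gives the oracle two feedback
sources: IC targets reached through relocation entries, and values parked in
per-AST-node feedback cells (Smis such as the for-in marker, and JSFunction
call targets, both filtered by context). A hedged sketch of the consumer
side, using only accessors declared in this patch (oracle and expr assumed
in scope; not verbatim V8 code):

    if (oracle->CallNewIsMonomorphic(expr)) {   // cell held a JSFunction
      Handle<JSFunction> target = oracle->GetCallNewTarget(expr);
      // ... compile the `new` site as a direct construct call on target ...
    }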
 
diff --git a/src/type-info.h b/src/type-info.h
index a031740..d461331 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 #define V8_TYPE_INFO_H_
 
 #include "allocation.h"
+#include "ast.h"
 #include "globals.h"
 #include "zone-inl.h"
 
@@ -64,6 +65,8 @@
   static TypeInfo Integer32() { return TypeInfo(kInteger32); }
   // We know it's a Smi.
   static TypeInfo Smi() { return TypeInfo(kSmi); }
+  // We know it's a Symbol.
+  static TypeInfo Symbol() { return TypeInfo(kSymbol); }
   // We know it's a heap number.
   static TypeInfo Double() { return TypeInfo(kDouble); }
   // We know it's a string.
@@ -137,6 +140,16 @@
     return ((type_ & kSmi) == kSmi);
   }
 
+  inline bool IsSymbol() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kSymbol) == kSymbol);
+  }
+
+  inline bool IsNonSymbol() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kSymbol) == kString);
+  }
+
   inline bool IsInteger32() {
     ASSERT(type_ != kUninitialized);
     return ((type_ & kInteger32) == kInteger32);
@@ -168,6 +181,7 @@
       case kNumber: return "Number";
       case kInteger32: return "Integer32";
       case kSmi: return "Smi";
+      case kSymbol: return "Symbol";
       case kDouble: return "Double";
       case kString: return "String";
       case kNonPrimitive: return "Object";
@@ -186,6 +200,7 @@
     kSmi = 0x17,           // 0010111
     kDouble = 0x19,        // 0011001
     kString = 0x30,        // 0110000
+    kSymbol = 0x32,        // 0110010
     kNonPrimitive = 0x40,  // 1000000
     kUninitialized = 0x7f  // 1111111
   };
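+
+  // The enum above is a bit lattice: each type's mask is a superset of its
+  // supertypes' bits, so a subtype test is a single mask-and-compare.
+  // Worked out for the two string types, using the values listed above:
+  //
+  //   kString = 0x30 (0110000), kSymbol = 0x32 (0110010) = kString + 1 bit:
+  //     IsString():    (type_ & 0x30) == 0x30  // true for kString, kSymbol
+  //     IsSymbol():    (type_ & 0x32) == 0x32  // symbol bit must be set
+  //     IsNonSymbol(): (type_ & 0x32) == 0x30  // string with the symbol
+  //                                            // bit known to be clear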
@@ -205,24 +220,34 @@
 class Assignment;
 class BinaryOperation;
 class Call;
+class CallNew;
 class CaseClause;
 class CompareOperation;
 class CompilationInfo;
 class CountOperation;
+class Expression;
 class Property;
 class SmallMapList;
 class UnaryOperation;
+class ForInStatement;
 
 
 class TypeFeedbackOracle BASE_EMBEDDED {
  public:
-  TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
+  TypeFeedbackOracle(Handle<Code> code,
+                     Handle<Context> global_context,
+                     Isolate* isolate);
 
   bool LoadIsMonomorphicNormal(Property* expr);
+  bool LoadIsUninitialized(Property* expr);
   bool LoadIsMegamorphicWithTypeInfo(Property* expr);
   bool StoreIsMonomorphicNormal(Expression* expr);
   bool StoreIsMegamorphicWithTypeInfo(Expression* expr);
   bool CallIsMonomorphic(Call* expr);
+  bool CallNewIsMonomorphic(CallNew* expr);
+  bool ObjectLiteralStoreIsMonomorphic(ObjectLiteral::Property* prop);
+
+  bool IsForInFastCase(ForInStatement* stmt);
 
   Handle<Map> LoadMonomorphicReceiverType(Property* expr);
   Handle<Map> StoreMonomorphicReceiverType(Expression* expr);
@@ -240,9 +265,18 @@
   void CollectKeyedReceiverTypes(unsigned ast_id,
                                  SmallMapList* types);
 
+  static bool CanRetainOtherContext(Map* map, Context* global_context);
+  static bool CanRetainOtherContext(JSFunction* function,
+                                    Context* global_context);
+
   CheckType GetCallCheckType(Call* expr);
   Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
 
+  Handle<JSFunction> GetCallTarget(Call* expr);
+  Handle<JSFunction> GetCallNewTarget(CallNew* expr);
+
+  Handle<Map> GetObjectLiteralStoreMap(ObjectLiteral::Property* prop);
+
   bool LoadIsBuiltin(Property* expr, Builtins::Name id);
 
   // TODO(1571) We can't use ToBooleanStub::Types as the return value because
@@ -255,6 +289,7 @@
   TypeInfo BinaryType(BinaryOperation* expr);
   TypeInfo CompareType(CompareOperation* expr);
   bool IsSymbolCompare(CompareOperation* expr);
+  Handle<Map> GetCompareMap(CompareOperation* expr);
   TypeInfo SwitchType(CaseClause* clause);
   TypeInfo IncrementType(CountOperation* expr);
 
@@ -273,13 +308,14 @@
                           byte* old_start,
                           byte* new_start);
   void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
-  void ProcessTarget(unsigned ast_id, Code* target);
+  void ProcessTypeFeedbackCells(Handle<Code> code);
 
   // Returns an element from the backing store. Returns undefined if
   // there is no information.
   Handle<Object> GetInfo(unsigned ast_id);
 
   Handle<Context> global_context_;
+  Isolate* isolate_;
   Handle<UnseededNumberDictionary> dictionary_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
diff --git a/src/unicode-inl.h b/src/unicode-inl.h
index c0649d7..9c0ebf9 100644
--- a/src/unicode-inl.h
+++ b/src/unicode-inl.h
@@ -78,7 +78,7 @@
 }
 
 
-unsigned Utf8::Encode(char* str, uchar c) {
+unsigned Utf8::Encode(char* str, uchar c, int previous) {
   static const int kMask = ~(1 << 6);
   if (c <= kMaxOneByteChar) {
     str[0] = c;
@@ -88,6 +88,13 @@
     str[1] = 0x80 | (c & kMask);
     return 2;
   } else if (c <= kMaxThreeByteChar) {
+    if (Utf16::IsTrailSurrogate(c) &&
+        Utf16::IsLeadSurrogate(previous)) {
+      const int kUnmatchedSize = kSizeOfUnmatchedSurrogate;
+      return Encode(str - kUnmatchedSize,
+                    Utf16::CombineSurrogatePair(previous, c),
+                    Utf16::kNoPreviousCharacter) - kUnmatchedSize;
+    }
     str[0] = 0xE0 | (c >> 12);
     str[1] = 0x80 | ((c >> 6) & kMask);
     str[2] = 0x80 | (c & kMask);
@@ -113,12 +120,16 @@
   return CalculateValue(bytes, length, cursor);
 }
 
-unsigned Utf8::Length(uchar c) {
+unsigned Utf8::Length(uchar c, int previous) {
   if (c <= kMaxOneByteChar) {
     return 1;
   } else if (c <= kMaxTwoByteChar) {
     return 2;
   } else if (c <= kMaxThreeByteChar) {
+    if (Utf16::IsTrailSurrogate(c) &&
+        Utf16::IsLeadSurrogate(previous)) {
+      return kSizeOfUnmatchedSurrogate - kBytesSavedByCombiningSurrogates;
+    }
     return 3;
   } else {
     return 4;
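
The arithmetic in the Encode and Length hunks is easiest to check with a
concrete pair. Assuming the constants this patch defines in unicode.h
(kSizeOfUnmatchedSurrogate == 3, kBytesSavedByCombiningSurrogates == 2),
encoding U+1D11E (lead surrogate 0xD834, trail 0xDD1E) one code unit at a
time gives:

    // Encode(str,     0xD834, Utf16::kNoPreviousCharacter) writes the lone
    //   lead surrogate as an unmatched 3-byte sequence and returns 3.
    // Encode(str + 3, 0xDD1E, 0xD834) detects the pair, backs up over the
    //   3 unmatched bytes, writes the combined 4-byte sequence F0 9D 84 9E
    //   for U+1D11E, and returns 4 - 3 == 1.
    // Total: 3 + 1 == 4 bytes, and Length() predicts the same:
    //   Length(0xD834, Utf16::kNoPreviousCharacter) == 3
    //   Length(0xDD1E, 0xD834)                      == 3 - 2 == 1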
diff --git a/src/unicode.cc b/src/unicode.cc
index 6e0ac1a..14f3806 100644
--- a/src/unicode.cc
+++ b/src/unicode.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
-// This file was generated at 2011-01-03 10:57:02.088925
+// This file was generated at 2012-03-06 09:55:58.934483
 
 #include "unicode-inl.h"
 #include <stdlib.h>
@@ -210,7 +210,7 @@
 uchar Utf8::CalculateValue(const byte* str,
                            unsigned length,
                            unsigned* cursor) {
-  // We only get called for non-ascii characters.
+  // We only get called for non-ASCII characters.
   if (length == 1) {
     *cursor += 1;
     return kBadChar;
@@ -276,6 +276,7 @@
   return kBadChar;
 }
 
+
 const byte* Utf8::ReadBlock(Buffer<const char*> str, byte* buffer,
     unsigned capacity, unsigned* chars_read_ptr, unsigned* offset_ptr) {
   unsigned offset = *offset_ptr;
@@ -286,8 +287,8 @@
   }
   const byte* data = reinterpret_cast<const byte*>(str.data());
   if (data[offset] <= kMaxOneByteChar) {
-    // The next character is an ascii char so we scan forward over
-    // the following ascii characters and return the next pure ascii
+    // The next character is an ASCII char so we scan forward over
+    // the following ASCII characters and return the next pure ASCII
     // substring
     const byte* result = data + offset;
     offset++;
@@ -297,13 +298,13 @@
     *offset_ptr = offset;
     return result;
   } else {
-    // The next character is non-ascii so we just fill the buffer
+    // The next character is non-ASCII so we just fill the buffer
     unsigned cursor = 0;
     unsigned chars_read = 0;
     while (offset < str.length()) {
       uchar c = data[offset];
       if (c <= kMaxOneByteChar) {
-        // Fast case for ascii characters
+        // Fast case for ASCII characters
         if (!CharacterStream::EncodeAsciiCharacter(c,
                                                    buffer,
                                                    capacity,
@@ -338,6 +339,16 @@
   return result;
 }
 
+unsigned CharacterStream::Utf16Length() {
+  unsigned result = 0;
+  while (has_more()) {
+    uchar c = GetNext();
+    result += c > Utf16::kMaxNonSurrogateCharCode ? 2 : 1;
+  }
+  Rewind();
+  return result;
+}
+
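+// Utf16Length() counts UTF-16 code units, not characters: anything above
+// Utf16::kMaxNonSurrogateCharCode (0xFFFF) costs a surrogate pair. A hedged
+// example:
+//
+//   // For a stream holding 'A', U+1D11E, 'B' the loop sees three
+//   // characters and returns 1 + 2 + 1 == 4 code units; Rewind() then
+//   // restores the stream so the caller can re-read it from the start.
+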
 void CharacterStream::Seek(unsigned position) {
   Rewind();
   for (unsigned i = 0; i < position; i++) {
@@ -347,8 +358,8 @@
 
 // Uppercase:            point.category == 'Lu'
 
-static const uint16_t kUppercaseTable0Size = 430;
-static const int32_t kUppercaseTable0[430] = {
+static const uint16_t kUppercaseTable0Size = 450;
+static const int32_t kUppercaseTable0[450] = {
   1073741889, 90, 1073742016, 214, 1073742040, 222, 256, 258,  // NOLINT
   260, 262, 264, 266, 268, 270, 272, 274,  // NOLINT
   276, 278, 280, 282, 284, 286, 288, 290,  // NOLINT
@@ -369,22 +380,24 @@
   530, 532, 534, 536, 538, 540, 542, 544,  // NOLINT
   546, 548, 550, 552, 554, 556, 558, 560,  // NOLINT
   562, 1073742394, 571, 1073742397, 574, 577, 1073742403, 582,  // NOLINT
-  584, 586, 588, 590, 902, 1073742728, 906, 908,  // NOLINT
-  1073742734, 911, 1073742737, 929, 1073742755, 939, 1073742802, 980,  // NOLINT
-  984, 986, 988, 990, 992, 994, 996, 998,  // NOLINT
-  1000, 1002, 1004, 1006, 1012, 1015, 1073742841, 1018,  // NOLINT
-  1073742845, 1071, 1120, 1122, 1124, 1126, 1128, 1130,  // NOLINT
-  1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146,  // NOLINT
-  1148, 1150, 1152, 1162, 1164, 1166, 1168, 1170,  // NOLINT
-  1172, 1174, 1176, 1178, 1180, 1182, 1184, 1186,  // NOLINT
-  1188, 1190, 1192, 1194, 1196, 1198, 1200, 1202,  // NOLINT
-  1204, 1206, 1208, 1210, 1212, 1214, 1073743040, 1217,  // NOLINT
-  1219, 1221, 1223, 1225, 1227, 1229, 1232, 1234,  // NOLINT
-  1236, 1238, 1240, 1242, 1244, 1246, 1248, 1250,  // NOLINT
-  1252, 1254, 1256, 1258, 1260, 1262, 1264, 1266,  // NOLINT
-  1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282,  // NOLINT
-  1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298,  // NOLINT
-  1073743153, 1366, 1073746080, 4293, 7680, 7682, 7684, 7686,  // NOLINT
+  584, 586, 588, 590, 880, 882, 886, 902,  // NOLINT
+  1073742728, 906, 908, 1073742734, 911, 1073742737, 929, 1073742755,  // NOLINT
+  939, 975, 1073742802, 980, 984, 986, 988, 990,  // NOLINT
+  992, 994, 996, 998, 1000, 1002, 1004, 1006,  // NOLINT
+  1012, 1015, 1073742841, 1018, 1073742845, 1071, 1120, 1122,  // NOLINT
+  1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138,  // NOLINT
+  1140, 1142, 1144, 1146, 1148, 1150, 1152, 1162,  // NOLINT
+  1164, 1166, 1168, 1170, 1172, 1174, 1176, 1178,  // NOLINT
+  1180, 1182, 1184, 1186, 1188, 1190, 1192, 1194,  // NOLINT
+  1196, 1198, 1200, 1202, 1204, 1206, 1208, 1210,  // NOLINT
+  1212, 1214, 1073743040, 1217, 1219, 1221, 1223, 1225,  // NOLINT
+  1227, 1229, 1232, 1234, 1236, 1238, 1240, 1242,  // NOLINT
+  1244, 1246, 1248, 1250, 1252, 1254, 1256, 1258,  // NOLINT
+  1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274,  // NOLINT
+  1276, 1278, 1280, 1282, 1284, 1286, 1288, 1290,  // NOLINT
+  1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306,  // NOLINT
+  1308, 1310, 1312, 1314, 1316, 1318, 1073743153, 1366,  // NOLINT
+  1073746080, 4293, 4295, 4301, 7680, 7682, 7684, 7686,  // NOLINT
   7688, 7690, 7692, 7694, 7696, 7698, 7700, 7702,  // NOLINT
   7704, 7706, 7708, 7710, 7712, 7714, 7716, 7718,  // NOLINT
   7720, 7722, 7724, 7726, 7728, 7730, 7732, 7734,  // NOLINT
@@ -393,28 +406,44 @@
   7768, 7770, 7772, 7774, 7776, 7778, 7780, 7782,  // NOLINT
   7784, 7786, 7788, 7790, 7792, 7794, 7796, 7798,  // NOLINT
   7800, 7802, 7804, 7806, 7808, 7810, 7812, 7814,  // NOLINT
-  7816, 7818, 7820, 7822, 7824, 7826, 7828, 7840,  // NOLINT
-  7842, 7844, 7846, 7848, 7850, 7852, 7854, 7856,  // NOLINT
-  7858, 7860, 7862, 7864, 7866, 7868, 7870, 7872,  // NOLINT
-  7874, 7876, 7878, 7880, 7882, 7884, 7886, 7888,  // NOLINT
-  7890, 7892, 7894, 7896, 7898, 7900, 7902, 7904,  // NOLINT
-  7906, 7908, 7910, 7912, 7914, 7916, 7918, 7920,  // NOLINT
-  7922, 7924, 7926, 7928, 1073749768, 7951, 1073749784, 7965,  // NOLINT
-  1073749800, 7983, 1073749816, 7999, 1073749832, 8013, 8025, 8027,  // NOLINT
-  8029, 8031, 1073749864, 8047, 1073749944, 8123, 1073749960, 8139,  // NOLINT
-  1073749976, 8155, 1073749992, 8172, 1073750008, 8187 };  // NOLINT
-static const uint16_t kUppercaseTable1Size = 79;
-static const int32_t kUppercaseTable1[79] = {
+  7816, 7818, 7820, 7822, 7824, 7826, 7828, 7838,  // NOLINT
+  7840, 7842, 7844, 7846, 7848, 7850, 7852, 7854,  // NOLINT
+  7856, 7858, 7860, 7862, 7864, 7866, 7868, 7870,  // NOLINT
+  7872, 7874, 7876, 7878, 7880, 7882, 7884, 7886,  // NOLINT
+  7888, 7890, 7892, 7894, 7896, 7898, 7900, 7902,  // NOLINT
+  7904, 7906, 7908, 7910, 7912, 7914, 7916, 7918,  // NOLINT
+  7920, 7922, 7924, 7926, 7928, 7930, 7932, 7934,  // NOLINT
+  1073749768, 7951, 1073749784, 7965, 1073749800, 7983, 1073749816, 7999,  // NOLINT
+  1073749832, 8013, 8025, 8027, 8029, 8031, 1073749864, 8047,  // NOLINT
+  1073749944, 8123, 1073749960, 8139, 1073749976, 8155, 1073749992, 8172,  // NOLINT
+  1073750008, 8187 };  // NOLINT
+static const uint16_t kUppercaseTable1Size = 86;
+static const int32_t kUppercaseTable1[86] = {
   258, 263, 1073742091, 269, 1073742096, 274, 277, 1073742105,  // NOLINT
   285, 292, 294, 296, 1073742122, 301, 1073742128, 307,  // NOLINT
   1073742142, 319, 325, 387, 1073744896, 3118, 3168, 1073744994,  // NOLINT
-  3172, 3175, 3177, 3179, 3189, 3200, 3202, 3204,  // NOLINT
-  3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220,  // NOLINT
-  3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236,  // NOLINT
-  3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252,  // NOLINT
-  3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268,  // NOLINT
-  3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284,  // NOLINT
-  3286, 3288, 3290, 3292, 3294, 3296, 3298 };  // NOLINT
+  3172, 3175, 3177, 3179, 1073745005, 3184, 3186, 3189,  // NOLINT
+  1073745022, 3200, 3202, 3204, 3206, 3208, 3210, 3212,  // NOLINT
+  3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228,  // NOLINT
+  3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244,  // NOLINT
+  3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260,  // NOLINT
+  3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276,  // NOLINT
+  3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292,  // NOLINT
+  3294, 3296, 3298, 3307, 3309, 3314 };  // NOLINT
+static const uint16_t kUppercaseTable5Size = 91;
+static const int32_t kUppercaseTable5[91] = {
+  1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614,  // NOLINT
+  1616, 1618, 1620, 1622, 1624, 1626, 1628, 1630,  // NOLINT
+  1632, 1634, 1636, 1638, 1640, 1642, 1644, 1664,  // NOLINT
+  1666, 1668, 1670, 1672, 1674, 1676, 1678, 1680,  // NOLINT
+  1682, 1684, 1686, 1826, 1828, 1830, 1832, 1834,  // NOLINT
+  1836, 1838, 1842, 1844, 1846, 1848, 1850, 1852,  // NOLINT
+  1854, 1856, 1858, 1860, 1862, 1864, 1866, 1868,  // NOLINT
+  1870, 1872, 1874, 1876, 1878, 1880, 1882, 1884,  // NOLINT
+  1886, 1888, 1890, 1892, 1894, 1896, 1898, 1900,  // NOLINT
+  1902, 1913, 1915, 1073743741, 1918, 1920, 1922, 1924,  // NOLINT
+  1926, 1931, 1933, 1936, 1938, 1952, 1954, 1956,  // NOLINT
+  1958, 1960, 1962 };  // NOLINT
 static const uint16_t kUppercaseTable7Size = 2;
 static const int32_t kUppercaseTable7[2] = {
   1073749793, 7994 };  // NOLINT
@@ -427,6 +456,9 @@
     case 1: return LookupPredicate(kUppercaseTable1,
                                        kUppercaseTable1Size,
                                        c);
+    case 5: return LookupPredicate(kUppercaseTable5,
+                                       kUppercaseTable5Size,
+                                       c);
     case 7: return LookupPredicate(kUppercaseTable7,
                                        kUppercaseTable7Size,
                                        c);
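
The regenerated tables keep the existing encoding (assuming the
LookupPredicate scheme defined earlier in unicode.cc): each entry holds the
low 13 bits of a code point within the 8K chunk selected by c >> 13, and
bit 30 (1073741824) flags an entry that opens a range ending at the next
value. For example:

    // In kUppercaseTable0 (chunk 0):
    //   1073741889, 90  // 1073741889 - 1073741824 == 65: U+0041..U+005A
    //   256, 258, ...   // bare entries match single points U+0100, U+0102
    // In the new kUppercaseTable5 (chunk 5), the first entry 1600 means
    // code point 5 * 8192 + 1600 == 42560 (U+A640, Cyrillic capital
    // Zemlya).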
@@ -436,77 +468,93 @@
 
 // Lowercase:            point.category == 'Ll'
 
-static const uint16_t kLowercaseTable0Size = 449;
-static const int32_t kLowercaseTable0[449] = {
-  1073741921, 122, 170, 181, 186, 1073742047, 246, 1073742072,  // NOLINT
-  255, 257, 259, 261, 263, 265, 267, 269,  // NOLINT
-  271, 273, 275, 277, 279, 281, 283, 285,  // NOLINT
-  287, 289, 291, 293, 295, 297, 299, 301,  // NOLINT
-  303, 305, 307, 309, 1073742135, 312, 314, 316,  // NOLINT
-  318, 320, 322, 324, 326, 1073742152, 329, 331,  // NOLINT
-  333, 335, 337, 339, 341, 343, 345, 347,  // NOLINT
-  349, 351, 353, 355, 357, 359, 361, 363,  // NOLINT
-  365, 367, 369, 371, 373, 375, 378, 380,  // NOLINT
-  1073742206, 384, 387, 389, 392, 1073742220, 397, 402,  // NOLINT
-  405, 1073742233, 411, 414, 417, 419, 421, 424,  // NOLINT
-  1073742250, 427, 429, 432, 436, 438, 1073742265, 442,  // NOLINT
-  1073742269, 447, 454, 457, 460, 462, 464, 466,  // NOLINT
-  468, 470, 472, 474, 1073742300, 477, 479, 481,  // NOLINT
-  483, 485, 487, 489, 491, 493, 1073742319, 496,  // NOLINT
-  499, 501, 505, 507, 509, 511, 513, 515,  // NOLINT
-  517, 519, 521, 523, 525, 527, 529, 531,  // NOLINT
-  533, 535, 537, 539, 541, 543, 545, 547,  // NOLINT
-  549, 551, 553, 555, 557, 559, 561, 1073742387,  // NOLINT
-  569, 572, 1073742399, 576, 578, 583, 585, 587,  // NOLINT
-  589, 1073742415, 659, 1073742485, 687, 1073742715, 893, 912,  // NOLINT
-  1073742764, 974, 1073742800, 977, 1073742805, 983, 985, 987,  // NOLINT
-  989, 991, 993, 995, 997, 999, 1001, 1003,  // NOLINT
-  1005, 1073742831, 1011, 1013, 1016, 1073742843, 1020, 1073742896,  // NOLINT
-  1119, 1121, 1123, 1125, 1127, 1129, 1131, 1133,  // NOLINT
-  1135, 1137, 1139, 1141, 1143, 1145, 1147, 1149,  // NOLINT
-  1151, 1153, 1163, 1165, 1167, 1169, 1171, 1173,  // NOLINT
-  1175, 1177, 1179, 1181, 1183, 1185, 1187, 1189,  // NOLINT
-  1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205,  // NOLINT
-  1207, 1209, 1211, 1213, 1215, 1218, 1220, 1222,  // NOLINT
-  1224, 1226, 1228, 1073743054, 1231, 1233, 1235, 1237,  // NOLINT
-  1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253,  // NOLINT
-  1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269,  // NOLINT
-  1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285,  // NOLINT
-  1287, 1289, 1291, 1293, 1295, 1297, 1299, 1073743201,  // NOLINT
-  1415, 1073749248, 7467, 1073749346, 7543, 1073749369, 7578, 7681,  // NOLINT
-  7683, 7685, 7687, 7689, 7691, 7693, 7695, 7697,  // NOLINT
-  7699, 7701, 7703, 7705, 7707, 7709, 7711, 7713,  // NOLINT
-  7715, 7717, 7719, 7721, 7723, 7725, 7727, 7729,  // NOLINT
-  7731, 7733, 7735, 7737, 7739, 7741, 7743, 7745,  // NOLINT
-  7747, 7749, 7751, 7753, 7755, 7757, 7759, 7761,  // NOLINT
-  7763, 7765, 7767, 7769, 7771, 7773, 7775, 7777,  // NOLINT
-  7779, 7781, 7783, 7785, 7787, 7789, 7791, 7793,  // NOLINT
-  7795, 7797, 7799, 7801, 7803, 7805, 7807, 7809,  // NOLINT
-  7811, 7813, 7815, 7817, 7819, 7821, 7823, 7825,  // NOLINT
-  7827, 1073749653, 7835, 7841, 7843, 7845, 7847, 7849,  // NOLINT
-  7851, 7853, 7855, 7857, 7859, 7861, 7863, 7865,  // NOLINT
-  7867, 7869, 7871, 7873, 7875, 7877, 7879, 7881,  // NOLINT
-  7883, 7885, 7887, 7889, 7891, 7893, 7895, 7897,  // NOLINT
-  7899, 7901, 7903, 7905, 7907, 7909, 7911, 7913,  // NOLINT
-  7915, 7917, 7919, 7921, 7923, 7925, 7927, 7929,  // NOLINT
-  1073749760, 7943, 1073749776, 7957, 1073749792, 7975, 1073749808, 7991,  // NOLINT
-  1073749824, 8005, 1073749840, 8023, 1073749856, 8039, 1073749872, 8061,  // NOLINT
-  1073749888, 8071, 1073749904, 8087, 1073749920, 8103, 1073749936, 8116,  // NOLINT
-  1073749942, 8119, 8126, 1073749954, 8132, 1073749958, 8135, 1073749968,  // NOLINT
-  8147, 1073749974, 8151, 1073749984, 8167, 1073750002, 8180, 1073750006,  // NOLINT
-  8183 };  // NOLINT
-static const uint16_t kLowercaseTable1Size = 79;
-static const int32_t kLowercaseTable1[79] = {
-  113, 127, 266, 1073742094, 271, 275, 303, 308,  // NOLINT
-  313, 1073742140, 317, 1073742150, 329, 334, 388, 1073744944,  // NOLINT
-  3166, 3169, 1073744997, 3174, 3176, 3178, 3180, 3188,  // NOLINT
-  1073745014, 3191, 3201, 3203, 3205, 3207, 3209, 3211,  // NOLINT
+static const uint16_t kLowercaseTable0Size = 463;
+static const int32_t kLowercaseTable0[463] = {
+  1073741921, 122, 181, 1073742047, 246, 1073742072, 255, 257,  // NOLINT
+  259, 261, 263, 265, 267, 269, 271, 273,  // NOLINT
+  275, 277, 279, 281, 283, 285, 287, 289,  // NOLINT
+  291, 293, 295, 297, 299, 301, 303, 305,  // NOLINT
+  307, 309, 1073742135, 312, 314, 316, 318, 320,  // NOLINT
+  322, 324, 326, 1073742152, 329, 331, 333, 335,  // NOLINT
+  337, 339, 341, 343, 345, 347, 349, 351,  // NOLINT
+  353, 355, 357, 359, 361, 363, 365, 367,  // NOLINT
+  369, 371, 373, 375, 378, 380, 1073742206, 384,  // NOLINT
+  387, 389, 392, 1073742220, 397, 402, 405, 1073742233,  // NOLINT
+  411, 414, 417, 419, 421, 424, 1073742250, 427,  // NOLINT
+  429, 432, 436, 438, 1073742265, 442, 1073742269, 447,  // NOLINT
+  454, 457, 460, 462, 464, 466, 468, 470,  // NOLINT
+  472, 474, 1073742300, 477, 479, 481, 483, 485,  // NOLINT
+  487, 489, 491, 493, 1073742319, 496, 499, 501,  // NOLINT
+  505, 507, 509, 511, 513, 515, 517, 519,  // NOLINT
+  521, 523, 525, 527, 529, 531, 533, 535,  // NOLINT
+  537, 539, 541, 543, 545, 547, 549, 551,  // NOLINT
+  553, 555, 557, 559, 561, 1073742387, 569, 572,  // NOLINT
+  1073742399, 576, 578, 583, 585, 587, 589, 1073742415,  // NOLINT
+  659, 1073742485, 687, 881, 883, 887, 1073742715, 893,  // NOLINT
+  912, 1073742764, 974, 1073742800, 977, 1073742805, 983, 985,  // NOLINT
+  987, 989, 991, 993, 995, 997, 999, 1001,  // NOLINT
+  1003, 1005, 1073742831, 1011, 1013, 1016, 1073742843, 1020,  // NOLINT
+  1073742896, 1119, 1121, 1123, 1125, 1127, 1129, 1131,  // NOLINT
+  1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147,  // NOLINT
+  1149, 1151, 1153, 1163, 1165, 1167, 1169, 1171,  // NOLINT
+  1173, 1175, 1177, 1179, 1181, 1183, 1185, 1187,  // NOLINT
+  1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203,  // NOLINT
+  1205, 1207, 1209, 1211, 1213, 1215, 1218, 1220,  // NOLINT
+  1222, 1224, 1226, 1228, 1073743054, 1231, 1233, 1235,  // NOLINT
+  1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251,  // NOLINT
+  1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267,  // NOLINT
+  1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283,  // NOLINT
+  1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299,  // NOLINT
+  1301, 1303, 1305, 1307, 1309, 1311, 1313, 1315,  // NOLINT
+  1317, 1319, 1073743201, 1415, 1073749248, 7467, 1073749355, 7543,  // NOLINT
+  1073749369, 7578, 7681, 7683, 7685, 7687, 7689, 7691,  // NOLINT
+  7693, 7695, 7697, 7699, 7701, 7703, 7705, 7707,  // NOLINT
+  7709, 7711, 7713, 7715, 7717, 7719, 7721, 7723,  // NOLINT
+  7725, 7727, 7729, 7731, 7733, 7735, 7737, 7739,  // NOLINT
+  7741, 7743, 7745, 7747, 7749, 7751, 7753, 7755,  // NOLINT
+  7757, 7759, 7761, 7763, 7765, 7767, 7769, 7771,  // NOLINT
+  7773, 7775, 7777, 7779, 7781, 7783, 7785, 7787,  // NOLINT
+  7789, 7791, 7793, 7795, 7797, 7799, 7801, 7803,  // NOLINT
+  7805, 7807, 7809, 7811, 7813, 7815, 7817, 7819,  // NOLINT
+  7821, 7823, 7825, 7827, 1073749653, 7837, 7839, 7841,  // NOLINT
+  7843, 7845, 7847, 7849, 7851, 7853, 7855, 7857,  // NOLINT
+  7859, 7861, 7863, 7865, 7867, 7869, 7871, 7873,  // NOLINT
+  7875, 7877, 7879, 7881, 7883, 7885, 7887, 7889,  // NOLINT
+  7891, 7893, 7895, 7897, 7899, 7901, 7903, 7905,  // NOLINT
+  7907, 7909, 7911, 7913, 7915, 7917, 7919, 7921,  // NOLINT
+  7923, 7925, 7927, 7929, 7931, 7933, 1073749759, 7943,  // NOLINT
+  1073749776, 7957, 1073749792, 7975, 1073749808, 7991, 1073749824, 8005,  // NOLINT
+  1073749840, 8023, 1073749856, 8039, 1073749872, 8061, 1073749888, 8071,  // NOLINT
+  1073749904, 8087, 1073749920, 8103, 1073749936, 8116, 1073749942, 8119,  // NOLINT
+  8126, 1073749954, 8132, 1073749958, 8135, 1073749968, 8147, 1073749974,  // NOLINT
+  8151, 1073749984, 8167, 1073750002, 8180, 1073750006, 8183 };  // NOLINT
+static const uint16_t kLowercaseTable1Size = 84;
+static const int32_t kLowercaseTable1[84] = {
+  266, 1073742094, 271, 275, 303, 308, 313, 1073742140,  // NOLINT
+  317, 1073742150, 329, 334, 388, 1073744944, 3166, 3169,  // NOLINT
+  1073744997, 3174, 3176, 3178, 3180, 3185, 1073745011, 3188,  // NOLINT
+  1073745014, 3195, 3201, 3203, 3205, 3207, 3209, 3211,  // NOLINT
   3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227,  // NOLINT
   3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243,  // NOLINT
   3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259,  // NOLINT
   3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275,  // NOLINT
   3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291,  // NOLINT
-  3293, 3295, 3297, 1073745123, 3300, 1073745152, 3365 };  // NOLINT
+  3293, 3295, 3297, 1073745123, 3300, 3308, 3310, 3315,  // NOLINT
+  1073745152, 3365, 3367, 3373 };  // NOLINT
+static const uint16_t kLowercaseTable5Size = 93;
+static const int32_t kLowercaseTable5[93] = {
+  1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615,  // NOLINT
+  1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631,  // NOLINT
+  1633, 1635, 1637, 1639, 1641, 1643, 1645, 1665,  // NOLINT
+  1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681,  // NOLINT
+  1683, 1685, 1687, 1827, 1829, 1831, 1833, 1835,  // NOLINT
+  1837, 1073743663, 1841, 1843, 1845, 1847, 1849, 1851,  // NOLINT
+  1853, 1855, 1857, 1859, 1861, 1863, 1865, 1867,  // NOLINT
+  1869, 1871, 1873, 1875, 1877, 1879, 1881, 1883,  // NOLINT
+  1885, 1887, 1889, 1891, 1893, 1895, 1897, 1899,  // NOLINT
+  1901, 1903, 1073743729, 1912, 1914, 1916, 1919, 1921,  // NOLINT
+  1923, 1925, 1927, 1932, 1934, 1937, 1939, 1953,  // NOLINT
+  1955, 1957, 1959, 1961, 2042 };  // NOLINT
 static const uint16_t kLowercaseTable7Size = 6;
 static const int32_t kLowercaseTable7[6] = {
   1073748736, 6918, 1073748755, 6935, 1073749825, 8026 };  // NOLINT
@@ -519,6 +567,9 @@
     case 1: return LookupPredicate(kLowercaseTable1,
                                        kLowercaseTable1Size,
                                        c);
+    case 5: return LookupPredicate(kLowercaseTable5,
+                                       kLowercaseTable5Size,
+                                       c);
     case 7: return LookupPredicate(kLowercaseTable7,
                                        kLowercaseTable7Size,
                                        c);
@@ -528,71 +579,76 @@
 
 // Letter:               point.category in ['Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl' ]
 
-static const uint16_t kLetterTable0Size = 394;
-static const int32_t kLetterTable0[394] = {
+static const uint16_t kLetterTable0Size = 435;
+static const int32_t kLetterTable0[435] = {
   1073741889, 90, 1073741921, 122, 170, 181, 186, 1073742016,  // NOLINT
   214, 1073742040, 246, 1073742072, 705, 1073742534, 721, 1073742560,  // NOLINT
-  740, 750, 1073742714, 893, 902, 1073742728, 906, 908,  // NOLINT
-  1073742734, 929, 1073742755, 974, 1073742800, 1013, 1073742839, 1153,  // NOLINT
-  1073742986, 1299, 1073743153, 1366, 1369, 1073743201, 1415, 1073743312,  // NOLINT
-  1514, 1073743344, 1522, 1073743393, 1594, 1073743424, 1610, 1073743470,  // NOLINT
-  1647, 1073743473, 1747, 1749, 1073743589, 1766, 1073743598, 1775,  // NOLINT
-  1073743610, 1788, 1791, 1808, 1073743634, 1839, 1073743693, 1901,  // NOLINT
-  1073743744, 1957, 1969, 1073743818, 2026, 1073743860, 2037, 2042,  // NOLINT
-  1073744132, 2361, 2365, 2384, 1073744216, 2401, 1073744251, 2431,  // NOLINT
-  1073744261, 2444, 1073744271, 2448, 1073744275, 2472, 1073744298, 2480,  // NOLINT
-  2482, 1073744310, 2489, 2493, 2510, 1073744348, 2525, 1073744351,  // NOLINT
-  2529, 1073744368, 2545, 1073744389, 2570, 1073744399, 2576, 1073744403,  // NOLINT
-  2600, 1073744426, 2608, 1073744434, 2611, 1073744437, 2614, 1073744440,  // NOLINT
-  2617, 1073744473, 2652, 2654, 1073744498, 2676, 1073744517, 2701,  // NOLINT
-  1073744527, 2705, 1073744531, 2728, 1073744554, 2736, 1073744562, 2739,  // NOLINT
-  1073744565, 2745, 2749, 2768, 1073744608, 2785, 1073744645, 2828,  // NOLINT
-  1073744655, 2832, 1073744659, 2856, 1073744682, 2864, 1073744690, 2867,  // NOLINT
-  1073744693, 2873, 2877, 1073744732, 2909, 1073744735, 2913, 2929,  // NOLINT
-  2947, 1073744773, 2954, 1073744782, 2960, 1073744786, 2965, 1073744793,  // NOLINT
-  2970, 2972, 1073744798, 2975, 1073744803, 2980, 1073744808, 2986,  // NOLINT
-  1073744814, 3001, 1073744901, 3084, 1073744910, 3088, 1073744914, 3112,  // NOLINT
-  1073744938, 3123, 1073744949, 3129, 1073744992, 3169, 1073745029, 3212,  // NOLINT
-  1073745038, 3216, 1073745042, 3240, 1073745066, 3251, 1073745077, 3257,  // NOLINT
-  3261, 3294, 1073745120, 3297, 1073745157, 3340, 1073745166, 3344,  // NOLINT
-  1073745170, 3368, 1073745194, 3385, 1073745248, 3425, 1073745285, 3478,  // NOLINT
-  1073745306, 3505, 1073745331, 3515, 3517, 1073745344, 3526, 1073745409,  // NOLINT
-  3632, 1073745458, 3635, 1073745472, 3654, 1073745537, 3714, 3716,  // NOLINT
-  1073745543, 3720, 3722, 3725, 1073745556, 3735, 1073745561, 3743,  // NOLINT
-  1073745569, 3747, 3749, 3751, 1073745578, 3755, 1073745581, 3760,  // NOLINT
-  1073745586, 3763, 3773, 1073745600, 3780, 3782, 1073745628, 3805,  // NOLINT
-  3840, 1073745728, 3911, 1073745737, 3946, 1073745800, 3979, 1073745920,  // NOLINT
-  4129, 1073745955, 4135, 1073745961, 4138, 1073746000, 4181, 1073746080,  // NOLINT
-  4293, 1073746128, 4346, 4348, 1073746176, 4441, 1073746271, 4514,  // NOLINT
-  1073746344, 4601, 1073746432, 4680, 1073746506, 4685, 1073746512, 4694,  // NOLINT
-  4696, 1073746522, 4701, 1073746528, 4744, 1073746570, 4749, 1073746576,  // NOLINT
-  4784, 1073746610, 4789, 1073746616, 4798, 4800, 1073746626, 4805,  // NOLINT
-  1073746632, 4822, 1073746648, 4880, 1073746706, 4885, 1073746712, 4954,  // NOLINT
-  1073746816, 5007, 1073746848, 5108, 1073746945, 5740, 1073747567, 5750,  // NOLINT
-  1073747585, 5786, 1073747616, 5866, 1073747694, 5872, 1073747712, 5900,  // NOLINT
-  1073747726, 5905, 1073747744, 5937, 1073747776, 5969, 1073747808, 5996,  // NOLINT
-  1073747822, 6000, 1073747840, 6067, 6103, 6108, 1073748000, 6263,  // NOLINT
-  1073748096, 6312, 1073748224, 6428, 1073748304, 6509, 1073748336, 6516,  // NOLINT
-  1073748352, 6569, 1073748417, 6599, 1073748480, 6678, 1073748741, 6963,  // NOLINT
-  1073748805, 6987, 1073749248, 7615, 1073749504, 7835, 1073749664, 7929,  // NOLINT
-  1073749760, 7957, 1073749784, 7965, 1073749792, 8005, 1073749832, 8013,  // NOLINT
-  1073749840, 8023, 8025, 8027, 8029, 1073749855, 8061, 1073749888,  // NOLINT
-  8116, 1073749942, 8124, 8126, 1073749954, 8132, 1073749958, 8140,  // NOLINT
-  1073749968, 8147, 1073749974, 8155, 1073749984, 8172, 1073750002, 8180,  // NOLINT
-  1073750006, 8188 };  // NOLINT
-static const uint16_t kLetterTable1Size = 84;
-static const int32_t kLetterTable1[84] = {
-  113, 127, 1073741968, 148, 258, 263, 1073742090, 275,  // NOLINT
+  740, 748, 750, 1073742704, 884, 1073742710, 887, 1073742714,  // NOLINT
+  893, 902, 1073742728, 906, 908, 1073742734, 929, 1073742755,  // NOLINT
+  1013, 1073742839, 1153, 1073742986, 1319, 1073743153, 1366, 1369,  // NOLINT
+  1073743201, 1415, 1073743312, 1514, 1073743344, 1522, 1073743392, 1610,  // NOLINT
+  1073743470, 1647, 1073743473, 1747, 1749, 1073743589, 1766, 1073743598,  // NOLINT
+  1775, 1073743610, 1788, 1791, 1808, 1073743634, 1839, 1073743693,  // NOLINT
+  1957, 1969, 1073743818, 2026, 1073743860, 2037, 2042, 1073743872,  // NOLINT
+  2069, 2074, 2084, 2088, 1073743936, 2136, 2208, 1073744034,  // NOLINT
+  2220, 1073744132, 2361, 2365, 2384, 1073744216, 2401, 1073744241,  // NOLINT
+  2423, 1073744249, 2431, 1073744261, 2444, 1073744271, 2448, 1073744275,  // NOLINT
+  2472, 1073744298, 2480, 2482, 1073744310, 2489, 2493, 2510,  // NOLINT
+  1073744348, 2525, 1073744351, 2529, 1073744368, 2545, 1073744389, 2570,  // NOLINT
+  1073744399, 2576, 1073744403, 2600, 1073744426, 2608, 1073744434, 2611,  // NOLINT
+  1073744437, 2614, 1073744440, 2617, 1073744473, 2652, 2654, 1073744498,  // NOLINT
+  2676, 1073744517, 2701, 1073744527, 2705, 1073744531, 2728, 1073744554,  // NOLINT
+  2736, 1073744562, 2739, 1073744565, 2745, 2749, 2768, 1073744608,  // NOLINT
+  2785, 1073744645, 2828, 1073744655, 2832, 1073744659, 2856, 1073744682,  // NOLINT
+  2864, 1073744690, 2867, 1073744693, 2873, 2877, 1073744732, 2909,  // NOLINT
+  1073744735, 2913, 2929, 2947, 1073744773, 2954, 1073744782, 2960,  // NOLINT
+  1073744786, 2965, 1073744793, 2970, 2972, 1073744798, 2975, 1073744803,  // NOLINT
+  2980, 1073744808, 2986, 1073744814, 3001, 3024, 1073744901, 3084,  // NOLINT
+  1073744910, 3088, 1073744914, 3112, 1073744938, 3123, 1073744949, 3129,  // NOLINT
+  3133, 1073744984, 3161, 1073744992, 3169, 1073745029, 3212, 1073745038,  // NOLINT
+  3216, 1073745042, 3240, 1073745066, 3251, 1073745077, 3257, 3261,  // NOLINT
+  3294, 1073745120, 3297, 1073745137, 3314, 1073745157, 3340, 1073745166,  // NOLINT
+  3344, 1073745170, 3386, 3389, 3406, 1073745248, 3425, 1073745274,  // NOLINT
+  3455, 1073745285, 3478, 1073745306, 3505, 1073745331, 3515, 3517,  // NOLINT
+  1073745344, 3526, 1073745409, 3632, 1073745458, 3635, 1073745472, 3654,  // NOLINT
+  1073745537, 3714, 3716, 1073745543, 3720, 3722, 3725, 1073745556,  // NOLINT
+  3735, 1073745561, 3743, 1073745569, 3747, 3749, 3751, 1073745578,  // NOLINT
+  3755, 1073745581, 3760, 1073745586, 3763, 3773, 1073745600, 3780,  // NOLINT
+  3782, 1073745628, 3807, 3840, 1073745728, 3911, 1073745737, 3948,  // NOLINT
+  1073745800, 3980, 1073745920, 4138, 4159, 1073746000, 4181, 1073746010,  // NOLINT
+  4189, 4193, 1073746021, 4198, 1073746030, 4208, 1073746037, 4225,  // NOLINT
+  4238, 1073746080, 4293, 4295, 4301, 1073746128, 4346, 1073746172,  // NOLINT
+  4680, 1073746506, 4685, 1073746512, 4694, 4696, 1073746522, 4701,  // NOLINT
+  1073746528, 4744, 1073746570, 4749, 1073746576, 4784, 1073746610, 4789,  // NOLINT
+  1073746616, 4798, 4800, 1073746626, 4805, 1073746632, 4822, 1073746648,  // NOLINT
+  4880, 1073746706, 4885, 1073746712, 4954, 1073746816, 5007, 1073746848,  // NOLINT
+  5108, 1073746945, 5740, 1073747567, 5759, 1073747585, 5786, 1073747616,  // NOLINT
+  5866, 1073747694, 5872, 1073747712, 5900, 1073747726, 5905, 1073747744,  // NOLINT
+  5937, 1073747776, 5969, 1073747808, 5996, 1073747822, 6000, 1073747840,  // NOLINT
+  6067, 6103, 6108, 1073748000, 6263, 1073748096, 6312, 6314,  // NOLINT
+  1073748144, 6389, 1073748224, 6428, 1073748304, 6509, 1073748336, 6516,  // NOLINT
+  1073748352, 6571, 1073748417, 6599, 1073748480, 6678, 1073748512, 6740,  // NOLINT
+  6823, 1073748741, 6963, 1073748805, 6987, 1073748867, 7072, 1073748910,  // NOLINT
+  7087, 1073748922, 7141, 1073748992, 7203, 1073749069, 7247, 1073749082,  // NOLINT
+  7293, 1073749225, 7404, 1073749230, 7409, 1073749237, 7414, 1073749248,  // NOLINT
+  7615, 1073749504, 7957, 1073749784, 7965, 1073749792, 8005, 1073749832,  // NOLINT
+  8013, 1073749840, 8023, 8025, 8027, 8029, 1073749855, 8061,  // NOLINT
+  1073749888, 8116, 1073749942, 8124, 8126, 1073749954, 8132, 1073749958,  // NOLINT
+  8140, 1073749968, 8147, 1073749974, 8155, 1073749984, 8172, 1073750002,  // NOLINT
+  8180, 1073750006, 8188 };  // NOLINT
+static const uint16_t kLetterTable1Size = 87;
+static const int32_t kLetterTable1[87] = {
+  113, 127, 1073741968, 156, 258, 263, 1073742090, 275,  // NOLINT
   277, 1073742105, 285, 292, 294, 296, 1073742122, 301,  // NOLINT
   1073742127, 313, 1073742140, 319, 1073742149, 329, 334, 1073742176,  // NOLINT
-  388, 1073744896, 3118, 1073744944, 3166, 1073744992, 3180, 1073745012,  // NOLINT
-  3191, 1073745024, 3300, 1073745152, 3365, 1073745200, 3429, 3439,  // NOLINT
-  1073745280, 3478, 1073745312, 3494, 1073745320, 3502, 1073745328, 3510,  // NOLINT
-  1073745336, 3518, 1073745344, 3526, 1073745352, 3534, 1073745360, 3542,  // NOLINT
-  1073745368, 3550, 1073745925, 4103, 1073745953, 4137, 1073745969, 4149,  // NOLINT
-  1073745976, 4156, 1073745985, 4246, 1073746077, 4255, 1073746081, 4346,  // NOLINT
-  1073746172, 4351, 1073746181, 4396, 1073746225, 4494, 1073746336, 4535,  // NOLINT
-  1073746416, 4607, 1073746944, 8191 };  // NOLINT
+  392, 1073744896, 3118, 1073744944, 3166, 1073744992, 3300, 1073745131,  // NOLINT
+  3310, 1073745138, 3315, 1073745152, 3365, 3367, 3373, 1073745200,  // NOLINT
+  3431, 3439, 1073745280, 3478, 1073745312, 3494, 1073745320, 3502,  // NOLINT
+  1073745328, 3510, 1073745336, 3518, 1073745344, 3526, 1073745352, 3534,  // NOLINT
+  1073745360, 3542, 1073745368, 3550, 3631, 1073745925, 4103, 1073745953,  // NOLINT
+  4137, 1073745969, 4149, 1073745976, 4156, 1073745985, 4246, 1073746077,  // NOLINT
+  4255, 1073746081, 4346, 1073746172, 4351, 1073746181, 4397, 1073746225,  // NOLINT
+  4494, 1073746336, 4538, 1073746416, 4607, 1073746944, 8191 };  // NOLINT
 static const uint16_t kLetterTable2Size = 4;
 static const int32_t kLetterTable2[4] = {
   1073741824, 3509, 1073745408, 8191 };  // NOLINT
@@ -601,23 +657,31 @@
   1073741824, 8191 };  // NOLINT
 static const uint16_t kLetterTable4Size = 2;
 static const int32_t kLetterTable4[2] = {
-  1073741824, 8123 };  // NOLINT
-static const uint16_t kLetterTable5Size = 16;
-static const int32_t kLetterTable5[16] = {
-  1073741824, 1164, 1073743639, 1818, 1073743872, 2049, 1073743875, 2053,  // NOLINT
-  1073743879, 2058, 1073743884, 2082, 1073743936, 2163, 1073744896, 8191 };  // NOLINT
-static const uint16_t kLetterTable6Size = 2;
-static const int32_t kLetterTable6[2] = {
-  1073741824, 6051 };  // NOLINT
-static const uint16_t kLetterTable7Size = 50;
-static const int32_t kLetterTable7[50] = {
-  1073748224, 6701, 1073748528, 6762, 1073748592, 6873, 1073748736, 6918,  // NOLINT
-  1073748755, 6935, 6941, 1073748767, 6952, 1073748778, 6966, 1073748792,  // NOLINT
-  6972, 6974, 1073748800, 6977, 1073748803, 6980, 1073748806, 7089,  // NOLINT
-  1073748947, 7485, 1073749328, 7567, 1073749394, 7623, 1073749488, 7675,  // NOLINT
-  1073749616, 7796, 1073749622, 7932, 1073749793, 7994, 1073749825, 8026,  // NOLINT
-  1073749862, 8126, 1073749954, 8135, 1073749962, 8143, 1073749970, 8151,  // NOLINT
-  1073749978, 8156 };  // NOLINT
+  1073741824, 8140 };  // NOLINT
+static const uint16_t kLetterTable5Size = 88;
+static const int32_t kLetterTable5[88] = {
+  1073741824, 1164, 1073743056, 1277, 1073743104, 1548, 1073743376, 1567,  // NOLINT
+  1073743402, 1579, 1073743424, 1646, 1073743487, 1687, 1073743520, 1775,  // NOLINT
+  1073743639, 1823, 1073743650, 1928, 1073743755, 1934, 1073743760, 1939,  // NOLINT
+  1073743776, 1962, 1073743864, 2049, 1073743875, 2053, 1073743879, 2058,  // NOLINT
+  1073743884, 2082, 1073743936, 2163, 1073744002, 2227, 1073744114, 2295,  // NOLINT
+  2299, 1073744138, 2341, 1073744176, 2374, 1073744224, 2428, 1073744260,  // NOLINT
+  2482, 2511, 1073744384, 2600, 1073744448, 2626, 1073744452, 2635,  // NOLINT
+  1073744480, 2678, 2682, 1073744512, 2735, 2737, 1073744565, 2742,  // NOLINT
+  1073744569, 2749, 2752, 2754, 1073744603, 2781, 1073744608, 2794,  // NOLINT
+  1073744626, 2804, 1073744641, 2822, 1073744649, 2830, 1073744657, 2838,  // NOLINT
+  1073744672, 2854, 1073744680, 2862, 1073744832, 3042, 1073744896, 8191 };  // NOLINT
+static const uint16_t kLetterTable6Size = 6;
+static const int32_t kLetterTable6[6] = {
+  1073741824, 6051, 1073747888, 6086, 1073747915, 6139 };  // NOLINT
+static const uint16_t kLetterTable7Size = 48;
+static const int32_t kLetterTable7[48] = {
+  1073748224, 6765, 1073748592, 6873, 1073748736, 6918, 1073748755, 6935,  // NOLINT
+  6941, 1073748767, 6952, 1073748778, 6966, 1073748792, 6972, 6974,  // NOLINT
+  1073748800, 6977, 1073748803, 6980, 1073748806, 7089, 1073748947, 7485,  // NOLINT
+  1073749328, 7567, 1073749394, 7623, 1073749488, 7675, 1073749616, 7796,  // NOLINT
+  1073749622, 7932, 1073749793, 7994, 1073749825, 8026, 1073749862, 8126,  // NOLINT
+  1073749954, 8135, 1073749962, 8143, 1073749970, 8151, 1073749978, 8156 };  // NOLINT
 bool Letter::Is(uchar c) {
   int chunk_index = c >> 13;
   switch (chunk_index) {
@@ -672,14 +736,19 @@
 
 // Number:               point.category == 'Nd'
 
-static const uint16_t kNumberTable0Size = 44;
-static const int32_t kNumberTable0[44] = {
+static const uint16_t kNumberTable0Size = 56;
+static const int32_t kNumberTable0[56] = {
   1073741872, 57, 1073743456, 1641, 1073743600, 1785, 1073743808, 1993,  // NOLINT
   1073744230, 2415, 1073744358, 2543, 1073744486, 2671, 1073744614, 2799,  // NOLINT
   1073744742, 2927, 1073744870, 3055, 1073744998, 3183, 1073745126, 3311,  // NOLINT
   1073745254, 3439, 1073745488, 3673, 1073745616, 3801, 1073745696, 3881,  // NOLINT
-  1073745984, 4169, 1073747936, 6121, 1073747984, 6169, 1073748294, 6479,  // NOLINT
-  1073748432, 6617, 1073748816, 7001 };  // NOLINT
+  1073745984, 4169, 1073746064, 4249, 1073747936, 6121, 1073747984, 6169,  // NOLINT
+  1073748294, 6479, 1073748432, 6617, 1073748608, 6793, 1073748624, 6809,  // NOLINT
+  1073748816, 7001, 1073748912, 7097, 1073749056, 7241, 1073749072, 7257 };  // NOLINT
+static const uint16_t kNumberTable5Size = 12;
+static const int32_t kNumberTable5[12] = {
+  1073743392, 1577, 1073744080, 2265, 1073744128, 2313, 1073744336, 2521,  // NOLINT
+  1073744464, 2649, 1073744880, 3065 };  // NOLINT
 static const uint16_t kNumberTable7Size = 2;
 static const int32_t kNumberTable7[2] = {
   1073749776, 7961 };  // NOLINT
@@ -689,6 +758,9 @@
     case 0: return LookupPredicate(kNumberTable0,
                                        kNumberTable0Size,
                                        c);
+    case 5: return LookupPredicate(kNumberTable5,
+                                       kNumberTable5Size,
+                                       c);
     case 7: return LookupPredicate(kNumberTable7,
                                        kNumberTable7Size,
                                        c);
@@ -740,44 +812,56 @@
 
 // CombiningMark:        point.category in ['Mn', 'Mc']
 
-static const uint16_t kCombiningMarkTable0Size = 205;
-static const int32_t kCombiningMarkTable0[205] = {
-  1073742592, 879, 1073742979, 1158, 1073743249, 1469, 1471, 1073743297,  // NOLINT
-  1474, 1073743300, 1477, 1479, 1073743376, 1557, 1073743435, 1630,  // NOLINT
+static const uint16_t kCombiningMarkTable0Size = 258;
+static const int32_t kCombiningMarkTable0[258] = {
+  1073742592, 879, 1073742979, 1159, 1073743249, 1469, 1471, 1073743297,  // NOLINT
+  1474, 1073743300, 1477, 1479, 1073743376, 1562, 1073743435, 1631,  // NOLINT
   1648, 1073743574, 1756, 1073743583, 1764, 1073743591, 1768, 1073743594,  // NOLINT
   1773, 1809, 1073743664, 1866, 1073743782, 1968, 1073743851, 2035,  // NOLINT
-  1073744129, 2307, 2364, 1073744190, 2381, 1073744209, 2388, 1073744226,  // NOLINT
-  2403, 1073744257, 2435, 2492, 1073744318, 2500, 1073744327, 2504,  // NOLINT
-  1073744331, 2509, 2519, 1073744354, 2531, 1073744385, 2563, 2620,  // NOLINT
-  1073744446, 2626, 1073744455, 2632, 1073744459, 2637, 1073744496, 2673,  // NOLINT
-  1073744513, 2691, 2748, 1073744574, 2757, 1073744583, 2761, 1073744587,  // NOLINT
-  2765, 1073744610, 2787, 1073744641, 2819, 2876, 1073744702, 2883,  // NOLINT
-  1073744711, 2888, 1073744715, 2893, 1073744726, 2903, 2946, 1073744830,  // NOLINT
-  3010, 1073744838, 3016, 1073744842, 3021, 3031, 1073744897, 3075,  // NOLINT
-  1073744958, 3140, 1073744966, 3144, 1073744970, 3149, 1073744981, 3158,  // NOLINT
-  1073745026, 3203, 3260, 1073745086, 3268, 1073745094, 3272, 1073745098,  // NOLINT
-  3277, 1073745109, 3286, 1073745122, 3299, 1073745154, 3331, 1073745214,  // NOLINT
-  3395, 1073745222, 3400, 1073745226, 3405, 3415, 1073745282, 3459,  // NOLINT
-  3530, 1073745359, 3540, 3542, 1073745368, 3551, 1073745394, 3571,  // NOLINT
-  3633, 1073745460, 3642, 1073745479, 3662, 3761, 1073745588, 3769,  // NOLINT
-  1073745595, 3772, 1073745608, 3789, 1073745688, 3865, 3893, 3895,  // NOLINT
-  3897, 1073745726, 3903, 1073745777, 3972, 1073745798, 3975, 1073745808,  // NOLINT
-  3991, 1073745817, 4028, 4038, 1073745964, 4146, 1073745974, 4153,  // NOLINT
-  1073746006, 4185, 4959, 1073747730, 5908, 1073747762, 5940, 1073747794,  // NOLINT
-  5971, 1073747826, 6003, 1073747894, 6099, 6109, 1073747979, 6157,  // NOLINT
-  6313, 1073748256, 6443, 1073748272, 6459, 1073748400, 6592, 1073748424,  // NOLINT
-  6601, 1073748503, 6683, 1073748736, 6916, 1073748788, 6980, 1073748843,  // NOLINT
-  7027, 1073749440, 7626, 1073749502, 7679 };  // NOLINT
-static const uint16_t kCombiningMarkTable1Size = 9;
-static const int32_t kCombiningMarkTable1[9] = {
-  1073742032, 220, 225, 1073742053, 239, 1073745962, 4143, 1073746073,  // NOLINT
-  4250 };  // NOLINT
-static const uint16_t kCombiningMarkTable5Size = 5;
-static const int32_t kCombiningMarkTable5[5] = {
-  2050, 2054, 2059, 1073743907, 2087 };  // NOLINT
+  1073743894, 2073, 1073743899, 2083, 1073743909, 2087, 1073743913, 2093,  // NOLINT
+  1073743961, 2139, 1073744100, 2302, 1073744128, 2307, 1073744186, 2364,  // NOLINT
+  1073744190, 2383, 1073744209, 2391, 1073744226, 2403, 1073744257, 2435,  // NOLINT
+  2492, 1073744318, 2500, 1073744327, 2504, 1073744331, 2509, 2519,  // NOLINT
+  1073744354, 2531, 1073744385, 2563, 2620, 1073744446, 2626, 1073744455,  // NOLINT
+  2632, 1073744459, 2637, 2641, 1073744496, 2673, 2677, 1073744513,  // NOLINT
+  2691, 2748, 1073744574, 2757, 1073744583, 2761, 1073744587, 2765,  // NOLINT
+  1073744610, 2787, 1073744641, 2819, 2876, 1073744702, 2884, 1073744711,  // NOLINT
+  2888, 1073744715, 2893, 1073744726, 2903, 1073744738, 2915, 2946,  // NOLINT
+  1073744830, 3010, 1073744838, 3016, 1073744842, 3021, 3031, 1073744897,  // NOLINT
+  3075, 1073744958, 3140, 1073744966, 3144, 1073744970, 3149, 1073744981,  // NOLINT
+  3158, 1073744994, 3171, 1073745026, 3203, 3260, 1073745086, 3268,  // NOLINT
+  1073745094, 3272, 1073745098, 3277, 1073745109, 3286, 1073745122, 3299,  // NOLINT
+  1073745154, 3331, 1073745214, 3396, 1073745222, 3400, 1073745226, 3405,  // NOLINT
+  3415, 1073745250, 3427, 1073745282, 3459, 3530, 1073745359, 3540,  // NOLINT
+  3542, 1073745368, 3551, 1073745394, 3571, 3633, 1073745460, 3642,  // NOLINT
+  1073745479, 3662, 3761, 1073745588, 3769, 1073745595, 3772, 1073745608,  // NOLINT
+  3789, 1073745688, 3865, 3893, 3895, 3897, 1073745726, 3903,  // NOLINT
+  1073745777, 3972, 1073745798, 3975, 1073745805, 3991, 1073745817, 4028,  // NOLINT
+  4038, 1073745963, 4158, 1073746006, 4185, 1073746014, 4192, 1073746018,  // NOLINT
+  4196, 1073746023, 4205, 1073746033, 4212, 1073746050, 4237, 4239,  // NOLINT
+  1073746074, 4253, 1073746781, 4959, 1073747730, 5908, 1073747762, 5940,  // NOLINT
+  1073747794, 5971, 1073747826, 6003, 1073747892, 6099, 6109, 1073747979,  // NOLINT
+  6157, 6313, 1073748256, 6443, 1073748272, 6459, 1073748400, 6592,  // NOLINT
+  1073748424, 6601, 1073748503, 6683, 1073748565, 6750, 1073748576, 6780,  // NOLINT
+  6783, 1073748736, 6916, 1073748788, 6980, 1073748843, 7027, 1073748864,  // NOLINT
+  7042, 1073748897, 7085, 1073748966, 7155, 1073749028, 7223, 1073749200,  // NOLINT
+  7378, 1073749204, 7400, 7405, 1073749234, 7412, 1073749440, 7654,  // NOLINT
+  1073749500, 7679 };  // NOLINT
+static const uint16_t kCombiningMarkTable1Size = 14;
+static const int32_t kCombiningMarkTable1[14] = {
+  1073742032, 220, 225, 1073742053, 240, 1073745135, 3313, 3455,  // NOLINT
+  1073745376, 3583, 1073745962, 4143, 1073746073, 4250 };  // NOLINT
+static const uint16_t kCombiningMarkTable5Size = 47;
+static const int32_t kCombiningMarkTable5[47] = {
+  1647, 1073743476, 1661, 1695, 1073743600, 1777, 2050, 2054,  // NOLINT
+  2059, 1073743907, 2087, 1073744000, 2177, 1073744052, 2244, 1073744096,  // NOLINT
+  2289, 1073744166, 2349, 1073744199, 2387, 1073744256, 2435, 1073744307,  // NOLINT
+  2496, 1073744425, 2614, 2627, 1073744460, 2637, 2683, 2736,  // NOLINT
+  1073744562, 2740, 1073744567, 2744, 1073744574, 2751, 2753, 1073744619,  // NOLINT
+  2799, 1073744629, 2806, 1073744867, 3050, 1073744876, 3053 };  // NOLINT
 static const uint16_t kCombiningMarkTable7Size = 5;
 static const int32_t kCombiningMarkTable7[5] = {
-  6942, 1073749504, 7695, 1073749536, 7715 };  // NOLINT
+  6942, 1073749504, 7695, 1073749536, 7718 };  // NOLINT
 bool CombiningMark::Is(uchar c) {
   int chunk_index = c >> 13;
   switch (chunk_index) {
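
Every predicate's Is() has this same shape: shift off the 13-bit chunk, then binary-search that chunk's table for the last entry whose payload is at most the low 13 bits of the character; a hit is either an exact singleton match or a key that falls past a flagged range start. A simplified sketch of that search, under the encoding assumptions above (this condenses the generated LookupPredicate rather than quoting it):

// Returns true when |key| (the low 13 bits of a code point) is covered
// by a chunk table: an exact entry, or inside a flagged range whose end
// is the following entry.
static bool InChunkTable(const int32_t* table, uint16_t size, uint16_t key) {
  static const int32_t kStartBit = 1 << 30;
  unsigned low = 0, high = size - 1;
  while (low < high) {  // find the last entry with payload <= key
    unsigned mid = low + (high - low + 1) / 2;
    if ((table[mid] & ~kStartBit) <= key) low = mid; else high = mid - 1;
  }
  int32_t field = table[low];
  uint16_t entry = static_cast<uint16_t>(field & ~kStartBit);
  return entry == key || (entry < key && (field & kStartBit) != 0);
}

If the key is below the first entry the search stays at index 0 and both conditions fail, so absent characters fall out naturally.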
@@ -826,8 +910,8 @@
 
 static const MultiCharacterSpecialCase<2> kToLowercaseMultiStrings0[2] = {  // NOLINT
   {{105, 775}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kToLowercaseTable0Size = 463;  // NOLINT
-static const int32_t kToLowercaseTable0[926] = {
+static const uint16_t kToLowercaseTable0Size = 483;  // NOLINT
+static const int32_t kToLowercaseTable0[966] = {
   1073741889, 128, 90, 128, 1073742016, 128, 214, 128, 1073742040, 128, 222, 128, 256, 4, 258, 4,  // NOLINT
   260, 4, 262, 4, 264, 4, 266, 4, 268, 4, 270, 4, 272, 4, 274, 4,  // NOLINT
   276, 4, 278, 4, 280, 4, 282, 4, 284, 4, 286, 4, 288, 4, 290, 4,  // NOLINT
@@ -850,22 +934,24 @@
   542, 4, 544, -520, 546, 4, 548, 4, 550, 4, 552, 4, 554, 4, 556, 4,  // NOLINT
   558, 4, 560, 4, 562, 4, 570, 43180, 571, 4, 573, -652, 574, 43168, 577, 4,  // NOLINT
   579, -780, 580, 276, 581, 284, 582, 4, 584, 4, 586, 4, 588, 4, 590, 4,  // NOLINT
-  902, 152, 1073742728, 148, 906, 148, 908, 256, 1073742734, 252, 911, 252, 1073742737, 128, 929, 128,  // NOLINT
-  931, 6, 1073742756, 128, 939, 128, 984, 4, 986, 4, 988, 4, 990, 4, 992, 4,  // NOLINT
-  994, 4, 996, 4, 998, 4, 1000, 4, 1002, 4, 1004, 4, 1006, 4, 1012, -240,  // NOLINT
-  1015, 4, 1017, -28, 1018, 4, 1073742845, -520, 1023, -520, 1073742848, 320, 1039, 320, 1073742864, 128,  // NOLINT
-  1071, 128, 1120, 4, 1122, 4, 1124, 4, 1126, 4, 1128, 4, 1130, 4, 1132, 4,  // NOLINT
-  1134, 4, 1136, 4, 1138, 4, 1140, 4, 1142, 4, 1144, 4, 1146, 4, 1148, 4,  // NOLINT
-  1150, 4, 1152, 4, 1162, 4, 1164, 4, 1166, 4, 1168, 4, 1170, 4, 1172, 4,  // NOLINT
-  1174, 4, 1176, 4, 1178, 4, 1180, 4, 1182, 4, 1184, 4, 1186, 4, 1188, 4,  // NOLINT
-  1190, 4, 1192, 4, 1194, 4, 1196, 4, 1198, 4, 1200, 4, 1202, 4, 1204, 4,  // NOLINT
-  1206, 4, 1208, 4, 1210, 4, 1212, 4, 1214, 4, 1216, 60, 1217, 4, 1219, 4,  // NOLINT
-  1221, 4, 1223, 4, 1225, 4, 1227, 4, 1229, 4, 1232, 4, 1234, 4, 1236, 4,  // NOLINT
-  1238, 4, 1240, 4, 1242, 4, 1244, 4, 1246, 4, 1248, 4, 1250, 4, 1252, 4,  // NOLINT
-  1254, 4, 1256, 4, 1258, 4, 1260, 4, 1262, 4, 1264, 4, 1266, 4, 1268, 4,  // NOLINT
-  1270, 4, 1272, 4, 1274, 4, 1276, 4, 1278, 4, 1280, 4, 1282, 4, 1284, 4,  // NOLINT
-  1286, 4, 1288, 4, 1290, 4, 1292, 4, 1294, 4, 1296, 4, 1298, 4, 1073743153, 192,  // NOLINT
-  1366, 192, 1073746080, 29056, 4293, 29056, 7680, 4, 7682, 4, 7684, 4, 7686, 4, 7688, 4,  // NOLINT
+  880, 4, 882, 4, 886, 4, 902, 152, 1073742728, 148, 906, 148, 908, 256, 1073742734, 252,  // NOLINT
+  911, 252, 1073742737, 128, 929, 128, 931, 6, 1073742756, 128, 939, 128, 975, 32, 984, 4,  // NOLINT
+  986, 4, 988, 4, 990, 4, 992, 4, 994, 4, 996, 4, 998, 4, 1000, 4,  // NOLINT
+  1002, 4, 1004, 4, 1006, 4, 1012, -240, 1015, 4, 1017, -28, 1018, 4, 1073742845, -520,  // NOLINT
+  1023, -520, 1073742848, 320, 1039, 320, 1073742864, 128, 1071, 128, 1120, 4, 1122, 4, 1124, 4,  // NOLINT
+  1126, 4, 1128, 4, 1130, 4, 1132, 4, 1134, 4, 1136, 4, 1138, 4, 1140, 4,  // NOLINT
+  1142, 4, 1144, 4, 1146, 4, 1148, 4, 1150, 4, 1152, 4, 1162, 4, 1164, 4,  // NOLINT
+  1166, 4, 1168, 4, 1170, 4, 1172, 4, 1174, 4, 1176, 4, 1178, 4, 1180, 4,  // NOLINT
+  1182, 4, 1184, 4, 1186, 4, 1188, 4, 1190, 4, 1192, 4, 1194, 4, 1196, 4,  // NOLINT
+  1198, 4, 1200, 4, 1202, 4, 1204, 4, 1206, 4, 1208, 4, 1210, 4, 1212, 4,  // NOLINT
+  1214, 4, 1216, 60, 1217, 4, 1219, 4, 1221, 4, 1223, 4, 1225, 4, 1227, 4,  // NOLINT
+  1229, 4, 1232, 4, 1234, 4, 1236, 4, 1238, 4, 1240, 4, 1242, 4, 1244, 4,  // NOLINT
+  1246, 4, 1248, 4, 1250, 4, 1252, 4, 1254, 4, 1256, 4, 1258, 4, 1260, 4,  // NOLINT
+  1262, 4, 1264, 4, 1266, 4, 1268, 4, 1270, 4, 1272, 4, 1274, 4, 1276, 4,  // NOLINT
+  1278, 4, 1280, 4, 1282, 4, 1284, 4, 1286, 4, 1288, 4, 1290, 4, 1292, 4,  // NOLINT
+  1294, 4, 1296, 4, 1298, 4, 1300, 4, 1302, 4, 1304, 4, 1306, 4, 1308, 4,  // NOLINT
+  1310, 4, 1312, 4, 1314, 4, 1316, 4, 1318, 4, 1073743153, 192, 1366, 192, 1073746080, 29056,  // NOLINT
+  4293, 29056, 4295, 29056, 4301, 29056, 7680, 4, 7682, 4, 7684, 4, 7686, 4, 7688, 4,  // NOLINT
   7690, 4, 7692, 4, 7694, 4, 7696, 4, 7698, 4, 7700, 4, 7702, 4, 7704, 4,  // NOLINT
   7706, 4, 7708, 4, 7710, 4, 7712, 4, 7714, 4, 7716, 4, 7718, 4, 7720, 4,  // NOLINT
   7722, 4, 7724, 4, 7726, 4, 7728, 4, 7730, 4, 7732, 4, 7734, 4, 7736, 4,  // NOLINT
@@ -874,33 +960,52 @@
   7770, 4, 7772, 4, 7774, 4, 7776, 4, 7778, 4, 7780, 4, 7782, 4, 7784, 4,  // NOLINT
   7786, 4, 7788, 4, 7790, 4, 7792, 4, 7794, 4, 7796, 4, 7798, 4, 7800, 4,  // NOLINT
   7802, 4, 7804, 4, 7806, 4, 7808, 4, 7810, 4, 7812, 4, 7814, 4, 7816, 4,  // NOLINT
-  7818, 4, 7820, 4, 7822, 4, 7824, 4, 7826, 4, 7828, 4, 7840, 4, 7842, 4,  // NOLINT
-  7844, 4, 7846, 4, 7848, 4, 7850, 4, 7852, 4, 7854, 4, 7856, 4, 7858, 4,  // NOLINT
-  7860, 4, 7862, 4, 7864, 4, 7866, 4, 7868, 4, 7870, 4, 7872, 4, 7874, 4,  // NOLINT
-  7876, 4, 7878, 4, 7880, 4, 7882, 4, 7884, 4, 7886, 4, 7888, 4, 7890, 4,  // NOLINT
-  7892, 4, 7894, 4, 7896, 4, 7898, 4, 7900, 4, 7902, 4, 7904, 4, 7906, 4,  // NOLINT
-  7908, 4, 7910, 4, 7912, 4, 7914, 4, 7916, 4, 7918, 4, 7920, 4, 7922, 4,  // NOLINT
-  7924, 4, 7926, 4, 7928, 4, 1073749768, -32, 7951, -32, 1073749784, -32, 7965, -32, 1073749800, -32,  // NOLINT
-  7983, -32, 1073749816, -32, 7999, -32, 1073749832, -32, 8013, -32, 8025, -32, 8027, -32, 8029, -32,  // NOLINT
-  8031, -32, 1073749864, -32, 8047, -32, 1073749896, -32, 8079, -32, 1073749912, -32, 8095, -32, 1073749928, -32,  // NOLINT
-  8111, -32, 1073749944, -32, 8121, -32, 1073749946, -296, 8123, -296, 8124, -36, 1073749960, -344, 8139, -344,  // NOLINT
-  8140, -36, 1073749976, -32, 8153, -32, 1073749978, -400, 8155, -400, 1073749992, -32, 8169, -32, 1073749994, -448,  // NOLINT
-  8171, -448, 8172, -28, 1073750008, -512, 8185, -512, 1073750010, -504, 8187, -504, 8188, -36 };  // NOLINT
+  7818, 4, 7820, 4, 7822, 4, 7824, 4, 7826, 4, 7828, 4, 7838, -30460, 7840, 4,  // NOLINT
+  7842, 4, 7844, 4, 7846, 4, 7848, 4, 7850, 4, 7852, 4, 7854, 4, 7856, 4,  // NOLINT
+  7858, 4, 7860, 4, 7862, 4, 7864, 4, 7866, 4, 7868, 4, 7870, 4, 7872, 4,  // NOLINT
+  7874, 4, 7876, 4, 7878, 4, 7880, 4, 7882, 4, 7884, 4, 7886, 4, 7888, 4,  // NOLINT
+  7890, 4, 7892, 4, 7894, 4, 7896, 4, 7898, 4, 7900, 4, 7902, 4, 7904, 4,  // NOLINT
+  7906, 4, 7908, 4, 7910, 4, 7912, 4, 7914, 4, 7916, 4, 7918, 4, 7920, 4,  // NOLINT
+  7922, 4, 7924, 4, 7926, 4, 7928, 4, 7930, 4, 7932, 4, 7934, 4, 1073749768, -32,  // NOLINT
+  7951, -32, 1073749784, -32, 7965, -32, 1073749800, -32, 7983, -32, 1073749816, -32, 7999, -32, 1073749832, -32,  // NOLINT
+  8013, -32, 8025, -32, 8027, -32, 8029, -32, 8031, -32, 1073749864, -32, 8047, -32, 1073749896, -32,  // NOLINT
+  8079, -32, 1073749912, -32, 8095, -32, 1073749928, -32, 8111, -32, 1073749944, -32, 8121, -32, 1073749946, -296,  // NOLINT
+  8123, -296, 8124, -36, 1073749960, -344, 8139, -344, 8140, -36, 1073749976, -32, 8153, -32, 1073749978, -400,  // NOLINT
+  8155, -400, 1073749992, -32, 8169, -32, 1073749994, -448, 8171, -448, 8172, -28, 1073750008, -512, 8185, -512,  // NOLINT
+  1073750010, -504, 8187, -504, 8188, -36 };  // NOLINT
 static const uint16_t kToLowercaseMultiStrings0Size = 2;  // NOLINT
 static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings1[1] = {  // NOLINT
   {{kSentinel}} }; // NOLINT
-static const uint16_t kToLowercaseTable1Size = 69;  // NOLINT
-static const int32_t kToLowercaseTable1[138] = {
+static const uint16_t kToLowercaseTable1Size = 79;  // NOLINT
+static const int32_t kToLowercaseTable1[158] = {
   294, -30068, 298, -33532, 299, -33048, 306, 112, 1073742176, 64, 367, 64, 387, 4, 1073743030, 104,  // NOLINT
   1231, 104, 1073744896, 192, 3118, 192, 3168, 4, 3170, -42972, 3171, -15256, 3172, -42908, 3175, 4,  // NOLINT
-  3177, 4, 3179, 4, 3189, 4, 3200, 4, 3202, 4, 3204, 4, 3206, 4, 3208, 4,  // NOLINT
-  3210, 4, 3212, 4, 3214, 4, 3216, 4, 3218, 4, 3220, 4, 3222, 4, 3224, 4,  // NOLINT
-  3226, 4, 3228, 4, 3230, 4, 3232, 4, 3234, 4, 3236, 4, 3238, 4, 3240, 4,  // NOLINT
-  3242, 4, 3244, 4, 3246, 4, 3248, 4, 3250, 4, 3252, 4, 3254, 4, 3256, 4,  // NOLINT
-  3258, 4, 3260, 4, 3262, 4, 3264, 4, 3266, 4, 3268, 4, 3270, 4, 3272, 4,  // NOLINT
-  3274, 4, 3276, 4, 3278, 4, 3280, 4, 3282, 4, 3284, 4, 3286, 4, 3288, 4,  // NOLINT
-  3290, 4, 3292, 4, 3294, 4, 3296, 4, 3298, 4 };  // NOLINT
+  3177, 4, 3179, 4, 3181, -43120, 3182, -42996, 3183, -43132, 3184, -43128, 3186, 4, 3189, 4,  // NOLINT
+  1073745022, -43260, 3199, -43260, 3200, 4, 3202, 4, 3204, 4, 3206, 4, 3208, 4, 3210, 4,  // NOLINT
+  3212, 4, 3214, 4, 3216, 4, 3218, 4, 3220, 4, 3222, 4, 3224, 4, 3226, 4,  // NOLINT
+  3228, 4, 3230, 4, 3232, 4, 3234, 4, 3236, 4, 3238, 4, 3240, 4, 3242, 4,  // NOLINT
+  3244, 4, 3246, 4, 3248, 4, 3250, 4, 3252, 4, 3254, 4, 3256, 4, 3258, 4,  // NOLINT
+  3260, 4, 3262, 4, 3264, 4, 3266, 4, 3268, 4, 3270, 4, 3272, 4, 3274, 4,  // NOLINT
+  3276, 4, 3278, 4, 3280, 4, 3282, 4, 3284, 4, 3286, 4, 3288, 4, 3290, 4,  // NOLINT
+  3292, 4, 3294, 4, 3296, 4, 3298, 4, 3307, 4, 3309, 4, 3314, 4 };  // NOLINT
 static const uint16_t kToLowercaseMultiStrings1Size = 1;  // NOLINT
+static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings5[1] = {  // NOLINT
+  {{kSentinel}} }; // NOLINT
+static const uint16_t kToLowercaseTable5Size = 91;  // NOLINT
+static const int32_t kToLowercaseTable5[182] = {
+  1600, 4, 1602, 4, 1604, 4, 1606, 4, 1608, 4, 1610, 4, 1612, 4, 1614, 4,  // NOLINT
+  1616, 4, 1618, 4, 1620, 4, 1622, 4, 1624, 4, 1626, 4, 1628, 4, 1630, 4,  // NOLINT
+  1632, 4, 1634, 4, 1636, 4, 1638, 4, 1640, 4, 1642, 4, 1644, 4, 1664, 4,  // NOLINT
+  1666, 4, 1668, 4, 1670, 4, 1672, 4, 1674, 4, 1676, 4, 1678, 4, 1680, 4,  // NOLINT
+  1682, 4, 1684, 4, 1686, 4, 1826, 4, 1828, 4, 1830, 4, 1832, 4, 1834, 4,  // NOLINT
+  1836, 4, 1838, 4, 1842, 4, 1844, 4, 1846, 4, 1848, 4, 1850, 4, 1852, 4,  // NOLINT
+  1854, 4, 1856, 4, 1858, 4, 1860, 4, 1862, 4, 1864, 4, 1866, 4, 1868, 4,  // NOLINT
+  1870, 4, 1872, 4, 1874, 4, 1876, 4, 1878, 4, 1880, 4, 1882, 4, 1884, 4,  // NOLINT
+  1886, 4, 1888, 4, 1890, 4, 1892, 4, 1894, 4, 1896, 4, 1898, 4, 1900, 4,  // NOLINT
+  1902, 4, 1913, 4, 1915, 4, 1917, -141328, 1918, 4, 1920, 4, 1922, 4, 1924, 4,  // NOLINT
+  1926, 4, 1931, 4, 1933, -169120, 1936, 4, 1938, 4, 1952, 4, 1954, 4, 1956, 4,  // NOLINT
+  1958, 4, 1960, 4, 1962, -169232 };  // NOLINT
+static const uint16_t kToLowercaseMultiStrings5Size = 1;  // NOLINT
 static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings7[1] = {  // NOLINT
   {{kSentinel}} }; // NOLINT
 static const uint16_t kToLowercaseTable7Size = 2;  // NOLINT
@@ -927,6 +1032,13 @@
                                            n,
                                            result,
                                            allow_caching_ptr);
+    case 5: return LookupMapping<true>(kToLowercaseTable5,
+                                           kToLowercaseTable5Size,
+                                           kToLowercaseMultiStrings5,
+                                           c,
+                                           n,
+                                           result,
+                                           allow_caching_ptr);
     case 7: return LookupMapping<true>(kToLowercaseTable7,
                                            kToLowercaseTable7Size,
                                            kToLowercaseMultiStrings7,
@@ -955,8 +1067,8 @@
   {{933, 776, 768}}, {{929, 787, kSentinel}}, {{933, 834, kSentinel}}, {{933, 776, 834}},  // NOLINT
   {{8186, 921, kSentinel}}, {{937, 921, kSentinel}}, {{911, 921, kSentinel}}, {{937, 834, kSentinel}},  // NOLINT
   {{937, 834, 921}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable0Size = 554;  // NOLINT
-static const int32_t kToUppercaseTable0[1108] = {
+static const uint16_t kToUppercaseTable0Size = 580;  // NOLINT
+static const int32_t kToUppercaseTable0[1160] = {
   1073741921, -128, 122, -128, 181, 2972, 223, 1, 1073742048, -128, 246, -128, 1073742072, -128, 254, -128,  // NOLINT
   255, 484, 257, -4, 259, -4, 261, -4, 263, -4, 265, -4, 267, -4, 269, -4,  // NOLINT
   271, -4, 273, -4, 275, -4, 277, -4, 279, -4, 281, -4, 283, -4, 285, -4,  // NOLINT
@@ -976,72 +1088,92 @@
   517, -4, 519, -4, 521, -4, 523, -4, 525, -4, 527, -4, 529, -4, 531, -4,  // NOLINT
   533, -4, 535, -4, 537, -4, 539, -4, 541, -4, 543, -4, 547, -4, 549, -4,  // NOLINT
   551, -4, 553, -4, 555, -4, 557, -4, 559, -4, 561, -4, 563, -4, 572, -4,  // NOLINT
-  578, -4, 583, -4, 585, -4, 587, -4, 589, -4, 591, -4, 595, -840, 596, -824,  // NOLINT
-  1073742422, -820, 599, -820, 601, -808, 603, -812, 608, -820, 611, -828, 616, -836, 617, -844,  // NOLINT
-  619, 42972, 623, -844, 626, -852, 629, -856, 637, 42908, 640, -872, 643, -872, 648, -872,  // NOLINT
-  649, -276, 1073742474, -868, 651, -868, 652, -284, 658, -876, 837, 336, 1073742715, 520, 893, 520,  // NOLINT
-  912, 13, 940, -152, 1073742765, -148, 943, -148, 944, 17, 1073742769, -128, 961, -128, 962, -124,  // NOLINT
-  1073742787, -128, 971, -128, 972, -256, 1073742797, -252, 974, -252, 976, -248, 977, -228, 981, -188,  // NOLINT
-  982, -216, 985, -4, 987, -4, 989, -4, 991, -4, 993, -4, 995, -4, 997, -4,  // NOLINT
-  999, -4, 1001, -4, 1003, -4, 1005, -4, 1007, -4, 1008, -344, 1009, -320, 1010, 28,  // NOLINT
-  1013, -384, 1016, -4, 1019, -4, 1073742896, -128, 1103, -128, 1073742928, -320, 1119, -320, 1121, -4,  // NOLINT
-  1123, -4, 1125, -4, 1127, -4, 1129, -4, 1131, -4, 1133, -4, 1135, -4, 1137, -4,  // NOLINT
-  1139, -4, 1141, -4, 1143, -4, 1145, -4, 1147, -4, 1149, -4, 1151, -4, 1153, -4,  // NOLINT
-  1163, -4, 1165, -4, 1167, -4, 1169, -4, 1171, -4, 1173, -4, 1175, -4, 1177, -4,  // NOLINT
-  1179, -4, 1181, -4, 1183, -4, 1185, -4, 1187, -4, 1189, -4, 1191, -4, 1193, -4,  // NOLINT
-  1195, -4, 1197, -4, 1199, -4, 1201, -4, 1203, -4, 1205, -4, 1207, -4, 1209, -4,  // NOLINT
-  1211, -4, 1213, -4, 1215, -4, 1218, -4, 1220, -4, 1222, -4, 1224, -4, 1226, -4,  // NOLINT
-  1228, -4, 1230, -4, 1231, -60, 1233, -4, 1235, -4, 1237, -4, 1239, -4, 1241, -4,  // NOLINT
-  1243, -4, 1245, -4, 1247, -4, 1249, -4, 1251, -4, 1253, -4, 1255, -4, 1257, -4,  // NOLINT
-  1259, -4, 1261, -4, 1263, -4, 1265, -4, 1267, -4, 1269, -4, 1271, -4, 1273, -4,  // NOLINT
-  1275, -4, 1277, -4, 1279, -4, 1281, -4, 1283, -4, 1285, -4, 1287, -4, 1289, -4,  // NOLINT
-  1291, -4, 1293, -4, 1295, -4, 1297, -4, 1299, -4, 1073743201, -192, 1414, -192, 1415, 21,  // NOLINT
-  7549, 15256, 7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4,  // NOLINT
-  7695, -4, 7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4,  // NOLINT
-  7711, -4, 7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4,  // NOLINT
-  7727, -4, 7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4,  // NOLINT
-  7743, -4, 7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4,  // NOLINT
-  7759, -4, 7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4,  // NOLINT
-  7775, -4, 7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4,  // NOLINT
-  7791, -4, 7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4,  // NOLINT
-  7807, -4, 7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4,  // NOLINT
-  7823, -4, 7825, -4, 7827, -4, 7829, -4, 7830, 25, 7831, 29, 7832, 33, 7833, 37,  // NOLINT
-  7834, 41, 7835, -236, 7841, -4, 7843, -4, 7845, -4, 7847, -4, 7849, -4, 7851, -4,  // NOLINT
-  7853, -4, 7855, -4, 7857, -4, 7859, -4, 7861, -4, 7863, -4, 7865, -4, 7867, -4,  // NOLINT
-  7869, -4, 7871, -4, 7873, -4, 7875, -4, 7877, -4, 7879, -4, 7881, -4, 7883, -4,  // NOLINT
-  7885, -4, 7887, -4, 7889, -4, 7891, -4, 7893, -4, 7895, -4, 7897, -4, 7899, -4,  // NOLINT
-  7901, -4, 7903, -4, 7905, -4, 7907, -4, 7909, -4, 7911, -4, 7913, -4, 7915, -4,  // NOLINT
-  7917, -4, 7919, -4, 7921, -4, 7923, -4, 7925, -4, 7927, -4, 7929, -4, 1073749760, 32,  // NOLINT
-  7943, 32, 1073749776, 32, 7957, 32, 1073749792, 32, 7975, 32, 1073749808, 32, 7991, 32, 1073749824, 32,  // NOLINT
-  8005, 32, 8016, 45, 8017, 32, 8018, 49, 8019, 32, 8020, 53, 8021, 32, 8022, 57,  // NOLINT
-  8023, 32, 1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344, 8053, 344, 1073749878, 400,  // NOLINT
-  8055, 400, 1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504, 8061, 504, 8064, 61,  // NOLINT
-  8065, 65, 8066, 69, 8067, 73, 8068, 77, 8069, 81, 8070, 85, 8071, 89, 8072, 61,  // NOLINT
-  8073, 65, 8074, 69, 8075, 73, 8076, 77, 8077, 81, 8078, 85, 8079, 89, 8080, 93,  // NOLINT
-  8081, 97, 8082, 101, 8083, 105, 8084, 109, 8085, 113, 8086, 117, 8087, 121, 8088, 93,  // NOLINT
-  8089, 97, 8090, 101, 8091, 105, 8092, 109, 8093, 113, 8094, 117, 8095, 121, 8096, 125,  // NOLINT
-  8097, 129, 8098, 133, 8099, 137, 8100, 141, 8101, 145, 8102, 149, 8103, 153, 8104, 125,  // NOLINT
-  8105, 129, 8106, 133, 8107, 137, 8108, 141, 8109, 145, 8110, 149, 8111, 153, 1073749936, 32,  // NOLINT
-  8113, 32, 8114, 157, 8115, 161, 8116, 165, 8118, 169, 8119, 173, 8124, 161, 8126, -28820,  // NOLINT
-  8130, 177, 8131, 181, 8132, 185, 8134, 189, 8135, 193, 8140, 181, 1073749968, 32, 8145, 32,  // NOLINT
-  8146, 197, 8147, 13, 8150, 201, 8151, 205, 1073749984, 32, 8161, 32, 8162, 209, 8163, 17,  // NOLINT
-  8164, 213, 8165, 28, 8166, 217, 8167, 221, 8178, 225, 8179, 229, 8180, 233, 8182, 237,  // NOLINT
-  8183, 241, 8188, 229 };  // NOLINT
+  1073742399, 43260, 576, 43260, 578, -4, 583, -4, 585, -4, 587, -4, 589, -4, 591, -4,  // NOLINT
+  592, 43132, 593, 43120, 594, 43128, 595, -840, 596, -824, 1073742422, -820, 599, -820, 601, -808,  // NOLINT
+  603, -812, 608, -820, 611, -828, 613, 169120, 614, 169232, 616, -836, 617, -844, 619, 42972,  // NOLINT
+  623, -844, 625, 42996, 626, -852, 629, -856, 637, 42908, 640, -872, 643, -872, 648, -872,  // NOLINT
+  649, -276, 1073742474, -868, 651, -868, 652, -284, 658, -876, 837, 336, 881, -4, 883, -4,  // NOLINT
+  887, -4, 1073742715, 520, 893, 520, 912, 13, 940, -152, 1073742765, -148, 943, -148, 944, 17,  // NOLINT
+  1073742769, -128, 961, -128, 962, -124, 1073742787, -128, 971, -128, 972, -256, 1073742797, -252, 974, -252,  // NOLINT
+  976, -248, 977, -228, 981, -188, 982, -216, 983, -32, 985, -4, 987, -4, 989, -4,  // NOLINT
+  991, -4, 993, -4, 995, -4, 997, -4, 999, -4, 1001, -4, 1003, -4, 1005, -4,  // NOLINT
+  1007, -4, 1008, -344, 1009, -320, 1010, 28, 1013, -384, 1016, -4, 1019, -4, 1073742896, -128,  // NOLINT
+  1103, -128, 1073742928, -320, 1119, -320, 1121, -4, 1123, -4, 1125, -4, 1127, -4, 1129, -4,  // NOLINT
+  1131, -4, 1133, -4, 1135, -4, 1137, -4, 1139, -4, 1141, -4, 1143, -4, 1145, -4,  // NOLINT
+  1147, -4, 1149, -4, 1151, -4, 1153, -4, 1163, -4, 1165, -4, 1167, -4, 1169, -4,  // NOLINT
+  1171, -4, 1173, -4, 1175, -4, 1177, -4, 1179, -4, 1181, -4, 1183, -4, 1185, -4,  // NOLINT
+  1187, -4, 1189, -4, 1191, -4, 1193, -4, 1195, -4, 1197, -4, 1199, -4, 1201, -4,  // NOLINT
+  1203, -4, 1205, -4, 1207, -4, 1209, -4, 1211, -4, 1213, -4, 1215, -4, 1218, -4,  // NOLINT
+  1220, -4, 1222, -4, 1224, -4, 1226, -4, 1228, -4, 1230, -4, 1231, -60, 1233, -4,  // NOLINT
+  1235, -4, 1237, -4, 1239, -4, 1241, -4, 1243, -4, 1245, -4, 1247, -4, 1249, -4,  // NOLINT
+  1251, -4, 1253, -4, 1255, -4, 1257, -4, 1259, -4, 1261, -4, 1263, -4, 1265, -4,  // NOLINT
+  1267, -4, 1269, -4, 1271, -4, 1273, -4, 1275, -4, 1277, -4, 1279, -4, 1281, -4,  // NOLINT
+  1283, -4, 1285, -4, 1287, -4, 1289, -4, 1291, -4, 1293, -4, 1295, -4, 1297, -4,  // NOLINT
+  1299, -4, 1301, -4, 1303, -4, 1305, -4, 1307, -4, 1309, -4, 1311, -4, 1313, -4,  // NOLINT
+  1315, -4, 1317, -4, 1319, -4, 1073743201, -192, 1414, -192, 1415, 21, 7545, 141328, 7549, 15256,  // NOLINT
+  7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4, 7695, -4,  // NOLINT
+  7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4, 7711, -4,  // NOLINT
+  7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4, 7727, -4,  // NOLINT
+  7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4, 7743, -4,  // NOLINT
+  7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4, 7759, -4,  // NOLINT
+  7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4, 7775, -4,  // NOLINT
+  7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4, 7791, -4,  // NOLINT
+  7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4, 7807, -4,  // NOLINT
+  7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4, 7823, -4,  // NOLINT
+  7825, -4, 7827, -4, 7829, -4, 7830, 25, 7831, 29, 7832, 33, 7833, 37, 7834, 41,  // NOLINT
+  7835, -236, 7841, -4, 7843, -4, 7845, -4, 7847, -4, 7849, -4, 7851, -4, 7853, -4,  // NOLINT
+  7855, -4, 7857, -4, 7859, -4, 7861, -4, 7863, -4, 7865, -4, 7867, -4, 7869, -4,  // NOLINT
+  7871, -4, 7873, -4, 7875, -4, 7877, -4, 7879, -4, 7881, -4, 7883, -4, 7885, -4,  // NOLINT
+  7887, -4, 7889, -4, 7891, -4, 7893, -4, 7895, -4, 7897, -4, 7899, -4, 7901, -4,  // NOLINT
+  7903, -4, 7905, -4, 7907, -4, 7909, -4, 7911, -4, 7913, -4, 7915, -4, 7917, -4,  // NOLINT
+  7919, -4, 7921, -4, 7923, -4, 7925, -4, 7927, -4, 7929, -4, 7931, -4, 7933, -4,  // NOLINT
+  7935, -4, 1073749760, 32, 7943, 32, 1073749776, 32, 7957, 32, 1073749792, 32, 7975, 32, 1073749808, 32,  // NOLINT
+  7991, 32, 1073749824, 32, 8005, 32, 8016, 45, 8017, 32, 8018, 49, 8019, 32, 8020, 53,  // NOLINT
+  8021, 32, 8022, 57, 8023, 32, 1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344,  // NOLINT
+  8053, 344, 1073749878, 400, 8055, 400, 1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504,  // NOLINT
+  8061, 504, 8064, 61, 8065, 65, 8066, 69, 8067, 73, 8068, 77, 8069, 81, 8070, 85,  // NOLINT
+  8071, 89, 8072, 61, 8073, 65, 8074, 69, 8075, 73, 8076, 77, 8077, 81, 8078, 85,  // NOLINT
+  8079, 89, 8080, 93, 8081, 97, 8082, 101, 8083, 105, 8084, 109, 8085, 113, 8086, 117,  // NOLINT
+  8087, 121, 8088, 93, 8089, 97, 8090, 101, 8091, 105, 8092, 109, 8093, 113, 8094, 117,  // NOLINT
+  8095, 121, 8096, 125, 8097, 129, 8098, 133, 8099, 137, 8100, 141, 8101, 145, 8102, 149,  // NOLINT
+  8103, 153, 8104, 125, 8105, 129, 8106, 133, 8107, 137, 8108, 141, 8109, 145, 8110, 149,  // NOLINT
+  8111, 153, 1073749936, 32, 8113, 32, 8114, 157, 8115, 161, 8116, 165, 8118, 169, 8119, 173,  // NOLINT
+  8124, 161, 8126, -28820, 8130, 177, 8131, 181, 8132, 185, 8134, 189, 8135, 193, 8140, 181,  // NOLINT
+  1073749968, 32, 8145, 32, 8146, 197, 8147, 13, 8150, 201, 8151, 205, 1073749984, 32, 8161, 32,  // NOLINT
+  8162, 209, 8163, 17, 8164, 213, 8165, 28, 8166, 217, 8167, 221, 8178, 225, 8179, 229,  // NOLINT
+  8180, 233, 8182, 237, 8183, 241, 8188, 229 };  // NOLINT
 static const uint16_t kToUppercaseMultiStrings0Size = 62;  // NOLINT
 static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings1[1] = {  // NOLINT
   {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable1Size = 67;  // NOLINT
-static const int32_t kToUppercaseTable1[134] = {
+static const uint16_t kToUppercaseTable1Size = 73;  // NOLINT
+static const int32_t kToUppercaseTable1[146] = {
   334, -112, 1073742192, -64, 383, -64, 388, -4, 1073743056, -104, 1257, -104, 1073744944, -192, 3166, -192,  // NOLINT
-  3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3190, -4, 3201, -4,  // NOLINT
-  3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4, 3217, -4,  // NOLINT
-  3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4, 3233, -4,  // NOLINT
-  3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4, 3249, -4,  // NOLINT
-  3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4, 3265, -4,  // NOLINT
-  3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4, 3281, -4,  // NOLINT
-  3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4, 3297, -4,  // NOLINT
-  3299, -4, 1073745152, -29056, 3365, -29056 };  // NOLINT
+  3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3187, -4, 3190, -4,  // NOLINT
+  3201, -4, 3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4,  // NOLINT
+  3217, -4, 3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4,  // NOLINT
+  3233, -4, 3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4,  // NOLINT
+  3249, -4, 3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4,  // NOLINT
+  3265, -4, 3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4,  // NOLINT
+  3281, -4, 3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4,  // NOLINT
+  3297, -4, 3299, -4, 3308, -4, 3310, -4, 3315, -4, 1073745152, -29056, 3365, -29056, 3367, -29056,  // NOLINT
+  3373, -29056 };  // NOLINT
 static const uint16_t kToUppercaseMultiStrings1Size = 1;  // NOLINT
+static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings5[1] = {  // NOLINT
+  {{kSentinel}} }; // NOLINT
+static const uint16_t kToUppercaseTable5Size = 88;  // NOLINT
+static const int32_t kToUppercaseTable5[176] = {
+  1601, -4, 1603, -4, 1605, -4, 1607, -4, 1609, -4, 1611, -4, 1613, -4, 1615, -4,  // NOLINT
+  1617, -4, 1619, -4, 1621, -4, 1623, -4, 1625, -4, 1627, -4, 1629, -4, 1631, -4,  // NOLINT
+  1633, -4, 1635, -4, 1637, -4, 1639, -4, 1641, -4, 1643, -4, 1645, -4, 1665, -4,  // NOLINT
+  1667, -4, 1669, -4, 1671, -4, 1673, -4, 1675, -4, 1677, -4, 1679, -4, 1681, -4,  // NOLINT
+  1683, -4, 1685, -4, 1687, -4, 1827, -4, 1829, -4, 1831, -4, 1833, -4, 1835, -4,  // NOLINT
+  1837, -4, 1839, -4, 1843, -4, 1845, -4, 1847, -4, 1849, -4, 1851, -4, 1853, -4,  // NOLINT
+  1855, -4, 1857, -4, 1859, -4, 1861, -4, 1863, -4, 1865, -4, 1867, -4, 1869, -4,  // NOLINT
+  1871, -4, 1873, -4, 1875, -4, 1877, -4, 1879, -4, 1881, -4, 1883, -4, 1885, -4,  // NOLINT
+  1887, -4, 1889, -4, 1891, -4, 1893, -4, 1895, -4, 1897, -4, 1899, -4, 1901, -4,  // NOLINT
+  1903, -4, 1914, -4, 1916, -4, 1919, -4, 1921, -4, 1923, -4, 1925, -4, 1927, -4,  // NOLINT
+  1932, -4, 1937, -4, 1939, -4, 1953, -4, 1955, -4, 1957, -4, 1959, -4, 1961, -4 };  // NOLINT
+static const uint16_t kToUppercaseMultiStrings5Size = 1;  // NOLINT
 static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings7[12] = {  // NOLINT
   {{70, 70, kSentinel}}, {{70, 73, kSentinel}}, {{70, 76, kSentinel}}, {{70, 70, 73}},  // NOLINT
   {{70, 70, 76}}, {{83, 84, kSentinel}}, {{1348, 1350, kSentinel}}, {{1348, 1333, kSentinel}},  // NOLINT
@@ -1071,6 +1203,13 @@
                                            n,
                                            result,
                                            allow_caching_ptr);
+    case 5: return LookupMapping<true>(kToUppercaseTable5,
+                                           kToUppercaseTable5Size,
+                                           kToUppercaseMultiStrings5,
+                                           c,
+                                           n,
+                                           result,
+                                           allow_caching_ptr);
     case 7: return LookupMapping<true>(kToUppercaseTable7,
                                            kToUppercaseTable7Size,
                                            kToUppercaseMultiStrings7,
@@ -1084,8 +1223,8 @@
 
 static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings0[1] = {  // NOLINT
   {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable0Size = 462;  // NOLINT
-static const int32_t kEcma262CanonicalizeTable0[924] = {
+static const uint16_t kEcma262CanonicalizeTable0Size = 488;  // NOLINT
+static const int32_t kEcma262CanonicalizeTable0[976] = {
   1073741921, -128, 122, -128, 181, 2972, 1073742048, -128, 246, -128, 1073742072, -128, 254, -128, 255, 484,  // NOLINT
   257, -4, 259, -4, 261, -4, 263, -4, 265, -4, 267, -4, 269, -4, 271, -4,  // NOLINT
   273, -4, 275, -4, 277, -4, 279, -4, 281, -4, 283, -4, 285, -4, 287, -4,  // NOLINT
@@ -1104,61 +1243,81 @@
   511, -4, 513, -4, 515, -4, 517, -4, 519, -4, 521, -4, 523, -4, 525, -4,  // NOLINT
   527, -4, 529, -4, 531, -4, 533, -4, 535, -4, 537, -4, 539, -4, 541, -4,  // NOLINT
   543, -4, 547, -4, 549, -4, 551, -4, 553, -4, 555, -4, 557, -4, 559, -4,  // NOLINT
-  561, -4, 563, -4, 572, -4, 578, -4, 583, -4, 585, -4, 587, -4, 589, -4,  // NOLINT
-  591, -4, 595, -840, 596, -824, 1073742422, -820, 599, -820, 601, -808, 603, -812, 608, -820,  // NOLINT
-  611, -828, 616, -836, 617, -844, 619, 42972, 623, -844, 626, -852, 629, -856, 637, 42908,  // NOLINT
+  561, -4, 563, -4, 572, -4, 1073742399, 43260, 576, 43260, 578, -4, 583, -4, 585, -4,  // NOLINT
+  587, -4, 589, -4, 591, -4, 592, 43132, 593, 43120, 594, 43128, 595, -840, 596, -824,  // NOLINT
+  1073742422, -820, 599, -820, 601, -808, 603, -812, 608, -820, 611, -828, 613, 169120, 614, 169232,  // NOLINT
+  616, -836, 617, -844, 619, 42972, 623, -844, 625, 42996, 626, -852, 629, -856, 637, 42908,  // NOLINT
   640, -872, 643, -872, 648, -872, 649, -276, 1073742474, -868, 651, -868, 652, -284, 658, -876,  // NOLINT
-  837, 336, 1073742715, 520, 893, 520, 940, -152, 1073742765, -148, 943, -148, 1073742769, -128, 961, -128,  // NOLINT
-  962, -124, 1073742787, -128, 971, -128, 972, -256, 1073742797, -252, 974, -252, 976, -248, 977, -228,  // NOLINT
-  981, -188, 982, -216, 985, -4, 987, -4, 989, -4, 991, -4, 993, -4, 995, -4,  // NOLINT
-  997, -4, 999, -4, 1001, -4, 1003, -4, 1005, -4, 1007, -4, 1008, -344, 1009, -320,  // NOLINT
-  1010, 28, 1013, -384, 1016, -4, 1019, -4, 1073742896, -128, 1103, -128, 1073742928, -320, 1119, -320,  // NOLINT
-  1121, -4, 1123, -4, 1125, -4, 1127, -4, 1129, -4, 1131, -4, 1133, -4, 1135, -4,  // NOLINT
-  1137, -4, 1139, -4, 1141, -4, 1143, -4, 1145, -4, 1147, -4, 1149, -4, 1151, -4,  // NOLINT
-  1153, -4, 1163, -4, 1165, -4, 1167, -4, 1169, -4, 1171, -4, 1173, -4, 1175, -4,  // NOLINT
-  1177, -4, 1179, -4, 1181, -4, 1183, -4, 1185, -4, 1187, -4, 1189, -4, 1191, -4,  // NOLINT
-  1193, -4, 1195, -4, 1197, -4, 1199, -4, 1201, -4, 1203, -4, 1205, -4, 1207, -4,  // NOLINT
-  1209, -4, 1211, -4, 1213, -4, 1215, -4, 1218, -4, 1220, -4, 1222, -4, 1224, -4,  // NOLINT
-  1226, -4, 1228, -4, 1230, -4, 1231, -60, 1233, -4, 1235, -4, 1237, -4, 1239, -4,  // NOLINT
-  1241, -4, 1243, -4, 1245, -4, 1247, -4, 1249, -4, 1251, -4, 1253, -4, 1255, -4,  // NOLINT
-  1257, -4, 1259, -4, 1261, -4, 1263, -4, 1265, -4, 1267, -4, 1269, -4, 1271, -4,  // NOLINT
-  1273, -4, 1275, -4, 1277, -4, 1279, -4, 1281, -4, 1283, -4, 1285, -4, 1287, -4,  // NOLINT
-  1289, -4, 1291, -4, 1293, -4, 1295, -4, 1297, -4, 1299, -4, 1073743201, -192, 1414, -192,  // NOLINT
-  7549, 15256, 7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4,  // NOLINT
-  7695, -4, 7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4,  // NOLINT
-  7711, -4, 7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4,  // NOLINT
-  7727, -4, 7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4,  // NOLINT
-  7743, -4, 7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4,  // NOLINT
-  7759, -4, 7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4,  // NOLINT
-  7775, -4, 7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4,  // NOLINT
-  7791, -4, 7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4,  // NOLINT
-  7807, -4, 7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4,  // NOLINT
-  7823, -4, 7825, -4, 7827, -4, 7829, -4, 7835, -236, 7841, -4, 7843, -4, 7845, -4,  // NOLINT
-  7847, -4, 7849, -4, 7851, -4, 7853, -4, 7855, -4, 7857, -4, 7859, -4, 7861, -4,  // NOLINT
-  7863, -4, 7865, -4, 7867, -4, 7869, -4, 7871, -4, 7873, -4, 7875, -4, 7877, -4,  // NOLINT
-  7879, -4, 7881, -4, 7883, -4, 7885, -4, 7887, -4, 7889, -4, 7891, -4, 7893, -4,  // NOLINT
-  7895, -4, 7897, -4, 7899, -4, 7901, -4, 7903, -4, 7905, -4, 7907, -4, 7909, -4,  // NOLINT
-  7911, -4, 7913, -4, 7915, -4, 7917, -4, 7919, -4, 7921, -4, 7923, -4, 7925, -4,  // NOLINT
-  7927, -4, 7929, -4, 1073749760, 32, 7943, 32, 1073749776, 32, 7957, 32, 1073749792, 32, 7975, 32,  // NOLINT
-  1073749808, 32, 7991, 32, 1073749824, 32, 8005, 32, 8017, 32, 8019, 32, 8021, 32, 8023, 32,  // NOLINT
-  1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344, 8053, 344, 1073749878, 400, 8055, 400,  // NOLINT
-  1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504, 8061, 504, 1073749936, 32, 8113, 32,  // NOLINT
-  8126, -28820, 1073749968, 32, 8145, 32, 1073749984, 32, 8161, 32, 8165, 28 };  // NOLINT
+  837, 336, 881, -4, 883, -4, 887, -4, 1073742715, 520, 893, 520, 940, -152, 1073742765, -148,  // NOLINT
+  943, -148, 1073742769, -128, 961, -128, 962, -124, 1073742787, -128, 971, -128, 972, -256, 1073742797, -252,  // NOLINT
+  974, -252, 976, -248, 977, -228, 981, -188, 982, -216, 983, -32, 985, -4, 987, -4,  // NOLINT
+  989, -4, 991, -4, 993, -4, 995, -4, 997, -4, 999, -4, 1001, -4, 1003, -4,  // NOLINT
+  1005, -4, 1007, -4, 1008, -344, 1009, -320, 1010, 28, 1013, -384, 1016, -4, 1019, -4,  // NOLINT
+  1073742896, -128, 1103, -128, 1073742928, -320, 1119, -320, 1121, -4, 1123, -4, 1125, -4, 1127, -4,  // NOLINT
+  1129, -4, 1131, -4, 1133, -4, 1135, -4, 1137, -4, 1139, -4, 1141, -4, 1143, -4,  // NOLINT
+  1145, -4, 1147, -4, 1149, -4, 1151, -4, 1153, -4, 1163, -4, 1165, -4, 1167, -4,  // NOLINT
+  1169, -4, 1171, -4, 1173, -4, 1175, -4, 1177, -4, 1179, -4, 1181, -4, 1183, -4,  // NOLINT
+  1185, -4, 1187, -4, 1189, -4, 1191, -4, 1193, -4, 1195, -4, 1197, -4, 1199, -4,  // NOLINT
+  1201, -4, 1203, -4, 1205, -4, 1207, -4, 1209, -4, 1211, -4, 1213, -4, 1215, -4,  // NOLINT
+  1218, -4, 1220, -4, 1222, -4, 1224, -4, 1226, -4, 1228, -4, 1230, -4, 1231, -60,  // NOLINT
+  1233, -4, 1235, -4, 1237, -4, 1239, -4, 1241, -4, 1243, -4, 1245, -4, 1247, -4,  // NOLINT
+  1249, -4, 1251, -4, 1253, -4, 1255, -4, 1257, -4, 1259, -4, 1261, -4, 1263, -4,  // NOLINT
+  1265, -4, 1267, -4, 1269, -4, 1271, -4, 1273, -4, 1275, -4, 1277, -4, 1279, -4,  // NOLINT
+  1281, -4, 1283, -4, 1285, -4, 1287, -4, 1289, -4, 1291, -4, 1293, -4, 1295, -4,  // NOLINT
+  1297, -4, 1299, -4, 1301, -4, 1303, -4, 1305, -4, 1307, -4, 1309, -4, 1311, -4,  // NOLINT
+  1313, -4, 1315, -4, 1317, -4, 1319, -4, 1073743201, -192, 1414, -192, 7545, 141328, 7549, 15256,  // NOLINT
+  7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4, 7695, -4,  // NOLINT
+  7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4, 7711, -4,  // NOLINT
+  7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4, 7727, -4,  // NOLINT
+  7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4, 7743, -4,  // NOLINT
+  7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4, 7759, -4,  // NOLINT
+  7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4, 7775, -4,  // NOLINT
+  7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4, 7791, -4,  // NOLINT
+  7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4, 7807, -4,  // NOLINT
+  7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4, 7823, -4,  // NOLINT
+  7825, -4, 7827, -4, 7829, -4, 7835, -236, 7841, -4, 7843, -4, 7845, -4, 7847, -4,  // NOLINT
+  7849, -4, 7851, -4, 7853, -4, 7855, -4, 7857, -4, 7859, -4, 7861, -4, 7863, -4,  // NOLINT
+  7865, -4, 7867, -4, 7869, -4, 7871, -4, 7873, -4, 7875, -4, 7877, -4, 7879, -4,  // NOLINT
+  7881, -4, 7883, -4, 7885, -4, 7887, -4, 7889, -4, 7891, -4, 7893, -4, 7895, -4,  // NOLINT
+  7897, -4, 7899, -4, 7901, -4, 7903, -4, 7905, -4, 7907, -4, 7909, -4, 7911, -4,  // NOLINT
+  7913, -4, 7915, -4, 7917, -4, 7919, -4, 7921, -4, 7923, -4, 7925, -4, 7927, -4,  // NOLINT
+  7929, -4, 7931, -4, 7933, -4, 7935, -4, 1073749760, 32, 7943, 32, 1073749776, 32, 7957, 32,  // NOLINT
+  1073749792, 32, 7975, 32, 1073749808, 32, 7991, 32, 1073749824, 32, 8005, 32, 8017, 32, 8019, 32,  // NOLINT
+  8021, 32, 8023, 32, 1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344, 8053, 344,  // NOLINT
+  1073749878, 400, 8055, 400, 1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504, 8061, 504,  // NOLINT
+  1073749936, 32, 8113, 32, 8126, -28820, 1073749968, 32, 8145, 32, 1073749984, 32, 8161, 32, 8165, 28 };  // NOLINT
 static const uint16_t kEcma262CanonicalizeMultiStrings0Size = 1;  // NOLINT
 static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings1[1] = {  // NOLINT
   {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable1Size = 67;  // NOLINT
-static const int32_t kEcma262CanonicalizeTable1[134] = {
+static const uint16_t kEcma262CanonicalizeTable1Size = 73;  // NOLINT
+static const int32_t kEcma262CanonicalizeTable1[146] = {
   334, -112, 1073742192, -64, 383, -64, 388, -4, 1073743056, -104, 1257, -104, 1073744944, -192, 3166, -192,  // NOLINT
-  3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3190, -4, 3201, -4,  // NOLINT
-  3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4, 3217, -4,  // NOLINT
-  3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4, 3233, -4,  // NOLINT
-  3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4, 3249, -4,  // NOLINT
-  3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4, 3265, -4,  // NOLINT
-  3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4, 3281, -4,  // NOLINT
-  3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4, 3297, -4,  // NOLINT
-  3299, -4, 1073745152, -29056, 3365, -29056 };  // NOLINT
+  3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3187, -4, 3190, -4,  // NOLINT
+  3201, -4, 3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4,  // NOLINT
+  3217, -4, 3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4,  // NOLINT
+  3233, -4, 3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4,  // NOLINT
+  3249, -4, 3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4,  // NOLINT
+  3265, -4, 3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4,  // NOLINT
+  3281, -4, 3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4,  // NOLINT
+  3297, -4, 3299, -4, 3308, -4, 3310, -4, 3315, -4, 1073745152, -29056, 3365, -29056, 3367, -29056,  // NOLINT
+  3373, -29056 };  // NOLINT
 static const uint16_t kEcma262CanonicalizeMultiStrings1Size = 1;  // NOLINT
+static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings5[1] = {  // NOLINT
+  {{kSentinel}} }; // NOLINT
+static const uint16_t kEcma262CanonicalizeTable5Size = 88;  // NOLINT
+static const int32_t kEcma262CanonicalizeTable5[176] = {
+  1601, -4, 1603, -4, 1605, -4, 1607, -4, 1609, -4, 1611, -4, 1613, -4, 1615, -4,  // NOLINT
+  1617, -4, 1619, -4, 1621, -4, 1623, -4, 1625, -4, 1627, -4, 1629, -4, 1631, -4,  // NOLINT
+  1633, -4, 1635, -4, 1637, -4, 1639, -4, 1641, -4, 1643, -4, 1645, -4, 1665, -4,  // NOLINT
+  1667, -4, 1669, -4, 1671, -4, 1673, -4, 1675, -4, 1677, -4, 1679, -4, 1681, -4,  // NOLINT
+  1683, -4, 1685, -4, 1687, -4, 1827, -4, 1829, -4, 1831, -4, 1833, -4, 1835, -4,  // NOLINT
+  1837, -4, 1839, -4, 1843, -4, 1845, -4, 1847, -4, 1849, -4, 1851, -4, 1853, -4,  // NOLINT
+  1855, -4, 1857, -4, 1859, -4, 1861, -4, 1863, -4, 1865, -4, 1867, -4, 1869, -4,  // NOLINT
+  1871, -4, 1873, -4, 1875, -4, 1877, -4, 1879, -4, 1881, -4, 1883, -4, 1885, -4,  // NOLINT
+  1887, -4, 1889, -4, 1891, -4, 1893, -4, 1895, -4, 1897, -4, 1899, -4, 1901, -4,  // NOLINT
+  1903, -4, 1914, -4, 1916, -4, 1919, -4, 1921, -4, 1923, -4, 1925, -4, 1927, -4,  // NOLINT
+  1932, -4, 1937, -4, 1939, -4, 1953, -4, 1955, -4, 1957, -4, 1959, -4, 1961, -4 };  // NOLINT
+static const uint16_t kEcma262CanonicalizeMultiStrings5Size = 1;  // NOLINT
 static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings7[1] = {  // NOLINT
   {{kSentinel}} }; // NOLINT
 static const uint16_t kEcma262CanonicalizeTable7Size = 2;  // NOLINT
@@ -1185,6 +1344,13 @@
                                            n,
                                            result,
                                            allow_caching_ptr);
+    case 5: return LookupMapping<true>(kEcma262CanonicalizeTable5,
+                                           kEcma262CanonicalizeTable5Size,
+                                           kEcma262CanonicalizeMultiStrings5,
+                                           c,
+                                           n,
+                                           result,
+                                           allow_caching_ptr);
     case 7: return LookupMapping<true>(kEcma262CanonicalizeTable7,
                                            kEcma262CanonicalizeTable7Size,
                                            kEcma262CanonicalizeMultiStrings7,
@@ -1196,7 +1362,7 @@
   }
 }
 
-static const MultiCharacterSpecialCase<4> kEcma262UnCanonicalizeMultiStrings0[469] = {  // NOLINT
+static const MultiCharacterSpecialCase<4> kEcma262UnCanonicalizeMultiStrings0[497] = {  // NOLINT
   {{65, 97, kSentinel}}, {{90, 122, kSentinel}}, {{181, 924, 956, kSentinel}}, {{192, 224, kSentinel}},  // NOLINT
   {{214, 246, kSentinel}}, {{216, 248, kSentinel}}, {{222, 254, kSentinel}}, {{255, 376, kSentinel}},  // NOLINT
   {{256, 257, kSentinel}}, {{258, 259, kSentinel}}, {{260, 261, kSentinel}}, {{262, 263, kSentinel}},  // NOLINT
@@ -1238,16 +1404,19 @@
   {{546, 547, kSentinel}}, {{548, 549, kSentinel}}, {{550, 551, kSentinel}}, {{552, 553, kSentinel}},  // NOLINT
   {{554, 555, kSentinel}}, {{556, 557, kSentinel}}, {{558, 559, kSentinel}}, {{560, 561, kSentinel}},  // NOLINT
   {{562, 563, kSentinel}}, {{570, 11365, kSentinel}}, {{571, 572, kSentinel}}, {{574, 11366, kSentinel}},  // NOLINT
-  {{577, 578, kSentinel}}, {{580, 649, kSentinel}}, {{581, 652, kSentinel}}, {{582, 583, kSentinel}},  // NOLINT
-  {{584, 585, kSentinel}}, {{586, 587, kSentinel}}, {{588, 589, kSentinel}}, {{590, 591, kSentinel}},  // NOLINT
-  {{619, 11362, kSentinel}}, {{637, 11364, kSentinel}}, {{837, 921, 953, 8126}}, {{891, 1021, kSentinel}},  // NOLINT
-  {{893, 1023, kSentinel}}, {{902, 940, kSentinel}}, {{904, 941, kSentinel}}, {{906, 943, kSentinel}},  // NOLINT
-  {{908, 972, kSentinel}}, {{910, 973, kSentinel}}, {{911, 974, kSentinel}}, {{913, 945, kSentinel}},  // NOLINT
-  {{914, 946, 976, kSentinel}}, {{915, 947, kSentinel}}, {{916, 948, kSentinel}}, {{917, 949, 1013, kSentinel}},  // NOLINT
-  {{918, 950, kSentinel}}, {{919, 951, kSentinel}}, {{920, 952, 977, kSentinel}}, {{922, 954, 1008, kSentinel}},  // NOLINT
-  {{923, 955, kSentinel}}, {{925, 957, kSentinel}}, {{927, 959, kSentinel}}, {{928, 960, 982, kSentinel}},  // NOLINT
-  {{929, 961, 1009, kSentinel}}, {{931, 962, 963, kSentinel}}, {{932, 964, kSentinel}}, {{933, 965, kSentinel}},  // NOLINT
-  {{934, 966, 981, kSentinel}}, {{935, 967, kSentinel}}, {{939, 971, kSentinel}}, {{984, 985, kSentinel}},  // NOLINT
+  {{575, 11390, kSentinel}}, {{576, 11391, kSentinel}}, {{577, 578, kSentinel}}, {{580, 649, kSentinel}},  // NOLINT
+  {{581, 652, kSentinel}}, {{582, 583, kSentinel}}, {{584, 585, kSentinel}}, {{586, 587, kSentinel}},  // NOLINT
+  {{588, 589, kSentinel}}, {{590, 591, kSentinel}}, {{592, 11375, kSentinel}}, {{593, 11373, kSentinel}},  // NOLINT
+  {{594, 11376, kSentinel}}, {{613, 42893, kSentinel}}, {{614, 42922, kSentinel}}, {{619, 11362, kSentinel}},  // NOLINT
+  {{625, 11374, kSentinel}}, {{637, 11364, kSentinel}}, {{837, 921, 953, 8126}}, {{880, 881, kSentinel}},  // NOLINT
+  {{882, 883, kSentinel}}, {{886, 887, kSentinel}}, {{891, 1021, kSentinel}}, {{893, 1023, kSentinel}},  // NOLINT
+  {{902, 940, kSentinel}}, {{904, 941, kSentinel}}, {{906, 943, kSentinel}}, {{908, 972, kSentinel}},  // NOLINT
+  {{910, 973, kSentinel}}, {{911, 974, kSentinel}}, {{913, 945, kSentinel}}, {{914, 946, 976, kSentinel}},  // NOLINT
+  {{915, 947, kSentinel}}, {{916, 948, kSentinel}}, {{917, 949, 1013, kSentinel}}, {{918, 950, kSentinel}},  // NOLINT
+  {{919, 951, kSentinel}}, {{920, 952, 977, kSentinel}}, {{922, 954, 1008, kSentinel}}, {{923, 955, kSentinel}},  // NOLINT
+  {{925, 957, kSentinel}}, {{927, 959, kSentinel}}, {{928, 960, 982, kSentinel}}, {{929, 961, 1009, kSentinel}},  // NOLINT
+  {{931, 962, 963, kSentinel}}, {{932, 964, kSentinel}}, {{933, 965, kSentinel}}, {{934, 966, 981, kSentinel}},  // NOLINT
+  {{935, 967, kSentinel}}, {{939, 971, kSentinel}}, {{975, 983, kSentinel}}, {{984, 985, kSentinel}},  // NOLINT
   {{986, 987, kSentinel}}, {{988, 989, kSentinel}}, {{990, 991, kSentinel}}, {{992, 993, kSentinel}},  // NOLINT
   {{994, 995, kSentinel}}, {{996, 997, kSentinel}}, {{998, 999, kSentinel}}, {{1000, 1001, kSentinel}},  // NOLINT
   {{1002, 1003, kSentinel}}, {{1004, 1005, kSentinel}}, {{1006, 1007, kSentinel}}, {{1010, 1017, kSentinel}},  // NOLINT
@@ -1274,38 +1443,42 @@
   {{1276, 1277, kSentinel}}, {{1278, 1279, kSentinel}}, {{1280, 1281, kSentinel}}, {{1282, 1283, kSentinel}},  // NOLINT
   {{1284, 1285, kSentinel}}, {{1286, 1287, kSentinel}}, {{1288, 1289, kSentinel}}, {{1290, 1291, kSentinel}},  // NOLINT
   {{1292, 1293, kSentinel}}, {{1294, 1295, kSentinel}}, {{1296, 1297, kSentinel}}, {{1298, 1299, kSentinel}},  // NOLINT
-  {{1329, 1377, kSentinel}}, {{1366, 1414, kSentinel}}, {{4256, 11520, kSentinel}}, {{4293, 11557, kSentinel}},  // NOLINT
-  {{7549, 11363, kSentinel}}, {{7680, 7681, kSentinel}}, {{7682, 7683, kSentinel}}, {{7684, 7685, kSentinel}},  // NOLINT
-  {{7686, 7687, kSentinel}}, {{7688, 7689, kSentinel}}, {{7690, 7691, kSentinel}}, {{7692, 7693, kSentinel}},  // NOLINT
-  {{7694, 7695, kSentinel}}, {{7696, 7697, kSentinel}}, {{7698, 7699, kSentinel}}, {{7700, 7701, kSentinel}},  // NOLINT
-  {{7702, 7703, kSentinel}}, {{7704, 7705, kSentinel}}, {{7706, 7707, kSentinel}}, {{7708, 7709, kSentinel}},  // NOLINT
-  {{7710, 7711, kSentinel}}, {{7712, 7713, kSentinel}}, {{7714, 7715, kSentinel}}, {{7716, 7717, kSentinel}},  // NOLINT
-  {{7718, 7719, kSentinel}}, {{7720, 7721, kSentinel}}, {{7722, 7723, kSentinel}}, {{7724, 7725, kSentinel}},  // NOLINT
-  {{7726, 7727, kSentinel}}, {{7728, 7729, kSentinel}}, {{7730, 7731, kSentinel}}, {{7732, 7733, kSentinel}},  // NOLINT
-  {{7734, 7735, kSentinel}}, {{7736, 7737, kSentinel}}, {{7738, 7739, kSentinel}}, {{7740, 7741, kSentinel}},  // NOLINT
-  {{7742, 7743, kSentinel}}, {{7744, 7745, kSentinel}}, {{7746, 7747, kSentinel}}, {{7748, 7749, kSentinel}},  // NOLINT
-  {{7750, 7751, kSentinel}}, {{7752, 7753, kSentinel}}, {{7754, 7755, kSentinel}}, {{7756, 7757, kSentinel}},  // NOLINT
-  {{7758, 7759, kSentinel}}, {{7760, 7761, kSentinel}}, {{7762, 7763, kSentinel}}, {{7764, 7765, kSentinel}},  // NOLINT
-  {{7766, 7767, kSentinel}}, {{7768, 7769, kSentinel}}, {{7770, 7771, kSentinel}}, {{7772, 7773, kSentinel}},  // NOLINT
-  {{7774, 7775, kSentinel}}, {{7776, 7777, 7835, kSentinel}}, {{7778, 7779, kSentinel}}, {{7780, 7781, kSentinel}},  // NOLINT
-  {{7782, 7783, kSentinel}}, {{7784, 7785, kSentinel}}, {{7786, 7787, kSentinel}}, {{7788, 7789, kSentinel}},  // NOLINT
-  {{7790, 7791, kSentinel}}, {{7792, 7793, kSentinel}}, {{7794, 7795, kSentinel}}, {{7796, 7797, kSentinel}},  // NOLINT
-  {{7798, 7799, kSentinel}}, {{7800, 7801, kSentinel}}, {{7802, 7803, kSentinel}}, {{7804, 7805, kSentinel}},  // NOLINT
-  {{7806, 7807, kSentinel}}, {{7808, 7809, kSentinel}}, {{7810, 7811, kSentinel}}, {{7812, 7813, kSentinel}},  // NOLINT
-  {{7814, 7815, kSentinel}}, {{7816, 7817, kSentinel}}, {{7818, 7819, kSentinel}}, {{7820, 7821, kSentinel}},  // NOLINT
-  {{7822, 7823, kSentinel}}, {{7824, 7825, kSentinel}}, {{7826, 7827, kSentinel}}, {{7828, 7829, kSentinel}},  // NOLINT
-  {{7840, 7841, kSentinel}}, {{7842, 7843, kSentinel}}, {{7844, 7845, kSentinel}}, {{7846, 7847, kSentinel}},  // NOLINT
-  {{7848, 7849, kSentinel}}, {{7850, 7851, kSentinel}}, {{7852, 7853, kSentinel}}, {{7854, 7855, kSentinel}},  // NOLINT
-  {{7856, 7857, kSentinel}}, {{7858, 7859, kSentinel}}, {{7860, 7861, kSentinel}}, {{7862, 7863, kSentinel}},  // NOLINT
-  {{7864, 7865, kSentinel}}, {{7866, 7867, kSentinel}}, {{7868, 7869, kSentinel}}, {{7870, 7871, kSentinel}},  // NOLINT
-  {{7872, 7873, kSentinel}}, {{7874, 7875, kSentinel}}, {{7876, 7877, kSentinel}}, {{7878, 7879, kSentinel}},  // NOLINT
-  {{7880, 7881, kSentinel}}, {{7882, 7883, kSentinel}}, {{7884, 7885, kSentinel}}, {{7886, 7887, kSentinel}},  // NOLINT
-  {{7888, 7889, kSentinel}}, {{7890, 7891, kSentinel}}, {{7892, 7893, kSentinel}}, {{7894, 7895, kSentinel}},  // NOLINT
-  {{7896, 7897, kSentinel}}, {{7898, 7899, kSentinel}}, {{7900, 7901, kSentinel}}, {{7902, 7903, kSentinel}},  // NOLINT
-  {{7904, 7905, kSentinel}}, {{7906, 7907, kSentinel}}, {{7908, 7909, kSentinel}}, {{7910, 7911, kSentinel}},  // NOLINT
-  {{7912, 7913, kSentinel}}, {{7914, 7915, kSentinel}}, {{7916, 7917, kSentinel}}, {{7918, 7919, kSentinel}},  // NOLINT
-  {{7920, 7921, kSentinel}}, {{7922, 7923, kSentinel}}, {{7924, 7925, kSentinel}}, {{7926, 7927, kSentinel}},  // NOLINT
-  {{7928, 7929, kSentinel}}, {{7936, 7944, kSentinel}}, {{7943, 7951, kSentinel}}, {{7952, 7960, kSentinel}},  // NOLINT
+  {{1300, 1301, kSentinel}}, {{1302, 1303, kSentinel}}, {{1304, 1305, kSentinel}}, {{1306, 1307, kSentinel}},  // NOLINT
+  {{1308, 1309, kSentinel}}, {{1310, 1311, kSentinel}}, {{1312, 1313, kSentinel}}, {{1314, 1315, kSentinel}},  // NOLINT
+  {{1316, 1317, kSentinel}}, {{1318, 1319, kSentinel}}, {{1329, 1377, kSentinel}}, {{1366, 1414, kSentinel}},  // NOLINT
+  {{4256, 11520, kSentinel}}, {{4293, 11557, kSentinel}}, {{4295, 11559, kSentinel}}, {{4301, 11565, kSentinel}},  // NOLINT
+  {{7545, 42877, kSentinel}}, {{7549, 11363, kSentinel}}, {{7680, 7681, kSentinel}}, {{7682, 7683, kSentinel}},  // NOLINT
+  {{7684, 7685, kSentinel}}, {{7686, 7687, kSentinel}}, {{7688, 7689, kSentinel}}, {{7690, 7691, kSentinel}},  // NOLINT
+  {{7692, 7693, kSentinel}}, {{7694, 7695, kSentinel}}, {{7696, 7697, kSentinel}}, {{7698, 7699, kSentinel}},  // NOLINT
+  {{7700, 7701, kSentinel}}, {{7702, 7703, kSentinel}}, {{7704, 7705, kSentinel}}, {{7706, 7707, kSentinel}},  // NOLINT
+  {{7708, 7709, kSentinel}}, {{7710, 7711, kSentinel}}, {{7712, 7713, kSentinel}}, {{7714, 7715, kSentinel}},  // NOLINT
+  {{7716, 7717, kSentinel}}, {{7718, 7719, kSentinel}}, {{7720, 7721, kSentinel}}, {{7722, 7723, kSentinel}},  // NOLINT
+  {{7724, 7725, kSentinel}}, {{7726, 7727, kSentinel}}, {{7728, 7729, kSentinel}}, {{7730, 7731, kSentinel}},  // NOLINT
+  {{7732, 7733, kSentinel}}, {{7734, 7735, kSentinel}}, {{7736, 7737, kSentinel}}, {{7738, 7739, kSentinel}},  // NOLINT
+  {{7740, 7741, kSentinel}}, {{7742, 7743, kSentinel}}, {{7744, 7745, kSentinel}}, {{7746, 7747, kSentinel}},  // NOLINT
+  {{7748, 7749, kSentinel}}, {{7750, 7751, kSentinel}}, {{7752, 7753, kSentinel}}, {{7754, 7755, kSentinel}},  // NOLINT
+  {{7756, 7757, kSentinel}}, {{7758, 7759, kSentinel}}, {{7760, 7761, kSentinel}}, {{7762, 7763, kSentinel}},  // NOLINT
+  {{7764, 7765, kSentinel}}, {{7766, 7767, kSentinel}}, {{7768, 7769, kSentinel}}, {{7770, 7771, kSentinel}},  // NOLINT
+  {{7772, 7773, kSentinel}}, {{7774, 7775, kSentinel}}, {{7776, 7777, 7835, kSentinel}}, {{7778, 7779, kSentinel}},  // NOLINT
+  {{7780, 7781, kSentinel}}, {{7782, 7783, kSentinel}}, {{7784, 7785, kSentinel}}, {{7786, 7787, kSentinel}},  // NOLINT
+  {{7788, 7789, kSentinel}}, {{7790, 7791, kSentinel}}, {{7792, 7793, kSentinel}}, {{7794, 7795, kSentinel}},  // NOLINT
+  {{7796, 7797, kSentinel}}, {{7798, 7799, kSentinel}}, {{7800, 7801, kSentinel}}, {{7802, 7803, kSentinel}},  // NOLINT
+  {{7804, 7805, kSentinel}}, {{7806, 7807, kSentinel}}, {{7808, 7809, kSentinel}}, {{7810, 7811, kSentinel}},  // NOLINT
+  {{7812, 7813, kSentinel}}, {{7814, 7815, kSentinel}}, {{7816, 7817, kSentinel}}, {{7818, 7819, kSentinel}},  // NOLINT
+  {{7820, 7821, kSentinel}}, {{7822, 7823, kSentinel}}, {{7824, 7825, kSentinel}}, {{7826, 7827, kSentinel}},  // NOLINT
+  {{7828, 7829, kSentinel}}, {{7840, 7841, kSentinel}}, {{7842, 7843, kSentinel}}, {{7844, 7845, kSentinel}},  // NOLINT
+  {{7846, 7847, kSentinel}}, {{7848, 7849, kSentinel}}, {{7850, 7851, kSentinel}}, {{7852, 7853, kSentinel}},  // NOLINT
+  {{7854, 7855, kSentinel}}, {{7856, 7857, kSentinel}}, {{7858, 7859, kSentinel}}, {{7860, 7861, kSentinel}},  // NOLINT
+  {{7862, 7863, kSentinel}}, {{7864, 7865, kSentinel}}, {{7866, 7867, kSentinel}}, {{7868, 7869, kSentinel}},  // NOLINT
+  {{7870, 7871, kSentinel}}, {{7872, 7873, kSentinel}}, {{7874, 7875, kSentinel}}, {{7876, 7877, kSentinel}},  // NOLINT
+  {{7878, 7879, kSentinel}}, {{7880, 7881, kSentinel}}, {{7882, 7883, kSentinel}}, {{7884, 7885, kSentinel}},  // NOLINT
+  {{7886, 7887, kSentinel}}, {{7888, 7889, kSentinel}}, {{7890, 7891, kSentinel}}, {{7892, 7893, kSentinel}},  // NOLINT
+  {{7894, 7895, kSentinel}}, {{7896, 7897, kSentinel}}, {{7898, 7899, kSentinel}}, {{7900, 7901, kSentinel}},  // NOLINT
+  {{7902, 7903, kSentinel}}, {{7904, 7905, kSentinel}}, {{7906, 7907, kSentinel}}, {{7908, 7909, kSentinel}},  // NOLINT
+  {{7910, 7911, kSentinel}}, {{7912, 7913, kSentinel}}, {{7914, 7915, kSentinel}}, {{7916, 7917, kSentinel}},  // NOLINT
+  {{7918, 7919, kSentinel}}, {{7920, 7921, kSentinel}}, {{7922, 7923, kSentinel}}, {{7924, 7925, kSentinel}},  // NOLINT
+  {{7926, 7927, kSentinel}}, {{7928, 7929, kSentinel}}, {{7930, 7931, kSentinel}}, {{7932, 7933, kSentinel}},  // NOLINT
+  {{7934, 7935, kSentinel}}, {{7936, 7944, kSentinel}}, {{7943, 7951, kSentinel}}, {{7952, 7960, kSentinel}},  // NOLINT
   {{7957, 7965, kSentinel}}, {{7968, 7976, kSentinel}}, {{7975, 7983, kSentinel}}, {{7984, 7992, kSentinel}},  // NOLINT
   {{7991, 7999, kSentinel}}, {{8000, 8008, kSentinel}}, {{8005, 8013, kSentinel}}, {{8017, 8025, kSentinel}},  // NOLINT
   {{8019, 8027, kSentinel}}, {{8021, 8029, kSentinel}}, {{8023, 8031, kSentinel}}, {{8032, 8040, kSentinel}},  // NOLINT
@@ -1315,8 +1488,8 @@
   {{8061, 8187, kSentinel}}, {{8112, 8120, kSentinel}}, {{8113, 8121, kSentinel}}, {{8144, 8152, kSentinel}},  // NOLINT
   {{8145, 8153, kSentinel}}, {{8160, 8168, kSentinel}}, {{8161, 8169, kSentinel}}, {{8165, 8172, kSentinel}},  // NOLINT
   {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable0Size = 945;  // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable0[1890] = {
+static const uint16_t kEcma262UnCanonicalizeTable0Size = 990;  // NOLINT
+static const int32_t kEcma262UnCanonicalizeTable0[1980] = {
   1073741889, 1, 90, 5, 1073741921, 1, 122, 5, 181, 9, 1073742016, 13, 214, 17, 1073742040, 21,  // NOLINT
   222, 25, 1073742048, 13, 246, 17, 1073742072, 21, 254, 25, 255, 29, 256, 33, 257, 33,  // NOLINT
   258, 37, 259, 37, 260, 41, 261, 41, 262, 45, 263, 45, 264, 49, 265, 49,  // NOLINT
@@ -1355,127 +1528,187 @@
   539, 597, 540, 601, 541, 601, 542, 605, 543, 605, 544, 365, 546, 609, 547, 609,  // NOLINT
   548, 613, 549, 613, 550, 617, 551, 617, 552, 621, 553, 621, 554, 625, 555, 625,  // NOLINT
   556, 629, 557, 629, 558, 633, 559, 633, 560, 637, 561, 637, 562, 641, 563, 641,  // NOLINT
-  570, 645, 571, 649, 572, 649, 573, 353, 574, 653, 577, 657, 578, 657, 579, 277,  // NOLINT
-  580, 661, 581, 665, 582, 669, 583, 669, 584, 673, 585, 673, 586, 677, 587, 677,  // NOLINT
-  588, 681, 589, 681, 590, 685, 591, 685, 595, 281, 596, 293, 1073742422, 301, 599, 305,  // NOLINT
-  601, 317, 603, 321, 608, 329, 611, 333, 616, 345, 617, 341, 619, 689, 623, 357,  // NOLINT
-  626, 361, 629, 369, 637, 693, 640, 385, 643, 393, 648, 401, 649, 661, 1073742474, 409,  // NOLINT
-  651, 413, 652, 665, 658, 425, 837, 697, 1073742715, 701, 893, 705, 902, 709, 1073742728, 713,  // NOLINT
-  906, 717, 908, 721, 1073742734, 725, 911, 729, 913, 733, 914, 737, 1073742739, 741, 916, 745,  // NOLINT
-  917, 749, 1073742742, 753, 919, 757, 920, 761, 921, 697, 922, 765, 923, 769, 924, 9,  // NOLINT
-  1073742749, 773, 927, 777, 928, 781, 929, 785, 931, 789, 1073742756, 793, 933, 797, 934, 801,  // NOLINT
-  1073742759, 805, 939, 809, 940, 709, 1073742765, 713, 943, 717, 945, 733, 946, 737, 1073742771, 741,  // NOLINT
-  948, 745, 949, 749, 1073742774, 753, 951, 757, 952, 761, 953, 697, 954, 765, 955, 769,  // NOLINT
-  956, 9, 1073742781, 773, 959, 777, 960, 781, 961, 785, 962, 789, 963, 789, 1073742788, 793,  // NOLINT
-  965, 797, 966, 801, 1073742791, 805, 971, 809, 972, 721, 1073742797, 725, 974, 729, 976, 737,  // NOLINT
-  977, 761, 981, 801, 982, 781, 984, 813, 985, 813, 986, 817, 987, 817, 988, 821,  // NOLINT
-  989, 821, 990, 825, 991, 825, 992, 829, 993, 829, 994, 833, 995, 833, 996, 837,  // NOLINT
-  997, 837, 998, 841, 999, 841, 1000, 845, 1001, 845, 1002, 849, 1003, 849, 1004, 853,  // NOLINT
-  1005, 853, 1006, 857, 1007, 857, 1008, 765, 1009, 785, 1010, 861, 1013, 749, 1015, 865,  // NOLINT
-  1016, 865, 1017, 861, 1018, 869, 1019, 869, 1073742845, 701, 1023, 705, 1073742848, 873, 1039, 877,  // NOLINT
-  1073742864, 881, 1071, 885, 1073742896, 881, 1103, 885, 1073742928, 873, 1119, 877, 1120, 889, 1121, 889,  // NOLINT
-  1122, 893, 1123, 893, 1124, 897, 1125, 897, 1126, 901, 1127, 901, 1128, 905, 1129, 905,  // NOLINT
-  1130, 909, 1131, 909, 1132, 913, 1133, 913, 1134, 917, 1135, 917, 1136, 921, 1137, 921,  // NOLINT
-  1138, 925, 1139, 925, 1140, 929, 1141, 929, 1142, 933, 1143, 933, 1144, 937, 1145, 937,  // NOLINT
-  1146, 941, 1147, 941, 1148, 945, 1149, 945, 1150, 949, 1151, 949, 1152, 953, 1153, 953,  // NOLINT
-  1162, 957, 1163, 957, 1164, 961, 1165, 961, 1166, 965, 1167, 965, 1168, 969, 1169, 969,  // NOLINT
-  1170, 973, 1171, 973, 1172, 977, 1173, 977, 1174, 981, 1175, 981, 1176, 985, 1177, 985,  // NOLINT
-  1178, 989, 1179, 989, 1180, 993, 1181, 993, 1182, 997, 1183, 997, 1184, 1001, 1185, 1001,  // NOLINT
-  1186, 1005, 1187, 1005, 1188, 1009, 1189, 1009, 1190, 1013, 1191, 1013, 1192, 1017, 1193, 1017,  // NOLINT
-  1194, 1021, 1195, 1021, 1196, 1025, 1197, 1025, 1198, 1029, 1199, 1029, 1200, 1033, 1201, 1033,  // NOLINT
-  1202, 1037, 1203, 1037, 1204, 1041, 1205, 1041, 1206, 1045, 1207, 1045, 1208, 1049, 1209, 1049,  // NOLINT
-  1210, 1053, 1211, 1053, 1212, 1057, 1213, 1057, 1214, 1061, 1215, 1061, 1216, 1065, 1217, 1069,  // NOLINT
-  1218, 1069, 1219, 1073, 1220, 1073, 1221, 1077, 1222, 1077, 1223, 1081, 1224, 1081, 1225, 1085,  // NOLINT
-  1226, 1085, 1227, 1089, 1228, 1089, 1229, 1093, 1230, 1093, 1231, 1065, 1232, 1097, 1233, 1097,  // NOLINT
-  1234, 1101, 1235, 1101, 1236, 1105, 1237, 1105, 1238, 1109, 1239, 1109, 1240, 1113, 1241, 1113,  // NOLINT
-  1242, 1117, 1243, 1117, 1244, 1121, 1245, 1121, 1246, 1125, 1247, 1125, 1248, 1129, 1249, 1129,  // NOLINT
-  1250, 1133, 1251, 1133, 1252, 1137, 1253, 1137, 1254, 1141, 1255, 1141, 1256, 1145, 1257, 1145,  // NOLINT
-  1258, 1149, 1259, 1149, 1260, 1153, 1261, 1153, 1262, 1157, 1263, 1157, 1264, 1161, 1265, 1161,  // NOLINT
-  1266, 1165, 1267, 1165, 1268, 1169, 1269, 1169, 1270, 1173, 1271, 1173, 1272, 1177, 1273, 1177,  // NOLINT
-  1274, 1181, 1275, 1181, 1276, 1185, 1277, 1185, 1278, 1189, 1279, 1189, 1280, 1193, 1281, 1193,  // NOLINT
-  1282, 1197, 1283, 1197, 1284, 1201, 1285, 1201, 1286, 1205, 1287, 1205, 1288, 1209, 1289, 1209,  // NOLINT
-  1290, 1213, 1291, 1213, 1292, 1217, 1293, 1217, 1294, 1221, 1295, 1221, 1296, 1225, 1297, 1225,  // NOLINT
-  1298, 1229, 1299, 1229, 1073743153, 1233, 1366, 1237, 1073743201, 1233, 1414, 1237, 1073746080, 1241, 4293, 1245,  // NOLINT
-  7549, 1249, 7680, 1253, 7681, 1253, 7682, 1257, 7683, 1257, 7684, 1261, 7685, 1261, 7686, 1265,  // NOLINT
-  7687, 1265, 7688, 1269, 7689, 1269, 7690, 1273, 7691, 1273, 7692, 1277, 7693, 1277, 7694, 1281,  // NOLINT
-  7695, 1281, 7696, 1285, 7697, 1285, 7698, 1289, 7699, 1289, 7700, 1293, 7701, 1293, 7702, 1297,  // NOLINT
-  7703, 1297, 7704, 1301, 7705, 1301, 7706, 1305, 7707, 1305, 7708, 1309, 7709, 1309, 7710, 1313,  // NOLINT
-  7711, 1313, 7712, 1317, 7713, 1317, 7714, 1321, 7715, 1321, 7716, 1325, 7717, 1325, 7718, 1329,  // NOLINT
-  7719, 1329, 7720, 1333, 7721, 1333, 7722, 1337, 7723, 1337, 7724, 1341, 7725, 1341, 7726, 1345,  // NOLINT
-  7727, 1345, 7728, 1349, 7729, 1349, 7730, 1353, 7731, 1353, 7732, 1357, 7733, 1357, 7734, 1361,  // NOLINT
-  7735, 1361, 7736, 1365, 7737, 1365, 7738, 1369, 7739, 1369, 7740, 1373, 7741, 1373, 7742, 1377,  // NOLINT
-  7743, 1377, 7744, 1381, 7745, 1381, 7746, 1385, 7747, 1385, 7748, 1389, 7749, 1389, 7750, 1393,  // NOLINT
-  7751, 1393, 7752, 1397, 7753, 1397, 7754, 1401, 7755, 1401, 7756, 1405, 7757, 1405, 7758, 1409,  // NOLINT
-  7759, 1409, 7760, 1413, 7761, 1413, 7762, 1417, 7763, 1417, 7764, 1421, 7765, 1421, 7766, 1425,  // NOLINT
-  7767, 1425, 7768, 1429, 7769, 1429, 7770, 1433, 7771, 1433, 7772, 1437, 7773, 1437, 7774, 1441,  // NOLINT
-  7775, 1441, 7776, 1445, 7777, 1445, 7778, 1449, 7779, 1449, 7780, 1453, 7781, 1453, 7782, 1457,  // NOLINT
-  7783, 1457, 7784, 1461, 7785, 1461, 7786, 1465, 7787, 1465, 7788, 1469, 7789, 1469, 7790, 1473,  // NOLINT
-  7791, 1473, 7792, 1477, 7793, 1477, 7794, 1481, 7795, 1481, 7796, 1485, 7797, 1485, 7798, 1489,  // NOLINT
-  7799, 1489, 7800, 1493, 7801, 1493, 7802, 1497, 7803, 1497, 7804, 1501, 7805, 1501, 7806, 1505,  // NOLINT
-  7807, 1505, 7808, 1509, 7809, 1509, 7810, 1513, 7811, 1513, 7812, 1517, 7813, 1517, 7814, 1521,  // NOLINT
-  7815, 1521, 7816, 1525, 7817, 1525, 7818, 1529, 7819, 1529, 7820, 1533, 7821, 1533, 7822, 1537,  // NOLINT
-  7823, 1537, 7824, 1541, 7825, 1541, 7826, 1545, 7827, 1545, 7828, 1549, 7829, 1549, 7835, 1445,  // NOLINT
-  7840, 1553, 7841, 1553, 7842, 1557, 7843, 1557, 7844, 1561, 7845, 1561, 7846, 1565, 7847, 1565,  // NOLINT
-  7848, 1569, 7849, 1569, 7850, 1573, 7851, 1573, 7852, 1577, 7853, 1577, 7854, 1581, 7855, 1581,  // NOLINT
-  7856, 1585, 7857, 1585, 7858, 1589, 7859, 1589, 7860, 1593, 7861, 1593, 7862, 1597, 7863, 1597,  // NOLINT
-  7864, 1601, 7865, 1601, 7866, 1605, 7867, 1605, 7868, 1609, 7869, 1609, 7870, 1613, 7871, 1613,  // NOLINT
-  7872, 1617, 7873, 1617, 7874, 1621, 7875, 1621, 7876, 1625, 7877, 1625, 7878, 1629, 7879, 1629,  // NOLINT
-  7880, 1633, 7881, 1633, 7882, 1637, 7883, 1637, 7884, 1641, 7885, 1641, 7886, 1645, 7887, 1645,  // NOLINT
-  7888, 1649, 7889, 1649, 7890, 1653, 7891, 1653, 7892, 1657, 7893, 1657, 7894, 1661, 7895, 1661,  // NOLINT
-  7896, 1665, 7897, 1665, 7898, 1669, 7899, 1669, 7900, 1673, 7901, 1673, 7902, 1677, 7903, 1677,  // NOLINT
-  7904, 1681, 7905, 1681, 7906, 1685, 7907, 1685, 7908, 1689, 7909, 1689, 7910, 1693, 7911, 1693,  // NOLINT
-  7912, 1697, 7913, 1697, 7914, 1701, 7915, 1701, 7916, 1705, 7917, 1705, 7918, 1709, 7919, 1709,  // NOLINT
-  7920, 1713, 7921, 1713, 7922, 1717, 7923, 1717, 7924, 1721, 7925, 1721, 7926, 1725, 7927, 1725,  // NOLINT
-  7928, 1729, 7929, 1729, 1073749760, 1733, 7943, 1737, 1073749768, 1733, 7951, 1737, 1073749776, 1741, 7957, 1745,  // NOLINT
-  1073749784, 1741, 7965, 1745, 1073749792, 1749, 7975, 1753, 1073749800, 1749, 7983, 1753, 1073749808, 1757, 7991, 1761,  // NOLINT
-  1073749816, 1757, 7999, 1761, 1073749824, 1765, 8005, 1769, 1073749832, 1765, 8013, 1769, 8017, 1773, 8019, 1777,  // NOLINT
-  8021, 1781, 8023, 1785, 8025, 1773, 8027, 1777, 8029, 1781, 8031, 1785, 1073749856, 1789, 8039, 1793,  // NOLINT
-  1073749864, 1789, 8047, 1793, 1073749872, 1797, 8049, 1801, 1073749874, 1805, 8053, 1809, 1073749878, 1813, 8055, 1817,  // NOLINT
-  1073749880, 1821, 8057, 1825, 1073749882, 1829, 8059, 1833, 1073749884, 1837, 8061, 1841, 1073749936, 1845, 8113, 1849,  // NOLINT
-  1073749944, 1845, 8121, 1849, 1073749946, 1797, 8123, 1801, 8126, 697, 1073749960, 1805, 8139, 1809, 1073749968, 1853,  // NOLINT
-  8145, 1857, 1073749976, 1853, 8153, 1857, 1073749978, 1813, 8155, 1817, 1073749984, 1861, 8161, 1865, 8165, 1869,  // NOLINT
-  1073749992, 1861, 8169, 1865, 1073749994, 1829, 8171, 1833, 8172, 1869, 1073750008, 1821, 8185, 1825, 1073750010, 1837,  // NOLINT
-  8187, 1841 };  // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings0Size = 469;  // NOLINT
-static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings1[71] = {  // NOLINT
+  570, 645, 571, 649, 572, 649, 573, 353, 574, 653, 1073742399, 657, 576, 661, 577, 665,  // NOLINT
+  578, 665, 579, 277, 580, 669, 581, 673, 582, 677, 583, 677, 584, 681, 585, 681,  // NOLINT
+  586, 685, 587, 685, 588, 689, 589, 689, 590, 693, 591, 693, 592, 697, 593, 701,  // NOLINT
+  594, 705, 595, 281, 596, 293, 1073742422, 301, 599, 305, 601, 317, 603, 321, 608, 329,  // NOLINT
+  611, 333, 613, 709, 614, 713, 616, 345, 617, 341, 619, 717, 623, 357, 625, 721,  // NOLINT
+  626, 361, 629, 369, 637, 725, 640, 385, 643, 393, 648, 401, 649, 669, 1073742474, 409,  // NOLINT
+  651, 413, 652, 673, 658, 425, 837, 729, 880, 733, 881, 733, 882, 737, 883, 737,  // NOLINT
+  886, 741, 887, 741, 1073742715, 745, 893, 749, 902, 753, 1073742728, 757, 906, 761, 908, 765,  // NOLINT
+  1073742734, 769, 911, 773, 913, 777, 914, 781, 1073742739, 785, 916, 789, 917, 793, 1073742742, 797,  // NOLINT
+  919, 801, 920, 805, 921, 729, 922, 809, 923, 813, 924, 9, 1073742749, 817, 927, 821,  // NOLINT
+  928, 825, 929, 829, 931, 833, 1073742756, 837, 933, 841, 934, 845, 1073742759, 849, 939, 853,  // NOLINT
+  940, 753, 1073742765, 757, 943, 761, 945, 777, 946, 781, 1073742771, 785, 948, 789, 949, 793,  // NOLINT
+  1073742774, 797, 951, 801, 952, 805, 953, 729, 954, 809, 955, 813, 956, 9, 1073742781, 817,  // NOLINT
+  959, 821, 960, 825, 961, 829, 962, 833, 963, 833, 1073742788, 837, 965, 841, 966, 845,  // NOLINT
+  1073742791, 849, 971, 853, 972, 765, 1073742797, 769, 974, 773, 975, 857, 976, 781, 977, 805,  // NOLINT
+  981, 845, 982, 825, 983, 857, 984, 861, 985, 861, 986, 865, 987, 865, 988, 869,  // NOLINT
+  989, 869, 990, 873, 991, 873, 992, 877, 993, 877, 994, 881, 995, 881, 996, 885,  // NOLINT
+  997, 885, 998, 889, 999, 889, 1000, 893, 1001, 893, 1002, 897, 1003, 897, 1004, 901,  // NOLINT
+  1005, 901, 1006, 905, 1007, 905, 1008, 809, 1009, 829, 1010, 909, 1013, 793, 1015, 913,  // NOLINT
+  1016, 913, 1017, 909, 1018, 917, 1019, 917, 1073742845, 745, 1023, 749, 1073742848, 921, 1039, 925,  // NOLINT
+  1073742864, 929, 1071, 933, 1073742896, 929, 1103, 933, 1073742928, 921, 1119, 925, 1120, 937, 1121, 937,  // NOLINT
+  1122, 941, 1123, 941, 1124, 945, 1125, 945, 1126, 949, 1127, 949, 1128, 953, 1129, 953,  // NOLINT
+  1130, 957, 1131, 957, 1132, 961, 1133, 961, 1134, 965, 1135, 965, 1136, 969, 1137, 969,  // NOLINT
+  1138, 973, 1139, 973, 1140, 977, 1141, 977, 1142, 981, 1143, 981, 1144, 985, 1145, 985,  // NOLINT
+  1146, 989, 1147, 989, 1148, 993, 1149, 993, 1150, 997, 1151, 997, 1152, 1001, 1153, 1001,  // NOLINT
+  1162, 1005, 1163, 1005, 1164, 1009, 1165, 1009, 1166, 1013, 1167, 1013, 1168, 1017, 1169, 1017,  // NOLINT
+  1170, 1021, 1171, 1021, 1172, 1025, 1173, 1025, 1174, 1029, 1175, 1029, 1176, 1033, 1177, 1033,  // NOLINT
+  1178, 1037, 1179, 1037, 1180, 1041, 1181, 1041, 1182, 1045, 1183, 1045, 1184, 1049, 1185, 1049,  // NOLINT
+  1186, 1053, 1187, 1053, 1188, 1057, 1189, 1057, 1190, 1061, 1191, 1061, 1192, 1065, 1193, 1065,  // NOLINT
+  1194, 1069, 1195, 1069, 1196, 1073, 1197, 1073, 1198, 1077, 1199, 1077, 1200, 1081, 1201, 1081,  // NOLINT
+  1202, 1085, 1203, 1085, 1204, 1089, 1205, 1089, 1206, 1093, 1207, 1093, 1208, 1097, 1209, 1097,  // NOLINT
+  1210, 1101, 1211, 1101, 1212, 1105, 1213, 1105, 1214, 1109, 1215, 1109, 1216, 1113, 1217, 1117,  // NOLINT
+  1218, 1117, 1219, 1121, 1220, 1121, 1221, 1125, 1222, 1125, 1223, 1129, 1224, 1129, 1225, 1133,  // NOLINT
+  1226, 1133, 1227, 1137, 1228, 1137, 1229, 1141, 1230, 1141, 1231, 1113, 1232, 1145, 1233, 1145,  // NOLINT
+  1234, 1149, 1235, 1149, 1236, 1153, 1237, 1153, 1238, 1157, 1239, 1157, 1240, 1161, 1241, 1161,  // NOLINT
+  1242, 1165, 1243, 1165, 1244, 1169, 1245, 1169, 1246, 1173, 1247, 1173, 1248, 1177, 1249, 1177,  // NOLINT
+  1250, 1181, 1251, 1181, 1252, 1185, 1253, 1185, 1254, 1189, 1255, 1189, 1256, 1193, 1257, 1193,  // NOLINT
+  1258, 1197, 1259, 1197, 1260, 1201, 1261, 1201, 1262, 1205, 1263, 1205, 1264, 1209, 1265, 1209,  // NOLINT
+  1266, 1213, 1267, 1213, 1268, 1217, 1269, 1217, 1270, 1221, 1271, 1221, 1272, 1225, 1273, 1225,  // NOLINT
+  1274, 1229, 1275, 1229, 1276, 1233, 1277, 1233, 1278, 1237, 1279, 1237, 1280, 1241, 1281, 1241,  // NOLINT
+  1282, 1245, 1283, 1245, 1284, 1249, 1285, 1249, 1286, 1253, 1287, 1253, 1288, 1257, 1289, 1257,  // NOLINT
+  1290, 1261, 1291, 1261, 1292, 1265, 1293, 1265, 1294, 1269, 1295, 1269, 1296, 1273, 1297, 1273,  // NOLINT
+  1298, 1277, 1299, 1277, 1300, 1281, 1301, 1281, 1302, 1285, 1303, 1285, 1304, 1289, 1305, 1289,  // NOLINT
+  1306, 1293, 1307, 1293, 1308, 1297, 1309, 1297, 1310, 1301, 1311, 1301, 1312, 1305, 1313, 1305,  // NOLINT
+  1314, 1309, 1315, 1309, 1316, 1313, 1317, 1313, 1318, 1317, 1319, 1317, 1073743153, 1321, 1366, 1325,  // NOLINT
+  1073743201, 1321, 1414, 1325, 1073746080, 1329, 4293, 1333, 4295, 1337, 4301, 1341, 7545, 1345, 7549, 1349,  // NOLINT
+  7680, 1353, 7681, 1353, 7682, 1357, 7683, 1357, 7684, 1361, 7685, 1361, 7686, 1365, 7687, 1365,  // NOLINT
+  7688, 1369, 7689, 1369, 7690, 1373, 7691, 1373, 7692, 1377, 7693, 1377, 7694, 1381, 7695, 1381,  // NOLINT
+  7696, 1385, 7697, 1385, 7698, 1389, 7699, 1389, 7700, 1393, 7701, 1393, 7702, 1397, 7703, 1397,  // NOLINT
+  7704, 1401, 7705, 1401, 7706, 1405, 7707, 1405, 7708, 1409, 7709, 1409, 7710, 1413, 7711, 1413,  // NOLINT
+  7712, 1417, 7713, 1417, 7714, 1421, 7715, 1421, 7716, 1425, 7717, 1425, 7718, 1429, 7719, 1429,  // NOLINT
+  7720, 1433, 7721, 1433, 7722, 1437, 7723, 1437, 7724, 1441, 7725, 1441, 7726, 1445, 7727, 1445,  // NOLINT
+  7728, 1449, 7729, 1449, 7730, 1453, 7731, 1453, 7732, 1457, 7733, 1457, 7734, 1461, 7735, 1461,  // NOLINT
+  7736, 1465, 7737, 1465, 7738, 1469, 7739, 1469, 7740, 1473, 7741, 1473, 7742, 1477, 7743, 1477,  // NOLINT
+  7744, 1481, 7745, 1481, 7746, 1485, 7747, 1485, 7748, 1489, 7749, 1489, 7750, 1493, 7751, 1493,  // NOLINT
+  7752, 1497, 7753, 1497, 7754, 1501, 7755, 1501, 7756, 1505, 7757, 1505, 7758, 1509, 7759, 1509,  // NOLINT
+  7760, 1513, 7761, 1513, 7762, 1517, 7763, 1517, 7764, 1521, 7765, 1521, 7766, 1525, 7767, 1525,  // NOLINT
+  7768, 1529, 7769, 1529, 7770, 1533, 7771, 1533, 7772, 1537, 7773, 1537, 7774, 1541, 7775, 1541,  // NOLINT
+  7776, 1545, 7777, 1545, 7778, 1549, 7779, 1549, 7780, 1553, 7781, 1553, 7782, 1557, 7783, 1557,  // NOLINT
+  7784, 1561, 7785, 1561, 7786, 1565, 7787, 1565, 7788, 1569, 7789, 1569, 7790, 1573, 7791, 1573,  // NOLINT
+  7792, 1577, 7793, 1577, 7794, 1581, 7795, 1581, 7796, 1585, 7797, 1585, 7798, 1589, 7799, 1589,  // NOLINT
+  7800, 1593, 7801, 1593, 7802, 1597, 7803, 1597, 7804, 1601, 7805, 1601, 7806, 1605, 7807, 1605,  // NOLINT
+  7808, 1609, 7809, 1609, 7810, 1613, 7811, 1613, 7812, 1617, 7813, 1617, 7814, 1621, 7815, 1621,  // NOLINT
+  7816, 1625, 7817, 1625, 7818, 1629, 7819, 1629, 7820, 1633, 7821, 1633, 7822, 1637, 7823, 1637,  // NOLINT
+  7824, 1641, 7825, 1641, 7826, 1645, 7827, 1645, 7828, 1649, 7829, 1649, 7835, 1545, 7840, 1653,  // NOLINT
+  7841, 1653, 7842, 1657, 7843, 1657, 7844, 1661, 7845, 1661, 7846, 1665, 7847, 1665, 7848, 1669,  // NOLINT
+  7849, 1669, 7850, 1673, 7851, 1673, 7852, 1677, 7853, 1677, 7854, 1681, 7855, 1681, 7856, 1685,  // NOLINT
+  7857, 1685, 7858, 1689, 7859, 1689, 7860, 1693, 7861, 1693, 7862, 1697, 7863, 1697, 7864, 1701,  // NOLINT
+  7865, 1701, 7866, 1705, 7867, 1705, 7868, 1709, 7869, 1709, 7870, 1713, 7871, 1713, 7872, 1717,  // NOLINT
+  7873, 1717, 7874, 1721, 7875, 1721, 7876, 1725, 7877, 1725, 7878, 1729, 7879, 1729, 7880, 1733,  // NOLINT
+  7881, 1733, 7882, 1737, 7883, 1737, 7884, 1741, 7885, 1741, 7886, 1745, 7887, 1745, 7888, 1749,  // NOLINT
+  7889, 1749, 7890, 1753, 7891, 1753, 7892, 1757, 7893, 1757, 7894, 1761, 7895, 1761, 7896, 1765,  // NOLINT
+  7897, 1765, 7898, 1769, 7899, 1769, 7900, 1773, 7901, 1773, 7902, 1777, 7903, 1777, 7904, 1781,  // NOLINT
+  7905, 1781, 7906, 1785, 7907, 1785, 7908, 1789, 7909, 1789, 7910, 1793, 7911, 1793, 7912, 1797,  // NOLINT
+  7913, 1797, 7914, 1801, 7915, 1801, 7916, 1805, 7917, 1805, 7918, 1809, 7919, 1809, 7920, 1813,  // NOLINT
+  7921, 1813, 7922, 1817, 7923, 1817, 7924, 1821, 7925, 1821, 7926, 1825, 7927, 1825, 7928, 1829,  // NOLINT
+  7929, 1829, 7930, 1833, 7931, 1833, 7932, 1837, 7933, 1837, 7934, 1841, 7935, 1841, 1073749760, 1845,  // NOLINT
+  7943, 1849, 1073749768, 1845, 7951, 1849, 1073749776, 1853, 7957, 1857, 1073749784, 1853, 7965, 1857, 1073749792, 1861,  // NOLINT
+  7975, 1865, 1073749800, 1861, 7983, 1865, 1073749808, 1869, 7991, 1873, 1073749816, 1869, 7999, 1873, 1073749824, 1877,  // NOLINT
+  8005, 1881, 1073749832, 1877, 8013, 1881, 8017, 1885, 8019, 1889, 8021, 1893, 8023, 1897, 8025, 1885,  // NOLINT
+  8027, 1889, 8029, 1893, 8031, 1897, 1073749856, 1901, 8039, 1905, 1073749864, 1901, 8047, 1905, 1073749872, 1909,  // NOLINT
+  8049, 1913, 1073749874, 1917, 8053, 1921, 1073749878, 1925, 8055, 1929, 1073749880, 1933, 8057, 1937, 1073749882, 1941,  // NOLINT
+  8059, 1945, 1073749884, 1949, 8061, 1953, 1073749936, 1957, 8113, 1961, 1073749944, 1957, 8121, 1961, 1073749946, 1909,  // NOLINT
+  8123, 1913, 8126, 729, 1073749960, 1917, 8139, 1921, 1073749968, 1965, 8145, 1969, 1073749976, 1965, 8153, 1969,  // NOLINT
+  1073749978, 1925, 8155, 1929, 1073749984, 1973, 8161, 1977, 8165, 1981, 1073749992, 1973, 8169, 1977, 1073749994, 1941,  // NOLINT
+  8171, 1945, 8172, 1981, 1073750008, 1933, 8185, 1937, 1073750010, 1949, 8187, 1953 };  // NOLINT
+static const uint16_t kEcma262UnCanonicalizeMultiStrings0Size = 497;  // NOLINT
+static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings1[83] = {  // NOLINT
   {{8498, 8526}}, {{8544, 8560}}, {{8559, 8575}}, {{8579, 8580}},  // NOLINT
   {{9398, 9424}}, {{9423, 9449}}, {{11264, 11312}}, {{11310, 11358}},  // NOLINT
   {{11360, 11361}}, {{619, 11362}}, {{7549, 11363}}, {{637, 11364}},  // NOLINT
   {{570, 11365}}, {{574, 11366}}, {{11367, 11368}}, {{11369, 11370}},  // NOLINT
-  {{11371, 11372}}, {{11381, 11382}}, {{11392, 11393}}, {{11394, 11395}},  // NOLINT
-  {{11396, 11397}}, {{11398, 11399}}, {{11400, 11401}}, {{11402, 11403}},  // NOLINT
-  {{11404, 11405}}, {{11406, 11407}}, {{11408, 11409}}, {{11410, 11411}},  // NOLINT
-  {{11412, 11413}}, {{11414, 11415}}, {{11416, 11417}}, {{11418, 11419}},  // NOLINT
-  {{11420, 11421}}, {{11422, 11423}}, {{11424, 11425}}, {{11426, 11427}},  // NOLINT
-  {{11428, 11429}}, {{11430, 11431}}, {{11432, 11433}}, {{11434, 11435}},  // NOLINT
-  {{11436, 11437}}, {{11438, 11439}}, {{11440, 11441}}, {{11442, 11443}},  // NOLINT
-  {{11444, 11445}}, {{11446, 11447}}, {{11448, 11449}}, {{11450, 11451}},  // NOLINT
-  {{11452, 11453}}, {{11454, 11455}}, {{11456, 11457}}, {{11458, 11459}},  // NOLINT
-  {{11460, 11461}}, {{11462, 11463}}, {{11464, 11465}}, {{11466, 11467}},  // NOLINT
-  {{11468, 11469}}, {{11470, 11471}}, {{11472, 11473}}, {{11474, 11475}},  // NOLINT
-  {{11476, 11477}}, {{11478, 11479}}, {{11480, 11481}}, {{11482, 11483}},  // NOLINT
-  {{11484, 11485}}, {{11486, 11487}}, {{11488, 11489}}, {{11490, 11491}},  // NOLINT
-  {{4256, 11520}}, {{4293, 11557}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable1Size = 133;  // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable1[266] = {
+  {{11371, 11372}}, {{593, 11373}}, {{625, 11374}}, {{592, 11375}},  // NOLINT
+  {{594, 11376}}, {{11378, 11379}}, {{11381, 11382}}, {{575, 11390}},  // NOLINT
+  {{576, 11391}}, {{11392, 11393}}, {{11394, 11395}}, {{11396, 11397}},  // NOLINT
+  {{11398, 11399}}, {{11400, 11401}}, {{11402, 11403}}, {{11404, 11405}},  // NOLINT
+  {{11406, 11407}}, {{11408, 11409}}, {{11410, 11411}}, {{11412, 11413}},  // NOLINT
+  {{11414, 11415}}, {{11416, 11417}}, {{11418, 11419}}, {{11420, 11421}},  // NOLINT
+  {{11422, 11423}}, {{11424, 11425}}, {{11426, 11427}}, {{11428, 11429}},  // NOLINT
+  {{11430, 11431}}, {{11432, 11433}}, {{11434, 11435}}, {{11436, 11437}},  // NOLINT
+  {{11438, 11439}}, {{11440, 11441}}, {{11442, 11443}}, {{11444, 11445}},  // NOLINT
+  {{11446, 11447}}, {{11448, 11449}}, {{11450, 11451}}, {{11452, 11453}},  // NOLINT
+  {{11454, 11455}}, {{11456, 11457}}, {{11458, 11459}}, {{11460, 11461}},  // NOLINT
+  {{11462, 11463}}, {{11464, 11465}}, {{11466, 11467}}, {{11468, 11469}},  // NOLINT
+  {{11470, 11471}}, {{11472, 11473}}, {{11474, 11475}}, {{11476, 11477}},  // NOLINT
+  {{11478, 11479}}, {{11480, 11481}}, {{11482, 11483}}, {{11484, 11485}},  // NOLINT
+  {{11486, 11487}}, {{11488, 11489}}, {{11490, 11491}}, {{11499, 11500}},  // NOLINT
+  {{11501, 11502}}, {{11506, 11507}}, {{4256, 11520}}, {{4293, 11557}},  // NOLINT
+  {{4295, 11559}}, {{4301, 11565}}, {{kSentinel}} }; // NOLINT
+static const uint16_t kEcma262UnCanonicalizeTable1Size = 149;  // NOLINT
+static const int32_t kEcma262UnCanonicalizeTable1[298] = {
   306, 1, 334, 1, 1073742176, 5, 367, 9, 1073742192, 5, 383, 9, 387, 13, 388, 13,  // NOLINT
   1073743030, 17, 1231, 21, 1073743056, 17, 1257, 21, 1073744896, 25, 3118, 29, 1073744944, 25, 3166, 29,  // NOLINT
   3168, 33, 3169, 33, 3170, 37, 3171, 41, 3172, 45, 3173, 49, 3174, 53, 3175, 57,  // NOLINT
-  3176, 57, 3177, 61, 3178, 61, 3179, 65, 3180, 65, 3189, 69, 3190, 69, 3200, 73,  // NOLINT
-  3201, 73, 3202, 77, 3203, 77, 3204, 81, 3205, 81, 3206, 85, 3207, 85, 3208, 89,  // NOLINT
-  3209, 89, 3210, 93, 3211, 93, 3212, 97, 3213, 97, 3214, 101, 3215, 101, 3216, 105,  // NOLINT
-  3217, 105, 3218, 109, 3219, 109, 3220, 113, 3221, 113, 3222, 117, 3223, 117, 3224, 121,  // NOLINT
-  3225, 121, 3226, 125, 3227, 125, 3228, 129, 3229, 129, 3230, 133, 3231, 133, 3232, 137,  // NOLINT
-  3233, 137, 3234, 141, 3235, 141, 3236, 145, 3237, 145, 3238, 149, 3239, 149, 3240, 153,  // NOLINT
-  3241, 153, 3242, 157, 3243, 157, 3244, 161, 3245, 161, 3246, 165, 3247, 165, 3248, 169,  // NOLINT
-  3249, 169, 3250, 173, 3251, 173, 3252, 177, 3253, 177, 3254, 181, 3255, 181, 3256, 185,  // NOLINT
-  3257, 185, 3258, 189, 3259, 189, 3260, 193, 3261, 193, 3262, 197, 3263, 197, 3264, 201,  // NOLINT
-  3265, 201, 3266, 205, 3267, 205, 3268, 209, 3269, 209, 3270, 213, 3271, 213, 3272, 217,  // NOLINT
-  3273, 217, 3274, 221, 3275, 221, 3276, 225, 3277, 225, 3278, 229, 3279, 229, 3280, 233,  // NOLINT
-  3281, 233, 3282, 237, 3283, 237, 3284, 241, 3285, 241, 3286, 245, 3287, 245, 3288, 249,  // NOLINT
-  3289, 249, 3290, 253, 3291, 253, 3292, 257, 3293, 257, 3294, 261, 3295, 261, 3296, 265,  // NOLINT
-  3297, 265, 3298, 269, 3299, 269, 1073745152, 273, 3365, 277 };  // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings1Size = 71;  // NOLINT
+  3176, 57, 3177, 61, 3178, 61, 3179, 65, 3180, 65, 3181, 69, 3182, 73, 3183, 77,  // NOLINT
+  3184, 81, 3186, 85, 3187, 85, 3189, 89, 3190, 89, 1073745022, 93, 3199, 97, 3200, 101,  // NOLINT
+  3201, 101, 3202, 105, 3203, 105, 3204, 109, 3205, 109, 3206, 113, 3207, 113, 3208, 117,  // NOLINT
+  3209, 117, 3210, 121, 3211, 121, 3212, 125, 3213, 125, 3214, 129, 3215, 129, 3216, 133,  // NOLINT
+  3217, 133, 3218, 137, 3219, 137, 3220, 141, 3221, 141, 3222, 145, 3223, 145, 3224, 149,  // NOLINT
+  3225, 149, 3226, 153, 3227, 153, 3228, 157, 3229, 157, 3230, 161, 3231, 161, 3232, 165,  // NOLINT
+  3233, 165, 3234, 169, 3235, 169, 3236, 173, 3237, 173, 3238, 177, 3239, 177, 3240, 181,  // NOLINT
+  3241, 181, 3242, 185, 3243, 185, 3244, 189, 3245, 189, 3246, 193, 3247, 193, 3248, 197,  // NOLINT
+  3249, 197, 3250, 201, 3251, 201, 3252, 205, 3253, 205, 3254, 209, 3255, 209, 3256, 213,  // NOLINT
+  3257, 213, 3258, 217, 3259, 217, 3260, 221, 3261, 221, 3262, 225, 3263, 225, 3264, 229,  // NOLINT
+  3265, 229, 3266, 233, 3267, 233, 3268, 237, 3269, 237, 3270, 241, 3271, 241, 3272, 245,  // NOLINT
+  3273, 245, 3274, 249, 3275, 249, 3276, 253, 3277, 253, 3278, 257, 3279, 257, 3280, 261,  // NOLINT
+  3281, 261, 3282, 265, 3283, 265, 3284, 269, 3285, 269, 3286, 273, 3287, 273, 3288, 277,  // NOLINT
+  3289, 277, 3290, 281, 3291, 281, 3292, 285, 3293, 285, 3294, 289, 3295, 289, 3296, 293,  // NOLINT
+  3297, 293, 3298, 297, 3299, 297, 3307, 301, 3308, 301, 3309, 305, 3310, 305, 3314, 309,  // NOLINT
+  3315, 309, 1073745152, 313, 3365, 317, 3367, 321, 3373, 325 };  // NOLINT
+static const uint16_t kEcma262UnCanonicalizeMultiStrings1Size = 83;  // NOLINT
+static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings5[92] = {  // NOLINT
+  {{42560, 42561}}, {{42562, 42563}}, {{42564, 42565}}, {{42566, 42567}},  // NOLINT
+  {{42568, 42569}}, {{42570, 42571}}, {{42572, 42573}}, {{42574, 42575}},  // NOLINT
+  {{42576, 42577}}, {{42578, 42579}}, {{42580, 42581}}, {{42582, 42583}},  // NOLINT
+  {{42584, 42585}}, {{42586, 42587}}, {{42588, 42589}}, {{42590, 42591}},  // NOLINT
+  {{42592, 42593}}, {{42594, 42595}}, {{42596, 42597}}, {{42598, 42599}},  // NOLINT
+  {{42600, 42601}}, {{42602, 42603}}, {{42604, 42605}}, {{42624, 42625}},  // NOLINT
+  {{42626, 42627}}, {{42628, 42629}}, {{42630, 42631}}, {{42632, 42633}},  // NOLINT
+  {{42634, 42635}}, {{42636, 42637}}, {{42638, 42639}}, {{42640, 42641}},  // NOLINT
+  {{42642, 42643}}, {{42644, 42645}}, {{42646, 42647}}, {{42786, 42787}},  // NOLINT
+  {{42788, 42789}}, {{42790, 42791}}, {{42792, 42793}}, {{42794, 42795}},  // NOLINT
+  {{42796, 42797}}, {{42798, 42799}}, {{42802, 42803}}, {{42804, 42805}},  // NOLINT
+  {{42806, 42807}}, {{42808, 42809}}, {{42810, 42811}}, {{42812, 42813}},  // NOLINT
+  {{42814, 42815}}, {{42816, 42817}}, {{42818, 42819}}, {{42820, 42821}},  // NOLINT
+  {{42822, 42823}}, {{42824, 42825}}, {{42826, 42827}}, {{42828, 42829}},  // NOLINT
+  {{42830, 42831}}, {{42832, 42833}}, {{42834, 42835}}, {{42836, 42837}},  // NOLINT
+  {{42838, 42839}}, {{42840, 42841}}, {{42842, 42843}}, {{42844, 42845}},  // NOLINT
+  {{42846, 42847}}, {{42848, 42849}}, {{42850, 42851}}, {{42852, 42853}},  // NOLINT
+  {{42854, 42855}}, {{42856, 42857}}, {{42858, 42859}}, {{42860, 42861}},  // NOLINT
+  {{42862, 42863}}, {{42873, 42874}}, {{42875, 42876}}, {{7545, 42877}},  // NOLINT
+  {{42878, 42879}}, {{42880, 42881}}, {{42882, 42883}}, {{42884, 42885}},  // NOLINT
+  {{42886, 42887}}, {{42891, 42892}}, {{613, 42893}}, {{42896, 42897}},  // NOLINT
+  {{42898, 42899}}, {{42912, 42913}}, {{42914, 42915}}, {{42916, 42917}},  // NOLINT
+  {{42918, 42919}}, {{42920, 42921}}, {{614, 42922}}, {{kSentinel}} }; // NOLINT
+static const uint16_t kEcma262UnCanonicalizeTable5Size = 179;  // NOLINT
+static const int32_t kEcma262UnCanonicalizeTable5[358] = {
+  1600, 1, 1601, 1, 1602, 5, 1603, 5, 1604, 9, 1605, 9, 1606, 13, 1607, 13,  // NOLINT
+  1608, 17, 1609, 17, 1610, 21, 1611, 21, 1612, 25, 1613, 25, 1614, 29, 1615, 29,  // NOLINT
+  1616, 33, 1617, 33, 1618, 37, 1619, 37, 1620, 41, 1621, 41, 1622, 45, 1623, 45,  // NOLINT
+  1624, 49, 1625, 49, 1626, 53, 1627, 53, 1628, 57, 1629, 57, 1630, 61, 1631, 61,  // NOLINT
+  1632, 65, 1633, 65, 1634, 69, 1635, 69, 1636, 73, 1637, 73, 1638, 77, 1639, 77,  // NOLINT
+  1640, 81, 1641, 81, 1642, 85, 1643, 85, 1644, 89, 1645, 89, 1664, 93, 1665, 93,  // NOLINT
+  1666, 97, 1667, 97, 1668, 101, 1669, 101, 1670, 105, 1671, 105, 1672, 109, 1673, 109,  // NOLINT
+  1674, 113, 1675, 113, 1676, 117, 1677, 117, 1678, 121, 1679, 121, 1680, 125, 1681, 125,  // NOLINT
+  1682, 129, 1683, 129, 1684, 133, 1685, 133, 1686, 137, 1687, 137, 1826, 141, 1827, 141,  // NOLINT
+  1828, 145, 1829, 145, 1830, 149, 1831, 149, 1832, 153, 1833, 153, 1834, 157, 1835, 157,  // NOLINT
+  1836, 161, 1837, 161, 1838, 165, 1839, 165, 1842, 169, 1843, 169, 1844, 173, 1845, 173,  // NOLINT
+  1846, 177, 1847, 177, 1848, 181, 1849, 181, 1850, 185, 1851, 185, 1852, 189, 1853, 189,  // NOLINT
+  1854, 193, 1855, 193, 1856, 197, 1857, 197, 1858, 201, 1859, 201, 1860, 205, 1861, 205,  // NOLINT
+  1862, 209, 1863, 209, 1864, 213, 1865, 213, 1866, 217, 1867, 217, 1868, 221, 1869, 221,  // NOLINT
+  1870, 225, 1871, 225, 1872, 229, 1873, 229, 1874, 233, 1875, 233, 1876, 237, 1877, 237,  // NOLINT
+  1878, 241, 1879, 241, 1880, 245, 1881, 245, 1882, 249, 1883, 249, 1884, 253, 1885, 253,  // NOLINT
+  1886, 257, 1887, 257, 1888, 261, 1889, 261, 1890, 265, 1891, 265, 1892, 269, 1893, 269,  // NOLINT
+  1894, 273, 1895, 273, 1896, 277, 1897, 277, 1898, 281, 1899, 281, 1900, 285, 1901, 285,  // NOLINT
+  1902, 289, 1903, 289, 1913, 293, 1914, 293, 1915, 297, 1916, 297, 1917, 301, 1918, 305,  // NOLINT
+  1919, 305, 1920, 309, 1921, 309, 1922, 313, 1923, 313, 1924, 317, 1925, 317, 1926, 321,  // NOLINT
+  1927, 321, 1931, 325, 1932, 325, 1933, 329, 1936, 333, 1937, 333, 1938, 337, 1939, 337,  // NOLINT
+  1952, 341, 1953, 341, 1954, 345, 1955, 345, 1956, 349, 1957, 349, 1958, 353, 1959, 353,  // NOLINT
+  1960, 357, 1961, 357, 1962, 361 };  // NOLINT
+static const uint16_t kEcma262UnCanonicalizeMultiStrings5Size = 92;  // NOLINT
 static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings7[3] = {  // NOLINT
   {{65313, 65345}}, {{65338, 65370}}, {{kSentinel}} }; // NOLINT
 static const uint16_t kEcma262UnCanonicalizeTable7Size = 4;  // NOLINT
@@ -1502,6 +1735,13 @@
                                            n,
                                            result,
                                            allow_caching_ptr);
+    case 5: return LookupMapping<true>(kEcma262UnCanonicalizeTable5,
+                                           kEcma262UnCanonicalizeTable5Size,
+                                           kEcma262UnCanonicalizeMultiStrings5,
+                                           c,
+                                           n,
+                                           result,
+                                           allow_caching_ptr);
     case 7: return LookupMapping<true>(kEcma262UnCanonicalizeTable7,
                                            kEcma262UnCanonicalizeTable7Size,
                                            kEcma262UnCanonicalizeMultiStrings7,
@@ -1577,9 +1817,11 @@
 int UnicodeData::GetByteCount() {
   return kUppercaseTable0Size * sizeof(int32_t)  // NOLINT
       + kUppercaseTable1Size * sizeof(int32_t)  // NOLINT
+      + kUppercaseTable5Size * sizeof(int32_t)  // NOLINT
       + kUppercaseTable7Size * sizeof(int32_t)  // NOLINT
       + kLowercaseTable0Size * sizeof(int32_t)  // NOLINT
       + kLowercaseTable1Size * sizeof(int32_t)  // NOLINT
+      + kLowercaseTable5Size * sizeof(int32_t)  // NOLINT
       + kLowercaseTable7Size * sizeof(int32_t)  // NOLINT
       + kLetterTable0Size * sizeof(int32_t)  // NOLINT
       + kLetterTable1Size * sizeof(int32_t)  // NOLINT
@@ -1592,6 +1834,7 @@
       + kSpaceTable0Size * sizeof(int32_t)  // NOLINT
       + kSpaceTable1Size * sizeof(int32_t)  // NOLINT
       + kNumberTable0Size * sizeof(int32_t)  // NOLINT
+      + kNumberTable5Size * sizeof(int32_t)  // NOLINT
       + kNumberTable7Size * sizeof(int32_t)  // NOLINT
       + kWhiteSpaceTable0Size * sizeof(int32_t)  // NOLINT
       + kWhiteSpaceTable1Size * sizeof(int32_t)  // NOLINT
@@ -1606,15 +1849,19 @@
       + kConnectorPunctuationTable7Size * sizeof(int32_t)  // NOLINT
       + kToLowercaseMultiStrings0Size * sizeof(MultiCharacterSpecialCase<2>)  // NOLINT
       + kToLowercaseMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
+      + kToLowercaseMultiStrings5Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kToLowercaseMultiStrings7Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kToUppercaseMultiStrings0Size * sizeof(MultiCharacterSpecialCase<3>)  // NOLINT
       + kToUppercaseMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
+      + kToUppercaseMultiStrings5Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kToUppercaseMultiStrings7Size * sizeof(MultiCharacterSpecialCase<3>)  // NOLINT
       + kEcma262CanonicalizeMultiStrings0Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kEcma262CanonicalizeMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
+      + kEcma262CanonicalizeMultiStrings5Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kEcma262CanonicalizeMultiStrings7Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kEcma262UnCanonicalizeMultiStrings0Size * sizeof(MultiCharacterSpecialCase<4>)  // NOLINT
       + kEcma262UnCanonicalizeMultiStrings1Size * sizeof(MultiCharacterSpecialCase<2>)  // NOLINT
+      + kEcma262UnCanonicalizeMultiStrings5Size * sizeof(MultiCharacterSpecialCase<2>)  // NOLINT
       + kEcma262UnCanonicalizeMultiStrings7Size * sizeof(MultiCharacterSpecialCase<2>)  // NOLINT
       + kCanonicalizationRangeMultiStrings0Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kCanonicalizationRangeMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
diff --git a/src/unicode.h b/src/unicode.h
index 39fc349..94ab1b4 100644
--- a/src/unicode.h
+++ b/src/unicode.h
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,7 +44,7 @@
  * The max length of the result of converting the case of a single
  * character.
  */
-static const int kMaxMappingSize = 4;
+const int kMaxMappingSize = 4;
 
 template <class T, int size = 256>
 class Predicate {
@@ -100,7 +100,7 @@
   static const uchar kMaxCodePoint;
 };
 
-// --- U t f   8 ---
+// --- U t f   8   a n d   16 ---
 
 template <typename Data>
 class Buffer {
@@ -114,10 +114,46 @@
   unsigned length_;
 };
 
+
+class Utf16 {
+ public:
+  static inline bool IsLeadSurrogate(int code) {
+    if (code == kNoPreviousCharacter) return false;
+    return (code & 0xfc00) == 0xd800;
+  }
+  static inline bool IsTrailSurrogate(int code) {
+    if (code == kNoPreviousCharacter) return false;
+    return (code & 0xfc00) == 0xdc00;
+  }
+
+  static inline int CombineSurrogatePair(uchar lead, uchar trail) {
+    return 0x10000 + ((lead & 0x3ff) << 10) + (trail & 0x3ff);
+  }
+  static const int kNoPreviousCharacter = -1;
+  static const uchar kMaxNonSurrogateCharCode = 0xffff;
+  // Encoding a single UTF-16 code unit will produce 1, 2 or 3 bytes
+  // of UTF-8 data.  The special case where the unit is a surrogate
+  // trail produces 1 byte net, because the encoding of the pair is
+  // 4 bytes and the 3 bytes that were used to encode the lead surrogate
+  // can be reclaimed.
+  static const int kMaxExtraUtf8BytesForOneUtf16CodeUnit = 3;
+  // One UTF-16 surrogate is encoded (illegally) as 3 UTF-8 bytes.
+  // The illegality stems from the surrogate not being part of a pair.
+  static const int kUtf8BytesToCodeASurrogate = 3;
+  static inline uchar LeadSurrogate(int char_code) {
+    return 0xd800 + (((char_code - 0x10000) >> 10) & 0x3ff);
+  }
+  static inline uchar TrailSurrogate(int char_code) {
+    return 0xdc00 + (char_code & 0x3ff);
+  }
+};
+
+
 class Utf8 {
  public:
-  static inline uchar Length(uchar chr);
-  static inline unsigned Encode(char* out, uchar c);
+  static inline uchar Length(uchar chr, int previous);
+  static inline unsigned Encode(
+      char* out, uchar c, int previous);
   static const byte* ReadBlock(Buffer<const char*> str, byte* buffer,
       unsigned capacity, unsigned* chars_read, unsigned* offset);
   static uchar CalculateValue(const byte* str,
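The Utf16 helpers above pack the surrogate arithmetic tightly; the following standalone sketch (local copies of the helpers, not V8's own) shows the round trip between a supplementary-plane code point and its surrogate pair:

    // Standalone sketch of the surrogate math introduced in unicode.h above.
    #include <cassert>

    static inline unsigned LeadSurrogate(unsigned cp) {
      return 0xd800 + (((cp - 0x10000) >> 10) & 0x3ff);
    }
    static inline unsigned TrailSurrogate(unsigned cp) {
      return 0xdc00 + (cp & 0x3ff);
    }
    static inline unsigned CombineSurrogatePair(unsigned lead, unsigned trail) {
      return 0x10000 + ((lead & 0x3ff) << 10) + (trail & 0x3ff);
    }

    int main() {
      unsigned cp = 0x1F600;                // any supplementary-plane code point
      unsigned lead = LeadSurrogate(cp);    // 0xD83D: (cp - 0x10000) >> 10, biased
      unsigned trail = TrailSurrogate(cp);  // 0xDE00: low 10 bits, biased
      assert(CombineSurrogatePair(lead, trail) == cp);  // exact round trip
      return 0;
    }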
@@ -130,6 +166,11 @@
   static const unsigned kMaxThreeByteChar = 0xffff;
   static const unsigned kMaxFourByteChar  = 0x1fffff;
 
+  // A single surrogate is coded as a 3 byte UTF-8 sequence, but two together
+  // that match are coded as a 4 byte UTF-8 sequence.
+  static const unsigned kBytesSavedByCombiningSurrogates = 2;
+  static const unsigned kSizeOfUnmatchedSurrogate = 3;
+
  private:
   template <unsigned s> friend class Utf8InputBuffer;
   friend class Test;
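The two constants above encode a simple byte-count identity: a lone surrogate costs 3 UTF-8 bytes, while a matched pair becomes one 4-byte sequence, so combining a pair reclaims 3 + 3 - 4 = 2 bytes. A sketch of the accounting (kSizeOfSurrogatePair is an illustrative name, not in the patch):

    const unsigned kSizeOfUnmatchedSurrogate = 3;  // lone surrogate -> 3 bytes
    const unsigned kSizeOfSurrogatePair = 4;       // matched pair -> 4 bytes total
    const unsigned kBytesSavedByCombiningSurrogates =
        2 * kSizeOfUnmatchedSurrogate - kSizeOfSurrogatePair;  // 3 + 3 - 4 == 2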
@@ -147,6 +188,7 @@
   // Note that the default implementation is not efficient.
   virtual void Seek(unsigned);
   unsigned Length();
+  unsigned Utf16Length();
   virtual ~CharacterStream() { }
   static inline bool EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
       unsigned& offset);
@@ -156,6 +198,7 @@
       unsigned capacity, unsigned& offset);
   static inline uchar DecodeCharacter(const byte* buffer, unsigned* offset);
   virtual void Rewind() = 0;
+
  protected:
   virtual void FillBuffer() = 0;
   // The number of characters left in the current buffer
diff --git a/src/uri.js b/src/uri.js
index c910d75..b195f3d 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -111,47 +111,59 @@
     var o1 = octets[1];
     if (o0 < 0xe0) {
       var a = o0 & 0x1f;
-      if ((o1 < 0x80) || (o1 > 0xbf))
+      if ((o1 < 0x80) || (o1 > 0xbf)) {
         throw new $URIError("URI malformed");
+      }
       var b = o1 & 0x3f;
       value = (a << 6) + b;
-      if (value < 0x80 || value > 0x7ff)
+      if (value < 0x80 || value > 0x7ff) {
         throw new $URIError("URI malformed");
+      }
     } else {
       var o2 = octets[2];
       if (o0 < 0xf0) {
         var a = o0 & 0x0f;
-        if ((o1 < 0x80) || (o1 > 0xbf))
+        if ((o1 < 0x80) || (o1 > 0xbf)) {
           throw new $URIError("URI malformed");
+        }
         var b = o1 & 0x3f;
-        if ((o2 < 0x80) || (o2 > 0xbf))
+        if ((o2 < 0x80) || (o2 > 0xbf)) {
           throw new $URIError("URI malformed");
+        }
         var c = o2 & 0x3f;
         value = (a << 12) + (b << 6) + c;
-        if ((value < 0x800) || (value > 0xffff))
+        if ((value < 0x800) || (value > 0xffff)) {
           throw new $URIError("URI malformed");
+        }
       } else {
         var o3 = octets[3];
         if (o0 < 0xf8) {
           var a = (o0 & 0x07);
-          if ((o1 < 0x80) || (o1 > 0xbf))
+          if ((o1 < 0x80) || (o1 > 0xbf)) {
             throw new $URIError("URI malformed");
+          }
           var b = (o1 & 0x3f);
-          if ((o2 < 0x80) || (o2 > 0xbf))
+          if ((o2 < 0x80) || (o2 > 0xbf)) {
             throw new $URIError("URI malformed");
+          }
           var c = (o2 & 0x3f);
-          if ((o3 < 0x80) || (o3 > 0xbf))
+          if ((o3 < 0x80) || (o3 > 0xbf)) {
             throw new $URIError("URI malformed");
+          }
           var d = (o3 & 0x3f);
           value = (a << 18) + (b << 12) + (c << 6) + d;
-          if ((value < 0x10000) || (value > 0x10ffff))
+          if ((value < 0x10000) || (value > 0x10ffff)) {
             throw new $URIError("URI malformed");
+          }
         } else {
           throw new $URIError("URI malformed");
         }
       }
     }
   }
+  if (0xD800 <= value && value <= 0xDFFF) {
+    throw new $URIError("URI malformed");
+  }
   if (value < 0x10000) {
     result[index++] = value;
     return index;
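The new range check rejects decoded values in 0xD800-0xDFFF, since surrogate code points are not valid Unicode scalar values and must never appear in decoded output. A standalone C++ sketch of the check, plus the surrogate split that the branch below this hunk presumably performs for values at or above 0x10000 (EmitCodePoint is an illustrative name):

    // Sketch (not V8 code): validate a decoded value and emit UTF-16 units.
    #include <stdexcept>
    #include <vector>

    static void EmitCodePoint(unsigned value, std::vector<unsigned short>* out) {
      if (0xD800 <= value && value <= 0xDFFF)   // surrogates never decode
        throw std::runtime_error("URI malformed");
      if (value < 0x10000) {
        out->push_back(static_cast<unsigned short>(value));
        return;
      }
      // Supplementary plane: re-encode as a UTF-16 surrogate pair.
      out->push_back(static_cast<unsigned short>(
          0xD800 + (((value - 0x10000) >> 10) & 0x3FF)));
      out->push_back(static_cast<unsigned short>(0xDC00 + (value & 0x3FF)));
    }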
@@ -207,14 +219,15 @@
       var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
       if (cc >> 7) {
         var n = 0;
-        while (((cc << ++n) & 0x80) != 0) ;
+        while (((cc << ++n) & 0x80) != 0) { }
         if (n == 1 || n > 4) throw new $URIError("URI malformed");
         var octets = new $Array(n);
         octets[0] = cc;
         if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
         for (var i = 1; i < n; i++) {
           if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
-          octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
+          octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
+                                            uri.charCodeAt(++k));
         }
         index = URIDecodeOctets(octets, result, index);
       } else {
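The now empty-bodied while loop in the hunk above counts the leading 1-bits of the first byte, which gives the UTF-8 sequence length. The same computation in isolation (the function name is mine):

    // Sketch: what the empty while loop computes for the first octet.
    static int Utf8SequenceLength(unsigned cc) {  // cc: first byte, high bit set
      int n = 0;
      while (((cc << ++n) & 0x80) != 0) { }  // count leading 1-bits
      return n;  // 2..4 is valid; n == 1 (bare continuation) or n > 4 is malformed
    }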
@@ -237,7 +250,7 @@
 
 // ECMA-262 - 15.1.3.1.
 function URIDecode(uri) {
-  function reservedPredicate(cc) {
+  var reservedPredicate = function(cc) {
     // #$
     if (35 <= cc && cc <= 36) return true;
     // &
@@ -262,7 +275,7 @@
 
 // ECMA-262 - 15.1.3.2.
 function URIDecodeComponent(component) {
-  function reservedPredicate(cc) { return false; };
+  var reservedPredicate = function(cc) { return false; };
   var string = ToString(component);
   return Decode(string, reservedPredicate);
 }
@@ -283,7 +296,7 @@
 
 // ECMA-262 - 15.1.3.3.
 function URIEncode(uri) {
-  function unescapePredicate(cc) {
+  var unescapePredicate = function(cc) {
     if (isAlphaNumeric(cc)) return true;
     // !
     if (cc == 33) return true;
@@ -312,7 +325,7 @@
 
 // ECMA-262 - 15.1.3.4
 function URIEncodeComponent(component) {
-  function unescapePredicate(cc) {
+  var unescapePredicate = function(cc) {
     if (isAlphaNumeric(cc)) return true;
     // !
     if (cc == 33) return true;
@@ -366,7 +379,9 @@
 function IsValidHex(s) {
   for (var i = 0; i < s.length; ++i) {
     var cc = s.charCodeAt(i);
-    if ((48 <= cc && cc <= 57) || (65 <= cc && cc <= 70) || (97 <= cc && cc <= 102)) {
+    if ((48 <= cc && cc <= 57) ||
+        (65 <= cc && cc <= 70) ||
+        (97 <= cc && cc <= 102)) {
       // '0'..'9', 'A'..'F' and 'a'..'f'.
     } else {
       return false;
diff --git a/src/utils.h b/src/utils.h
index cf7819e..1d40c98 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,13 +47,13 @@
 // Returns true iff x is a power of 2 (or zero). Cannot be used with the
 // maximally negative value of the type T (the -1 overflows).
 template <typename T>
-static inline bool IsPowerOf2(T x) {
+inline bool IsPowerOf2(T x) {
   return IS_POWER_OF_TWO(x);
 }
 
 
 // X must be a power of 2.  Returns the number of trailing zeros.
-static inline int WhichPowerOf2(uint32_t x) {
+inline int WhichPowerOf2(uint32_t x) {
   ASSERT(IsPowerOf2(x));
   ASSERT(x != 0);
   int bits = 0;
@@ -88,7 +88,7 @@
 // The C++ standard leaves the semantics of '>>' undefined for
 // negative signed operands. Most implementations do the right thing,
 // though.
-static inline int ArithmeticShiftRight(int x, int s) {
+inline int ArithmeticShiftRight(int x, int s) {
   return x >> s;
 }
 
@@ -97,7 +97,7 @@
 // This allows conversion of Addresses and integral types into
 // 0-relative int offsets.
 template <typename T>
-static inline intptr_t OffsetFrom(T x) {
+inline intptr_t OffsetFrom(T x) {
   return x - static_cast<T>(0);
 }
 
@@ -106,14 +106,14 @@
 // This allows conversion of 0-relative int offsets into Addresses and
 // integral types.
 template <typename T>
-static inline T AddressFrom(intptr_t x) {
+inline T AddressFrom(intptr_t x) {
   return static_cast<T>(static_cast<T>(0) + x);
 }
 
 
 // Return the largest multiple of m which is <= x.
 template <typename T>
-static inline T RoundDown(T x, int m) {
+inline T RoundDown(T x, intptr_t m) {
   ASSERT(IsPowerOf2(m));
   return AddressFrom<T>(OffsetFrom(x) & -m);
 }
@@ -121,13 +121,13 @@
 
 // Return the smallest multiple of m which is >= x.
 template <typename T>
-static inline T RoundUp(T x, int m) {
-  return RoundDown(x + m - 1, m);
+inline T RoundUp(T x, intptr_t m) {
+  return RoundDown<T>(static_cast<T>(x + m - 1), m);
 }
 
 
 template <typename T>
-static int Compare(const T& a, const T& b) {
+int Compare(const T& a, const T& b) {
   if (a == b)
     return 0;
   else if (a < b)
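RoundDown works by masking off the low bits of x (hence the power-of-two requirement on m), and RoundUp is RoundDown applied to x + m - 1; the new cast keeps the arithmetic in T now that m is an intptr_t. A quick check of the mask identity:

    // Sketch: the mask identity behind RoundDown/RoundUp for power-of-two m.
    #include <cassert>
    #include <cstdint>

    int main() {
      intptr_t m = 8;
      assert((13 & -m) == 8);             // RoundDown(13, 8) == 8
      assert(((13 + m - 1) & -m) == 16);  // RoundUp(13, 8)   == 16
      assert(((16 + m - 1) & -m) == 16);  // already aligned: unchanged
      return 0;
    }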
@@ -138,16 +138,26 @@
 
 
 template <typename T>
-static int PointerValueCompare(const T* a, const T* b) {
+int PointerValueCompare(const T* a, const T* b) {
   return Compare<T>(*a, *b);
 }
 
 
+// Compare function to compare the object pointer value of two
+// handlified objects. The handles are passed as pointers to the
+// handles.
+template<typename T> class Handle;  // Forward declaration.
+template <typename T>
+int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
+  return Compare<T*>(*(*a), *(*b));
+}
+
+
 // Returns the smallest power of two which is >= x. If you pass in a
 // number that is already a power of two, it is returned as is.
 // Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
 // figure 3-3, page 48, where the function is called clp2.
-static inline uint32_t RoundUpToPowerOf2(uint32_t x) {
+inline uint32_t RoundUpToPowerOf2(uint32_t x) {
   ASSERT(x <= 0x80000000u);
   x = x - 1;
   x = x | (x >> 1);
@@ -159,18 +169,23 @@
 }
 
 
+inline uint32_t RoundDownToPowerOf2(uint32_t x) {
+  uint32_t rounded_up = RoundUpToPowerOf2(x);
+  if (rounded_up > x) return rounded_up >> 1;
+  return rounded_up;
+}
 
-template <typename T>
-static inline bool IsAligned(T value, T alignment) {
-  ASSERT(IsPowerOf2(alignment));
+
+template <typename T, typename U>
+inline bool IsAligned(T value, U alignment) {
   return (value & (alignment - 1)) == 0;
 }
 
 
 // Returns true if (addr + offset) is aligned.
-static inline bool IsAddressAligned(Address addr,
-                                    intptr_t alignment,
-                                    int offset) {
+inline bool IsAddressAligned(Address addr,
+                             intptr_t alignment,
+                             int offset = 0) {
   intptr_t offs = OffsetFrom(addr + offset);
   return IsAligned(offs, alignment);
 }
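RoundDownToPowerOf2 simply reuses the round-up: if rounding up overshot x, halving the result lands on the largest power of two that is <= x. A standalone sketch with a local copy of the clp2 code (the original also asserts x <= 0x80000000u, omitted here):

    #include <cassert>
    #include <cstdint>

    // Local copy of the clp2 round-up from the patch, for illustration.
    static inline uint32_t RoundUpToPowerOf2(uint32_t x) {
      x = x - 1;
      x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16;
      return x + 1;
    }

    static inline uint32_t RoundDownToPowerOf2(uint32_t x) {
      uint32_t rounded_up = RoundUpToPowerOf2(x);
      if (rounded_up > x) return rounded_up >> 1;  // overshot: halve
      return rounded_up;  // x was already a power of two
    }

    int main() {
      assert(RoundUpToPowerOf2(33) == 64);
      assert(RoundDownToPowerOf2(33) == 32);
      assert(RoundDownToPowerOf2(32) == 32);
      return 0;
    }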
@@ -178,14 +193,14 @@
 
 // Returns the maximum of the two parameters.
 template <typename T>
-static T Max(T a, T b) {
+T Max(T a, T b) {
   return a < b ? b : a;
 }
 
 
 // Returns the minimum of the two parameters.
 template <typename T>
-static T Min(T a, T b) {
+T Min(T a, T b) {
   return a < b ? a : b;
 }
 
@@ -241,7 +256,7 @@
 
 // Thomas Wang, Integer Hash Functions.
 // http://www.concentric.net/~Ttwang/tech/inthash.htm
-static inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
+inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
   uint32_t hash = key;
   hash = hash ^ seed;
   hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
@@ -254,7 +269,19 @@
 }
 
 
-static inline uint32_t ComputePointerHash(void* ptr) {
+inline uint32_t ComputeLongHash(uint64_t key) {
+  uint64_t hash = key;
+  hash = ~hash + (hash << 18);  // hash = (hash << 18) - hash - 1;
+  hash = hash ^ (hash >> 31);
+  hash = hash * 21;  // hash = (hash + (hash << 2)) + (hash << 4);
+  hash = hash ^ (hash >> 11);
+  hash = hash + (hash << 6);
+  hash = hash ^ (hash >> 22);
+  return (uint32_t) hash;
+}
+
+
+inline uint32_t ComputePointerHash(void* ptr) {
   return ComputeIntegerHash(
       static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)),
       v8::internal::kZeroHashSeed);
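ComputeLongHash is a 64-bit variant of the Thomas Wang mix above: alternating shifts, adds and xors diffuse all 64 input bits before the value is truncated to 32. A sketch of one plausible use, hashing a double by its bit pattern (HashDoubleBits is illustrative, not part of the patch):

    #include <cstdint>
    #include <cstring>

    // Local copy of the mix from the patch, for illustration.
    static inline uint32_t ComputeLongHash(uint64_t key) {
      uint64_t hash = key;
      hash = ~hash + (hash << 18);
      hash = hash ^ (hash >> 31);
      hash = hash * 21;
      hash = hash ^ (hash >> 11);
      hash = hash + (hash << 6);
      hash = hash ^ (hash >> 22);
      return (uint32_t) hash;
    }

    // Example use (an assumption, not from the patch): hash a double's bits.
    static uint32_t HashDoubleBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // reinterpret the double's bits
      return ComputeLongHash(bits);
    }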
@@ -711,7 +738,7 @@
 
 // Compare ASCII/16bit chars to ASCII/16bit chars.
 template <typename lchar, typename rchar>
-static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
+inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
   const lchar* limit = lhs + chars;
 #ifdef V8_HOST_CAN_READ_UNALIGNED
   if (sizeof(*lhs) == sizeof(*rhs)) {
@@ -738,7 +765,7 @@
 
 
 // Calculate 10^exponent.
-static inline int TenToThe(int exponent) {
+inline int TenToThe(int exponent) {
   ASSERT(exponent <= 9);
   ASSERT(exponent >= 1);
   int answer = 10;
@@ -904,9 +931,17 @@
   explicit EnumSet(T bits = 0) : bits_(bits) {}
   bool IsEmpty() const { return bits_ == 0; }
   bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
+  bool ContainsAnyOf(const EnumSet& set) const {
+    return (bits_ & set.bits_) != 0;
+  }
   void Add(E element) { bits_ |= Mask(element); }
+  void Add(const EnumSet& set) { bits_ |= set.bits_; }
   void Remove(E element) { bits_ &= ~Mask(element); }
+  void Remove(const EnumSet& set) { bits_ &= ~set.bits_; }
+  void RemoveAll() { bits_ = 0; }
+  void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
   T ToIntegral() const { return bits_; }
+  bool operator==(const EnumSet& set) { return bits_ == set.bits_; }
 
  private:
   T Mask(E element) const {
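The new EnumSet operations are plain bit algebra on the backing integer. A self-contained sketch with a pared-down stand-in for the class (the Color enum and the stand-in itself are illustrative):

    #include <cassert>

    // Minimal stand-in for the EnumSet above (illustration only).
    template <class E, class T = int>
    class EnumSet {
     public:
      explicit EnumSet(T bits = 0) : bits_(bits) {}
      bool Contains(E e) const { return (bits_ & Mask(e)) != 0; }
      bool ContainsAnyOf(const EnumSet& s) const { return (bits_ & s.bits_) != 0; }
      void Add(E e) { bits_ |= Mask(e); }
      void Intersect(const EnumSet& s) { bits_ &= s.bits_; }
      void RemoveAll() { bits_ = 0; }
      bool IsEmpty() const { return bits_ == 0; }
     private:
      T Mask(E e) const { return static_cast<T>(1) << e; }
      T bits_;
    };

    enum Color { kRed, kGreen, kBlue };

    int main() {
      EnumSet<Color> a; a.Add(kRed); a.Add(kGreen);
      EnumSet<Color> b; b.Add(kGreen); b.Add(kBlue);
      assert(a.ContainsAnyOf(b));  // kGreen is in both
      a.Intersect(b);              // a is now {kGreen}
      assert(a.Contains(kGreen) && !a.Contains(kRed));
      a.RemoveAll();
      assert(a.IsEmpty());
      return 0;
    }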
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 2de8303..6db9c77 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -107,7 +107,10 @@
   SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot)      \
   /* Number of code objects found from pc. */                         \
   SC(pc_to_code, V8.PcToCode)                                         \
-  SC(pc_to_code_cached, V8.PcToCodeCached)
+  SC(pc_to_code_cached, V8.PcToCodeCached)                            \
+  /* The store-buffer implementation of the write barrier. */         \
+  SC(store_buffer_compactions, V8.StoreBufferCompactions)             \
+  SC(store_buffer_overflows, V8.StoreBufferOverflows)
 
 
 #define STATS_COUNTER_LIST_2(SC)                                      \
@@ -126,10 +129,6 @@
      V8.GCCompactorCausedByWeakHandles)                               \
   SC(gc_last_resort_from_js, V8.GCLastResortFromJS)                   \
   SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles)         \
-  SC(map_to_fast_elements, V8.MapToFastElements)                      \
-  SC(map_to_fast_double_elements, V8.MapToFastDoubleElements)         \
-  SC(map_to_slow_elements, V8.MapToSlowElements)                      \
-  SC(map_to_external_array_elements, V8.MapToExternalArrayElements)   \
   /* How is the generic keyed-load stub used? */                      \
   SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                  \
   SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)            \
@@ -199,6 +198,9 @@
   SC(constructed_objects_stub, V8.ConstructedObjectsStub)             \
   SC(negative_lookups, V8.NegativeLookups)                            \
   SC(negative_lookups_miss, V8.NegativeLookupsMiss)                   \
+  SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes)    \
+  SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses)    \
+  SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates)  \
   SC(array_function_runtime, V8.ArrayFunctionRuntime)                 \
   SC(array_function_native, V8.ArrayFunctionNative)                   \
   SC(for_in, V8.ForIn)                                                \
diff --git a/src/v8.cc b/src/v8.cc
index 1e9b5dc..1157c9d 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,26 +36,32 @@
 #include "hydrogen.h"
 #include "lithium-allocator.h"
 #include "log.h"
+#include "once.h"
+#include "platform.h"
 #include "runtime-profiler.h"
 #include "serialize.h"
+#include "store-buffer.h"
 
 namespace v8 {
 namespace internal {
 
-static Mutex* init_once_mutex = OS::CreateMutex();
-static bool init_once_called = false;
+V8_DECLARE_ONCE(init_once);
 
 bool V8::is_running_ = false;
-bool V8::has_been_setup_ = false;
+bool V8::has_been_set_up_ = false;
 bool V8::has_been_disposed_ = false;
 bool V8::has_fatal_error_ = false;
 bool V8::use_crankshaft_ = true;
+List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
 
-static Mutex* entropy_mutex = OS::CreateMutex();
+static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
+
 static EntropySource entropy_source;
 
 
 bool V8::Initialize(Deserializer* des) {
+  FlagList::EnforceFlagImplications();
+
   InitializeOncePerProcess();
 
   // The current thread may not yet had entered an isolate to run.
@@ -78,7 +84,7 @@
   if (isolate->IsInitialized()) return true;
 
   is_running_ = true;
-  has_been_setup_ = true;
+  has_been_set_up_ = true;
   has_fatal_error_ = false;
   has_been_disposed_ = false;
 
@@ -96,11 +102,14 @@
   Isolate* isolate = Isolate::Current();
   ASSERT(isolate->IsDefaultIsolate());
 
-  if (!has_been_setup_ || has_been_disposed_) return;
+  if (!has_been_set_up_ || has_been_disposed_) return;
   isolate->TearDown();
 
   is_running_ = false;
   has_been_disposed_ = true;
+
+  delete call_completed_callbacks_;
+  call_completed_callbacks_ = NULL;
 }
 
 
@@ -110,7 +119,7 @@
       state[i] = FLAG_random_seed;
     } else if (entropy_source != NULL) {
       uint32_t val;
-      ScopedLock lock(entropy_mutex);
+      ScopedLock lock(entropy_mutex.Pointer());
       entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
       state[i] = val;
     } else {
@@ -139,10 +148,17 @@
 }
 
 
+void V8::SetReturnAddressLocationResolver(
+      ReturnAddressLocationResolver resolver) {
+  StackFrame::SetReturnAddressLocationResolver(resolver);
+}
+
+
 // Used by JavaScript APIs
-uint32_t V8::Random(Isolate* isolate) {
-  ASSERT(isolate == Isolate::Current());
-  return random_base(isolate->random_seed());
+uint32_t V8::Random(Context* context) {
+  ASSERT(context->IsGlobalContext());
+  ByteArray* seed = context->random_seed();
+  return random_base(reinterpret_cast<uint32_t*>(seed->GetDataStartAddress()));
 }
 
 
@@ -155,13 +171,48 @@
 }
 
 
-bool V8::IdleNotification() {
+bool V8::IdleNotification(int hint) {
   // Returning true tells the caller that there is no need to call
   // IdleNotification again.
   if (!FLAG_use_idle_notification) return true;
 
   // Tell the heap that it may want to adjust.
-  return HEAP->IdleNotification();
+  return HEAP->IdleNotification(hint);
+}
+
+
+void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
+  if (call_completed_callbacks_ == NULL) {  // Lazy init.
+    call_completed_callbacks_ = new List<CallCompletedCallback>();
+  }
+  for (int i = 0; i < call_completed_callbacks_->length(); i++) {
+    if (callback == call_completed_callbacks_->at(i)) return;
+  }
+  call_completed_callbacks_->Add(callback);
+}
+
+
+void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
+  if (call_completed_callbacks_ == NULL) return;
+  for (int i = 0; i < call_completed_callbacks_->length(); i++) {
+    if (callback == call_completed_callbacks_->at(i)) {
+      call_completed_callbacks_->Remove(i);
+    }
+  }
+}
+
+
+void V8::FireCallCompletedCallback(Isolate* isolate) {
+  if (call_completed_callbacks_ == NULL) return;
+  HandleScopeImplementer* handle_scope_implementer =
+      isolate->handle_scope_implementer();
+  if (!handle_scope_implementer->CallDepthIsZero()) return;
+  // Fire callbacks.  Increase call depth to prevent recursive callbacks.
+  handle_scope_implementer->IncrementCallDepth();
+  for (int i = 0; i < call_completed_callbacks_->length(); i++) {
+    call_completed_callbacks_->at(i)();
+  }
+  handle_scope_implementer->DecrementCallDepth();
 }
 
 
@@ -172,31 +223,25 @@
 } double_int_union;
 
 
-Object* V8::FillHeapNumberWithRandom(Object* heap_number, Isolate* isolate) {
-  uint64_t random_bits = Random(isolate);
-  // Make a double* from address (heap_number + sizeof(double)).
-  double_int_union* r = reinterpret_cast<double_int_union*>(
-      reinterpret_cast<char*>(heap_number) +
-      HeapNumber::kValueOffset - kHeapObjectTag);
+Object* V8::FillHeapNumberWithRandom(Object* heap_number,
+                                     Context* context) {
+  double_int_union r;
+  uint64_t random_bits = Random(context);
   // Convert 32 random bits to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
-  const double binary_million = 1048576.0;
-  r->double_value = binary_million;
-  r->uint64_t_value |=  random_bits;
-  r->double_value -= binary_million;
+  static const double binary_million = 1048576.0;
+  r.double_value = binary_million;
+  r.uint64_t_value |= random_bits;
+  r.double_value -= binary_million;
 
+  HeapNumber::cast(heap_number)->set_value(r.double_value);
   return heap_number;
 }
 
-
-void V8::InitializeOncePerProcess() {
-  ScopedLock lock(init_once_mutex);
-  if (init_once_called) return;
-  init_once_called = true;
-
-  // Setup the platform OS support.
-  OS::Setup();
+void V8::InitializeOncePerProcessImpl() {
+  // Set up the platform OS support.
+  OS::SetUp();
 
   use_crankshaft_ = FLAG_crankshaft;
 
@@ -204,17 +249,28 @@
     use_crankshaft_ = false;
   }
 
-  CPU::Setup();
+  CPU::SetUp();
   if (!CPU::SupportsCrankshaft()) {
     use_crankshaft_ = false;
   }
 
+  OS::PostSetUp();
+
   RuntimeProfiler::GlobalSetup();
 
-  // Peephole optimization might interfere with deoptimization.
-  FLAG_peephole_optimization = !use_crankshaft_;
-
   ElementsAccessor::InitializeOncePerProcess();
+
+  if (FLAG_stress_compaction) {
+    FLAG_force_marking_deque_overflows = true;
+    FLAG_gc_global = true;
+    FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
+  }
+
+  LOperand::SetUpCaches();
+}
+
+void V8::InitializeOncePerProcess() {
+  CallOnce(&init_once, &InitializeOncePerProcessImpl);
 }
 
 } }  // namespace v8::internal
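
The binary_million trick in the FillHeapNumberWithRandom hunk above turns 32
random bits into a double in [0, 1): 1048576.0 is 2^20, whose low 32 mantissa
bits are zero and each carry a weight of 2^-32, so splicing the random bits
into the mantissa and subtracting 2^20 leaves exactly random_bits * 2^-32. A
sketch of the same bit manipulation in JavaScript with typed arrays (assumes a
little-endian host, where Uint32 index 0 aliases the low word of the double):

  var buf = new ArrayBuffer(8);
  var f64 = new Float64Array(buf);
  var u32 = new Uint32Array(buf);
  var random_bits = 0xCAFEBABE;   // stand-in for 32 random bits
  f64[0] = 1048576.0;             // 2^20; low 32 mantissa bits are all zero
  u32[0] |= random_bits;          // splice the bits into the mantissa
  f64[0] - 1048576.0;             // == random_bits * 2^-32, in [0, 1)
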
diff --git a/src/v8.h b/src/v8.h
index e565ca5..59ce602 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -60,10 +60,11 @@
 #include "objects-inl.h"
 #include "spaces-inl.h"
 #include "heap-inl.h"
+#include "incremental-marking-inl.h"
+#include "mark-compact-inl.h"
 #include "log-inl.h"
 #include "cpu-profiler-inl.h"
 #include "handles-inl.h"
-#include "isolate-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -94,26 +95,34 @@
   // Allows an entropy source to be provided for use in random number
   // generation.
   static void SetEntropySource(EntropySource source);
+  // Support for return-address rewriting profilers.
+  static void SetReturnAddressLocationResolver(
+      ReturnAddressLocationResolver resolver);
   // Random number generation support. Not cryptographically safe.
-  static uint32_t Random(Isolate* isolate);
+  static uint32_t Random(Context* context);
   // We use random numbers internally in memory allocation and in the
   // compilers for security. In order to prevent information leaks we
   // use a separate random state for internal random number
   // generation.
   static uint32_t RandomPrivate(Isolate* isolate);
   static Object* FillHeapNumberWithRandom(Object* heap_number,
-                                          Isolate* isolate);
+                                          Context* context);
 
   // Idle notification directly from the API.
-  static bool IdleNotification();
+  static bool IdleNotification(int hint);
+
+  static void AddCallCompletedCallback(CallCompletedCallback callback);
+  static void RemoveCallCompletedCallback(CallCompletedCallback callback);
+  static void FireCallCompletedCallback(Isolate* isolate);
 
  private:
+  static void InitializeOncePerProcessImpl();
   static void InitializeOncePerProcess();
 
   // True if engine is currently running
   static bool is_running_;
   // True if V8 has ever been run
-  static bool has_been_setup_;
+  static bool has_been_set_up_;
   // True if error has been signaled for current engine
   // (reset to false if engine is restarted)
   static bool has_fatal_error_;
@@ -122,8 +131,19 @@
   static bool has_been_disposed_;
   // True if we are using the crankshaft optimizing compiler.
   static bool use_crankshaft_;
+  // List of callbacks when a Call completes.
+  static List<CallCompletedCallback>* call_completed_callbacks_;
 };
 
+
+// JavaScript defines two kinds of 'nil'.
+enum NilValue { kNullValue, kUndefinedValue };
+
+
+// JavaScript defines two kinds of equality.
+enum EqualityKind { kStrictEquality, kNonStrictEquality };
+
+
 } }  // namespace v8::internal
 
 namespace i = v8::internal;
diff --git a/src/v8conversions.h b/src/v8conversions.h
index 1840e3a..0147d8c 100644
--- a/src/v8conversions.h
+++ b/src/v8conversions.h
@@ -34,13 +34,13 @@
 namespace internal {
 
 // Convert from Number object to C integer.
-static inline int32_t NumberToInt32(Object* number) {
+inline int32_t NumberToInt32(Object* number) {
   if (number->IsSmi()) return Smi::cast(number)->value();
   return DoubleToInt32(number->Number());
 }
 
 
-static inline uint32_t NumberToUint32(Object* number) {
+inline uint32_t NumberToUint32(Object* number) {
   if (number->IsSmi()) return Smi::cast(number)->value();
   return DoubleToUint32(number->Number());
 }
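
NumberToInt32 and NumberToUint32 implement the ECMAScript ToInt32/ToUint32
conversions (reduction modulo 2^32), the same semantics JavaScript itself
exposes through the | and >>> operators. For example:

  (4294967296 | 0) === 0;        // 2^32 wraps around to 0
  (-1 >>> 0) === 4294967295;     // ToUint32(-1) == 2^32 - 1
  (NaN | 0) === 0;               // NaN and Infinity both map to 0
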
diff --git a/src/v8globals.h b/src/v8globals.h
index bf843e5..bfc5e23 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 #define V8_V8GLOBALS_H_
 
 #include "globals.h"
+#include "checks.h"
 
 namespace v8 {
 namespace internal {
@@ -79,18 +80,20 @@
     reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
 const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
 const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
+const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
 #else
 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
 const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
 const uint32_t kSlotsZapValue = 0xbeefdeef;
 const uint32_t kDebugZapValue = 0xbadbaddb;
+const uint32_t kFreeListZapValue = 0xfeed1eaf;
 #endif
 
 
-// Number of bits to represent the page size for paged spaces. The value of 13
-// gives 8K bytes per page.
-const int kPageSizeBits = 13;
+// Number of bits to represent the page size for paged spaces. The value of 20
+// gives 1MB per page.
+const int kPageSizeBits = 20;
 
 // On Intel architecture, cache line size is 64 bytes.
 // On ARM it may be less (32 bytes), but as far this constant is
@@ -98,24 +101,18 @@
 const int kProcessorCacheLineSize = 64;
 
 // Constants relevant to double precision floating point numbers.
-
-// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
-// other bits set.
-const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
 // If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
 const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
 
 
 // -----------------------------------------------------------------------------
 // Forward declarations for frequently used classes
-// (sorted alphabetically)
 
 class AccessorInfo;
 class Allocation;
 class Arguments;
 class Assembler;
 class AssertNoAllocation;
-class BreakableStatement;
 class Code;
 class CodeGenerator;
 class CodeStub;
@@ -125,12 +122,10 @@
 class DebugInfo;
 class Descriptor;
 class DescriptorArray;
-class Expression;
 class ExternalReference;
 class FixedArray;
-class FunctionEntry;
-class FunctionLiteral;
 class FunctionTemplateInfo;
+class MemoryChunk;
 class SeededNumberDictionary;
 class UnseededNumberDictionary;
 class StringDictionary;
@@ -139,7 +134,6 @@
 class HeapObject;
 class IC;
 class InterceptorInfo;
-class IterationStatement;
 class JSArray;
 class JSFunction;
 class JSObject;
@@ -150,32 +144,19 @@
 class MapSpace;
 class MarkCompactCollector;
 class NewSpace;
-class NodeVisitor;
 class Object;
 class MaybeObject;
 class OldSpace;
-class Property;
 class Foreign;
-class RegExpNode;
-struct RegExpCompileData;
-class RegExpTree;
-class RegExpCompiler;
-class RegExpVisitor;
 class Scope;
-template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
-class SerializedScopeInfo;
+class ScopeInfo;
 class Script;
-class Slot;
 class Smi;
 template <typename Config, class Allocator = FreeStoreAllocationPolicy>
     class SplayTree;
-class Statement;
 class String;
 class Struct;
-class SwitchStatement;
-class AstVisitor;
 class Variable;
-class VariableProxy;
 class RelocInfo;
 class Deserializer;
 class MessageLocation;
@@ -255,12 +236,6 @@
 };
 
 
-// Callback function on object slots, used for iterating heap object slots in
-// HeapObjects, global pointers to heap objects, etc. The callback allows the
-// callback function to change the value of the slot.
-typedef void (*ObjectSlotCallback)(HeapObject** pointer);
-
-
 // Callback function used for iterating objects in heap spaces,
 // for example, scanning heap objects.
 typedef int (*HeapObjectCallback)(HeapObject* obj);
@@ -307,7 +282,9 @@
   NO_CALL_FUNCTION_FLAGS = 0,
   // Receiver might implicitly be the global object. If it is, the
   // hole is passed to the call function stub.
-  RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0
+  RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0,
+  // The call target is cached in the instruction stream.
+  RECORD_CALL_TARGET = 1 << 1
 };
 
 
@@ -317,28 +294,17 @@
 };
 
 
-// Type of properties.
-// Order of properties is significant.
-// Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in mirror-debugger.js.
-enum PropertyType {
-  NORMAL                    = 0,  // only in slow mode
-  FIELD                     = 1,  // only in fast mode
-  CONSTANT_FUNCTION         = 2,  // only in fast mode
-  CALLBACKS                 = 3,
-  HANDLER                   = 4,  // only in lookup results, not in descriptors
-  INTERCEPTOR               = 5,  // only in lookup results, not in descriptors
-  MAP_TRANSITION            = 6,  // only in fast mode
-  ELEMENTS_TRANSITION       = 7,
-  CONSTANT_TRANSITION       = 8,  // only in fast mode
-  NULL_DESCRIPTOR           = 9,  // only in fast mode
-  // All properties before MAP_TRANSITION are real.
-  FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
-  // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
-  // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
-  // nonexistent properties.
-  NONEXISTENT = NULL_DESCRIPTOR
-};
+// The Store Buffer (GC).
+typedef enum {
+  kStoreBufferFullEvent,
+  kStoreBufferStartScanningPagesEvent,
+  kStoreBufferScanningPageEvent
+} StoreBufferEvent;
+
+
+typedef void (*StoreBufferCallback)(Heap* heap,
+                                    MemoryChunk* page,
+                                    StoreBufferEvent event);
 
 
 // Whether to remove map transitions and constant transitions from a
@@ -475,21 +441,11 @@
                   SAHF = 0,    // x86
                   FPU = 1};    // MIPS
 
-// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-enum StrictModeFlag {
-  kNonStrictMode,
-  kStrictMode,
-  // This value is never used, but is needed to prevent GCC 4.5 from failing
-  // to compile when we assert that a flag is either kNonStrictMode or
-  // kStrictMode.
-  kInvalidStrictFlag
-};
-
 
 // Used to specify if a macro instruction must perform a smi check on tagged
 // values.
 enum SmiCheckType {
-  DONT_DO_SMI_CHECK = 0,
+  DONT_DO_SMI_CHECK,
   DO_SMI_CHECK
 };
 
@@ -497,20 +453,106 @@
 // Used to specify whether a receiver is implicitly or explicitly
 // provided to a call.
 enum CallKind {
-  CALL_AS_METHOD = 0,
+  CALL_AS_METHOD,
   CALL_AS_FUNCTION
 };
 
 
-static const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
-static const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
-static const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
+enum ScopeType {
+  EVAL_SCOPE,      // The top-level scope for an eval source.
+  FUNCTION_SCOPE,  // The top-level scope for a function.
+  MODULE_SCOPE,    // The scope introduced by a module literal.
+  GLOBAL_SCOPE,    // The top-level scope for a program or a top-level eval.
+  CATCH_SCOPE,     // The scope introduced by catch.
+  BLOCK_SCOPE,     // The scope introduced by a new block.
+  WITH_SCOPE       // The scope introduced by with.
+};
+
+
+const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
+const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
+const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
 
 const uint64_t kHoleNanInt64 =
     (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
 const uint64_t kLastNonNaNInt64 =
     (static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
 
+
+enum VariableMode {
+  // User declared variables:
+  VAR,             // declared via 'var', and 'function' declarations
+
+  CONST,           // declared via 'const' declarations
+
+  CONST_HARMONY,   // declared via 'const' declarations in harmony mode
+
+  LET,             // declared via 'let' declarations
+
+  // Variables introduced by the compiler:
+  DYNAMIC,         // always require dynamic lookup (we don't know
+                   // the declaration)
+
+  DYNAMIC_GLOBAL,  // requires dynamic lookup, but we know that the
+                   // variable is global unless it has been shadowed
+                   // by an eval-introduced variable
+
+  DYNAMIC_LOCAL,   // requires dynamic lookup, but we know that the
+                   // variable is local and where it is unless it
+                   // has been shadowed by an eval-introduced
+                   // variable
+
+  INTERNAL,        // like VAR, but not user-visible (may or may not
+                   // be in a context)
+
+  TEMPORARY        // temporary variables (not user-visible), never
+                   // in a context
+};
+
+
+// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
+// and immutable bindings that can be in two states: initialized and
+// uninitialized. In ES5 only immutable bindings have these two states. When
+// accessing a binding, it needs to be checked for initialization. However, in
+// the following cases the binding is initialized immediately after creation,
+// so the initialization check can always be skipped:
+// 1. Var declared local variables.
+//      var foo;
+// 2. A local variable introduced by a function declaration.
+//      function foo() {}
+// 3. Parameters
+//      function x(foo) {}
+// 4. Catch bound variables.
+//      try {} catch (foo) {}
+// 5. Function variables of named function expressions.
+//      var x = function foo() {}
+// 6. Implicit binding of 'this'.
+// 7. Implicit binding of 'arguments' in functions.
+//
+// ES5 specifies object environment records, which are introduced by ES
+// elements such as Program and WithStatement and which associate identifier
+// bindings with the properties of some object. The specification defines only
+// mutable bindings (which may be non-writable) and no distinct initialization
+// step. However, V8 allows const declarations in global code with distinct
+// creation and initialization steps, which are represented by non-writable
+// properties on the global object. As a result, these bindings also need to
+// be checked for initialization.
+//
+// The following enum specifies a flag that indicates if the binding needs a
+// distinct initialization step (kNeedsInitialization) or if the binding is
+// immediately initialized upon creation (kCreatedInitialized).
+enum InitializationFlag {
+  kNeedsInitialization,
+  kCreatedInitialized
+};
+
+
+enum ClearExceptionFlag {
+  KEEP_EXCEPTION,
+  CLEAR_EXCEPTION
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_V8GLOBALS_H_
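
The InitializationFlag comment above maps onto observable JavaScript behavior.
A sketch (the const behavior assumes the harmony language modes of this era):

  var a;                 // case 1: binding created and initialized at once
  function f() {}        // case 2: initialized immediately
  try {} catch (e) {}    // case 4: 'e' is bound to the thrown value on entry
  // A harmony-mode const has separate creation and initialization steps, so
  // a read before the initializer runs must fail the initialization check,
  // e.g. in a block: { use(c); const c = 1; }  // 'use' is hypothetical
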
diff --git a/src/v8memory.h b/src/v8memory.h
index 901e78d..f71de82 100644
--- a/src/v8memory.h
+++ b/src/v8memory.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -60,6 +60,10 @@
     return *reinterpret_cast<int*>(addr);
   }
 
+  static unsigned& unsigned_at(Address addr) {
+    return *reinterpret_cast<unsigned*>(addr);
+  }
+
   static double& double_at(Address addr)  {
     return *reinterpret_cast<double*>(addr);
   }
diff --git a/src/v8natives.js b/src/v8natives.js
index 588bdb2..f1e8084 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,18 +28,18 @@
 // This file relies on the fact that the following declarations have been made
 //
 // in runtime.js:
-// const $Object = global.Object;
-// const $Boolean = global.Boolean;
-// const $Number = global.Number;
-// const $Function = global.Function;
-// const $Array = global.Array;
-// const $NaN = 0/0;
+// var $Object = global.Object;
+// var $Boolean = global.Boolean;
+// var $Number = global.Number;
+// var $Function = global.Function;
+// var $Array = global.Array;
+// var $NaN = 0/0;
 //
 // in math.js:
-// const $floor = MathFloor
+// var $floor = MathFloor
 
-const $isNaN = GlobalIsNaN;
-const $isFinite = GlobalIsFinite;
+var $isNaN = GlobalIsNaN;
+var $isFinite = GlobalIsFinite;
 
 // ----------------------------------------------------------------------------
 
@@ -60,18 +60,6 @@
   %ToFastProperties(object);
 }
 
-// Emulates JSC by installing functions on a hidden prototype that
-// lies above the current object/prototype.  This lets you override
-// functions on String.prototype etc. and then restore the old function
-// with delete.  See http://code.google.com/p/chromium/issues/detail?id=1717
-function InstallFunctionsOnHiddenPrototype(object, attributes, functions) {
-  %CheckIsBootstrapping();
-  var hidden_prototype = new $Object();
-  %SetHiddenPrototype(object, hidden_prototype);
-  InstallFunctions(hidden_prototype, attributes, functions);
-}
-
-
 // Prevents changes to the prototype of a built-in function.
 // The "prototype" property of the function object is made non-configurable,
 // and the prototype object is made non-extensible. The latter prevents
@@ -139,8 +127,9 @@
     // The spec says ToString should be evaluated before ToInt32.
     string = TO_STRING_INLINE(string);
     radix = TO_INT32(radix);
-    if (!(radix == 0 || (2 <= radix && radix <= 36)))
+    if (!(radix == 0 || (2 <= radix && radix <= 36))) {
       return $NaN;
+    }
   }
 
   if (%_HasCachedArrayIndex(string) &&
@@ -162,28 +151,23 @@
 function GlobalEval(x) {
   if (!IS_STRING(x)) return x;
 
-  var receiver = this;
   var global_receiver = %GlobalReceiver(global);
-
-  if (receiver == null && !IS_UNDETECTABLE(receiver)) {
-    receiver = global_receiver;
-  }
-
-  var this_is_global_receiver = (receiver === global_receiver);
   var global_is_detached = (global === global_receiver);
 
   // For consistency with JSC we require the global object passed to
   // eval to be the global object from which 'eval' originated. This
   // is not mandated by the spec.
-  if (!this_is_global_receiver || global_is_detached) {
-    throw new $EvalError('The "this" object passed to eval must ' +
+  // We only throw if the global has been detached, since we need the
+  // receiver as this-value for the call.
+  if (global_is_detached) {
+    throw new $EvalError('The "this" value passed to eval must ' +
                          'be the global object from which eval originated');
   }
 
   var f = %CompileString(x);
   if (!IS_FUNCTION(f)) return f;
 
-  return %_CallFunction(receiver, f);
+  return %_CallFunction(global_receiver, f);
 }
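
The rewritten GlobalEval no longer throws when eval is invoked with a
non-global receiver; the code is simply run with the global object as the
receiver, and only a detached global still raises EvalError. A sketch of the
observable difference:

  var indirect = { eval: eval };
  // Previously V8 threw an EvalError here; with this change the call
  // succeeds and 'this' inside the eval code is the global object:
  indirect.eval("this") === this;   // true when run at top level
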
 
 
@@ -193,13 +177,14 @@
 function SetUpGlobal() {
   %CheckIsBootstrapping();
   // ECMA 262 - 15.1.1.1.
-  %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE);
+  %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // ECMA-262 - 15.1.1.2.
-  %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE);
+  %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // ECMA-262 - 15.1.1.3.
-  %SetProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE);
+  %SetProperty(global, "undefined", void 0,
+               DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // Set up non-enumerable function on the global object.
   InstallFunctions(global, DONT_ENUM, $Array(
@@ -299,7 +284,8 @@
     receiver = %GlobalReceiver(global);
   }
   if (!IS_SPEC_FUNCTION(fun)) {
-    throw new $TypeError('Object.prototype.__defineGetter__: Expecting function');
+    throw new $TypeError(
+        'Object.prototype.__defineGetter__: Expecting function');
   }
   var desc = new PropertyDescriptor();
   desc.setGet(fun);
@@ -345,8 +331,9 @@
 
 
 function ObjectKeys(obj) {
-  if (!IS_SPEC_OBJECT(obj))
-    throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
+  if (!IS_SPEC_OBJECT(obj)) {
+    throw MakeTypeError("called_on_non_object", ["Object.keys"]);
+  }
   if (%IsJSProxy(obj)) {
     var handler = %GetHandler(obj);
     var names = CallTrap0(handler, "keys", DerivedKeysTrap);
@@ -372,6 +359,7 @@
 
 // ES5 8.10.3.
 function IsGenericDescriptor(desc) {
+  if (IS_UNDEFINED(desc)) return false;
   return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
 }
 
@@ -476,7 +464,7 @@
 
 // For Harmony proxies.
 function ToCompletePropertyDescriptor(obj) {
-  var desc = ToPropertyDescriptor(obj)
+  var desc = ToPropertyDescriptor(obj);
   if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
     if (!desc.hasValue()) desc.setValue(void 0);
     if (!desc.hasWritable()) desc.setWritable(false);
@@ -672,6 +660,21 @@
 }
 
 
+// ES5 section 8.12.7.
+function Delete(obj, p, should_throw) {
+  var desc = GetOwnProperty(obj, p);
+  if (IS_UNDEFINED(desc)) return true;
+  if (desc.isConfigurable()) {
+    %DeleteProperty(obj, p, 0);
+    return true;
+  } else if (should_throw) {
+    throw MakeTypeError("define_disallowed", [p]);
+  } else {
+    return;
+  }
+}
+
+
 // Harmony proxies.
 function DefineProxyProperty(obj, p, attributes, should_throw) {
   var handler = %GetHandler(obj);
@@ -689,12 +692,7 @@
 
 
 // ES5 8.12.9.
-function DefineOwnProperty(obj, p, desc, should_throw) {
-  if (%IsJSProxy(obj)) {
-    var attributes = FromGenericPropertyDescriptor(desc);
-    return DefineProxyProperty(obj, p, attributes, should_throw);
-  }
-
+function DefineObjectProperty(obj, p, desc, should_throw) {
   var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
   // A false value here means that access checks failed.
   if (current_or_access === false) return void 0;
@@ -708,7 +706,7 @@
     if (should_throw) {
       throw MakeTypeError("define_disallowed", [p]);
     } else {
-      return;
+      return false;
     }
   }
 
@@ -738,7 +736,7 @@
         if (should_throw) {
           throw MakeTypeError("redefine_disallowed", [p]);
         } else {
-          return;
+          return false;
         }
       }
       // Step 8
@@ -748,7 +746,7 @@
           if (should_throw) {
             throw MakeTypeError("redefine_disallowed", [p]);
           } else {
-            return;
+            return false;
           }
         }
         // Step 10a
@@ -757,7 +755,7 @@
             if (should_throw) {
               throw MakeTypeError("redefine_disallowed", [p]);
             } else {
-              return;
+              return false;
             }
           }
           if (!current.isWritable() && desc.hasValue() &&
@@ -765,7 +763,7 @@
             if (should_throw) {
               throw MakeTypeError("redefine_disallowed", [p]);
             } else {
-              return;
+              return false;
             }
           }
         }
@@ -775,14 +773,14 @@
             if (should_throw) {
               throw MakeTypeError("redefine_disallowed", [p]);
             } else {
-              return;
+              return false;
             }
           }
           if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
             if (should_throw) {
               throw MakeTypeError("redefine_disallowed", [p]);
             } else {
-              return;
+              return false;
             }
           }
         }
@@ -836,10 +834,6 @@
     }
 
     %DefineOrRedefineDataProperty(obj, p, value, flag);
-  } else if (IsGenericDescriptor(desc)) {
-    // Step 12 - updating an existing accessor property with generic
-    //           descriptor. Changing flags only.
-    %DefineOrRedefineAccessorProperty(obj, p, GETTER, current.getGet(), flag);
   } else {
     // There are 3 cases that lead here:
     // Step 4b - defining a new accessor property.
@@ -847,30 +841,113 @@
     //                 property.
     // Step 12 - updating an existing accessor property with an accessor
     //           descriptor.
-    if (desc.hasGetter()) {
-      %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
-    }
-    if (desc.hasSetter()) {
-      %DefineOrRedefineAccessorProperty(obj, p, SETTER, desc.getSet(), flag);
-    }
+    var getter = desc.hasGetter() ? desc.getGet() : null;
+    var setter = desc.hasSetter() ? desc.getSet() : null;
+    %DefineOrRedefineAccessorProperty(obj, p, getter, setter, flag);
   }
   return true;
 }
 
 
+// ES5 section 15.4.5.1.
+function DefineArrayProperty(obj, p, desc, should_throw) {
+  // Note that the length of an array is not actually stored as part of the
+  // property, hence we use generated code throughout this function instead of
+  // DefineObjectProperty() to modify its value.
+
+  // Step 3 - Special handling for length property.
+  if (p == "length") {
+    var length = obj.length;
+    if (!desc.hasValue()) {
+      return DefineObjectProperty(obj, "length", desc, should_throw);
+    }
+    var new_length = ToUint32(desc.getValue());
+    if (new_length != ToNumber(desc.getValue())) {
+      throw new $RangeError('defineProperty() array length out of range');
+    }
+    var length_desc = GetOwnProperty(obj, "length");
+    if (new_length != length && !length_desc.isWritable()) {
+      if (should_throw) {
+        throw MakeTypeError("redefine_disallowed", [p]);
+      } else {
+        return false;
+      }
+    }
+    var threw = false;
+    while (new_length < length--) {
+      if (!Delete(obj, ToString(length), false)) {
+        new_length = length + 1;
+        threw = true;
+        break;
+      }
+    }
+    // Remove the value from the descriptor so that the DefineObjectProperty()
+    // call below cannot overwrite the magic "length" property.
+    obj.length = new_length;
+    desc.value_ = void 0;
+    desc.hasValue_ = false;
+    if (!DefineObjectProperty(obj, "length", desc, should_throw) || threw) {
+      if (should_throw) {
+        throw MakeTypeError("redefine_disallowed", [p]);
+      } else {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  // Step 4 - Special handling for array index.
+  var index = ToUint32(p);
+  if (index == ToNumber(p) && index != 4294967295) {
+    var length = obj.length;
+    var length_desc = GetOwnProperty(obj, "length");
+    if ((index >= length && !length_desc.isWritable()) ||
+        !DefineObjectProperty(obj, p, desc, true)) {
+      if (should_throw) {
+        throw MakeTypeError("define_disallowed", [p]);
+      } else {
+        return false;
+      }
+    }
+    if (index >= length) {
+      obj.length = index + 1;
+    }
+    return true;
+  }
+
+  // Step 5 - Fallback to default implementation.
+  return DefineObjectProperty(obj, p, desc, should_throw);
+}
+
+
+// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
+function DefineOwnProperty(obj, p, desc, should_throw) {
+  if (%IsJSProxy(obj)) {
+    var attributes = FromGenericPropertyDescriptor(desc);
+    return DefineProxyProperty(obj, p, attributes, should_throw);
+  } else if (IS_ARRAY(obj)) {
+    return DefineArrayProperty(obj, p, desc, should_throw);
+  } else {
+    return DefineObjectProperty(obj, p, desc, should_throw);
+  }
+}
+
+
 // ES5 section 15.2.3.2.
 function ObjectGetPrototypeOf(obj) {
-  if (!IS_SPEC_OBJECT(obj))
-    throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
+  if (!IS_SPEC_OBJECT(obj)) {
+    throw MakeTypeError("called_on_non_object", ["Object.getPrototypeOf"]);
+  }
   return %GetPrototype(obj);
 }
 
 
 // ES5 section 15.2.3.3
 function ObjectGetOwnPropertyDescriptor(obj, p) {
-  if (!IS_SPEC_OBJECT(obj))
-    throw MakeTypeError("obj_ctor_property_non_object",
-                        ["getOwnPropertyDescriptor"]);
+  if (!IS_SPEC_OBJECT(obj)) {
+    throw MakeTypeError("called_on_non_object",
+                        ["Object.getOwnPropertyDescriptor"]);
+  }
   var desc = GetOwnProperty(obj, p);
   return FromPropertyDescriptor(desc);
 }
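
DefineArrayProperty implements the ES5 15.4.5.1 special cases for "length" and
for array indices. A sketch of the behavior it is meant to produce:

  var a = [1, 2, 3];
  Object.defineProperty(a, "length", { value: 1 });     // truncates a to [1]
  Object.defineProperty(a, "length", { writable: false });
  // Defining an index at or beyond a non-writable length must now fail:
  //   Object.defineProperty(a, "1", { value: 9 });     // throws TypeError
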
@@ -883,14 +960,14 @@
   }
   var n = ToUint32(obj.length);
   var array = new $Array(n);
-  var names = {}
+  var names = {};  // TODO(rossberg): use sets once they are ready.
   for (var index = 0; index < n; index++) {
     var s = ToString(obj[index]);
     if (s in names) {
-      throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s])
+      throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]);
     }
     array[index] = s;
-    names.s = 0;
+    names[s] = 0;
   }
   return array;
 }
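
The names[s] fix above repairs a real bug: names.s always created the literal
key "s" rather than the computed name, so duplicate property names coming out
of a proxy trap were never detected. In plain JavaScript:

  var names = {};
  var s = "foo";
  names.s = 0;       // defines the key "s", not "foo"
  "foo" in names;    // false -- the duplicate check could never fire
  names[s] = 0;      // defines "foo", as intended
  "foo" in names;    // true
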
@@ -898,9 +975,9 @@
 
 // ES5 section 15.2.3.4.
 function ObjectGetOwnPropertyNames(obj) {
-  if (!IS_SPEC_OBJECT(obj))
-    throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
-
+  if (!IS_SPEC_OBJECT(obj)) {
+    throw MakeTypeError("called_on_non_object", ["Object.getOwnPropertyNames"]);
+  }
   // Special handling for proxies.
   if (%IsJSProxy(obj)) {
     var handler = %GetHandler(obj);
@@ -917,8 +994,9 @@
   if (%GetInterceptorInfo(obj) & 1) {
     var indexedInterceptorNames =
         %GetIndexedInterceptorElementNames(obj);
-    if (indexedInterceptorNames)
+    if (indexedInterceptorNames) {
       propertyNames = propertyNames.concat(indexedInterceptorNames);
+    }
   }
 
   // Find all the named properties.
@@ -944,8 +1022,9 @@
     // We need to check for the exact property value since, for intrinsic
     // properties like toString, a check of the form
     // if (propertySet["toString"]) would always succeed.
-    if (propertySet[name] === true)
+    if (propertySet[name] === true) {
       continue;
+    }
     propertySet[name] = true;
     propertyNames[j++] = name;
   }
@@ -970,7 +1049,7 @@
 // ES5 section 15.2.3.6.
 function ObjectDefineProperty(obj, p, attributes) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
+    throw MakeTypeError("called_on_non_object", ["Object.defineProperty"]);
   }
   var name = ToString(p);
   if (%IsJSProxy(obj)) {
@@ -1021,14 +1100,17 @@
 
 // ES5 section 15.2.3.7.
 function ObjectDefineProperties(obj, properties) {
-  if (!IS_SPEC_OBJECT(obj))
-    throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
+  if (!IS_SPEC_OBJECT(obj)) {
+    throw MakeTypeError("called_on_non_object", ["Object.defineProperties"]);
+  }
   var props = ToObject(properties);
   var names = GetOwnEnumerablePropertyNames(props);
+  var descriptors = new InternalArray();
   for (var i = 0; i < names.length; i++) {
-    var name = names[i];
-    var desc = ToPropertyDescriptor(props[name]);
-    DefineOwnProperty(obj, name, desc, true);
+    descriptors.push(ToPropertyDescriptor(props[names[i]]));
+  }
+  for (var i = 0; i < names.length; i++) {
+    DefineOwnProperty(obj, names[i], descriptors[i], true);
   }
   return obj;
 }
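
ObjectDefineProperties now converts every descriptor before defining any
property, so one bad descriptor aborts the call without partial updates. A
sketch of the observable difference:

  var target = {};
  try {
    Object.defineProperties(target, {
      a: { value: 1 },
      b: { get: "not a function" }   // ToPropertyDescriptor throws here
    });
  } catch (e) {
    "a" in target;   // false -- under the old one-pass loop, 'a' was defined
  }
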
@@ -1042,12 +1124,20 @@
     throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
   }
 
-  if (IS_SPEC_FUNCTION(obj)) {
+  if (%IsJSFunctionProxy(obj)) {
     var callTrap = %GetCallTrap(obj);
     var constructTrap = %GetConstructTrap(obj);
     var code = DelegateCallAndConstruct(callTrap, constructTrap);
     %Fix(obj);  // becomes a regular function
     %SetCode(obj, code);
+    // TODO(rossberg): What about length and other properties? Not specified.
+    // We just put in some half-reasonable defaults for now.
+    var prototype = new $Object();
+    $Object.defineProperty(prototype, "constructor",
+      {value: obj, writable: true, enumerable: false, configurable: true});
+    // TODO(v8:1530): defineProperty does not handle prototype and length.
+    %FunctionSetPrototype(obj, prototype);
+    obj.length = 0;
   } else {
     %Fix(obj);
   }
@@ -1058,7 +1148,7 @@
 // ES5 section 15.2.3.8.
 function ObjectSeal(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("obj_ctor_property_non_object", ["seal"]);
+    throw MakeTypeError("called_on_non_object", ["Object.seal"]);
   }
   if (%IsJSProxy(obj)) {
     ProxyFix(obj);
@@ -1080,7 +1170,7 @@
 // ES5 section 15.2.3.9.
 function ObjectFreeze(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("obj_ctor_property_non_object", ["freeze"]);
+    throw MakeTypeError("called_on_non_object", ["Object.freeze"]);
   }
   if (%IsJSProxy(obj)) {
     ProxyFix(obj);
@@ -1103,7 +1193,7 @@
 // ES5 section 15.2.3.10
 function ObjectPreventExtension(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
+    throw MakeTypeError("called_on_non_object", ["Object.preventExtension"]);
   }
   if (%IsJSProxy(obj)) {
     ProxyFix(obj);
@@ -1116,7 +1206,7 @@
 // ES5 section 15.2.3.11
 function ObjectIsSealed(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("obj_ctor_property_non_object", ["isSealed"]);
+    throw MakeTypeError("called_on_non_object", ["Object.isSealed"]);
   }
   if (%IsJSProxy(obj)) {
     return false;
@@ -1137,7 +1227,7 @@
 // ES5 section 15.2.3.12
 function ObjectIsFrozen(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("obj_ctor_property_non_object", ["isFrozen"]);
+    throw MakeTypeError("called_on_non_object", ["Object.isFrozen"]);
   }
   if (%IsJSProxy(obj)) {
     return false;
@@ -1159,7 +1249,7 @@
 // ES5 section 15.2.3.13
 function ObjectIsExtensible(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("obj_ctor_property_non_object", ["isExtensible"]);
+    throw MakeTypeError("called_on_non_object", ["Object.isExtensible"]);
   }
   if (%IsJSProxy(obj)) {
     return true;
@@ -1168,6 +1258,16 @@
 }
 
 
+// Harmony egal.
+function ObjectIs(obj1, obj2) {
+  if (obj1 === obj2) {
+    return (obj1 !== 0) || (1 / obj1 === 1 / obj2);
+  } else {
+    return (obj1 !== obj1) && (obj2 !== obj2);
+  }
+}
+
+
 %SetCode($Object, function(x) {
   if (%_IsConstructCall()) {
     if (x == null) return this;
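
ObjectIs is the Harmony "egal" comparison: like === except that NaN is equal
to itself and +0 is distinguished from -0 (via the sign of 1/x). For example:

  Object.is(NaN, NaN);   // true  (NaN !== NaN under ===)
  Object.is(0, -0);      // false (1/0 and 1/-0 have different signs)
  Object.is("a", "a");   // true
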
@@ -1207,6 +1307,7 @@
     "getPrototypeOf", ObjectGetPrototypeOf,
     "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
     "getOwnPropertyNames", ObjectGetOwnPropertyNames,
+    "is", ObjectIs,
     "isExtensible", ObjectIsExtensible,
     "isFrozen", ObjectIsFrozen,
     "isSealed", ObjectIsSealed,
@@ -1237,8 +1338,9 @@
 function BooleanValueOf() {
   // NOTE: Both Boolean objects and values can enter here as
   // 'this'. This is not as dictated by ECMA-262.
-  if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this))
+  if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this)) {
     throw new $TypeError('Boolean.prototype.valueOf is not generic');
+  }
   return %_ValueOf(this);
 }
 
@@ -1278,8 +1380,9 @@
   // 'this'. This is not as dictated by ECMA-262.
   var number = this;
   if (!IS_NUMBER(this)) {
-    if (!IS_NUMBER_WRAPPER(this))
+    if (!IS_NUMBER_WRAPPER(this)) {
       throw new $TypeError('Number.prototype.toString is not generic');
+    }
     // Get the value of this number in case it's an object.
     number = %_ValueOf(this);
   }
@@ -1312,8 +1415,9 @@
 function NumberValueOf() {
   // NOTE: Both Number objects and values can enter here as
   // 'this'. This is not as dictated by ECMA-262.
-  if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this))
+  if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this)) {
     throw new $TypeError('Number.prototype.valueOf is not generic');
+  }
   return %_ValueOf(this);
 }
 
@@ -1339,7 +1443,8 @@
   if (!IS_UNDEFINED(fractionDigits)) {
     f = TO_INTEGER(fractionDigits);
     if (f < 0 || f > 20) {
-      throw new $RangeError("toExponential() argument must be between 0 and 20");
+      throw new $RangeError(
+          "toExponential() argument must be between 0 and 20");
     }
   }
   if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
@@ -1367,6 +1472,18 @@
 }
 
 
+// Harmony isFinite.
+function NumberIsFinite(number) {
+  return IS_NUMBER(number) && NUMBER_IS_FINITE(number);
+}
+
+
+// Harmony isNaN.
+function NumberIsNaN(number) {
+  return IS_NUMBER(number) && NUMBER_IS_NAN(number);
+}
+
+
 // ----------------------------------------------------------------------------
 
 function SetUpNumber() {
@@ -1383,7 +1500,8 @@
                DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // ECMA-262 section 15.7.3.2.
-  %SetProperty($Number, "MIN_VALUE", 5e-324, DONT_ENUM | DONT_DELETE | READ_ONLY);
+  %SetProperty($Number, "MIN_VALUE", 5e-324,
+               DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // ECMA-262 section 15.7.3.3.
   %SetProperty($Number, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -1410,6 +1528,10 @@
     "toExponential", NumberToExponential,
     "toPrecision", NumberToPrecision
   ));
+  InstallFunctions($Number, DONT_ENUM, $Array(
+    "isFinite", NumberIsFinite,
+    "isNaN", NumberIsNaN
+  ));
 }
 
 SetUpNumber();
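
Unlike the global isFinite and isNaN, the new Number.isFinite and Number.isNaN
never coerce their argument; non-numbers always yield false. For example:

  Number.isNaN("NaN");    // false -- no ToNumber coercion
  isNaN("NaN");           // true
  Number.isFinite("1");   // false
  isFinite("1");          // true
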
@@ -1455,53 +1577,54 @@
 // ES5 15.3.4.5
 function FunctionBind(this_arg) { // Length is 1.
   if (!IS_SPEC_FUNCTION(this)) {
-      throw new $TypeError('Bind must be called on a function');
+    throw new $TypeError('Bind must be called on a function');
   }
-  // this_arg is not an argument that should be bound.
-  var argc_bound = (%_ArgumentsLength() || 1) - 1;
-  var fn = this;
-
-  if (argc_bound == 0) {
-    var result = function() {
-      if (%_IsConstructCall()) {
-        // %NewObjectFromBound implicitly uses arguments passed to this
-        // function. We do not pass the arguments object explicitly to avoid
-        // materializing it and guarantee that this function will be optimized.
-        return %NewObjectFromBound(fn, null);
-      }
-      return %Apply(fn, this_arg, arguments, 0, %_ArgumentsLength());
-    };
-  } else {
-    var bound_args = new InternalArray(argc_bound);
-    for(var i = 0; i < argc_bound; i++) {
-      bound_args[i] = %_Arguments(i+1);
+  var boundFunction = function () {
+    // Poison .arguments and .caller, but remain otherwise undetectable.
+    "use strict";
+    // This function must not use any object literals (Object, Array, RegExp),
+    // since the literals-array is being used to store the bound data.
+    if (%_IsConstructCall()) {
+      return %NewObjectFromBound(boundFunction);
     }
+    var bindings = %BoundFunctionGetBindings(boundFunction);
 
-    var result = function() {
-      // If this is a construct call we use a special runtime method
-      // to generate the actual object using the bound function.
-      if (%_IsConstructCall()) {
-        // %NewObjectFromBound implicitly uses arguments passed to this
-        // function. We do not pass the arguments object explicitly to avoid
-        // materializing it and guarantee that this function will be optimized.
-        return %NewObjectFromBound(fn, bound_args);
-      }
+    var argc = %_ArgumentsLength();
+    if (argc == 0) {
+      return %Apply(bindings[0], bindings[1], bindings, 2, bindings.length - 2);
+    }
+    if (bindings.length === 2) {
+      return %Apply(bindings[0], bindings[1], arguments, 0, argc);
+    }
+    var bound_argc = bindings.length - 2;
+    var argv = new InternalArray(bound_argc + argc);
+    for (var i = 0; i < bound_argc; i++) {
+      argv[i] = bindings[i + 2];
+    }
+    for (var j = 0; j < argc; j++) {
+      argv[i++] = %_Arguments(j);
+    }
+    return %Apply(bindings[0], bindings[1], argv, 0, bound_argc + argc);
+  };
 
-      // Combine the args we got from the bind call with the args
-      // given as argument to the invocation.
+  %FunctionRemovePrototype(boundFunction);
+  var new_length = 0;
+  if (%_ClassOf(this) == "Function") {
+    // Function or FunctionProxy.
+    var old_length = this.length;
+    // FunctionProxies might provide a non-UInt32 value. If so, ignore it.
+    if ((typeof old_length === "number") &&
+        ((old_length >>> 0) === old_length)) {
       var argc = %_ArgumentsLength();
-      var args = new InternalArray(argc + argc_bound);
-      // Add bound arguments.
-      for (var i = 0; i < argc_bound; i++) {
-        args[i] = bound_args[i];
-      }
-      // Add arguments from call.
-      for (var i = 0; i < argc; i++) {
-        args[argc_bound + i] = %_Arguments(i);
-      }
-      return %Apply(fn, this_arg, args, 0, argc + argc_bound);
-    };
+      if (argc > 0) argc--;  // Don't count the thisArg as a parameter.
+      new_length = old_length - argc;
+      if (new_length < 0) new_length = 0;
+    }
   }
+  // This runtime function finds any remaining arguments on the stack,
+  // so we don't pass the arguments object.
+  var result = %FunctionBindArguments(boundFunction, this,
+                                      this_arg, new_length);
 
   // We already have caller and arguments properties on functions,
   // which are non-configurable. It therefore makes no sense to
@@ -1509,17 +1632,7 @@
   // that bind should make these throw a TypeError if get or set
   // is called and make them non-enumerable and non-configurable.
   // To be consistent with our normal functions we leave this as it is.
-
-  %FunctionRemovePrototype(result);
-  %FunctionSetBound(result);
-  // Set the correct length. If this is a function proxy, this.length might
-  // throw, or return a bogus result. Leave length alone in that case.
-  // TODO(rossberg): This is underspecified in the current proxy proposal.
-  try {
-    var old_length = ToInteger(this.length);
-    var length = (old_length - argc_bound) > 0 ? old_length - argc_bound : 0;
-    %BoundFunctionSetLength(result, length);
-  } catch(x) {}
+  // TODO(lrn): Do set these to be thrower.
   return result;
 }
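
The rewritten FunctionBind keeps the target, receiver and bound arguments in
the wrapper's bindings and computes the bound function's length as the
target's length minus the number of bound arguments, clamped at zero, per
ES5 15.3.4.5. A sketch of the resulting behavior:

  function add(a, b, c) { return a + b + c; }
  var add1 = add.bind(null, 1);
  add1.length;                        // 2 == add.length - 1 bound argument
  add1(2, 3);                         // 6
  new (add.bind(null, 1, 2, 3))() instanceof add;   // true -- 'new' ignores
                                                    // the bound receiver
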
 
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 3881d66..fd8d536 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -154,7 +154,7 @@
 
 bool ThreadManager::RestoreThread() {
   ASSERT(IsLockedByCurrentThread());
-  // First check whether the current thread has been 'lazily archived', ie
+  // First check whether the current thread has been 'lazily archived', i.e.
   // not archived at all.  If that is the case we put the state storage we
   // had prepared back in the free list, since we didn't need it after all.
   if (lazily_archived_thread_.Equals(ThreadId::Current())) {
diff --git a/src/v8utils.cc b/src/v8utils.cc
index bf0e05d..042a60f 100644
--- a/src/v8utils.cc
+++ b/src/v8utils.cc
@@ -316,7 +316,7 @@
   for (const char* p = data_; p < end; p++) {
     char c = *p;
     if ((c & 0x80) != 0) {
-      // Non-ascii detected:
+      // Non-ASCII detected:
       is_ascii = false;
 
       // Report the error and abort if appropriate:
@@ -329,7 +329,7 @@
                c, filename_, line_no, char_no);
 
         // Allow for some context up to kNumberOfLeadingContextChars chars
-        // before the offending non-ascii char to help the user see where
+        // before the offending non-ASCII char to help the user see where
         // the offending char is.
         const int kNumberOfLeadingContextChars = 10;
         const char* err_context = p - kNumberOfLeadingContextChars;
@@ -345,7 +345,7 @@
         OS::Abort();
       }
 
-      break;  // Non-ascii detected.  No need to continue scanning.
+      break;  // Non-ASCII detected.  No need to continue scanning.
     }
     if (c == '\n') {
       start_of_line = p;
diff --git a/src/v8utils.h b/src/v8utils.h
index aada521..c73222a 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -142,8 +142,14 @@
 }
 
 
-template <typename T>
-static inline void MemsetPointer(T** dest, T* value, int counter) {
+template <typename T, typename U>
+inline void MemsetPointer(T** dest, U* value, int counter) {
+#ifdef DEBUG
+  T* a = NULL;
+  U* b = NULL;
+  a = b;  // Fake assignment to check assignability.
+  USE(a);
+#endif  // DEBUG
 #if defined(V8_HOST_ARCH_IA32)
 #define STOS "stosl"
 #elif defined(V8_HOST_ARCH_X64)
@@ -196,7 +202,7 @@
 
 // Copy from ASCII/16bit chars to ASCII/16bit chars.
 template <typename sourcechar, typename sinkchar>
-static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
   sinkchar* limit = dest + chars;
 #ifdef V8_HOST_CAN_READ_UNALIGNED
   if (sizeof(*dest) == sizeof(*src)) {
diff --git a/src/variables.cc b/src/variables.cc
index 971061b..32ad5bc 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -37,10 +37,11 @@
 // ----------------------------------------------------------------------------
 // Implementation Variable.
 
-const char* Variable::Mode2String(Mode mode) {
+const char* Variable::Mode2String(VariableMode mode) {
   switch (mode) {
     case VAR: return "VAR";
     case CONST: return "CONST";
+    case CONST_HARMONY: return "CONST";
     case LET: return "LET";
     case DYNAMIC: return "DYNAMIC";
     case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
@@ -55,21 +56,28 @@
 
 Variable::Variable(Scope* scope,
                    Handle<String> name,
-                   Mode mode,
+                   VariableMode mode,
                    bool is_valid_LHS,
-                   Kind kind)
+                   Kind kind,
+                   InitializationFlag initialization_flag,
+                   Interface* interface)
   : scope_(scope),
     name_(name),
     mode_(mode),
     kind_(kind),
     location_(UNALLOCATED),
     index_(-1),
+    initializer_position_(RelocInfo::kNoPosition),
     local_if_not_shadowed_(NULL),
     is_valid_LHS_(is_valid_LHS),
-    is_accessed_from_inner_scope_(false),
-    is_used_(false) {
-  // names must be canonicalized for fast equality checks
+    force_context_allocation_(false),
+    is_used_(false),
+    initialization_flag_(initialization_flag),
+    interface_(interface) {
+  // Names must be canonicalized for fast equality checks.
   ASSERT(name->IsSymbol());
+  // Var declared variables never need initialization.
+  ASSERT(!(mode == VAR && initialization_flag == kNeedsInitialization));
 }
 
 
@@ -79,4 +87,12 @@
   return mode_ != TEMPORARY && scope_ != NULL && scope_->is_global_scope();
 }
 
+
+int Variable::CompareIndex(Variable* const* v, Variable* const* w) {
+  int x = (*v)->index();
+  int y = (*w)->index();
+  // Consider sorting them according to type as well?
+  return x - y;
+}
+
 } }  // namespace v8::internal
diff --git a/src/variables.h b/src/variables.h
index 56c8dab..f49b6e1 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -29,6 +29,7 @@
 #define V8_VARIABLES_H_
 
 #include "zone.h"
+#include "interface.h"
 
 namespace v8 {
 namespace internal {
@@ -40,34 +41,6 @@
 
 class Variable: public ZoneObject {
  public:
-  enum Mode {
-    // User declared variables:
-    VAR,       // declared via 'var', and 'function' declarations
-
-    CONST,     // declared via 'const' declarations
-
-    LET,       // declared via 'let' declarations
-
-    // Variables introduced by the compiler:
-    DYNAMIC,         // always require dynamic lookup (we don't know
-                     // the declaration)
-
-    DYNAMIC_GLOBAL,  // requires dynamic lookup, but we know that the
-                     // variable is global unless it has been shadowed
-                     // by an eval-introduced variable
-
-    DYNAMIC_LOCAL,   // requires dynamic lookup, but we know that the
-                     // variable is local and where it is unless it
-                     // has been shadowed by an eval-introduced
-                     // variable
-
-    INTERNAL,        // like VAR, but not user-visible (may or may not
-                     // be in a context)
-
-    TEMPORARY        // temporary variables (not user-visible), never
-                     // in a context
-  };
-
   enum Kind {
     NORMAL,
     THIS,
@@ -103,12 +76,14 @@
 
   Variable(Scope* scope,
            Handle<String> name,
-           Mode mode,
+           VariableMode mode,
            bool is_valid_lhs,
-           Kind kind);
+           Kind kind,
+           InitializationFlag initialization_flag,
+           Interface* interface = Interface::NewValue());
 
   // Printing support
-  static const char* Mode2String(Mode mode);
+  static const char* Mode2String(VariableMode mode);
 
   bool IsValidLeftHandSide() { return is_valid_LHS_; }
 
@@ -119,17 +94,20 @@
   Scope* scope() const { return scope_; }
 
   Handle<String> name() const { return name_; }
-  Mode mode() const { return mode_; }
-  bool is_accessed_from_inner_scope() const {
-    return is_accessed_from_inner_scope_;
+  VariableMode mode() const { return mode_; }
+  bool has_forced_context_allocation() const {
+    return force_context_allocation_;
   }
-  void MarkAsAccessedFromInnerScope() {
+  void ForceContextAllocation() {
     ASSERT(mode_ != TEMPORARY);
-    is_accessed_from_inner_scope_ = true;
+    force_context_allocation_ = true;
   }
   bool is_used() { return is_used_; }
   void set_is_used(bool flag) { is_used_ = flag; }
 
+  int initializer_position() { return initializer_position_; }
+  void set_initializer_position(int pos) { initializer_position_ = pos; }
+
   bool IsVariable(Handle<String> n) const {
     return !is_this() && name().is_identical_to(n);
   }
@@ -146,6 +124,13 @@
             mode_ == DYNAMIC_GLOBAL ||
             mode_ == DYNAMIC_LOCAL);
   }
+  bool is_const_mode() const {
+    return (mode_ == CONST ||
+            mode_ == CONST_HARMONY);
+  }
+  bool binding_needs_init() const {
+    return initialization_flag_ == kNeedsInitialization;
+  }
 
   bool is_global() const;
   bool is_this() const { return kind_ == THIS; }
@@ -153,8 +138,7 @@
 
   // True if the variable is named eval and not known to be shadowed.
   bool is_possibly_eval() const {
-    return IsVariable(FACTORY->eval_symbol()) &&
-        (mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
+    return IsVariable(FACTORY->eval_symbol());
   }
 
   Variable* local_if_not_shadowed() const {
@@ -168,28 +152,43 @@
 
   Location location() const { return location_; }
   int index() const { return index_; }
+  InitializationFlag initialization_flag() const {
+    return initialization_flag_;
+  }
+  Interface* interface() const { return interface_; }
 
   void AllocateTo(Location location, int index) {
     location_ = location;
     index_ = index;
   }
 
+  static int CompareIndex(Variable* const* v, Variable* const* w);
+
  private:
   Scope* scope_;
   Handle<String> name_;
-  Mode mode_;
+  VariableMode mode_;
   Kind kind_;
   Location location_;
   int index_;
+  int initializer_position_;
 
+  // If this field is set, this variable references the stored locally bound
+  // variable, but it might be shadowed by variable bindings introduced by
+  // non-strict 'eval' calls between the reference scope (inclusive) and the
+  // binding scope (exclusive).
   Variable* local_if_not_shadowed_;
 
   // Valid as an LHS? (const and this are not valid LHS, for example)
   bool is_valid_LHS_;
 
   // Usage info.
-  bool is_accessed_from_inner_scope_;  // set by variable resolver
+  bool force_context_allocation_;  // set by variable resolver
   bool is_used_;
+  InitializationFlag initialization_flag_;
+
+  // Module type info.
+  Interface* interface_;
 };
 
 
diff --git a/src/version.cc b/src/version.cc
index 2c21152..c4fc0d7 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     6
-#define BUILD_NUMBER      6
-#define PATCH_LEVEL       19
+#define MINOR_VERSION     9
+#define BUILD_NUMBER      24
+#define PATCH_LEVEL       13
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/weakmap.js b/src/weakmap.js
deleted file mode 100644
index 5fb5151..0000000
--- a/src/weakmap.js
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// This file relies on the fact that the following declaration has been made
-// in runtime.js:
-// const $Object = global.Object;
-const $WeakMap = global.WeakMap;
-
-// -------------------------------------------------------------------
-
-function WeakMapConstructor() {
-  if (%_IsConstructCall()) {
-    %WeakMapInitialize(this);
-  } else {
-    return new $WeakMap();
-  }
-}
-
-
-function WeakMapGet(key) {
-  if (!IS_SPEC_OBJECT(key)) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return %WeakMapGet(this, key);
-}
-
-
-function WeakMapSet(key, value) {
-  if (!IS_SPEC_OBJECT(key)) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return %WeakMapSet(this, key, value);
-}
-
-
-function WeakMapHas(key) {
-  if (!IS_SPEC_OBJECT(key)) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return !IS_UNDEFINED(%WeakMapGet(this, key));
-}
-
-
-function WeakMapDelete(key) {
-  if (!IS_SPEC_OBJECT(key)) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  if (!IS_UNDEFINED(%WeakMapGet(this, key))) {
-    %WeakMapSet(this, key, void 0);
-    return true;
-  } else {
-    return false;
-  }
-}
-
-// -------------------------------------------------------------------
-
-(function () {
-  %CheckIsBootstrapping();
-  // Set up the WeakMap constructor function.
-  %SetCode($WeakMap, WeakMapConstructor);
-
-  // Set up the constructor property on the WeakMap prototype object.
-  %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
-
-  // Set up the non-enumerable functions on the WeakMap prototype object.
-  InstallFunctionsOnHiddenPrototype($WeakMap.prototype, DONT_ENUM, $Array(
-    "get", WeakMapGet,
-    "set", WeakMapSet,
-    "has", WeakMapHas,
-    "delete", WeakMapDelete
-  ));
-})();
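(The deleted WeakMapDelete above emulates deletion by probing for the key and then
overwriting with undefined; a hedged C++ analogue of that report-presence-then-clear
pattern, using std::unordered_map purely for illustration:)

    #include <unordered_map>

    // Returns whether the key was present, like WeakMapDelete's boolean result.
    bool DeleteReportingPresence(std::unordered_map<void*, int>& map, void* key) {
      auto it = map.find(key);
      if (it == map.end()) return false;  // %WeakMapGet returned undefined
      map.erase(it);  // the JS version stores undefined instead of erasing
      return true;
    }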
diff --git a/src/win32-headers.h b/src/win32-headers.h
index fca5c13..5d9c89e 100644
--- a/src/win32-headers.h
+++ b/src/win32-headers.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -56,6 +56,7 @@
 #include <windows.h>
 
 #ifdef V8_WIN32_HEADERS_FULL
+#include <signal.h>  // For raise().
 #include <time.h>  // For LocalOffset() implementation.
 #include <mmsystem.h>  // For timeGetTime().
 #ifdef __MINGW32__
@@ -64,10 +65,10 @@
 #undef _WIN32_WINNT
 #define _WIN32_WINNT 0x501
 #endif  // __MINGW32__
-#ifndef __MINGW32__
+#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
 #include <dbghelp.h>  // For SymLoadModule64 and al.
 #include <errno.h>  // For STRUNCATE
-#endif  // __MINGW32__
+#endif  // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
 #include <limits.h>  // For INT_MAX and al.
 #include <tlhelp32.h>  // For Module32First and al.
 
@@ -75,7 +76,10 @@
 // makes it impossible to have them elsewhere.
 #include <winsock2.h>
 #include <ws2tcpip.h>
-#include <process.h>  // for _beginthreadex()
+#ifndef __MINGW32__
+#include <wspiapi.h>
+#endif  // __MINGW32__
+#include <process.h>  // For _beginthreadex().
 #include <stdlib.h>
 #endif  // V8_WIN32_HEADERS_FULL
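(A hedged restatement of the revised guard's logic: MinGW-w64 defines
__MINGW64_VERSION_MAJOR in addition to __MINGW32__, so the condition admits MSVC
and MinGW-w64 while still excluding legacy MinGW, whose headers lack dbghelp's
Sym* APIs. Illustrative only; V8_HAS_DBGHELP is not a real macro:)

    #if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
    #define V8_HAS_DBGHELP 1  // MSVC or MinGW-w64
    #else
    #define V8_HAS_DBGHELP 0  // legacy MinGW
    #endif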
 
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 8db54f0..a9cc2ef 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,8 @@
 #ifndef V8_X64_ASSEMBLER_X64_INL_H_
 #define V8_X64_ASSEMBLER_X64_INL_H_
 
+#include "x64/assembler-x64.h"
+
 #include "cpu.h"
 #include "debug.h"
 #include "v8memory.h"
@@ -224,24 +226,31 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+                              || rmode_ == EMBEDDED_OBJECT
+                              || rmode_ == EXTERNAL_REFERENCE);
   return reinterpret_cast<Address>(pc_);
 }
 
 
 int RelocInfo::target_address_size() {
   if (IsCodedSpecially()) {
-    return Assembler::kCallTargetSize;
+    return Assembler::kSpecialTargetSize;
   } else {
-    return Assembler::kExternalTargetSize;
+    return kPointerSize;
   }
 }
 
 
-void RelocInfo::set_target_address(Address target) {
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   if (IsCodeTarget(rmode_)) {
     Assembler::set_target_address_at(pc_, target);
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+      host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+          host(), this, HeapObject::cast(target_code));
+    }
   } else {
     Memory::Address_at(pc_) = target;
     CPU::FlushICache(pc_, sizeof(Address));
@@ -255,7 +264,7 @@
 }
 
 
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   if (rmode_ == EMBEDDED_OBJECT) {
     return Memory::Object_Handle_at(pc_);
@@ -277,10 +286,16 @@
 }
 
 
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  *reinterpret_cast<Object**>(pc_) = target;
+  Memory::Object_at(pc_) = target;
   CPU::FlushICache(pc_, sizeof(Address));
+  if (mode == UPDATE_WRITE_BARRIER &&
+      host() != NULL &&
+      target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
 }
 
 
@@ -301,11 +316,19 @@
 }
 
 
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+                                WriteBarrierMode mode) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
   CPU::FlushICache(pc_, sizeof(Address));
+  if (mode == UPDATE_WRITE_BARRIER &&
+      host() != NULL) {
+    // TODO(1550): We are passing NULL as a slot because the cell can never
+    // be on an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), NULL, cell);
+  }
 }
 
 
@@ -344,6 +367,11 @@
       target;
   CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
                    sizeof(Address));
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
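(The setters above share one idiom; a condensed sketch of it, using the names that
appear in these hunks, not a new V8 helper:)

    // After mutating a slot inside a Code object, tell the incremental marker
    // about the new reference unless the caller opted out of the barrier.
    void SetCodeSlot(Code* host, Object** slot, Object* value,
                     WriteBarrierMode mode) {
      *slot = value;
      if (mode == UPDATE_WRITE_BARRIER && host != NULL && value->IsHeapObject()) {
        host->GetHeap()->incremental_marking()->RecordWrite(
            host, slot, HeapObject::cast(value));
      }
    }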
 
 
@@ -368,14 +396,14 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitPointer(target_object_address());
+    visitor->VisitEmbeddedPointer(this);
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    visitor->VisitExternalReference(target_reference_address());
+    visitor->VisitExternalReference(this);
     CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
@@ -396,14 +424,14 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitEmbeddedPointer(heap, this);
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    StaticVisitor::VisitExternalReference(target_reference_address());
+    StaticVisitor::VisitExternalReference(this);
     CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 745fdae..2f0c542 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,7 +47,7 @@
 
 
 void CpuFeatures::Probe() {
-  ASSERT(!initialized_);
+  ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -383,7 +383,7 @@
   }
 #endif
 
-  // Setup buffer pointers.
+  // Set up buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -412,7 +412,7 @@
   // Finalize code (at this point overflow() may be true, but the gap ensures
   // that we are still not overlapping instructions and relocation info).
   ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
-  // Setup code descriptor.
+  // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -426,13 +426,7 @@
 void Assembler::Align(int m) {
   ASSERT(IsPowerOf2(m));
   int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
-  while (delta >= 9) {
-    nop(9);
-    delta -= 9;
-  }
-  if (delta > 0) {
-    nop(delta);
-  }
+  Nop(delta);
 }
 
 
@@ -441,6 +435,15 @@
 }
 
 
+bool Assembler::IsNop(Address addr) {
+  Address a = addr;
+  while (*a == 0x66) a++;
+  if (*a == 0x90) return true;
+  if (a[0] == 0xf && a[1] == 0x1f) return true;
+  return false;
+}
+
+
 void Assembler::bind_to(Label* L, int pos) {
   ASSERT(!L->is_bound());  // Label may only be bound once.
   ASSERT(0 <= pos && pos <= pc_offset());  // Position must be valid.
@@ -499,7 +502,7 @@
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
-  // Setup new buffer.
+  // Set up new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
   desc.instr_size = pc_offset();
   desc.reloc_size =
@@ -772,7 +775,7 @@
                                           Register dst,
                                           Immediate src) {
   EnsureSpace ensure_space(this);
-  if (dst.code() > 3) {
+  if (!dst.is_byte_register()) {
     // Use 64-bit mode byte registers.
     emit_rex_64(dst);
   }
@@ -1056,7 +1059,7 @@
 
 void Assembler::decb(Register dst) {
   EnsureSpace ensure_space(this);
-  if (dst.code() > 3) {
+  if (!dst.is_byte_register()) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(dst);
   }
@@ -1384,7 +1387,7 @@
 
 void Assembler::movb(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  if (dst.code() > 3) {
+  if (!dst.is_byte_register()) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(dst, src);
   } else {
@@ -1397,7 +1400,7 @@
 
 void Assembler::movb(Register dst, Immediate imm) {
   EnsureSpace ensure_space(this);
-  if (dst.code() > 3) {
+  if (!dst.is_byte_register()) {
     emit_rex_32(dst);
   }
   emit(0xB0 + dst.low_bits());
@@ -1407,7 +1410,7 @@
 
 void Assembler::movb(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  if (src.code() > 3) {
+  if (!src.is_byte_register()) {
     emit_rex_32(src, dst);
   } else {
     emit_optional_rex_32(src, dst);
@@ -1637,6 +1640,8 @@
 
 void Assembler::movzxbq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
+  // 32-bit operations zero the top 32 bits of 64-bit registers.  Therefore
+  // there is no need to make this a 64-bit operation.
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xB6);
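(The comment added above relies on an x86-64 architectural rule: any write to a
32-bit register clears bits 63..32 of the full register. A hedged illustration of
why the 32-bit movzx form suffices:)

    #include <cstdint>

    uint64_t ZeroExtendByte(uint8_t b) {
      uint32_t low = b;                    // like movzxbl; no REX.W needed
      return static_cast<uint64_t>(low);   // bits 63..32 are already zero
    }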
@@ -1763,7 +1768,7 @@
 }
 
 
-void Assembler::nop(int n) {
+void Assembler::Nop(int n) {
   // The recommended multi-byte sequences of NOP instructions from the Intel 64
   // and IA-32 Architectures Software Developer's Manual.
   //
@@ -1778,73 +1783,64 @@
   // 9 bytes  66 NOP DWORD ptr [EAX + EAX*1 +         66 0F 1F 84 00 00 00 00
   //          00000000H]                              00H
 
-  ASSERT(1 <= n);
-  ASSERT(n <= 9);
   EnsureSpace ensure_space(this);
-  switch (n) {
-  case 1:
-    emit(0x90);
-    return;
-  case 2:
-    emit(0x66);
-    emit(0x90);
-    return;
-  case 3:
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x00);
-    return;
-  case 4:
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x40);
-    emit(0x00);
-    return;
-  case 5:
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x44);
-    emit(0x00);
-    emit(0x00);
-    return;
-  case 6:
-    emit(0x66);
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x44);
-    emit(0x00);
-    emit(0x00);
-    return;
-  case 7:
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x80);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    return;
-  case 8:
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x84);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    return;
-  case 9:
-    emit(0x66);
-    emit(0x0f);
-    emit(0x1f);
-    emit(0x84);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    emit(0x00);
-    return;
+  while (n > 0) {
+    switch (n) {
+      case 2:
+        emit(0x66);
+      case 1:
+        emit(0x90);
+        return;
+      case 3:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x00);
+        return;
+      case 4:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x40);
+        emit(0x00);
+        return;
+      case 6:
+        emit(0x66);
+      case 5:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x44);
+        emit(0x00);
+        emit(0x00);
+        return;
+      case 7:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x80);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        return;
+      default:
+      case 11:
+        emit(0x66);
+        n--;
+      case 10:
+        emit(0x66);
+        n--;
+      case 9:
+        emit(0x66);
+        n--;
+      case 8:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x84);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        n -= 8;
+    }
   }
 }
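(A hedged model of the rewritten loop: requests of 1..7 bytes use one direct
encoding; anything longer consumes up to three 0x66 prefixes plus the 8-byte form,
at most 11 bytes, per iteration. E.g. Nop(20) emits an 11-byte NOP followed by a
9-byte one. Illustrative helper, not V8 code:)

    int NopIterations(int n) {
      int iterations = 0;
      while (n > 0) {
        ++iterations;
        if (n <= 7) break;         // one direct 1..7-byte encoding
        n -= (n >= 11) ? 11 : n;   // one long NOP covers 8..11 bytes
      }
      return iterations;
    }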
 
@@ -1937,7 +1933,7 @@
   }
   EnsureSpace ensure_space(this);
   ASSERT(is_uint4(cc));
-  if (reg.code() > 3) {  // Use x64 byte registers, where different.
+  if (!reg.is_byte_register()) {  // Use x64 byte registers, where different.
     emit_rex_32(reg);
   }
   emit(0x0F);
@@ -2002,7 +1998,7 @@
     emit(0x84);
     emit_modrm(src, dst);
   } else {
-    if (dst.code() > 3 || src.code() > 3) {
+    if (!dst.is_byte_register() || !src.is_byte_register()) {
       // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
       emit_rex_32(dst, src);
     }
@@ -2019,7 +2015,7 @@
     emit(0xA8);
     emit(mask.value_);  // Low byte emitted.
   } else {
-    if (reg.code() > 3) {
+    if (!reg.is_byte_register()) {
       // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
       emit_rex_32(reg);
     }
@@ -2042,7 +2038,7 @@
 
 void Assembler::testb(const Operand& op, Register reg) {
   EnsureSpace ensure_space(this);
-  if (reg.code() > 3) {
+  if (!reg.is_byte_register()) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(reg, op);
   } else {
@@ -2299,6 +2295,13 @@
 }
 
 
+void Assembler::fptan() {
+  EnsureSpace ensure_space(this);
+  emit(0xD9);
+  emit(0xF2);
+}
+
+
 void Assembler::fyl2x() {
   EnsureSpace ensure_space(this);
   emit(0xD9);
@@ -2306,6 +2309,27 @@
 }
 
 
+void Assembler::f2xm1() {
+  EnsureSpace ensure_space(this);
+  emit(0xD9);
+  emit(0xF0);
+}
+
+
+void Assembler::fscale() {
+  EnsureSpace ensure_space(this);
+  emit(0xD9);
+  emit(0xFD);
+}
+
+
+void Assembler::fninit() {
+  EnsureSpace ensure_space(this);
+  emit(0xDB);
+  emit(0xE3);
+}
+
+
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
   emit_farith(0xDC, 0xC0, i);
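(The new x87 emitters above, fptan, f2xm1, fscale, and fninit, are the building
blocks for transcendental code such as tan and pow stubs. A hedged sketch of the
classic 2^x decomposition that f2xm1 (2^frac - 1, valid only for |frac| <= 1) and
fscale (scale by 2^int) enable:)

    #include <cmath>

    // 2^x = 2^int(x) * 2^frac(x); f2xm1 handles the fractional factor,
    // fscale the integral one.
    double Exp2Decomposed(double x) {
      double int_part;
      double frac = std::modf(x, &int_part);
      return std::ldexp(std::exp2(frac), static_cast<int>(int_part));
    }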
@@ -2565,7 +2589,8 @@
 
 
 void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
-  ASSERT(is_uint2(imm8));
+  ASSERT(CpuFeatures::IsSupported(SSE4_1));
+  ASSERT(is_uint8(imm8));
   EnsureSpace ensure_space(this);
   emit(0x66);
   emit_optional_rex_32(dst, src);
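(Background on the relaxed assert: EXTRACTPS encodes a full imm8, but the hardware
uses only its two low bits to pick one of the four packed floats. Hedged
illustration:)

    int ExtractpsLane(uint8_t imm8) {
      return imm8 & 0x3;  // only imm8[1:0] select the source dword
    }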
@@ -2983,7 +3008,7 @@
       return;
     }
   }
-  RelocInfo rinfo(pc_, rmode, data);
+  RelocInfo rinfo(pc_, rmode, data, NULL);
   reloc_info_writer.Write(&rinfo);
 }
 
@@ -3020,8 +3045,6 @@
   return (1 << rmode_) & kApplyMask;
 }
 
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 2e373fa..60b29e6 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 // A lightweight X64 Assembler.
 
@@ -45,22 +45,22 @@
 // Utility functions
 
 // Test whether a 64-bit value is in a specific range.
-static inline bool is_uint32(int64_t x) {
+inline bool is_uint32(int64_t x) {
   static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
   return static_cast<uint64_t>(x) <= kMaxUInt32;
 }
 
-static inline bool is_int32(int64_t x) {
+inline bool is_int32(int64_t x) {
   static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
   return is_uint32(x - kMinInt32);
 }
 
-static inline bool uint_is_int32(uint64_t x) {
+inline bool uint_is_int32(uint64_t x) {
   static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
   return x <= kMaxInt32;
 }
 
-static inline bool is_uint32(uint64_t x) {
+inline bool is_uint32(uint64_t x) {
   static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
   return x <= kMaxUInt32;
 }
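(Why drop `static` here: at namespace scope, `static inline` gives each translation
unit its own internal-linkage copy of these header-defined helpers, while plain
`inline` keeps external linkage so the linker folds them into a single definition.
A minimal illustration of the preferred form; fits_in_byte is illustrative, not a
V8 helper:)

    // One program-wide definition, mergeable across every TU that includes it.
    inline bool fits_in_byte(int64_t x) { return 0 <= x && x < 256; }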
@@ -131,6 +131,8 @@
   }
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
+  // rax, rbx, rcx, and rdx are byte registers; the rest are not.
+  bool is_byte_register() const { return code_ <= 3; }
   int code() const {
     ASSERT(is_valid());
     return code_;
@@ -159,23 +161,41 @@
   static const int kAllocationIndexByRegisterCode[kNumRegisters];
 };
 
-const Register rax = { 0 };
-const Register rcx = { 1 };
-const Register rdx = { 2 };
-const Register rbx = { 3 };
-const Register rsp = { 4 };
-const Register rbp = { 5 };
-const Register rsi = { 6 };
-const Register rdi = { 7 };
-const Register r8 = { 8 };
-const Register r9 = { 9 };
-const Register r10 = { 10 };
-const Register r11 = { 11 };
-const Register r12 = { 12 };
-const Register r13 = { 13 };
-const Register r14 = { 14 };
-const Register r15 = { 15 };
-const Register no_reg = { -1 };
+const int kRegister_rax_Code = 0;
+const int kRegister_rcx_Code = 1;
+const int kRegister_rdx_Code = 2;
+const int kRegister_rbx_Code = 3;
+const int kRegister_rsp_Code = 4;
+const int kRegister_rbp_Code = 5;
+const int kRegister_rsi_Code = 6;
+const int kRegister_rdi_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_r11_Code = 11;
+const int kRegister_r12_Code = 12;
+const int kRegister_r13_Code = 13;
+const int kRegister_r14_Code = 14;
+const int kRegister_r15_Code = 15;
+const int kRegister_no_reg_Code = -1;
+
+const Register rax = { kRegister_rax_Code };
+const Register rcx = { kRegister_rcx_Code };
+const Register rdx = { kRegister_rdx_Code };
+const Register rbx = { kRegister_rbx_Code };
+const Register rsp = { kRegister_rsp_Code };
+const Register rbp = { kRegister_rbp_Code };
+const Register rsi = { kRegister_rsi_Code };
+const Register rdi = { kRegister_rdi_Code };
+const Register r8 = { kRegister_r8_Code };
+const Register r9 = { kRegister_r9_Code };
+const Register r10 = { kRegister_r10_Code };
+const Register r11 = { kRegister_r11_Code };
+const Register r12 = { kRegister_r12_Code };
+const Register r13 = { kRegister_r13_Code };
+const Register r14 = { kRegister_r14_Code };
+const Register r15 = { kRegister_r15_Code };
+const Register no_reg = { kRegister_no_reg_Code };
 
 
 struct XMMRegister {
@@ -215,6 +235,12 @@
     return names[index];
   }
 
+  static XMMRegister from_code(int code) {
+    ASSERT(code >= 0);
+    ASSERT(code < kNumRegisters);
+    XMMRegister r = { code };
+    return r;
+  }
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(XMMRegister reg) const { return code_ == reg.code_; }
   int code() const {
@@ -551,8 +577,8 @@
 
   // This sets the branch destination (which is in the instruction on x64).
   // This is for calls and branches within generated code.
-  inline static void set_target_at(Address instruction_payload,
-                                   Address target) {
+  inline static void deserialization_set_special_target_at(
+      Address instruction_payload, Address target) {
     set_target_address_at(instruction_payload, target);
   }
 
@@ -565,8 +591,7 @@
 
   inline Handle<Object> code_target_object_handle_at(Address pc);
   // Number of bytes taken up by the branch target in the code.
-  static const int kCallTargetSize = 4;      // Use 32-bit displacement.
-  static const int kExternalTargetSize = 8;  // Use 64-bit absolute.
+  static const int kSpecialTargetSize = 4;  // Use 32-bit displacement.
   // Distance between the address of the code target in the call instruction
   // and the return address pushed on the stack.
   static const int kCallTargetAddressOffset = 4;  // Use 32-bit displacement.
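(How the renamed constant is consumed; compare RelocInfo::target_address_size in
assembler-x64-inl.h earlier in this change. Sketch only:)

    int TargetAddressSize(bool is_coded_specially) {
      // Specially coded targets are 32-bit displacements; everything else
      // is stored as a full 64-bit pointer.
      return is_coded_specially ? 4 /* kSpecialTargetSize */
                                : 8 /* kPointerSize on x64 */;
    }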
@@ -630,6 +655,7 @@
   // possible to align the pc offset to a multiple
   // of m, where m must be a power of 2.
   void Align(int m);
+  void Nop(int bytes = 1);
   // Aligns code to something that's optimal for a jump target for the platform.
   void CodeTargetAlign();
 
@@ -643,7 +669,6 @@
   void push_imm32(int32_t imm32);
   void push(Register src);
   void push(const Operand& src);
-  void push(Handle<Object> handle);
 
   void pop(Register dst);
   void pop(const Operand& dst);
@@ -735,6 +760,10 @@
     immediate_arithmetic_op_32(0x0, dst, src);
   }
 
+  void addl(const Operand& dst, Register src) {
+    arithmetic_op_32(0x01, src, dst);
+  }
+
   void addq(Register dst, Register src) {
     arithmetic_op(0x03, dst, src);
   }
@@ -1145,7 +1174,6 @@
   void hlt();
   void int3();
   void nop();
-  void nop(int n);
   void rdtsc();
   void ret(int imm16);
   void setcc(Condition cc, Register reg);
@@ -1266,7 +1294,11 @@
 
   void fsin();
   void fcos();
+  void fptan();
   void fyl2x();
+  void f2xm1();
+  void fscale();
+  void fninit();
 
   void frndint();
 
@@ -1388,19 +1420,20 @@
     return static_cast<int>(reloc_info_writer.pos() - pc_);
   }
 
-  static bool IsNop(Address addr) { return *addr == 0x90; }
+  static bool IsNop(Address addr);
 
   // Avoid overflows for displacements etc.
   static const int kMaximalBufferSize = 512*MB;
   static const int kMinimalBufferSize = 4*KB;
 
+  byte byte_at(int pos)  { return buffer_[pos]; }
+  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+
  protected:
   bool emit_debug_code() const { return emit_debug_code_; }
 
  private:
   byte* addr_at(int pos)  { return buffer_ + pos; }
-  byte byte_at(int pos)  { return buffer_[pos]; }
-  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
     return *reinterpret_cast<uint32_t*>(addr_at(pos));
   }
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index db06909..4e037ff 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -73,310 +73,295 @@
 }
 
 
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool count_constructions) {
   // ----------- S t a t e -------------
   //  -- rax: number of arguments
   //  -- rdi: constructor function
   // -----------------------------------
 
-  Label non_function_call;
-  // Check that function is not a smi.
-  __ JumpIfSmi(rdi, &non_function_call);
-  // Check that function is a JSFunction.
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &non_function_call);
-
-  // Jump to the function-specific construct stub.
-  __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
-  __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
-  __ jmp(rbx);
-
-  // rdi: called object
-  // rax: number of arguments
-  __ bind(&non_function_call);
-  // Set expected number of arguments to zero (not changing rax).
-  __ Set(rbx, 0);
-  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-  __ SetCallKind(rcx, CALL_AS_METHOD);
-  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool count_constructions) {
   // Should never count constructions for api objects.
   ASSERT(!is_api_function || !count_constructions);
 
-    // Enter a construct frame.
-  __ EnterConstructFrame();
+  // Enter a construct frame.
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-  // Store a smi-tagged arguments count on the stack.
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+    // Store a smi-tagged arguments count on the stack.
+    __ Integer32ToSmi(rax, rax);
+    __ push(rax);
 
-  // Push the function to invoke on the stack.
-  __ push(rdi);
+    // Push the function to invoke on the stack.
+    __ push(rdi);
 
-  // Try to allocate the object without transitioning into C code. If any of the
-  // preconditions is not met, the code bails out to the runtime call.
-  Label rt_call, allocated;
-  if (FLAG_inline_new) {
-    Label undo_allocation;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    Label rt_call, allocated;
+    if (FLAG_inline_new) {
+      Label undo_allocation;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(masm->isolate());
-    __ movq(kScratchRegister, debug_step_in_fp);
-    __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
-    __ j(not_equal, &rt_call);
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(masm->isolate());
+      __ movq(kScratchRegister, debug_step_in_fp);
+      __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+      __ j(not_equal, &rt_call);
 #endif
 
-    // Verified that the constructor is a JSFunction.
-    // Load the initial map and verify that it is in fact a map.
-    // rdi: constructor
-    __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
-    // Will both indicate a NULL and a Smi
-    STATIC_ASSERT(kSmiTag == 0);
-    __ JumpIfSmi(rax, &rt_call);
-    // rdi: constructor
-    // rax: initial map (if proven valid below)
-    __ CmpObjectType(rax, MAP_TYPE, rbx);
-    __ j(not_equal, &rt_call);
+      // Verified that the constructor is a JSFunction.
+      // Load the initial map and verify that it is in fact a map.
+      // rdi: constructor
+      __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+      // Will both indicate a NULL and a Smi
+      ASSERT(kSmiTag == 0);
+      __ JumpIfSmi(rax, &rt_call);
+      // rdi: constructor
+      // rax: initial map (if proven valid below)
+      __ CmpObjectType(rax, MAP_TYPE, rbx);
+      __ j(not_equal, &rt_call);
 
-    // Check that the constructor is not constructing a JSFunction (see comments
-    // in Runtime_NewObject in runtime.cc). In which case the initial map's
-    // instance type would be JS_FUNCTION_TYPE.
-    // rdi: constructor
-    // rax: initial map
-    __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
-    __ j(equal, &rt_call);
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc). In which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // rdi: constructor
+      // rax: initial map
+      __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+      __ j(equal, &rt_call);
 
-    if (count_constructions) {
-      Label allocate;
-      // Decrease generous allocation count.
-      __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-      __ decb(FieldOperand(rcx, SharedFunctionInfo::kConstructionCountOffset));
-      __ j(not_zero, &allocate);
-
-      __ push(rax);
-      __ push(rdi);
-
-      __ push(rdi);  // constructor
-      // The call will replace the stub, so the countdown is only done once.
-      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-      __ pop(rdi);
-      __ pop(rax);
-
-      __ bind(&allocate);
-    }
-
-    // Now allocate the JSObject on the heap.
-    __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
-    __ shl(rdi, Immediate(kPointerSizeLog2));
-    // rdi: size of new object
-    __ AllocateInNewSpace(rdi,
-                          rbx,
-                          rdi,
-                          no_reg,
-                          &rt_call,
-                          NO_ALLOCATION_FLAGS);
-    // Allocated the JSObject, now initialize the fields.
-    // rax: initial map
-    // rbx: JSObject (not HeapObject tagged - the actual address).
-    // rdi: start of next object
-    __ movq(Operand(rbx, JSObject::kMapOffset), rax);
-    __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
-    __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
-    __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
-    // Set extra fields in the newly allocated object.
-    // rax: initial map
-    // rbx: JSObject
-    // rdi: start of next object
-    { Label loop, entry;
-      // To allow for truncation.
       if (count_constructions) {
-        __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
-      } else {
-        __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+        Label allocate;
+        // Decrease generous allocation count.
+        __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+        __ decb(FieldOperand(rcx,
+                             SharedFunctionInfo::kConstructionCountOffset));
+        __ j(not_zero, &allocate);
+
+        __ push(rax);
+        __ push(rdi);
+
+        __ push(rdi);  // constructor
+        // The call will replace the stub, so the countdown is only done once.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ pop(rdi);
+        __ pop(rax);
+
+        __ bind(&allocate);
       }
+
+      // Now allocate the JSObject on the heap.
+      __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+      __ shl(rdi, Immediate(kPointerSizeLog2));
+      // rdi: size of new object
+      __ AllocateInNewSpace(rdi,
+                            rbx,
+                            rdi,
+                            no_reg,
+                            &rt_call,
+                            NO_ALLOCATION_FLAGS);
+      // Allocated the JSObject, now initialize the fields.
+      // rax: initial map
+      // rbx: JSObject (not HeapObject tagged - the actual address).
+      // rdi: start of next object
+      __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+      __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+      __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+      __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+      // Set extra fields in the newly allocated object.
+      // rax: initial map
+      // rbx: JSObject
+      // rdi: start of next object
       __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ movq(Operand(rcx, 0), rdx);
-      __ addq(rcx, Immediate(kPointerSize));
-      __ bind(&entry);
-      __ cmpq(rcx, rdi);
-      __ j(less, &loop);
-    }
-
-    // Add the object tag to make the JSObject real, so that we can continue and
-    // jump into the continuation code at any time from now on. Any failures
-    // need to undo the allocation, so that the heap is in a consistent state
-    // and verifiable.
-    // rax: initial map
-    // rbx: JSObject
-    // rdi: start of next object
-    __ or_(rbx, Immediate(kHeapObjectTag));
-
-    // Check if a non-empty properties array is needed.
-    // Allocate and initialize a FixedArray if it is.
-    // rax: initial map
-    // rbx: JSObject
-    // rdi: start of next object
-    // Calculate total properties described map.
-    __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
-    __ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
-    __ addq(rdx, rcx);
-    // Calculate unused properties past the end of the in-object properties.
-    __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
-    __ subq(rdx, rcx);
-    // Done if no extra properties are to be allocated.
-    __ j(zero, &allocated);
-    __ Assert(positive, "Property allocation count failed.");
-
-    // Scale the number of elements by pointer size and add the header for
-    // FixedArrays to the start of the next object calculation from above.
-    // rbx: JSObject
-    // rdi: start of next object (will be start of FixedArray)
-    // rdx: number of elements in properties array
-    __ AllocateInNewSpace(FixedArray::kHeaderSize,
-                          times_pointer_size,
-                          rdx,
-                          rdi,
-                          rax,
-                          no_reg,
-                          &undo_allocation,
-                          RESULT_CONTAINS_TOP);
-
-    // Initialize the FixedArray.
-    // rbx: JSObject
-    // rdi: FixedArray
-    // rdx: number of elements
-    // rax: start of next object
-    __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
-    __ movq(Operand(rdi, HeapObject::kMapOffset), rcx);  // setup the map
-    __ Integer32ToSmi(rdx, rdx);
-    __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx);  // and length
-
-    // Initialize the fields to undefined.
-    // rbx: JSObject
-    // rdi: FixedArray
-    // rax: start of next object
-    // rdx: number of elements
-    { Label loop, entry;
       __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
-      __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ movq(Operand(rcx, 0), rdx);
-      __ addq(rcx, Immediate(kPointerSize));
-      __ bind(&entry);
-      __ cmpq(rcx, rax);
-      __ j(below, &loop);
+      if (count_constructions) {
+        __ movzxbq(rsi,
+                   FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+        __ lea(rsi,
+               Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
+        // rsi: offset of first field after pre-allocated fields
+        if (FLAG_debug_code) {
+          __ cmpq(rsi, rdi);
+          __ Assert(less_equal,
+                    "Unexpected number of pre-allocated property fields.");
+        }
+        __ InitializeFieldsWithFiller(rcx, rsi, rdx);
+        __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
+      }
+      __ InitializeFieldsWithFiller(rcx, rdi, rdx);
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      // rax: initial map
+      // rbx: JSObject
+      // rdi: start of next object
+      __ or_(rbx, Immediate(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed.
+      // Allocate and initialize a FixedArray if it is.
+      // rax: initial map
+      // rbx: JSObject
+      // rdi: start of next object
+      // Calculate the total number of properties described by the map.
+      __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+      __ movzxbq(rcx,
+                 FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+      __ addq(rdx, rcx);
+      // Calculate unused properties past the end of the in-object properties.
+      __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+      __ subq(rdx, rcx);
+      // Done if no extra properties are to be allocated.
+      __ j(zero, &allocated);
+      __ Assert(positive, "Property allocation count failed.");
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // rbx: JSObject
+      // rdi: start of next object (will be start of FixedArray)
+      // rdx: number of elements in properties array
+      __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                            times_pointer_size,
+                            rdx,
+                            rdi,
+                            rax,
+                            no_reg,
+                            &undo_allocation,
+                            RESULT_CONTAINS_TOP);
+
+      // Initialize the FixedArray.
+      // rbx: JSObject
+      // rdi: FixedArray
+      // rdx: number of elements
+      // rax: start of next object
+      __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
+      __ movq(Operand(rdi, HeapObject::kMapOffset), rcx);  // set up the map
+      __ Integer32ToSmi(rdx, rdx);
+      __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx);  // and length
+
+      // Initialize the fields to undefined.
+      // rbx: JSObject
+      // rdi: FixedArray
+      // rax: start of next object
+      // rdx: number of elements
+      { Label loop, entry;
+        __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+        __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+        __ jmp(&entry);
+        __ bind(&loop);
+        __ movq(Operand(rcx, 0), rdx);
+        __ addq(rcx, Immediate(kPointerSize));
+        __ bind(&entry);
+        __ cmpq(rcx, rax);
+        __ j(below, &loop);
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject
+      // rbx: JSObject
+      // rdi: FixedArray
+      __ or_(rdi, Immediate(kHeapObjectTag));  // add the heap tag
+      __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+
+
+      // Continue with JSObject being successfully allocated
+      // rbx: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated objects unused properties.
+      // rbx: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(rbx);
     }
 
-    // Store the initialized FixedArray into the properties field of
-    // the JSObject
-    // rbx: JSObject
-    // rdi: FixedArray
-    __ or_(rdi, Immediate(kHeapObjectTag));  // add the heap tag
-    __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+    // Allocate the new receiver object using the runtime call.
+    // rdi: function (constructor)
+    __ bind(&rt_call);
+    // Must restore rdi (constructor) before calling runtime.
+    __ movq(rdi, Operand(rsp, 0));
+    __ push(rdi);
+    __ CallRuntime(Runtime::kNewObject, 1);
+    __ movq(rbx, rax);  // store result in rbx
 
+    // New object allocated.
+    // rbx: newly allocated object
+    __ bind(&allocated);
+    // Retrieve the function from the stack.
+    __ pop(rdi);
 
-    // Continue with JSObject being successfully allocated
-    // rbx: JSObject
-    __ jmp(&allocated);
+    // Retrieve smi-tagged arguments count from the stack.
+    __ movq(rax, Operand(rsp, 0));
+    __ SmiToInteger32(rax, rax);
 
-    // Undo the setting of the new top so that the heap is verifiable. For
-    // example, the map's unused properties potentially do not match the
-    // allocated objects unused properties.
-    // rbx: JSObject (previous new top)
-    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(rbx);
+    // Push the allocated receiver to the stack. We need two copies
+    // because we may have to return the original one and the calling
+    // conventions dictate that the called function pops the receiver.
+    __ push(rbx);
+    __ push(rbx);
+
+    // Set up pointer to last argument.
+    __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    Label loop, entry;
+    __ movq(rcx, rax);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ push(Operand(rbx, rcx, times_pointer_size, 0));
+    __ bind(&entry);
+    __ decq(rcx);
+    __ j(greater_equal, &loop);
+
+    // Call the function.
+    if (is_api_function) {
+      __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      ParameterCount expected(0);
+      __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+                    CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+    } else {
+      ParameterCount actual(rax);
+      __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+
+    // Store offset of return address for deoptimizer.
+    if (!is_api_function && !count_constructions) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    __ JumpIfSmi(rax, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+    __ j(above_equal, &exit);
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ movq(rax, Operand(rsp, 0));
+
+    // Restore the arguments count and leave the construct frame.
+    __ bind(&exit);
+    __ movq(rbx, Operand(rsp, kPointerSize));  // Get arguments count.
+
+    // Leave construct frame.
   }
 
-  // Allocate the new receiver object using the runtime call.
-  // rdi: function (constructor)
-  __ bind(&rt_call);
-  // Must restore rdi (constructor) before calling runtime.
-  __ movq(rdi, Operand(rsp, 0));
-  __ push(rdi);
-  __ CallRuntime(Runtime::kNewObject, 1);
-  __ movq(rbx, rax);  // store result in rbx
-
-  // New object allocated.
-  // rbx: newly allocated object
-  __ bind(&allocated);
-  // Retrieve the function from the stack.
-  __ pop(rdi);
-
-  // Retrieve smi-tagged arguments count from the stack.
-  __ movq(rax, Operand(rsp, 0));
-  __ SmiToInteger32(rax, rax);
-
-  // Push the allocated receiver to the stack. We need two copies
-  // because we may have to return the original one and the calling
-  // conventions dictate that the called function pops the receiver.
-  __ push(rbx);
-  __ push(rbx);
-
-  // Setup pointer to last argument.
-  __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
-  // Copy arguments and receiver to the expression stack.
-  Label loop, entry;
-  __ movq(rcx, rax);
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ push(Operand(rbx, rcx, times_pointer_size, 0));
-  __ bind(&entry);
-  __ decq(rcx);
-  __ j(greater_equal, &loop);
-
-  // Call the function.
-  if (is_api_function) {
-    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-    Handle<Code> code =
-        masm->isolate()->builtins()->HandleApiCallConstruct();
-    ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
-                  CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-  } else {
-    ParameterCount actual(rax);
-    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Restore context from the frame.
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
-  // If the result is an object (in the ECMA sense), we should get rid
-  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-  // on page 74.
-  Label use_receiver, exit;
-  // If the result is a smi, it is *not* an object in the ECMA sense.
-  __ JumpIfSmi(rax, &use_receiver);
-
-  // If the type of the result (stored in its map) is less than
-  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
-  __ j(above_equal, &exit);
-
-  // Throw away the result of the constructor invocation and use the
-  // on-stack receiver as the result.
-  __ bind(&use_receiver);
-  __ movq(rax, Operand(rsp, 0));
-
-  // Restore the arguments count and leave the construct frame.
-  __ bind(&exit);
-  __ movq(rbx, Operand(rsp, kPointerSize));  // get arguments count
-  __ LeaveConstructFrame();
-
   // Remove caller arguments from the stack and return.
   __ pop(rcx);
   SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
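(The tail of the stub above implements ECMA-262 section 13.2.2-7: a constructor's
return value replaces the freshly allocated receiver only when it is an object in
the spec sense. A hedged restatement, where IsSpecObject() stands in for the smi
and instance-type checks performed above:)

    Object* ConstructStubResult(Object* call_result, Object* receiver) {
      return call_result->IsSpecObject() ? call_result : receiver;
    }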
@@ -413,104 +398,108 @@
   // - Object*** argv
   // (see Handle::Invoke in execution.cc).
 
-  // Platform specific argument handling. After this, the stack contains
-  // an internal frame and the pushed function and receiver, and
-  // register rax and rbx holds the argument count and argument array,
-  // while rdi holds the function pointer and rsi the context.
+  // Open a C++ scope for the FrameScope.
+  {
+    // Platform specific argument handling. After this, the stack contains
+    // an internal frame and the pushed function and receiver, and
+    // register rax and rbx holds the argument count and argument array,
+    // while rdi holds the function pointer and rsi the context.
+
 #ifdef _WIN64
-  // MSVC parameters in:
-  // rcx : entry (ignored)
-  // rdx : function
-  // r8 : receiver
-  // r9 : argc
-  // [rsp+0x20] : argv
+    // MSVC parameters in:
+    // rcx : entry (ignored)
+    // rdx : function
+    // r8 : receiver
+    // r9 : argc
+    // [rsp+0x20] : argv
 
-  // Clear the context before we push it when entering the JS frame.
-  __ Set(rsi, 0);
-  __ EnterInternalFrame();
+    // Clear the context before we push it when entering the internal frame.
+    __ Set(rsi, 0);
+    // Enter an internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Load the function context into rsi.
-  __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+    // Load the function context into rsi.
+    __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
 
-  // Push the function and the receiver onto the stack.
-  __ push(rdx);
-  __ push(r8);
+    // Push the function and the receiver onto the stack.
+    __ push(rdx);
+    __ push(r8);
 
-  // Load the number of arguments and setup pointer to the arguments.
-  __ movq(rax, r9);
-  // Load the previous frame pointer to access C argument on stack
-  __ movq(kScratchRegister, Operand(rbp, 0));
-  __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
-  // Load the function pointer into rdi.
-  __ movq(rdi, rdx);
+    // Load the number of arguments and set up a pointer to the arguments.
+    __ movq(rax, r9);
+    // Load the previous frame pointer to access C arguments on the stack.
+    __ movq(kScratchRegister, Operand(rbp, 0));
+    __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+    // Load the function pointer into rdi.
+    __ movq(rdi, rdx);
 #else  // _WIN64
-  // GCC parameters in:
-  // rdi : entry (ignored)
-  // rsi : function
-  // rdx : receiver
-  // rcx : argc
-  // r8  : argv
+    // GCC parameters in:
+    // rdi : entry (ignored)
+    // rsi : function
+    // rdx : receiver
+    // rcx : argc
+    // r8  : argv
 
-  __ movq(rdi, rsi);
-  // rdi : function
+    __ movq(rdi, rsi);
+    // rdi : function
 
-  // Clear the context before we push it when entering the JS frame.
-  __ Set(rsi, 0);
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+    // Clear the context before we push it when entering the internal frame.
+    __ Set(rsi, 0);
+    // Enter an internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the function and receiver and setup the context.
-  __ push(rdi);
-  __ push(rdx);
-  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+    // Push the function and receiver and set up the context.
+    __ push(rdi);
+    __ push(rdx);
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-  // Load the number of arguments and setup pointer to the arguments.
-  __ movq(rax, rcx);
-  __ movq(rbx, r8);
+    // Load the number of arguments and set up a pointer to the arguments.
+    __ movq(rax, rcx);
+    __ movq(rbx, r8);
 #endif  // _WIN64
 
-  // Current stack contents:
-  // [rsp + 2 * kPointerSize ... ]: Internal frame
-  // [rsp + kPointerSize]         : function
-  // [rsp]                        : receiver
-  // Current register contents:
-  // rax : argc
-  // rbx : argv
-  // rsi : context
-  // rdi : function
+    // Current stack contents:
+    // [rsp + 2 * kPointerSize ... ]: Internal frame
+    // [rsp + kPointerSize]         : function
+    // [rsp]                        : receiver
+    // Current register contents:
+    // rax : argc
+    // rbx : argv
+    // rsi : context
+    // rdi : function
 
-  // Copy arguments to the stack in a loop.
-  // Register rbx points to array of pointers to handle locations.
-  // Push the values of these handles.
-  Label loop, entry;
-  __ Set(rcx, 0);  // Set loop variable to 0.
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
-  __ push(Operand(kScratchRegister, 0));  // dereference handle
-  __ addq(rcx, Immediate(1));
-  __ bind(&entry);
-  __ cmpq(rcx, rax);
-  __ j(not_equal, &loop);
+    // Copy arguments to the stack in a loop.
+    // Register rbx points to array of pointers to handle locations.
+    // Push the values of these handles.
+    Label loop, entry;
+    __ Set(rcx, 0);  // Set loop variable to 0.
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+    __ push(Operand(kScratchRegister, 0));  // dereference handle
+    __ addq(rcx, Immediate(1));
+    __ bind(&entry);
+    __ cmpq(rcx, rax);
+    __ j(not_equal, &loop);
 
-  // Invoke the code.
-  if (is_construct) {
-    // Expects rdi to hold function pointer.
-    __ Call(masm->isolate()->builtins()->JSConstructCall(),
-            RelocInfo::CODE_TARGET);
-  } else {
-    ParameterCount actual(rax);
-    // Function must be in rdi.
-    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the code.
+    if (is_construct) {
+      // Expects rdi to hold function pointer.
+      CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+      __ CallStub(&stub);
+    } else {
+      ParameterCount actual(rax);
+      // Function must be in rdi.
+      __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+    // Exit the internal frame. Notice that this also removes the empty
+    // context and the function left on the stack by the code
+    // invocation.
   }
 
-  // Exit the JS frame. Notice that this also removes the empty
-  // context and the function left on the stack by the code
-  // invocation.
-  __ LeaveInternalFrame();
   // TODO(X64): Is argument correct? Is there a receiver to remove?
-  __ ret(1 * kPointerSize);  // remove receiver
+  __ ret(1 * kPointerSize);  // Remove receiver.
 }
 
 
@@ -526,23 +515,24 @@
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push a copy of the function onto the stack.
-  __ push(rdi);
-  // Push call kind information.
-  __ push(rcx);
+    // Push a copy of the function onto the stack.
+    __ push(rdi);
+    // Push call kind information.
+    __ push(rcx);
 
-  __ push(rdi);  // Function is also the parameter to the runtime call.
-  __ CallRuntime(Runtime::kLazyCompile, 1);
+    __ push(rdi);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kLazyCompile, 1);
 
-  // Restore call kind information.
-  __ pop(rcx);
-  // Restore receiver.
-  __ pop(rdi);
+    // Restore call kind information.
+    __ pop(rcx);
+    // Restore receiver.
+    __ pop(rdi);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -552,23 +542,24 @@
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push a copy of the function onto the stack.
-  __ push(rdi);
-  // Push call kind information.
-  __ push(rcx);
+    // Push a copy of the function onto the stack.
+    __ push(rdi);
+    // Push call kind information.
+    __ push(rcx);
 
-  __ push(rdi);  // Function is also the parameter to the runtime call.
-  __ CallRuntime(Runtime::kLazyRecompile, 1);
+    __ push(rdi);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kLazyRecompile, 1);
 
-  // Restore call kind information.
-  __ pop(rcx);
-  // Restore function.
-  __ pop(rdi);
+    // Restore call kind information.
+    __ pop(rcx);
+    // Restore function.
+    __ pop(rdi);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -579,14 +570,15 @@
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Pass the deoptimization type to the runtime system.
-  __ Push(Smi::FromInt(static_cast<int>(type)));
+    // Pass the deoptimization type to the runtime system.
+    __ Push(Smi::FromInt(static_cast<int>(type)));
 
-  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+    // Tear down internal frame.
+  }
 
   // Get the full codegen state from the stack and untag it.
   __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
@@ -623,9 +615,10 @@
   // the registers without worrying about which of them contain
   // pointers. This seems a bit fragile.
   __ Pushad();
-  __ EnterInternalFrame();
-  __ CallRuntime(Runtime::kNotifyOSR, 0);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyOSR, 0);
+  }
   __ Popad();
   __ ret(0);
 }
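(This hunk is the simplest instance of the change repeated throughout this file: a
matched EnterInternalFrame()/LeaveInternalFrame() pair becomes a C++ block owning a
FrameScope, whose constructor emits the frame entry and whose destructor emits the
teardown when the block closes. A sketch, assuming FrameScope behaves as these
conversions imply:)

    {
      FrameScope scope(masm, StackFrame::INTERNAL);  // frame entered here
      __ CallRuntime(Runtime::kNotifyOSR, 0);
    }  // frame left on scope exit, so entry and exit can no longer be mismatched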
@@ -647,7 +640,7 @@
     __ testq(rax, rax);
     __ j(not_zero, &done);
     __ pop(rbx);
-    __ Push(FACTORY->undefined_value());
+    __ Push(masm->isolate()->factory()->undefined_value());
     __ push(rbx);
     __ incq(rax);
     __ bind(&done);
@@ -695,18 +688,21 @@
     __ j(above_equal, &shift_arguments);
 
     __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ Integer32ToSmi(rax, rax);
-    __ push(rax);
+    {
+      // Enter an internal frame in order to preserve argument count.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Integer32ToSmi(rax, rax);
+      __ push(rax);
 
-    __ push(rbx);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ movq(rbx, rax);
-    __ Set(rdx, 0);  // indicate regular JS_FUNCTION
+      __ push(rbx);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ movq(rbx, rax);
+      __ Set(rdx, 0);  // indicate regular JS_FUNCTION
 
-    __ pop(rax);
-    __ SmiToInteger32(rax, rax);
-    __ LeaveInternalFrame();
+      __ pop(rax);
+      __ SmiToInteger32(rax, rax);
+    }
+
     // Restore the function to rdi.
     __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
     __ jmp(&patch_receiver, Label::kNear);
@@ -807,168 +803,166 @@
   //  rsp+8: arguments
   // rsp+16: receiver ("this")
   // rsp+24: function
-  __ EnterInternalFrame();
-  // Stack frame:
-  //    rbp: Old base pointer
-  // rbp[1]: return address
-  // rbp[2]: function arguments
-  // rbp[3]: receiver
-  // rbp[4]: function
-  static const int kArgumentsOffset = 2 * kPointerSize;
-  static const int kReceiverOffset = 3 * kPointerSize;
-  static const int kFunctionOffset = 4 * kPointerSize;
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Stack frame:
+    //    rbp: Old base pointer
+    // rbp[1]: return address
+    // rbp[2]: function arguments
+    // rbp[3]: receiver
+    // rbp[4]: function
+    static const int kArgumentsOffset = 2 * kPointerSize;
+    static const int kReceiverOffset = 3 * kPointerSize;
+    static const int kFunctionOffset = 4 * kPointerSize;
 
-  __ push(Operand(rbp, kFunctionOffset));
-  __ push(Operand(rbp, kArgumentsOffset));
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+    __ push(Operand(rbp, kFunctionOffset));
+    __ push(Operand(rbp, kArgumentsOffset));
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
-  __ movq(rcx, rsp);
-  // Make rcx the space we have left. The stack might already be overflowed
-  // here which will cause rcx to become negative.
-  __ subq(rcx, kScratchRegister);
-  // Make rdx the space we need for the array when it is unrolled onto the
-  // stack.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
-  // Check if the arguments will overflow the stack.
-  __ cmpq(rcx, rdx);
-  __ j(greater, &okay);  // Signed comparison.
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+    __ movq(rcx, rsp);
+    // Make rcx the space we have left. The stack might already be overflowed
+    // here which will cause rcx to become negative.
+    __ subq(rcx, kScratchRegister);
+    // Make rdx the space we need for the array when it is unrolled onto the
+    // stack.
+    __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+    // Check if the arguments will overflow the stack.
+    __ cmpq(rcx, rdx);
+    __ j(greater, &okay);  // Signed comparison.
 
-  // Out of stack space.
-  __ push(Operand(rbp, kFunctionOffset));
-  __ push(rax);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  __ bind(&okay);
-  // End of stack check.
+    // Out of stack space.
+    __ push(Operand(rbp, kFunctionOffset));
+    __ push(rax);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    __ bind(&okay);
+    // End of stack check.
 
-  // Push current index and limit.
-  const int kLimitOffset =
-      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
-  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
-  __ push(rax);  // limit
-  __ push(Immediate(0));  // index
+    // Push current index and limit.
+    const int kLimitOffset =
+        StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+    const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+    __ push(rax);  // limit
+    __ push(Immediate(0));  // index
 
-  // Get the receiver.
-  __ movq(rbx, Operand(rbp, kReceiverOffset));
+    // Get the receiver.
+    __ movq(rbx, Operand(rbp, kReceiverOffset));
 
-  // Check that the function is a JS function (otherwise it must be a proxy).
-  Label push_receiver;
-  __ movq(rdi, Operand(rbp, kFunctionOffset));
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &push_receiver);
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ movq(rdi, Operand(rbp, kFunctionOffset));
+    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+    __ j(not_equal, &push_receiver);
 
-  // Change context eagerly to get the right global object if necessary.
-  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+    // Change context eagerly to get the right global object if necessary.
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-  // Do not transform the receiver for strict mode functions.
-  Label call_to_object, use_global_receiver;
-  __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
-           Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
-  __ j(not_equal, &push_receiver);
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_receiver;
+    __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
+             Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+    __ j(not_equal, &push_receiver);
 
-  // Do not transform the receiver for natives.
-  __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
-           Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
-  __ j(not_equal, &push_receiver);
+    // Do not transform the receiver for natives.
+    __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+             Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+    __ j(not_equal, &push_receiver);
 
-  // Compute the receiver in non-strict mode.
-  __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
-  __ CompareRoot(rbx, Heap::kNullValueRootIndex);
-  __ j(equal, &use_global_receiver);
-  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &use_global_receiver);
+    // Compute the receiver in non-strict mode.
+    __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
+    __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+    __ j(equal, &use_global_receiver);
+    __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+    __ j(equal, &use_global_receiver);
 
-  // If given receiver is already a JavaScript object then there's no
-  // reason for converting it.
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
-  __ j(above_equal, &push_receiver);
+    // If the given receiver is already a JavaScript object then there's no

+    // reason for converting it.
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+    __ j(above_equal, &push_receiver);
 
-  // Convert the receiver to an object.
-  __ bind(&call_to_object);
-  __ push(rbx);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ movq(rbx, rax);
-  __ jmp(&push_receiver, Label::kNear);
+    // Convert the receiver to an object.
+    __ bind(&call_to_object);
+    __ push(rbx);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ movq(rbx, rax);
+    __ jmp(&push_receiver, Label::kNear);
 
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
-  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
-  __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
-  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+    // Use the current global receiver object as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalOffset =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+    __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
 
-  // Push the receiver.
-  __ bind(&push_receiver);
-  __ push(rbx);
+    // Push the receiver.
+    __ bind(&push_receiver);
+    __ push(rbx);
 
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ movq(rax, Operand(rbp, kIndexOffset));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ movq(rdx, Operand(rbp, kArgumentsOffset));  // load arguments
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ movq(rax, Operand(rbp, kIndexOffset));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ movq(rdx, Operand(rbp, kArgumentsOffset));  // load arguments
 
-  // Use inline caching to speed up access to arguments.
-  Handle<Code> ic =
-      masm->isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // It is important that we do not have a test instruction after the
-  // call.  A test instruction after the call is used to indicate that
-  // we have generated an inline version of the keyed load.  In this
-  // case, we know that we are not generating a test instruction next.
+    // Use inline caching to speed up access to arguments.
+    Handle<Code> ic =
+        masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // It is important that we do not have a test instruction after the
+    // call.  A test instruction after the call is used to indicate that
+    // we have generated an inline version of the keyed load.  In this
+    // case, we know that we are not generating a test instruction next.
 
-  // Push the nth argument.
-  __ push(rax);
+    // Push the nth argument.
+    __ push(rax);
 
-  // Update the index on the stack and in register rax.
-  __ movq(rax, Operand(rbp, kIndexOffset));
-  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
-  __ movq(Operand(rbp, kIndexOffset), rax);
+    // Update the index on the stack and in register rax.
+    __ movq(rax, Operand(rbp, kIndexOffset));
+    __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+    __ movq(Operand(rbp, kIndexOffset), rax);
 
-  __ bind(&entry);
-  __ cmpq(rax, Operand(rbp, kLimitOffset));
-  __ j(not_equal, &loop);
+    __ bind(&entry);
+    __ cmpq(rax, Operand(rbp, kLimitOffset));
+    __ j(not_equal, &loop);
 
-  // Invoke the function.
-  Label call_proxy;
-  ParameterCount actual(rax);
-  __ SmiToInteger32(rax, rax);
-  __ movq(rdi, Operand(rbp, kFunctionOffset));
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &call_proxy);
-  __ InvokeFunction(rdi, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the function.
+    Label call_proxy;
+    ParameterCount actual(rax);
+    __ SmiToInteger32(rax, rax);
+    __ movq(rdi, Operand(rbp, kFunctionOffset));
+    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+    __ j(not_equal, &call_proxy);
+    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
 
-  __ LeaveInternalFrame();
-  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+    frame_scope.GenerateLeaveFrame();
+    __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 
-  // Invoke the function proxy.
-  __ bind(&call_proxy);
-  __ push(rdi);  // add function proxy as last argument
-  __ incq(rax);
-  __ Set(rbx, 0);
-  __ SetCallKind(rcx, CALL_AS_METHOD);
-  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
-  __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
+    // Invoke the function proxy.
+    __ bind(&call_proxy);
+    __ push(rdi);  // add function proxy as last argument
+    __ incq(rax);
+    __ Set(rbx, 0);
+    __ SetCallKind(rcx, CALL_AS_METHOD);
+    __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
+    __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
 
-  __ LeaveInternalFrame();
+    // Leave internal frame.
+  }
   __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 }
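
In the rewritten Generate_FunctionApply the loop state lives in two expression-stack slots of the internal frame (kIndexOffset and kLimitOffset), and each argument is fetched through the keyed-load IC before being pushed. The control flow, as a hedged sketch with hypothetical helper names:

    // Sketch of the argument-copy loop; Push/KeyedLoad are hypothetical.
    void PushApplyArguments(Object* arguments, int limit) {
      int index = 0;                        // stored at rbp + kIndexOffset
      while (index != limit) {              // cmpq against rbp + kLimitOffset
        Push(KeyedLoad(arguments, index));  // KeyedLoadIC_Initialize call
        ++index;                            // SmiAddConstant on the slot
      }
    }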
 
 
-// Number of empty elements to allocate for an empty array.
-static const int kPreallocatedArrayElements = 4;
-
-
 // Allocate an empty JSArray. The allocated array is put into the result
 // register. If the parameter initial_capacity is larger than zero an elements
 // backing store is allocated with this size and filled with the hole values.
@@ -979,13 +973,11 @@
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
-                                 int initial_capacity,
                                  Label* gc_required) {
-  ASSERT(initial_capacity >= 0);
+  const int initial_capacity = JSArray::kPreallocatedArrayElements;
+  STATIC_ASSERT(initial_capacity >= 0);
 
-  // Load the initial map from the array function.
-  __ movq(scratch1, FieldOperand(array_function,
-                                 JSFunction::kPrototypeOrInitialMapOffset));
+  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
@@ -1005,9 +997,10 @@
   // result: JSObject
   // scratch1: initial map
   // scratch2: start of next object
+  Factory* factory = masm->isolate()->factory();
   __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
   __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
-          FACTORY->empty_fixed_array());
+          factory->empty_fixed_array());
   // Field JSArray::kElementsOffset is initialized later.
   __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
 
@@ -1015,7 +1008,7 @@
   // fixed array.
   if (initial_capacity == 0) {
     __ Move(FieldOperand(result, JSArray::kElementsOffset),
-            FACTORY->empty_fixed_array());
+            factory->empty_fixed_array());
     return;
   }
 
@@ -1032,15 +1025,14 @@
   // scratch1: elements array
   // scratch2: start of next object
   __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
-          FACTORY->fixed_array_map());
+          factory->fixed_array_map());
   __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
           Smi::FromInt(initial_capacity));
 
   // Fill the FixedArray with the hole value. Inline the code if short.
   // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
   static const int kLoopUnfoldLimit = 4;
-  ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
-  __ Move(scratch3, FACTORY->the_hole_value());
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
   if (initial_capacity <= kLoopUnfoldLimit) {
     // Use a scratch register here to have only one reloc info when unfolding
     // the loop.
@@ -1051,13 +1043,17 @@
     }
   } else {
     Label loop, entry;
+    __ movq(scratch2, Immediate(initial_capacity));
     __ jmp(&entry);
     __ bind(&loop);
-    __ movq(Operand(scratch1, 0), scratch3);
-    __ addq(scratch1, Immediate(kPointerSize));
+    __ movq(FieldOperand(scratch1,
+                         scratch2,
+                         times_pointer_size,
+                         FixedArray::kHeaderSize),
+            scratch3);
     __ bind(&entry);
-    __ cmpq(scratch1, scratch2);
-    __ j(below, &loop);
+    __ decq(scratch2);
+    __ j(not_sign, &loop);
   }
 }
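
The rolled variant of the fill loop now counts an index register down instead of advancing a pointer toward the end of the object: scratch2 starts at initial_capacity, is decremented each iteration, and the loop exits once the decrement goes negative (the not_sign condition). Equivalently:

    // Equivalent of the decq/not_sign loop above.
    for (int i = initial_capacity - 1; i >= 0; --i) {
      // movq FieldOperand(scratch1, scratch2, times_pointer_size, ...)
      elements[i] = the_hole;
    }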
 
@@ -1073,38 +1069,22 @@
 // register elements_array is scratched.
 static void AllocateJSArray(MacroAssembler* masm,
                             Register array_function,  // Array function.
-                            Register array_size,  // As a smi.
+                            Register array_size,  // As a smi, cannot be 0.
                             Register result,
                             Register elements_array,
                             Register elements_array_end,
                             Register scratch,
                             bool fill_with_hole,
                             Label* gc_required) {
-  Label not_empty, allocated;
+  __ LoadInitialArrayMap(array_function, scratch, elements_array);
 
-  // Load the initial map from the array function.
-  __ movq(elements_array,
-          FieldOperand(array_function,
-                       JSFunction::kPrototypeOrInitialMapOffset));
-
-  // Check whether an empty sized array is requested.
-  __ testq(array_size, array_size);
-  __ j(not_zero, &not_empty);
-
-  // If an empty array is requested allocate a small elements array anyway. This
-  // keeps the code below free of special casing for the empty array.
-  int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
-  __ AllocateInNewSpace(size,
-                        result,
-                        elements_array_end,
-                        scratch,
-                        gc_required,
-                        TAG_OBJECT);
-  __ jmp(&allocated);
+  if (FLAG_debug_code) {  // Assert that array size is not zero.
+    __ testq(array_size, array_size);
+    __ Assert(not_zero, "array size is unexpectedly 0");
+  }
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested elements.
-  __ bind(&not_empty);
   SmiIndex index =
       masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
   __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
@@ -1122,9 +1102,9 @@
   // elements_array: initial map
   // elements_array_end: start of next object
   // array_size: size of array (smi)
-  __ bind(&allocated);
+  Factory* factory = masm->isolate()->factory();
   __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
-  __ Move(elements_array, FACTORY->empty_fixed_array());
+  __ Move(elements_array, factory->empty_fixed_array());
   __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
   // Field JSArray::kElementsOffset is initialized later.
   __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
@@ -1143,16 +1123,7 @@
   // elements_array_end: start of next object
   // array_size: size of array (smi)
   __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
-          FACTORY->fixed_array_map());
-  Label not_empty_2, fill_array;
-  __ SmiTest(array_size);
-  __ j(not_zero, &not_empty_2);
-  // Length of the FixedArray is the number of pre-allocated elements even
-  // though the actual JSArray has length 0.
-  __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
-          Smi::FromInt(kPreallocatedArrayElements));
-  __ jmp(&fill_array);
-  __ bind(&not_empty_2);
+          factory->fixed_array_map());
   // For non-empty JSArrays the length of the FixedArray and the JSArray is the
   // same.
   __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
@@ -1161,10 +1132,9 @@
   // result: JSObject
   // elements_array: elements array
   // elements_array_end: start of next object
-  __ bind(&fill_array);
   if (fill_with_hole) {
     Label loop, entry;
-    __ Move(scratch, FACTORY->the_hole_value());
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
     __ lea(elements_array, Operand(elements_array,
                                    FixedArray::kHeaderSize - kHeapObjectTag));
     __ jmp(&entry);
@@ -1193,13 +1163,15 @@
 // Both registers are preserved by this code so no need to differentiate between
 // a construct call and a normal call.
 static void ArrayNativeCode(MacroAssembler* masm,
-                            Label *call_generic_code) {
-  Label argc_one_or_more, argc_two_or_more;
+                            Label* call_generic_code) {
+  Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
+      has_non_smi_element, finish, cant_transition_map, not_double;
 
   // Check for array construction with zero arguments.
   __ testq(rax, rax);
   __ j(not_zero, &argc_one_or_more);
 
+  __ bind(&empty_array);
   // Handle construction of an empty array.
   AllocateEmptyJSArray(masm,
                        rdi,
@@ -1207,7 +1179,6 @@
                        rcx,
                        rdx,
                        r8,
-                       kPreallocatedArrayElements,
                        call_generic_code);
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->array_function_native(), 1);
@@ -1220,6 +1191,16 @@
   __ cmpq(rax, Immediate(1));
   __ j(not_equal, &argc_two_or_more);
   __ movq(rdx, Operand(rsp, kPointerSize));  // Get the argument from the stack.
+
+  __ SmiTest(rdx);
+  __ j(not_zero, &not_empty_array);
+  __ pop(r8);  // Adjust stack.
+  __ Drop(1);
+  __ push(r8);
+  __ movq(rax, Immediate(0));  // Treat this as a call with argc of zero.
+  __ jmp(&empty_array);
+
+  __ bind(&not_empty_array);
   __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
 
   // Handle construction of an empty array of a certain size. Bail out if size
@@ -1289,8 +1270,11 @@
   __ movq(rcx, rax);
   __ jmp(&entry);
   __ bind(&loop);
-  __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
-  __ movq(Operand(rdx, 0), kScratchRegister);
+  __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
+  if (FLAG_smi_only_arrays) {
+    __ JumpIfNotSmi(r8, &has_non_smi_element);
+  }
+  __ movq(Operand(rdx, 0), r8);
   __ addq(rdx, Immediate(kPointerSize));
   __ bind(&entry);
   __ decq(rcx);
@@ -1301,11 +1285,81 @@
   // rbx: JSArray
   // esp[0]: return address
   // esp[8]: last argument
+  __ bind(&finish);
   __ pop(rcx);
   __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
   __ push(rcx);
   __ movq(rax, rbx);
   __ ret(0);
+
+  __ bind(&has_non_smi_element);
+  // Double values are handled by the runtime.
+  __ CheckMap(r8,
+              masm->isolate()->factory()->heap_number_map(),
+              &not_double,
+              DONT_DO_SMI_CHECK);
+  __ bind(&cant_transition_map);
+  __ UndoAllocationInNewSpace(rbx);
+  __ jmp(call_generic_code);
+
+  __ bind(&not_double);
+  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+  // rbx: JSArray
+  __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         r11,
+                                         kScratchRegister,
+                                         &cant_transition_map);
+
+  __ movq(FieldOperand(rbx, HeapObject::kMapOffset), r11);
+  __ RecordWriteField(rbx, HeapObject::kMapOffset, r11, r8,
+                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+  // Finish the array initialization loop.
+  Label loop2;
+  __ bind(&loop2);
+  __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
+  __ movq(Operand(rdx, 0), r8);
+  __ addq(rdx, Immediate(kPointerSize));
+  __ decq(rcx);
+  __ j(greater_equal, &loop2);
+  __ jmp(&finish);
+}
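
With FLAG_smi_only_arrays enabled, the copy loop bails to has_non_smi_element on the first non-smi value: a heap number aborts the fast path entirely (it would need a double store, so the allocation is undone and the generic code runs), while any other heap object triggers a single map transition from FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS, after which loop2 finishes the copy without further checks. As a sketch with hypothetical helper names:

    // Sketch of the element classification above (hypothetical helpers).
    bool smi_only = true;
    for (int i = argc - 1; i >= 0; --i) {
      Object* value = args[i];
      if (smi_only && !value->IsSmi()) {
        if (value->IsHeapNumber())        // CheckMap(heap_number_map)
          return Bail();                  // UndoAllocationInNewSpace + generic
        TransitionToFastElements(array);  // LoadTransitionedArrayMapConditional
        smi_only = false;                 // loop2 copies the rest unchecked
      }
      elements[i] = value;
    }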
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : argc
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : last argument
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the InternalArray function.
+  __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi check below catches both NULL and an actual Smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+    __ Check(not_smi, "Unexpected initial map for InternalArray function");
+    __ CmpObjectType(rbx, MAP_TYPE, rcx);
+    __ Check(equal, "Unexpected initial map for InternalArray function");
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  ArrayNativeCode(masm, &generic_array_code);
+
+  // Jump to the generic array code in case the specialized code cannot handle
+  // the construction.
+  __ bind(&generic_array_code);
+  Handle<Code> array_code =
+      masm->isolate()->builtins()->InternalArrayCodeGeneric();
+  __ Jump(array_code, RelocInfo::CODE_TARGET);
 }
 
 
@@ -1378,9 +1432,130 @@
 
 
 void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
-  // TODO(849): implement custom construct stub.
-  // Generate a copy of the generic stub for now.
-  Generate_JSConstructStubGeneric(masm);
+  // ----------- S t a t e -------------
+  //  -- rax                 : number of arguments
+  //  -- rdi                 : constructor function
+  //  -- rsp[0]              : return address
+  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- rsp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->string_ctor_calls(), 1);
+
+  if (FLAG_debug_code) {
+    __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
+    __ cmpq(rdi, rcx);
+    __ Assert(equal, "Unexpected String function");
+  }
+
+  // Load the first argument into rax and get rid of the rest
+  // (including the receiver).
+  Label no_arguments;
+  __ testq(rax, rax);
+  __ j(zero, &no_arguments);
+  __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+  __ pop(rcx);
+  __ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+  __ push(rcx);
+  __ movq(rax, rbx);
+
+  // Lookup the argument in the number to string cache.
+  Label not_cached, argument_is_string;
+  NumberToStringStub::GenerateLookupNumberStringCache(
+      masm,
+      rax,  // Input.
+      rbx,  // Result.
+      rcx,  // Scratch 1.
+      rdx,  // Scratch 2.
+      false,  // Input is known to be smi?
+      &not_cached);
+  __ IncrementCounter(counters->string_ctor_cached_number(), 1);
+  __ bind(&argument_is_string);
+
+  // ----------- S t a t e -------------
+  //  -- rbx    : argument converted to string
+  //  -- rdi    : constructor function
+  //  -- rsp[0] : return address
+  // -----------------------------------
+
+  // Allocate a JSValue and put the tagged pointer into rax.
+  Label gc_required;
+  __ AllocateInNewSpace(JSValue::kSize,
+                        rax,  // Result.
+                        rcx,  // New allocation top (we ignore it).
+                        no_reg,
+                        &gc_required,
+                        TAG_OBJECT);
+
+  // Set the map.
+  __ LoadGlobalFunctionInitialMap(rdi, rcx);
+  if (FLAG_debug_code) {
+    __ cmpb(FieldOperand(rcx, Map::kInstanceSizeOffset),
+            Immediate(JSValue::kSize >> kPointerSizeLog2));
+    __ Assert(equal, "Unexpected string wrapper instance size");
+    __ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
+    __ Assert(equal, "Unexpected unused properties of string wrapper");
+  }
+  __ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+
+  // Set properties and elements.
+  __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
+  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+
+  // Set the value.
+  __ movq(FieldOperand(rax, JSValue::kValueOffset), rbx);
+
+  // Ensure the object is fully initialized.
+  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+  // We're done. Return.
+  __ ret(0);
+
+  // The argument was not found in the number to string cache. Check
+  // if it's a string already before calling the conversion builtin.
+  Label convert_argument;
+  __ bind(&not_cached);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfSmi(rax, &convert_argument);
+  Condition is_string = masm->IsObjectStringType(rax, rbx, rcx);
+  __ j(NegateCondition(is_string), &convert_argument);
+  __ movq(rbx, rax);
+  __ IncrementCounter(counters->string_ctor_string_value(), 1);
+  __ jmp(&argument_is_string);
+
+  // Invoke the conversion builtin and put the result into rbx.
+  __ bind(&convert_argument);
+  __ IncrementCounter(counters->string_ctor_conversions(), 1);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(rdi);  // Preserve the function.
+    __ push(rax);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+    __ pop(rdi);
+  }
+  __ movq(rbx, rax);
+  __ jmp(&argument_is_string);
+
+  // Load the empty string into rbx, remove the receiver from the
+  // stack, and jump back to the case where the argument is a string.
+  __ bind(&no_arguments);
+  __ LoadRoot(rbx, Heap::kEmptyStringRootIndex);
+  __ pop(rcx);
+  __ lea(rsp, Operand(rsp, kPointerSize));
+  __ push(rcx);
+  __ jmp(&argument_is_string);
+
+  // At this point the argument is already a string. Call runtime to
+  // create a string wrapper.
+  __ bind(&gc_required);
+  __ IncrementCounter(counters->string_ctor_gc_required(), 1);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(rbx);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
+  __ ret(0);
 }
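
The new custom construct stub keeps `new String(x)` in generated code on the common paths: zero arguments yield the empty string, numbers are first looked up in the number-to-string cache, strings pass through unchanged, and anything else is converted with the TO_STRING builtin inside an internal frame; the result is then boxed in a JSValue carrying the String function's initial map. Overall shape, as a hedged sketch with hypothetical helper names:

    // High-level shape of the stub above (hypothetical helpers).
    Object* StringConstruct(int argc, Object* arg) {
      String* s;
      if (argc == 0)                s = empty_string;
      else if (InNumberCache(arg))  s = CachedNumberToString(arg);
      else if (arg->IsString())     s = String::cast(arg);
      else                          s = ToString(arg);  // TO_STRING builtin
      JSValue* wrapper = AllocateJSValue(string_initial_map);  // GC -> runtime
      wrapper->set_value(s);
      return wrapper;
    }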
 
 
@@ -1489,6 +1664,9 @@
   __ bind(&invoke);
   __ call(rdx);
 
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
   // Leave frame and return.
   LeaveArgumentsAdaptorFrame(masm);
   __ ret(0);
@@ -1520,10 +1698,11 @@
 
   // Pass the function to optimize as the argument to the on-stack
   // replacement runtime function.
-  __ EnterInternalFrame();
-  __ push(rax);
-  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(rax);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
@@ -1541,7 +1720,9 @@
 
   StackCheckStub stub;
   __ TailCallStub(&stub);
-  __ Abort("Unreachable code: returned from tail call.");
+  if (FLAG_debug_code) {
+    __ Abort("Unreachable code: returned from tail call.");
+  }
   __ bind(&ok);
   __ ret(0);
 
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 6499ea0..2845039 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -68,9 +68,9 @@
   // Get the function info from the stack.
   __ movq(rdx, Operand(rsp, 1 * kPointerSize));
 
-  int map_index = strict_mode_ == kStrictMode
-      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
-      : Context::FUNCTION_MAP_INDEX;
+  int map_index = (language_mode_ == CLASSIC_MODE)
+      ? Context::FUNCTION_MAP_INDEX
+      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
@@ -124,12 +124,12 @@
   // Get the function from the stack.
   __ movq(rcx, Operand(rsp, 1 * kPointerSize));
 
-  // Setup the object header.
+  // Set up the object header.
   __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
   __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
   __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
 
-  // Setup the fixed slots.
+  // Set up the fixed slots.
   __ Set(rbx, 0);  // Set to NULL.
   __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
   __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
@@ -155,6 +155,131 @@
 }
 
 
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [rsp + (1 * kPointerSize)]: function
+  // [rsp + (2 * kPointerSize)]: serialized scope info
+
+  // Try to allocate the context in new space.
+  Label gc;
+  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+  __ AllocateInNewSpace(FixedArray::SizeFor(length),
+                        rax, rbx, rcx, &gc, TAG_OBJECT);
+
+  // Get the function from the stack.
+  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+
+  // Get the serialized scope info from the stack.
+  __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+
+  // Set up the object header.
+  __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
+  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
+  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
+
+  // If this block context is nested in the global context we get a smi
+  // sentinel instead of a function. The block context should get the
+  // canonical empty function of the global context as its closure, which
+  // we still have to look up.
+  Label after_sentinel;
+  __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
+  if (FLAG_debug_code) {
+    const char* message = "Expected 0 as a Smi sentinel";
+    __ cmpq(rcx, Immediate(0));
+    __ Assert(equal, message);
+  }
+  __ movq(rcx, GlobalObjectOperand());
+  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+  __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
+  __ bind(&after_sentinel);
+
+  // Set up the fixed slots.
+  __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
+  __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
+  __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
+
+  // Copy the global object from the previous context.
+  __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
+  __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);
+
+  // Initialize the rest of the slots to the hole value.
+  __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
+  for (int i = 0; i < slots_; i++) {
+    __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
+  }
+
+  // Return and remove the on-stack parameter.
+  __ movq(rsi, rax);
+  __ ret(2 * kPointerSize);
+
+  // Need to collect. Call into runtime system.
+  __ bind(&gc);
+  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
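
The stub sizes the block context like any FixedArray: Context::MIN_CONTEXT_SLOTS fixed slots (closure, previous, extension, global) plus the scope's own slots_, all hole-initialized. The smi-sentinel branch handles block scopes sitting directly in the global context, whose closure slot must be the global context's canonical empty function. The size computation, spelled out:

    // FixedArray::SizeFor(n) is the header plus n pointer-size payload words.
    int length = slots_ + Context::MIN_CONTEXT_SLOTS;
    int byte_size = FixedArray::kHeaderSize + length * kPointerSize;
    // AllocateInNewSpace(byte_size, ...) above; on failure the stub
    // tail-calls Runtime::kPushBlockContext with the same stack arguments.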
+
+
+static void GenerateFastCloneShallowArrayCommon(
+    MacroAssembler* masm,
+    int length,
+    FastCloneShallowArrayStub::Mode mode,
+    Label* fail) {
+  // Registers on entry:
+  //
+  // rcx: boilerplate literal array.
+  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = 0;
+  if (length > 0) {
+    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        ? FixedDoubleArray::SizeFor(length)
+        : FixedArray::SizeFor(length);
+  }
+  int size = JSArray::kSize + elements_size;
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
+      __ movq(rbx, FieldOperand(rcx, i));
+      __ movq(FieldOperand(rax, i), rbx);
+    }
+  }
+
+  if (length > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+    __ lea(rdx, Operand(rax, JSArray::kSize));
+    __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+
+    // Copy the elements array.
+    if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
+      for (int i = 0; i < elements_size; i += kPointerSize) {
+        __ movq(rbx, FieldOperand(rcx, i));
+        __ movq(FieldOperand(rdx, i), rbx);
+      }
+    } else {
+      ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
+      int i;
+      for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
+        __ movq(rbx, FieldOperand(rcx, i));
+        __ movq(FieldOperand(rdx, i), rbx);
+      }
+      while (i < elements_size) {
+        __ movsd(xmm0, FieldOperand(rcx, i));
+        __ movsd(FieldOperand(rdx, i), xmm0);
+        i += kDoubleSize;
+      }
+      ASSERT(i == elements_size);
+    }
+  }
+}
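
Note the two copy strategies in the helper: header words of the elements array move through a general-purpose register in pointer-size units, while double payloads go through xmm0 in kDoubleSize steps, because a FixedDoubleArray holds raw 64-bit doubles rather than tagged pointers (on x64 the two widths coincide, so the split mirrors the portable pattern rather than a size difference). Sketch:

    // Sketch of the double-elements copy above.
    int i = FixedDoubleArray::kHeaderSize;  // header copied as tagged words
    while (i < elements_size) {
      // movsd xmm0, [src + i]; movsd [dst + i], xmm0
      CopyDouble(dst + i, src + i);         // raw doubles, no tagging
      i += kDoubleSize;
    }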
+
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   // Stack layout on entry:
   //
@@ -162,29 +287,54 @@
   // [rsp + (2 * kPointerSize)]: literal index.
   // [rsp + (3 * kPointerSize)]: literals array.
 
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
-  int size = JSArray::kSize + elements_size;
-
   // Load boilerplate object into rcx and check if we need to create a
   // boilerplate.
-  Label slow_case;
   __ movq(rcx, Operand(rsp, 3 * kPointerSize));
   __ movq(rax, Operand(rsp, 2 * kPointerSize));
   SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
   __ movq(rcx,
           FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
   __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+  Label slow_case;
   __ j(equal, &slow_case);
 
+  FastCloneShallowArrayStub::Mode mode = mode_;
+  // rcx is boilerplate object.
+  Factory* factory = masm->isolate()->factory();
+  if (mode == CLONE_ANY_ELEMENTS) {
+    Label double_elements, check_fast_elements;
+    __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
+    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+           factory->fixed_cow_array_map());
+    __ j(not_equal, &check_fast_elements);
+    GenerateFastCloneShallowArrayCommon(masm, 0,
+                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&check_fast_elements);
+    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+           factory->fixed_array_map());
+    __ j(not_equal, &double_elements);
+    GenerateFastCloneShallowArrayCommon(masm, length_,
+                                        CLONE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&double_elements);
+    mode = CLONE_DOUBLE_ELEMENTS;
+    // Fall through to generate the code to handle double elements.
+  }
+
   if (FLAG_debug_code) {
     const char* message;
     Heap::RootListIndex expected_map_index;
-    if (mode_ == CLONE_ELEMENTS) {
+    if (mode == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map_index = Heap::kFixedArrayMapRootIndex;
+    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+      message = "Expected (writable) fixed double array";
+      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
     } else {
-      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
     }
@@ -196,33 +346,7 @@
     __ pop(rcx);
   }
 
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
-      __ movq(rbx, FieldOperand(rcx, i));
-      __ movq(FieldOperand(rax, i), rbx);
-    }
-  }
-
-  if (length_ > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
-    __ lea(rdx, Operand(rax, JSArray::kSize));
-    __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
-
-    // Copy the elements array.
-    for (int i = 0; i < elements_size; i += kPointerSize) {
-      __ movq(rbx, FieldOperand(rcx, i));
-      __ movq(FieldOperand(rdx, i), rbx);
-    }
-  }
-
-  // Return and remove the on-stack parameters.
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
   __ ret(3 * kPointerSize);
 
   __ bind(&slow_case);
@@ -230,9 +354,54 @@
 }
 
 
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [rsp + kPointerSize]: object literal flags.
+  // [rsp + (2 * kPointerSize)]: constant properties.
+  // [rsp + (3 * kPointerSize)]: literal index.
+  // [rsp + (4 * kPointerSize)]: literals array.
+
+  // Load boilerplate object into rcx and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ movq(rcx, Operand(rsp, 4 * kPointerSize));
+  __ movq(rax, Operand(rsp, 3 * kPointerSize));
+  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+  __ movq(rcx,
+          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
+  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &slow_case);
+
+  // Check that the boilerplate contains only fast properties and we can
+  // statically determine the instance size.
+  int size = JSObject::kHeaderSize + length_ * kPointerSize;
+  __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset));
+  __ cmpq(rax, Immediate(size >> kPointerSizeLog2));
+  __ j(not_equal, &slow_case);
+
+  // Allocate the JS object and copy header together with all in-object
+  // properties from the boilerplate.
+  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+  for (int i = 0; i < size; i += kPointerSize) {
+    __ movq(rbx, FieldOperand(rcx, i));
+    __ movq(FieldOperand(rax, i), rbx);
+  }
+
+  // Return and remove the on-stack parameters.
+  __ ret(4 * kPointerSize);
+
+  __ bind(&slow_case);
+  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+}
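
The object-literal fast path is guarded by an instance-size check: the clone is a flat word-by-word copy of the header plus in-object properties, so it is only taken when the boilerplate's map reports exactly length_ in-object properties and no out-of-object storage; otherwise the stub defers to Runtime::kCreateObjectLiteralShallow. In outline:

    // Sketch of the guard above.
    int size = JSObject::kHeaderSize + length_ * kPointerSize;
    if (boilerplate->map()->instance_size() != size)
      return CallRuntime(Runtime::kCreateObjectLiteralShallow);
    CopyWords(clone, boilerplate, size / kPointerSize);  // header + fields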
+
+
 // The stub expects its argument on the stack and returns its result in tos_:
 // zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   Label patch;
   const Register argument = rax;
   const Register map = rdx;
@@ -328,6 +497,25 @@
 }
 
 
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  __ PushCallerSaved(save_doubles_);
+  const int argument_count = 1;
+  __ PrepareCallCFunction(argument_count);
+#ifdef _WIN64
+  __ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+  __ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  __ PopCallerSaved(save_doubles_);
+  __ ret(0);
+}
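
The #ifdef selects the first-integer-argument register of the host ABI, rcx on Win64 versus rdi in the System V AMD64 convention, so the isolate lands where the C++ store-buffer handler expects its single parameter. A hedged sketch of the C++ side (the exact signature behind ExternalReference::store_buffer_overflow_function is an assumption here):

    // Assumed shape of the callee; the body is illustrative only.
    extern "C" void StoreBufferOverflow(Isolate* isolate) {
      // Compact/flush the store buffer so generated code can keep recording
      // old-to-new pointers without overflowing.
      isolate->heap()->store_buffer()->HandleOverflow();  // hypothetical call
    }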
+
+
 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                  Type type,
                                  Heap::RootListIndex value,
@@ -622,12 +810,13 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(rax);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ movq(rcx, rax);
-    __ pop(rax);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(rax);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ movq(rcx, rax);
+      __ pop(rax);
+    }
     __ bind(&heapnumber_allocated);
     // rcx: allocated 'empty' number
 
@@ -751,6 +940,10 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
+  // Explicitly allow generation of nested stubs. It is safe here because
+  // generation code does not use any raw pointers.
+  AllowStubCallsScope allow_stub_calls(masm, true);
+
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -1414,6 +1607,8 @@
   __ cmpq(rbx, Operand(rcx, 0));
   __ j(not_equal, &cache_miss, Label::kNear);
   // Cache hit!
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
   __ movq(rax, Operand(rcx, 2 * kIntSize));
   if (tagged) {
     __ fstp(0);  // Clear FPU stack.
@@ -1424,6 +1619,7 @@
   }
 
   __ bind(&cache_miss);
+  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
   // Update cache with new value.
   if (tagged) {
     __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
@@ -1432,7 +1628,7 @@
     __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
     __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
   }
-  GenerateOperation(masm);
+  GenerateOperation(masm, type_);
   __ movq(Operand(rcx, 0), rbx);
   __ movq(Operand(rcx, 2 * kIntSize), rax);
   __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
@@ -1447,17 +1643,18 @@
     __ subq(rsp, Immediate(kDoubleSize));
     __ movsd(Operand(rsp, 0), xmm1);
     __ fld_d(Operand(rsp, 0));
-    GenerateOperation(masm);
+    GenerateOperation(masm, type_);
     __ fstp_d(Operand(rsp, 0));
     __ movsd(xmm1, Operand(rsp, 0));
     __ addq(rsp, Immediate(kDoubleSize));
     // We return the value in xmm1 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
-    // Allocate an unused object bigger than a HeapNumber.
-    __ Push(Smi::FromInt(2 * kDoubleSize));
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Allocate an unused object bigger than a HeapNumber.
+      __ Push(Smi::FromInt(2 * kDoubleSize));
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 
@@ -1473,10 +1670,11 @@
     __ bind(&runtime_call);
     __ AllocateHeapNumber(rax, rdi, &skip_cache);
     __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
-    __ EnterInternalFrame();
-    __ push(rax);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(rax);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
     __ Ret();
   }
@@ -1488,6 +1686,7 @@
     // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -1496,14 +1695,17 @@
 }
 
 
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
+void TranscendentalCacheStub::GenerateOperation(
+    MacroAssembler* masm, TranscendentalCache::Type type) {
   // Registers:
   // rax: Newly allocated HeapNumber, which must be preserved.
   // rbx: Bits of input double. Must be preserved.
   // rcx: Pointer to cache entry. Must be preserved.
   // st(0): Input double
   Label done;
-  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
+  if (type == TranscendentalCache::SIN ||
+      type == TranscendentalCache::COS ||
+      type == TranscendentalCache::TAN) {
     // Both fsin and fcos require arguments in the range +/-2^63 and
     // return NaN for infinities and NaN. They can share all code except
     // the actual fsin/fcos operation.
@@ -1524,8 +1726,12 @@
     __ j(not_equal, &non_nan_result, Label::kNear);
     // Input is +/-Infinity or NaN. Result is NaN.
     __ fstp(0);
-    __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
-    __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+    // NaN is represented by 0x7ff8000000000000.
+    __ subq(rsp, Immediate(kPointerSize));
+    __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
+    __ movl(Operand(rsp, 0), Immediate(0x00000000));
+    __ fld_d(Operand(rsp, 0));
+    __ addq(rsp, Immediate(kPointerSize));
     __ jmp(&done);
 
     __ bind(&non_nan_result);
@@ -1566,19 +1772,25 @@
     // FPU Stack: input % 2*pi
     __ movq(rax, rdi);  // Restore rax, pointer to the new HeapNumber.
     __ bind(&in_range);
-    switch (type_) {
+    switch (type) {
       case TranscendentalCache::SIN:
         __ fsin();
         break;
       case TranscendentalCache::COS:
         __ fcos();
         break;
+      case TranscendentalCache::TAN:
+        // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
+        // FP register stack.
+        __ fptan();
+        __ fstp(0);  // Pop FP register stack.
+        break;
       default:
         UNREACHABLE();
     }
     __ bind(&done);
   } else {
-    ASSERT(type_ == TranscendentalCache::LOG);
+    ASSERT(type == TranscendentalCache::LOG);
     __ fldln2();
     __ fxch();
     __ fyl2x();
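
Two details in this hunk deserve a note: FPTAN leaves the tangent in st(1) and pushes 1.0, which is why an extra fstp(0) follows it, and the NaN result is now built inline from its IEEE-754 bit pattern instead of loading the NaN root constant. The pattern written as two 32-bit halves above is the standard quiet NaN, exponent all ones with the top mantissa bit set:

    // Self-contained check of the bit pattern used above.
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    double QuietNaN() {
      uint64_t bits = 0x7FF8000000000000ULL;  // halves: 0x7ff80000, 0x0
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d;  // std::isnan(d) holds
    }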
@@ -1784,152 +1996,259 @@
 
 
 void MathPowStub::Generate(MacroAssembler* masm) {
-  // Registers are used as follows:
-  // rdx = base
-  // rax = exponent
-  // rcx = temporary, result
+  // Choose register conforming to calling convention (when bailing out).
+#ifdef _WIN64
+  const Register exponent = rdx;
+#else
+  const Register exponent = rdi;
+#endif
+  const Register base = rax;
+  const Register scratch = rcx;
+  const XMMRegister double_result = xmm3;
+  const XMMRegister double_base = xmm2;
+  const XMMRegister double_exponent = xmm1;
+  const XMMRegister double_scratch = xmm4;
 
-  Label allocate_return, call_runtime;
+  Label call_runtime, done, exponent_not_smi, int_exponent;
 
-  // Load input parameters.
-  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-  __ movq(rax, Operand(rsp, 1 * kPointerSize));
+  // Save 1 in double_result - we need this several times later on.
+  __ movq(scratch, Immediate(1));
+  __ cvtlsi2sd(double_result, scratch);
 
-  // Save 1 in xmm3 - we need this several times later on.
-  __ Set(rcx, 1);
-  __ cvtlsi2sd(xmm3, rcx);
+  if (exponent_type_ == ON_STACK) {
+    Label base_is_smi, unpack_exponent;
+    // The exponent and base are supplied as arguments on the stack.
+    // This can only happen if the stub is called from non-optimized code.
+    // Load input parameters from stack.
+    __ movq(base, Operand(rsp, 2 * kPointerSize));
+    __ movq(exponent, Operand(rsp, 1 * kPointerSize));
+    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
+    __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(not_equal, &call_runtime);
 
-  Label exponent_nonsmi;
-  Label base_nonsmi;
-  // If the exponent is a heap number go to that specific case.
-  __ JumpIfNotSmi(rax, &exponent_nonsmi);
-  __ JumpIfNotSmi(rdx, &base_nonsmi);
+    __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+    __ jmp(&unpack_exponent, Label::kNear);
 
-  // Optimized version when both exponent and base are smis.
-  Label powi;
-  __ SmiToInteger32(rdx, rdx);
-  __ cvtlsi2sd(xmm0, rdx);
-  __ jmp(&powi);
-  // Exponent is a smi and base is a heapnumber.
-  __ bind(&base_nonsmi);
-  __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &call_runtime);
+    __ bind(&base_is_smi);
+    __ SmiToInteger32(base, base);
+    __ cvtlsi2sd(double_base, base);
+    __ bind(&unpack_exponent);
 
-  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+    __ SmiToInteger32(exponent, exponent);
+    __ jmp(&int_exponent);
 
-  // Optimized version of pow if exponent is a smi.
-  // xmm0 contains the base.
-  __ bind(&powi);
-  __ SmiToInteger32(rax, rax);
+    __ bind(&exponent_not_smi);
+    __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(not_equal, &call_runtime);
+    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+  } else if (exponent_type_ == TAGGED) {
+    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+    __ SmiToInteger32(exponent, exponent);
+    __ jmp(&int_exponent);
 
-  // Save exponent in base as we need to check if exponent is negative later.
-  // We know that base and exponent are in different registers.
-  __ movq(rdx, rax);
+    __ bind(&exponent_not_smi);
+    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+  }
+
+  if (exponent_type_ != INTEGER) {
+    Label fast_power;
+    // Detect integer exponents stored as double.
+    __ cvttsd2si(exponent, double_exponent);
+    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
+    __ cmpl(exponent, Immediate(0x80000000u));
+    __ j(equal, &call_runtime);
+    __ cvtlsi2sd(double_scratch, exponent);
+    // Already ruled out NaNs for exponent.
+    __ ucomisd(double_exponent, double_scratch);
+    __ j(equal, &int_exponent);
+
+    if (exponent_type_ == ON_STACK) {
+      // Detect square root case.  Crankshaft detects constant +/-0.5 at
+      // compile time and uses DoMathPowHalf instead.  We then skip this check
+      // for non-constant cases of +/-0.5 as these hardly occur.
+      Label continue_sqrt, continue_rsqrt, not_plus_half;
+      // Test for 0.5.
+      // Load double_scratch with 0.5.
+      __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
+      __ movq(double_scratch, scratch);
+      // Already ruled out NaNs for exponent.
+      __ ucomisd(double_scratch, double_exponent);
+      __ j(not_equal, &not_plus_half, Label::kNear);
+
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+      // According to IEEE-754, double-precision -Infinity has the highest
+      // 12 bits set and the lowest 52 bits cleared.
+      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+      __ movq(double_scratch, scratch);
+      __ ucomisd(double_scratch, double_base);
+      // Comparing -Infinity with NaN results in "unordered", which sets the
+      // zero flag as if both were equal.  However, it also sets the carry flag.
+      __ j(not_equal, &continue_sqrt, Label::kNear);
+      __ j(carry, &continue_sqrt, Label::kNear);
+
+      // Set result to Infinity in the special case.
+      __ xorps(double_result, double_result);
+      __ subsd(double_result, double_scratch);
+      __ jmp(&done);
+
+      __ bind(&continue_sqrt);
+      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+      __ xorps(double_scratch, double_scratch);
+      __ addsd(double_scratch, double_base);  // Convert -0 to 0.
+      __ sqrtsd(double_result, double_scratch);
+      __ jmp(&done);
+
+      // Test for -0.5.
+      __ bind(&not_plus_half);
+      // Load double_scratch with -0.5 by subtracting 1.
+      __ subsd(double_scratch, double_result);
+      // Already ruled out NaNs for exponent.
+      __ ucomisd(double_scratch, double_exponent);
+      __ j(not_equal, &fast_power, Label::kNear);
+
+      // Calculates reciprocal of square root of base.  Check for the special
+      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+      // According to IEEE-754, double-precision -Infinity has the highest
+      // 12 bits set and the lowest 52 bits cleared.
+      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+      __ movq(double_scratch, scratch);
+      __ ucomisd(double_scratch, double_base);
+      // Comparing -Infinity with NaN results in "unordered", which sets the
+      // zero flag as if both were equal.  However, it also sets the carry flag.
+      __ j(not_equal, &continue_rsqrt, Label::kNear);
+      __ j(carry, &continue_rsqrt, Label::kNear);
+
+      // Set result to 0 in the special case.
+      __ xorps(double_result, double_result);
+      __ jmp(&done);
+
+      __ bind(&continue_rsqrt);
+      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+      __ xorps(double_exponent, double_exponent);
+      __ addsd(double_exponent, double_base);  // Convert -0 to +0.
+      __ sqrtsd(double_exponent, double_exponent);
+      __ divsd(double_result, double_exponent);
+      __ jmp(&done);
+    }
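
These branches encode the ES5 15.8.2.13 corner cases that a plain sqrtsd would get wrong: pow(-Infinity, 0.5) is +Infinity and pow(-Infinity, -0.5) is +0, whereas sqrt(-Infinity) would be NaN; and adding +0 first turns a -0 base into +0, because sqrtsd(-0) yields -0 while pow(-0, 0.5) must be +0. The C library's pow follows the same rules, which makes a quick cross-check easy:

    // The checks above match C99/IEEE pow semantics.
    #include <cmath>
    #include <cstdio>

    int main() {
      std::printf("%f\n", std::pow(-INFINITY, 0.5));   // inf, not NaN
      std::printf("%f\n", std::pow(-INFINITY, -0.5));  // 0
      std::printf("%f\n", std::pow(-0.0, 0.5));        // 0 (+0, not -0)
      std::printf("%f\n", std::sqrt(-0.0));            // -0: why +0 is added
      return 0;
    }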
+
+    // Using FPU instructions to calculate power.
+    Label fast_power_failed;
+    __ bind(&fast_power);
+    __ fnclex();  // Clear flags to catch exceptions later.
+    // Transfer (B)ase and (E)xponent onto the FPU register stack.
+    __ subq(rsp, Immediate(kDoubleSize));
+    __ movsd(Operand(rsp, 0), double_exponent);
+    __ fld_d(Operand(rsp, 0));  // E
+    __ movsd(Operand(rsp, 0), double_base);
+    __ fld_d(Operand(rsp, 0));  // B, E
+
+    // Exponent is in st(1) and base is in st(0).
+    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
+    // FYL2X calculates st(1) * log2(st(0))
+    __ fyl2x();    // X
+    __ fld(0);     // X, X
+    __ frndint();  // rnd(X), X
+    __ fsub(1);    // rnd(X), X-rnd(X)
+    __ fxch(1);    // X - rnd(X), rnd(X)
+    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
+    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
+    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
+    __ faddp(1);   // 1, 2^(X-rnd(X)), rnd(X)
+    // FSCALE calculates st(0) * 2^st(1)
+    __ fscale();   // 2^X, rnd(X)
+    __ fstp(1);
+    // Bail out to runtime in case of exceptions in the status word.
+    __ fnstsw_ax();
+    __ testb(rax, Immediate(0x5F));  // Check for all but precision exception.
+    __ j(not_zero, &fast_power_failed, Label::kNear);
+    __ fstp_d(Operand(rsp, 0));
+    __ movsd(double_result, Operand(rsp, 0));
+    __ addq(rsp, Immediate(kDoubleSize));
+    __ jmp(&done);
+
+    __ bind(&fast_power_failed);
+    __ fninit();
+    __ addq(rsp, Immediate(kDoubleSize));
+    __ jmp(&call_runtime);
+  }
+
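
For reference, the x87 sequence above computes B^E as 2^(E*log2(B)), splitting
X = E*log2(B) into an integer part (applied via FSCALE) and a fractional part
in (-1, 1), the only range F2XM1 accepts.  A rough C++ model of the data flow,
assuming round-to-nearest for FRNDINT and a positive finite base (a sketch,
not V8 code):

  #include <cmath>

  double FastPowSketch(double base, double exponent) {
    double x = exponent * std::log2(base);         // FYL2X: st(1) * log2(st(0))
    double r = std::nearbyint(x);                  // FRNDINT
    double frac = std::exp2(x - r);                // F2XM1 + FLD1/FADDP
    return std::ldexp(frac, static_cast<int>(r));  // FSCALE: frac * 2^r
  }
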
+  // Calculate power with integer exponent.
+  __ bind(&int_exponent);
+  const XMMRegister double_scratch2 = double_exponent;
+  // Back up exponent as we need to check if exponent is negative later.
+  __ movq(scratch, exponent);  // Back up exponent.
+  __ movsd(double_scratch, double_base);  // Back up base.
+  __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.
 
   // Get absolute value of exponent.
-  Label no_neg;
-  __ cmpl(rax, Immediate(0));
-  __ j(greater_equal, &no_neg, Label::kNear);
-  __ negl(rax);
+  Label no_neg, while_true, no_multiply;
+  __ testl(scratch, scratch);
+  __ j(positive, &no_neg, Label::kNear);
+  __ negl(scratch);
   __ bind(&no_neg);
 
-  // Load xmm1 with 1.
-  __ movaps(xmm1, xmm3);
-  Label while_true;
-  Label no_multiply;
-
   __ bind(&while_true);
-  __ shrl(rax, Immediate(1));
+  __ shrl(scratch, Immediate(1));
   __ j(not_carry, &no_multiply, Label::kNear);
-  __ mulsd(xmm1, xmm0);
+  __ mulsd(double_result, double_scratch);
   __ bind(&no_multiply);
-  __ mulsd(xmm0, xmm0);
+
+  __ mulsd(double_scratch, double_scratch);
   __ j(not_zero, &while_true);
 
-  // Base has the original value of the exponent - if the exponent  is
-  // negative return 1/result.
-  __ testl(rdx, rdx);
-  __ j(positive, &allocate_return);
-  // Special case if xmm1 has reached infinity.
-  __ divsd(xmm3, xmm1);
-  __ movaps(xmm1, xmm3);
-  __ xorps(xmm0, xmm0);
-  __ ucomisd(xmm0, xmm1);
-  __ j(equal, &call_runtime);
+  // If the exponent is negative, return 1/result.
+  __ testl(exponent, exponent);
+  __ j(greater, &done);
+  __ divsd(double_scratch2, double_result);
+  __ movsd(double_result, double_scratch2);
+  // Test whether result is zero.  Bail out to check for subnormal result.
+  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+  __ xorps(double_scratch2, double_scratch2);
+  __ ucomisd(double_scratch2, double_result);
+  // double_exponent aliased as double_scratch2 has already been overwritten
+  // and may not have contained the exponent value in the first place when the
+  // input was a smi.  We reset it with exponent value before bailing out.
+  __ j(not_equal, &done);
+  __ cvtlsi2sd(double_exponent, exponent);
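
The integer-exponent path above is binary exponentiation: each shrl moves the
next exponent bit into the carry flag, multiplying into the result when it is
set and squaring the base every round.  A compact C++ equivalent, ignoring the
subnormal bailout (a sketch, not V8 code):

  double IntPowSketch(double base, int exponent) {
    // Unsigned negation is well-defined even for INT_MIN.
    unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                 : static_cast<unsigned>(exponent);
    double result = 1.0;
    while (bits != 0) {
      if (bits & 1) result *= base;  // bit shifted out was set
      base *= base;                  // square for the next bit
      bits >>= 1;
    }
    return exponent < 0 ? 1.0 / result : result;
  }
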
 
-  __ jmp(&allocate_return);
+  // Returning or bailing out.
+  Counters* counters = masm->isolate()->counters();
+  if (exponent_type_ == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
 
-  // Exponent (or both) is a heapnumber - no matter what we should now work
-  // on doubles.
-  __ bind(&exponent_nonsmi);
-  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &call_runtime);
-  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
-  // Test if exponent is nan.
-  __ ucomisd(xmm1, xmm1);
-  __ j(parity_even, &call_runtime);
+    // The stub is called from non-optimized code, which expects the result
+    // as a heap number in rax.
+    __ bind(&done);
+    __ AllocateHeapNumber(rax, rcx, &call_runtime);
+    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
+    __ IncrementCounter(counters->math_pow(), 1);
+    __ ret(2 * kPointerSize);
+  } else {
+    __ bind(&call_runtime);
+    // Move base to the correct argument register.  Exponent is already in xmm1.
+    __ movsd(xmm0, double_base);
+    ASSERT(double_exponent.is(xmm1));
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(2);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()), 2);
+    }
+    // Return value is in xmm0.
+    __ movsd(double_result, xmm0);
+    // Restore context register.
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 
-  Label base_not_smi, handle_special_cases;
-  __ JumpIfNotSmi(rdx, &base_not_smi, Label::kNear);
-  __ SmiToInteger32(rdx, rdx);
-  __ cvtlsi2sd(xmm0, rdx);
-  __ jmp(&handle_special_cases, Label::kNear);
-
-  __ bind(&base_not_smi);
-  __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &call_runtime);
-  __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
-  __ andl(rcx, Immediate(HeapNumber::kExponentMask));
-  __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
-  // base is NaN or +/-Infinity
-  __ j(greater_equal, &call_runtime);
-  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
-
-  // base is in xmm0 and exponent is in xmm1.
-  __ bind(&handle_special_cases);
-  Label not_minus_half;
-  // Test for -0.5.
-  // Load xmm2 with -0.5.
-  __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
-  __ movq(xmm2, rcx);
-  // xmm2 now has -0.5.
-  __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &not_minus_half, Label::kNear);
-
-  // Calculates reciprocal of square root.
-  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorps(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
-  __ sqrtsd(xmm1, xmm1);
-  __ divsd(xmm3, xmm1);
-  __ movaps(xmm1, xmm3);
-  __ jmp(&allocate_return);
-
-  // Test for 0.5.
-  __ bind(&not_minus_half);
-  // Load xmm2 with 0.5.
-  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
-  __ addsd(xmm2, xmm3);
-  // xmm2 now has 0.5.
-  __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &call_runtime);
-  // Calculates square root.
-  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorps(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);  // Convert -0 to 0.
-  __ sqrtsd(xmm1, xmm1);
-
-  __ bind(&allocate_return);
-  __ AllocateHeapNumber(rcx, rax, &call_runtime);
-  __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
-  __ movq(rax, rcx);
-  __ ret(2 * kPointerSize);
-
-  __ bind(&call_runtime);
-  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+    __ bind(&done);
+    __ IncrementCounter(counters->math_pow(), 1);
+    __ ret(0);
+  }
 }
 
 
@@ -2043,6 +2362,7 @@
   const int kParameterMapHeaderSize =
       FixedArray::kHeaderSize + 2 * kPointerSize;
   Label no_parameter_map;
+  __ xor_(r8, r8);
   __ testq(rbx, rbx);
   __ j(zero, &no_parameter_map, Label::kNear);
   __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
@@ -2085,7 +2405,7 @@
     __ movq(FieldOperand(rax, i), rdx);
   }
 
-  // Setup the callee in-object property.
+  // Set up the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ movq(rdx, Operand(rsp, 3 * kPointerSize));
   __ movq(FieldOperand(rax, JSObject::kHeaderSize +
@@ -2100,7 +2420,7 @@
                        Heap::kArgumentsLengthIndex * kPointerSize),
           rcx);
 
-  // Setup the elements pointer in the allocated arguments object.
+  // Set up the elements pointer in the allocated arguments object.
   // If we allocated a parameter map, edi will point there, otherwise to the
   // backing store.
   __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
@@ -2136,16 +2456,13 @@
   Label parameters_loop, parameters_test;
 
   // Load tagged parameter count into r9.
-  __ movq(r9, Operand(rsp, 1 * kPointerSize));
+  __ Integer32ToSmi(r9, rbx);
   __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
-  __ addq(r8, Operand(rsp, 3 * kPointerSize));
+  __ addq(r8, Operand(rsp, 1 * kPointerSize));
   __ subq(r8, r9);
   __ Move(r11, factory->the_hole_value());
   __ movq(rdx, rdi);
-  __ SmiToInteger64(kScratchRegister, r9);
-  __ lea(rdi, Operand(rdi, kScratchRegister,
-                      times_pointer_size,
-                      kParameterMapHeaderSize));
+  __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
   // r9 = loop variable (tagged)
   // r8 = mapping index (tagged)
   // r11 = the hole value
@@ -2181,9 +2498,8 @@
   Label arguments_loop, arguments_test;
   __ movq(r8, rbx);
   __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-  // Untag rcx and r8 for the loop below.
+  // Untag rcx for the loop below.
   __ SmiToInteger64(rcx, rcx);
-  __ SmiToInteger64(r8, r8);
   __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
   __ subq(rdx, kScratchRegister);
   __ jmp(&arguments_test, Label::kNear);
@@ -2307,7 +2623,7 @@
   // Get the parameters pointer from the stack.
   __ movq(rdx, Operand(rsp, 2 * kPointerSize));
 
-  // Setup the elements pointer in the allocated arguments object and
+  // Set up the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
   __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
   __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
@@ -2346,10 +2662,6 @@
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
-  if (!FLAG_regexp_entry_native) {
-    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-    return;
-  }
 
   // Stack frame on entry.
   //  rsp[0]: return address
@@ -2455,26 +2767,40 @@
   __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   // First check for flat two byte string.
-  __ andb(rbx, Immediate(
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
+  __ andb(rbx, Immediate(kIsNotStringMask |
+                         kStringRepresentationMask |
+                         kStringEncodingMask |
+                         kShortExternalStringMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be a flat ascii string.
-  __ andb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+  // Any other flat string must be a flat ASCII string.  None of the following
+  // string type tests will succeed if subject is not a string or a short
+  // external string.
+  __ andb(rbx, Immediate(kIsNotStringMask |
+                         kStringRepresentationMask |
+                         kShortExternalStringMask));
   __ j(zero, &seq_ascii_string, Label::kNear);
 
+  // rbx: whether subject is a string and if yes, its string representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string its offset has to be taken into account.
-  Label cons_string, check_encoding;
+  Label cons_string, external_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmpq(rbx, Immediate(kExternalStringTag));
   __ j(less, &cons_string, Label::kNear);
-  __ j(equal, &runtime);
+  __ j(equal, &external_string);
+
+  // Catch non-string subject or short external string.
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+  __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
+  __ j(not_zero, &runtime);
 
   // String is sliced.
   __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
@@ -2498,16 +2824,16 @@
            Immediate(kStringRepresentationMask | kStringEncodingMask));
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be ascii.
+  // Any other flat string must be sequential ASCII or external.
   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
            Immediate(kStringRepresentationMask));
-  __ j(not_zero, &runtime);
+  __ j(not_zero, &external_string);
 
   __ bind(&seq_ascii_string);
-  // rdi: subject string (sequential ascii)
+  // rdi: subject string (sequential ASCII)
   // rax: RegExp data (FixedArray)
   __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
-  __ Set(rcx, 1);  // Type is ascii.
+  __ Set(rcx, 1);  // Type is ASCII.
   __ jmp(&check_code, Label::kNear);
 
   __ bind(&seq_two_byte_string);
@@ -2523,7 +2849,7 @@
   __ JumpIfSmi(r11, &runtime);
 
   // rdi: subject string
-  // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
+  // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
   // r11: code
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
@@ -2531,7 +2857,7 @@
 
   // rdi: subject string
   // rbx: previous index
-  // rcx: encoding of subject string (1 if ascii 0 if two_byte);
+  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
   // r11: code
   // All checks done. Now push arguments for native regexp code.
   Counters* counters = masm->isolate()->counters();
@@ -2588,7 +2914,7 @@
   // Keep track on aliasing between argX defined above and the registers used.
   // rdi: subject string
   // rbx: previous index
-  // rcx: encoding of subject string (1 if ascii 0 if two_byte);
+  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
   // r11: code
   // r14: slice offset
   // r15: original subject string
@@ -2670,12 +2996,18 @@
   // Store last subject and last input.
   __ movq(rax, Operand(rsp, kSubjectOffset));
   __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
-  __ movq(rcx, rbx);
-  __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
+  __ RecordWriteField(rbx,
+                      RegExpImpl::kLastSubjectOffset,
+                      rax,
+                      rdi,
+                      kDontSaveFPRegs);
   __ movq(rax, Operand(rsp, kSubjectOffset));
   __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
-  __ movq(rcx, rbx);
-  __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
+  __ RecordWriteField(rbx,
+                      RegExpImpl::kLastInputOffset,
+                      rax,
+                      rdi,
+                      kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   __ LoadAddress(rcx,
@@ -2727,7 +3059,28 @@
   __ Throw(rax);
 
   __ bind(&termination_exception);
-  __ ThrowUncatchable(TERMINATION, rax);
+  __ ThrowUncatchable(rax);
+
+  // External string.  Short external strings have already been ruled out.
+  // rdi: subject string (expected to be external)
+  // rbx: scratch
+  __ bind(&external_string);
+  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ testb(rbx, Immediate(kIsIndirectStringMask));
+    __ Assert(zero, "external string expected, but not found");
+  }
+  __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ testb(rbx, Immediate(kStringEncodingMask));
+  __ j(not_zero, &seq_ascii_string);
+  __ jmp(&seq_two_byte_string);
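
The subtraction above biases the resource-data pointer so that the addressing
used for sequential strings (base + header size - tag + index) lands on the
external characters.  In hypothetical plain C++ (names invented for the
sketch):

  #include <cstdint>

  uintptr_t FakeSequentialBase(uintptr_t resource_data,
                               int header_size, int heap_object_tag) {
    return resource_data - (header_size - heap_object_tag);
  }
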
 
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
@@ -3132,7 +3485,7 @@
   __ JumpIfNotBothSequentialAsciiStrings(
       rdx, rax, rcx, rbx, &check_unequal_objects);
 
-  // Inline comparison of ascii strings.
+  // Inline comparison of ASCII strings.
   if (cc_ == equal) {
     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      rdx,
@@ -3231,7 +3584,52 @@
 }
 
 
+void InterruptStub::Generate(MacroAssembler* masm) {
+  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+  // Cache the called function in a global property cell.  Cache states
+  // are uninitialized, monomorphic (indicated by a JSFunction), and
+  // megamorphic.
+  // rbx : cache cell for call target
+  // rdi : the function to call
+  Isolate* isolate = masm->isolate();
+  Label initialize, done;
+
+  // Load the cache state into rcx.
+  __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
+
+  // A monomorphic cache hit or an already megamorphic state: invoke the
+  // function without changing the state.
+  __ cmpq(rcx, rdi);
+  __ j(equal, &done, Label::kNear);
+  __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
+  __ j(equal, &done, Label::kNear);
+
+  // A monomorphic miss (i.e., the cache is not uninitialized) goes
+  // megamorphic.
+  __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
+  __ j(equal, &initialize, Label::kNear);
+  // MegamorphicSentinel is an immortal immovable object (undefined) so no
+  // write-barrier is needed.
+  __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+          TypeFeedbackCells::MegamorphicSentinel(isolate));
+  __ jmp(&done, Label::kNear);
+
+  // An uninitialized cache is patched with the function.
+  __ bind(&initialize);
+  __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
+  // No need for a write barrier here - cells are rescanned.
+
+  __ bind(&done);
+}
+
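
The transitions implemented above form a three-state lattice that only moves
forward; sketched in plain C++ with the sentinel details elided (not V8 code):

  enum CacheState { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };

  // same_target: the cached JSFunction equals the one being called now.
  CacheState NextState(CacheState state, bool same_target) {
    switch (state) {
      case UNINITIALIZED: return MONOMORPHIC;  // patch cell with the function
      case MONOMORPHIC:   return same_target ? MONOMORPHIC : MEGAMORPHIC;
      case MEGAMORPHIC:   return MEGAMORPHIC;  // terminal; never downgrades
    }
    return MEGAMORPHIC;  // unreachable
  }
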
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
+  // rdi : the function to call
+  // rbx : cache cell for call target
   Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
@@ -3252,10 +3650,6 @@
     __ bind(&call);
   }
 
-  // Get the function to call from the stack.
-  // +2 ~ receiver, return address
-  __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
-
   // Check that the function really is a JavaScript function.
   __ JumpIfSmi(rdi, &non_function);
   // Goto slow case if we do not have a function.
@@ -3292,7 +3686,7 @@
   __ push(rcx);
   __ Set(rax, argc_ + 1);
   __ Set(rbx, 0);
-  __ SetCallKind(rcx, CALL_AS_FUNCTION);
+  __ SetCallKind(rcx, CALL_AS_METHOD);
   __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
   {
     Handle<Code> adaptor =
@@ -3314,14 +3708,80 @@
 }
 
 
+void CallConstructStub::Generate(MacroAssembler* masm) {
+  // rax : number of arguments
+  // rbx : cache cell for call target
+  // rdi : constructor function
+  Label slow, non_function_call;
+
+  // Check that function is not a smi.
+  __ JumpIfSmi(rdi, &non_function_call);
+  // Check that function is a JSFunction.
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &slow);
+
+  if (RecordCallTarget()) {
+    GenerateRecordCallTarget(masm);
+  }
+
+  // Jump to the function-specific construct stub.
+  __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
+  __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
+  __ jmp(rbx);
+
+  // rdi: called object
+  // rax: number of arguments
+  // rcx: object map
+  Label do_call;
+  __ bind(&slow);
+  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+  __ j(not_equal, &non_function_call);
+  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
+  __ bind(&non_function_call);
+  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
+  // Set expected number of arguments to zero (not changing rax).
+  __ Set(rbx, 0);
+  __ SetCallKind(rcx, CALL_AS_METHOD);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
+
+
 bool CEntryStub::NeedsImmovableCode() {
   return false;
 }
 
 
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  // Throw exception in eax.
-  __ Throw(rax);
+bool CEntryStub::IsPregenerated() {
+#ifdef _WIN64
+  return result_size_ == 1;
+#else
+  return true;
+#endif
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+  CEntryStub::GenerateAheadOfTime();
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+  // It is important that the store buffer overflow stubs are generated first.
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+  CEntryStub stub(1, kDontSaveFPRegs);
+  stub.GetCode()->set_is_pregenerated(true);
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  save_doubles.GetCode()->set_is_pregenerated(true);
 }
 
 
@@ -3465,12 +3925,6 @@
 }
 
 
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
-                                          UncatchableExceptionType type) {
-  __ ThrowUncatchable(type, rax);
-}
-
-
 void CEntryStub::Generate(MacroAssembler* masm) {
   // rax: number of arguments including receiver
   // rbx: pointer to C function  (C callee-saved)
@@ -3534,22 +3988,34 @@
                true);
 
   __ bind(&throw_out_of_memory_exception);
-  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+  // Set external caught exception to false.
+  Isolate* isolate = masm->isolate();
+  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+                                    isolate);
+  __ Set(rax, static_cast<int64_t>(false));
+  __ Store(external_caught, rax);
+
+  // Set pending exception and rax to out of memory exception.
+  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+                                      isolate);
+  __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+  __ Store(pending_exception, rax);
+  // Fall through to the next label.
 
   __ bind(&throw_termination_exception);
-  GenerateThrowUncatchable(masm, TERMINATION);
+  __ ThrowUncatchable(rax);
 
   __ bind(&throw_normal_exception);
-  GenerateThrowTOS(masm);
+  __ Throw(rax);
 }
 
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  Label invoke, exit;
+  Label invoke, handler_entry, exit;
   Label not_outermost_js, not_outermost_js_2;
   {  // NOLINT. Scope block confuses linter.
     MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
-    // Setup frame.
+    // Set up frame.
     __ push(rbp);
     __ movq(rbp, rsp);
 
@@ -3605,20 +4071,23 @@
   __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
   __ bind(&cont);
 
-  // Call a faked try-block that does the invoke.
-  __ call(&invoke);
-
-  // Caught exception: Store result (exception) in the pending
-  // exception field in the JSEnv and return a failure sentinel.
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ jmp(&invoke);
+  __ bind(&handler_entry);
+  handler_offset_ = handler_entry.pos();
+  // Caught exception: Store result (exception) in the pending exception
+  // field in the JSEnv and return a failure sentinel.
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       isolate);
   __ Store(pending_exception, rax);
   __ movq(rax, Failure::Exception(), RelocInfo::NONE);
   __ jmp(&exit);
 
-  // Invoke: Link this frame into the handler chain.
+  // Invoke: Link this frame into the handler chain.  There's only one
+  // handler block in this code object, so its index is 0.
   __ bind(&invoke);
-  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
 
   // Clear any pending exceptions.
   __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
@@ -3627,11 +4096,11 @@
   // Fake a receiver (NULL).
   __ push(Immediate(0));  // receiver
 
-  // Invoke the function by calling through JS entry trampoline
-  // builtin and pop the faked function when we return. We load the address
-  // from an external reference instead of inlining the call target address
-  // directly in the code, because the builtin stubs may not have been
-  // generated yet at the time this code is generated.
+  // Invoke the function by calling through JS entry trampoline builtin and
+  // pop the faked function when we return. We load the address from an
+  // external reference instead of inlining the call target address directly
+  // in the code, because the builtin stubs may not have been generated yet
+  // at the time this code is generated.
   if (is_construct) {
     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                       isolate);
@@ -3740,7 +4209,7 @@
     __ bind(&miss);
   }
 
-  __ TryGetFunctionPrototype(rdx, rbx, &slow);
+  __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(rbx, &slow);
@@ -3757,14 +4226,17 @@
     __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
   } else {
+    // Get return address and delta to inlined map check.
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
-    __ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax);
     if (FLAG_debug_code) {
       __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
       __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
     }
+    __ movq(kScratchRegister,
+            Operand(kScratchRegister, kOffsetToMapCheckValue));
+    __ movq(Operand(kScratchRegister, 0), rax);
   }
 
   __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
@@ -3791,9 +4263,11 @@
     __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
     // Store offset of true in the root array at the inline check site.
-    ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
-        == 0xB0 - 0x100);
-    __ movl(rax, Immediate(0xB0));  // TrueValue is at -10 * kPointerSize.
+    int true_offset = 0x100 +
+        (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
+    // Assert it is a 1-byte signed value.
+    ASSERT(true_offset >= 0 && true_offset < 0x100);
+    __ movl(rax, Immediate(true_offset));
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
     __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
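
The derived constant replaces the previously hard-coded 0xB0: it is the byte
offset of a root-array entry relative to the biased root register, shifted
into [0, 0x100) so it can be patched with a single movb.  As a hedged sketch
(parameters stand in for kPointerSizeLog2 and kRootRegisterBias):

  int RootOffsetByte(int root_index, int pointer_size_log2, int bias) {
    // Caller asserts the result is in [0, 0x100).
    return 0x100 + (root_index << pointer_size_log2) - bias;
  }
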
@@ -3812,9 +4286,11 @@
     __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
     // Store offset of false in the root array at the inline check site.
-    ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
-        == 0xB8 - 0x100);
-    __ movl(rax, Immediate(0xB8));  // FalseValue is at -9 * kPointerSize.
+    int false_offset = 0x100 +
+        (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
+    // Assert it is a 1-byte signed value.
+    ASSERT(false_offset >= 0 && false_offset < 0x100);
+    __ movl(rax, Immediate(false_offset));
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
     __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
@@ -3904,85 +4380,25 @@
 
   // If the index is non-smi trigger the non-smi case.
   __ JumpIfNotSmi(index_, &index_not_smi_);
-
-  // Put smi-tagged index into scratch register.
-  __ movq(scratch_, index_);
   __ bind(&got_smi_index_);
 
   // Check for index out of range.
-  __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
+  __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
   __ j(above_equal, index_out_of_range_);
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(result_, Immediate(kStringRepresentationMask));
-  __ j(zero, &flat_string);
+  __ SmiToInteger32(index_, index_);
 
-  // Handle non-flat strings.
-  __ and_(result_, Immediate(kStringRepresentationMask));
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ cmpb(result_, Immediate(kExternalStringTag));
-  __ j(greater, &sliced_string);
-  __ j(equal, &call_runtime_);
+  StringCharLoadGenerator::Generate(
+      masm, object_, index_, result_, &call_runtime_);
 
-  // ConsString.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  Label assure_seq_string;
-  __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
-                 Heap::kEmptyStringRootIndex);
-  __ j(not_equal, &call_runtime_);
-  // Get the first of the two strings and load its instance type.
-  __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
-  __ jmp(&assure_seq_string, Label::kNear);
-
-  // SlicedString, unpack and add offset.
-  __ bind(&sliced_string);
-  __ addq(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
-  __ movq(object_, FieldOperand(object_, SlicedString::kParentOffset));
-
-  __ bind(&assure_seq_string);
-  __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
-  __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(result_, Immediate(kStringRepresentationMask));
-  __ j(not_zero, &call_runtime_);
-  __ jmp(&flat_string);
-
-  // Check for 1-byte or 2-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ testb(result_, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string);
-
-  // 2-byte string.
-  // Load the 2-byte character code into the result register.
-  __ SmiToInteger32(scratch_, scratch_);
-  __ movzxwl(result_, FieldOperand(object_,
-                                   scratch_, times_2,
-                                   SeqTwoByteString::kHeaderSize));
-  __ jmp(&got_char_code);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ SmiToInteger32(scratch_, scratch_);
-  __ movzxbl(result_, FieldOperand(object_,
-                                   scratch_, times_1,
-                                   SeqAsciiString::kHeaderSize));
-  __ bind(&got_char_code);
   __ Integer32ToSmi(result_, result_);
   __ bind(&exit_);
 }
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   Factory* factory = masm->isolate()->factory();
@@ -3995,7 +4411,6 @@
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   __ push(object_);
-  __ push(index_);
   __ push(index_);  // Consumed by runtime conversion function.
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4004,19 +4419,18 @@
     // NumberToSmi discards numbers that are not exact integers.
     __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
-  if (!scratch_.is(rax)) {
+  if (!index_.is(rax)) {
     // Save the conversion result before the pop instructions below
     // have a chance to overwrite it.
-    __ movq(scratch_, rax);
+    __ movq(index_, rax);
   }
-  __ pop(index_);
   __ pop(object_);
   // Reload the instance type.
   __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
   __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
-  __ JumpIfNotSmi(scratch_, index_out_of_range_);
+  __ JumpIfNotSmi(index_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ jmp(&got_smi_index_);
 
@@ -4026,6 +4440,7 @@
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
   __ push(object_);
+  __ Integer32ToSmi(index_, index_);
   __ push(index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
   if (!result_.is(rax)) {
@@ -4058,7 +4473,8 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -4085,14 +4501,15 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
 
 
 void StringAddStub::Generate(MacroAssembler* masm) {
-  Label string_add_runtime, call_builtin;
+  Label call_runtime, call_builtin;
   Builtins::JavaScript builtin_id = Builtins::ADD;
 
   // Load the two arguments.
@@ -4101,14 +4518,14 @@
 
   // Make sure that both arguments are strings if not known in advance.
   if (flags_ == NO_STRING_ADD_FLAGS) {
-    __ JumpIfSmi(rax, &string_add_runtime);
+    __ JumpIfSmi(rax, &call_runtime);
     __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
-    __ j(above_equal, &string_add_runtime);
+    __ j(above_equal, &call_runtime);
 
     // First argument is a string, test second.
-    __ JumpIfSmi(rdx, &string_add_runtime);
+    __ JumpIfSmi(rdx, &call_runtime);
     __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
-    __ j(above_equal, &string_add_runtime);
+    __ j(above_equal, &call_runtime);
   } else {
     // Here at least one of the arguments is definitely a string.
     // We convert the one that is not known to be a string.
@@ -4174,9 +4591,9 @@
   __ SmiCompare(rbx, Smi::FromInt(2));
   __ j(not_equal, &longer_than_two);
 
-  // Check that both strings are non-external ascii strings.
+  // Check that both strings are non-external ASCII strings.
   __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
-                                                  &string_add_runtime);
+                                                  &call_runtime);
 
   // Get the two characters forming the sub string.
   __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
@@ -4191,20 +4608,30 @@
   __ ret(2 * kPointerSize);
 
   __ bind(&make_two_character_string);
-  __ Set(rbx, 2);
-  __ jmp(&make_flat_ascii_string);
+  __ Set(rdi, 2);
+  __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
+  // rbx - first byte: first character
+  // rbx - second byte: *maybe* second character
+  // Make sure that the second byte of rbx contains the second character.
+  __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+  __ shll(rcx, Immediate(kBitsPerByte));
+  __ orl(rbx, rcx);
+  // Write both characters to the new string.
+  __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
+  __ IncrementCounter(counters->string_add_native(), 1);
+  __ ret(2 * kPointerSize);
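
The movzxbq/shll/orl sequence above packs both ASCII characters into one word
so a single movw writes them; equivalently in portable C++ (a sketch):

  #include <cstdint>

  uint16_t PackTwoAsciiChars(uint8_t first, uint8_t second) {
    // Little-endian layout matching the movw store: first char in low byte.
    return static_cast<uint16_t>(first | (second << 8));
  }
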
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
+  __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
   __ j(below, &string_add_flat_result);
   // Handle exceptionally long strings in the runtime system.
   STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
   __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
-  __ j(above, &string_add_runtime);
+  __ j(above, &call_runtime);
 
   // If result is not supposed to be flat, allocate a cons string object. If
-  // both strings are ascii the result is an ascii cons string.
+  // both strings are ASCII the result is an ASCII cons string.
   // rax: first string
   // rbx: length of resulting flat string
   // rdx: second string
@@ -4218,8 +4645,8 @@
   __ testl(rcx, Immediate(kStringEncodingMask));
   __ j(zero, &non_ascii);
   __ bind(&ascii_data);
-  // Allocate an acsii cons string.
-  __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
+  // Allocate an ASCII cons string.
+  __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
   __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
@@ -4232,7 +4659,7 @@
   __ ret(2 * kPointerSize);
   __ bind(&non_ascii);
   // At least one of the strings is two-byte. Check whether it happens
-  // to contain only ascii characters.
+  // to contain only ASCII characters.
   // rcx: first instance type AND second instance type.
   // r8: first instance type.
   // r9: second instance type.
@@ -4244,111 +4671,103 @@
   __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
   __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
-  __ AllocateTwoByteConsString(rcx, rdi, no_reg, &string_add_runtime);
+  __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
   __ jmp(&allocated);
 
-  // Handle creating a flat result. First check that both strings are not
-  // external strings.
+  // We cannot encounter sliced strings or cons strings here since:
+  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
+  // Handle creating a flat result from either external or sequential strings.
+  // Locate the first characters' locations.
   // rax: first string
   // rbx: length of resulting flat string as smi
   // rdx: second string
   // r8: instance type of first string
   // r9: instance type of second string
+  Label first_prepared, second_prepared;
+  Label first_is_sequential, second_is_sequential;
   __ bind(&string_add_flat_result);
-  __ SmiToInteger32(rbx, rbx);
-  __ movl(rcx, r8);
-  __ and_(rcx, Immediate(kStringRepresentationMask));
-  __ cmpl(rcx, Immediate(kExternalStringTag));
-  __ j(equal, &string_add_runtime);
-  __ movl(rcx, r9);
-  __ and_(rcx, Immediate(kStringRepresentationMask));
-  __ cmpl(rcx, Immediate(kExternalStringTag));
-  __ j(equal, &string_add_runtime);
-  // We cannot encounter sliced strings here since:
-  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
-  // Now check if both strings are ascii strings.
-  // rax: first string
-  // rbx: length of resulting flat string
-  // rdx: second string
-  // r8: instance type of first string
-  // r9: instance type of second string
+
+  __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
+  // r14: length of first string
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ testb(r8, Immediate(kStringRepresentationMask));
+  __ j(zero, &first_is_sequential, Label::kNear);
+  // Rule out short external string and load string resource.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ testb(r8, Immediate(kShortExternalStringMask));
+  __ j(not_zero, &call_runtime);
+  __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
+  __ jmp(&first_prepared, Label::kNear);
+  __ bind(&first_is_sequential);
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+  __ bind(&first_prepared);
+
+  // Check whether both strings have same encoding.
+  __ xorl(r8, r9);
+  __ testb(r8, Immediate(kStringEncodingMask));
+  __ j(not_zero, &call_runtime);
+
+  __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
+  // r15: length of second string
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ testb(r9, Immediate(kStringRepresentationMask));
+  __ j(zero, &second_is_sequential, Label::kNear);
+  // Rule out short external string and load string resource.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ testb(r9, Immediate(kShortExternalStringMask));
+  __ j(not_zero, &call_runtime);
+  __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
+  __ jmp(&second_prepared, Label::kNear);
+  __ bind(&second_is_sequential);
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+  __ bind(&second_prepared);
+
   Label non_ascii_string_add_flat_result;
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ testl(r8, Immediate(kStringEncodingMask));
+  // r9: instance type of second string
+  // First string and second string have the same encoding.
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ SmiToInteger32(rbx, rbx);
+  __ testb(r9, Immediate(kStringEncodingMask));
   __ j(zero, &non_ascii_string_add_flat_result);
-  __ testl(r9, Immediate(kStringEncodingMask));
-  __ j(zero, &string_add_runtime);
 
   __ bind(&make_flat_ascii_string);
-  // Both strings are ascii strings. As they are short they are both flat.
-  __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
-  // rcx: result string
-  __ movq(rbx, rcx);
+  // Both strings are ASCII strings. As they are short they are both flat.
+  __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
+  // rax: result string
   // Locate first character of result.
-  __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // Locate first character of first argument
-  __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
-  __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // rax: first char of first argument
-  // rbx: result string
-  // rcx: first character of result
-  // rdx: second string
-  // rdi: length of first argument
-  StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
-  // Locate first character of second argument.
-  __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
-  __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // rbx: result string
-  // rcx: next character of result
-  // rdx: first char of second argument
-  // rdi: length of second argument
-  StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
-  __ movq(rax, rbx);
+  __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+  // rcx: first char of first string
+  // rbx: first character of result
+  // r14: length of first string
+  StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
+  // rbx: next character of result
+  // rdx: first char of second string
+  // r15: length of second string
+  StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
   __ IncrementCounter(counters->string_add_native(), 1);
   __ ret(2 * kPointerSize);
 
-  // Handle creating a flat two byte result.
-  // rax: first string - known to be two byte
-  // rbx: length of resulting flat string
-  // rdx: second string
-  // r8: instance type of first string
-  // r9: instance type of first string
   __ bind(&non_ascii_string_add_flat_result);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ and_(r9, Immediate(kStringEncodingMask));
-  __ j(not_zero, &string_add_runtime);
-  // Both strings are two byte strings. As they are short they are both
-  // flat.
-  __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
-  // rcx: result string
-  __ movq(rbx, rcx);
+  // Both strings are two-byte strings.  As they are short they are both flat.
+  __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
+  // rax: result string
   // Locate first character of result.
-  __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // Locate first character of first argument.
-  __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
-  __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // rax: first char of first argument
-  // rbx: result string
-  // rcx: first character of result
-  // rdx: second argument
-  // rdi: length of first argument
-  StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
-  // Locate first character of second argument.
-  __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
-  __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // rbx: result string
-  // rcx: next character of result
-  // rdx: first char of second argument
-  // rdi: length of second argument
-  StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
-  __ movq(rax, rbx);
+  __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+  // rcx: first char of first string
+  // rbx: first character of result
+  // r14: length of first string
+  StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
+  // rbx: next character of result
+  // rdx: first char of second string
+  // r15: length of second string
+  StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
   __ IncrementCounter(counters->string_add_native(), 1);
   __ ret(2 * kPointerSize);
 
   // Just jump to runtime to add the two strings.
-  __ bind(&string_add_runtime);
+  __ bind(&call_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 
   if (call_builtin.is_linked()) {
@@ -4566,7 +4985,12 @@
 
     __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
     __ j(equal, not_found);
-    // Must be null (deleted entry).
+    // Must be the hole (deleted entry).
+    if (FLAG_debug_code) {
+      __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+      __ cmpq(kScratchRegister, candidate);
+      __ Assert(equal, "oddball in symbol table is not undefined or the hole");
+    }
     __ jmp(&next_probe[i]);
 
     __ bind(&is_string);
@@ -4580,7 +5004,7 @@
     // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly
     Register temp = kScratchRegister;
 
-    // Check that the candidate is a non-external ascii string.
+    // Check that the candidate is a non-external ASCII string.
     __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
     __ JumpIfInstanceTypeIsNotSequentialAscii(
         temp, temp, &next_probe[i]);
@@ -4695,8 +5119,12 @@
 
   __ SmiSub(rcx, rcx, rdx);  // Overflow doesn't happen.
   __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
-  Label return_rax;
-  __ j(equal, &return_rax);
+  Label not_original_string;
+  __ j(not_equal, &not_original_string, Label::kNear);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->sub_string_native(), 1);
+  __ ret(kArgumentsSize);
+  __ bind(&not_original_string);
   // Special handling of sub-strings of length 1 and 2. One character strings
   // are handled in the runtime system (looked up in the single character
   // cache). Two character strings are looked for in the symbol cache.
@@ -4715,71 +5143,77 @@
   // Get the two characters forming the sub string.
   __ SmiToInteger32(rdx, rdx);  // From index is no longer smi.
   __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
-  __ movzxbq(rcx,
+  __ movzxbq(rdi,
              FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
 
   // Try to lookup two character string in symbol table.
   Label make_two_character_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
+      masm, rbx, rdi, r9, r11, r14, r15, &make_two_character_string);
+  __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(3 * kPointerSize);
 
   __ bind(&make_two_character_string);
-  // Setup registers for allocating the two character string.
-  __ movq(rax, Operand(rsp, kStringOffset));
-  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+  // Set up registers for allocating the two character string.
+  __ movzxwq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
+  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
+  __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
+  __ IncrementCounter(counters->sub_string_native(), 1);
+  __ ret(3 * kPointerSize);
+
+  __ bind(&result_longer_than_two);
+  // rax: string
+  // rbx: instance type
+  // rcx: sub string length
+  // rdx: from index (smi)
+  // Deal with different string types: update the index if necessary
+  // and put the underlying string into rdi.
+  Label underlying_unpacked, sliced_string, seq_or_external_string;
+  // If the string is not indirect, it can only be sequential or external.
+  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+  STATIC_ASSERT(kIsIndirectStringMask != 0);
+  __ testb(rbx, Immediate(kIsIndirectStringMask));
+  __ j(zero, &seq_or_external_string, Label::kNear);
+
+  __ testb(rbx, Immediate(kSlicedNotConsMask));
+  __ j(not_zero, &sliced_string, Label::kNear);
+  // Cons string.  Check whether it is flat, then fetch first part.
+  // Flat cons strings have an empty second part.
+  __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
+                 Heap::kEmptyStringRootIndex);
+  __ j(not_equal, &runtime);
+  __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
+  // Update instance type.
+  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
-  __ Set(rcx, 2);
+  __ jmp(&underlying_unpacked, Label::kNear);
+
+  __ bind(&sliced_string);
+  // Sliced string.  Fetch parent and correct start index by offset.
+  __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
+  __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
+  // Update instance type.
+  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+  __ jmp(&underlying_unpacked, Label::kNear);
+
+  __ bind(&seq_or_external_string);
+  // Sequential or external string.  Just move string to the correct register.
+  __ movq(rdi, rax);
+
+  __ bind(&underlying_unpacked);
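
The three cases above reduce any cons or sliced indirection to an underlying
sequential-or-external string plus an adjusted start index; in hypothetical
plain C++ (illustrative types, not V8's):

  struct Str {
    bool is_cons, is_sliced;
    Str* first_or_parent;  // cons: first part; sliced: parent
    int offset;            // sliced strings only
  };

  Str* Unpack(Str* s, int* start) {
    if (s->is_cons) return s->first_or_parent;  // only flat cons handled inline
    if (s->is_sliced) { *start += s->offset; return s->first_or_parent; }
    return s;  // already sequential or external
  }
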
 
   if (FLAG_string_slices) {
     Label copy_routine;
+    // rdi: underlying subject string
+    // rbx: instance type of underlying subject string
+    // rdx: adjusted start index (smi)
+    // rcx: length
     // If coming from the make_two_character_string path, the string
     // is too short to be sliced anyways.
-    STATIC_ASSERT(2 < SlicedString::kMinLength);
-    __ jmp(&copy_routine);
-    __ bind(&result_longer_than_two);
-
-    // rax: string
-    // rbx: instance type
-    // rcx: sub string length
-    // rdx: from index (smi)
-    Label allocate_slice, sliced_string, seq_string;
     __ cmpq(rcx, Immediate(SlicedString::kMinLength));
     // Short slice.  Copy instead of slicing.
     __ j(less, &copy_routine);
-    STATIC_ASSERT(kSeqStringTag == 0);
-    __ testb(rbx, Immediate(kStringRepresentationMask));
-    __ j(zero, &seq_string, Label::kNear);
-    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-    STATIC_ASSERT(kIsIndirectStringMask != 0);
-    __ testb(rbx, Immediate(kIsIndirectStringMask));
-    // External string.  Jump to runtime.
-    __ j(zero, &runtime);
-
-    __ testb(rbx, Immediate(kSlicedNotConsMask));
-    __ j(not_zero, &sliced_string, Label::kNear);
-    // Cons string.  Check whether it is flat, then fetch first part.
-    __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
-                   Heap::kEmptyStringRootIndex);
-    __ j(not_equal, &runtime);
-    __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
-    __ jmp(&allocate_slice, Label::kNear);
-
-    __ bind(&sliced_string);
-    // Sliced string.  Fetch parent and correct start index by offset.
-    __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
-    __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
-    __ jmp(&allocate_slice, Label::kNear);
-
-    __ bind(&seq_string);
-    // Sequential string.  Just move string to the right register.
-    __ movq(rdi, rax);
-
-    __ bind(&allocate_slice);
-    // edi: underlying subject string
-    // ebx: instance type of original subject string
-    // edx: offset
-    // ecx: length
     // Allocate new sliced string.  At this point we do not reload the instance
     // type including the string encoding because we simply rely on the info
     // provided by the original string.  It does not matter if the original
@@ -4790,93 +5224,96 @@
     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
     __ testb(rbx, Immediate(kStringEncodingMask));
     __ j(zero, &two_byte_slice, Label::kNear);
-    __ AllocateAsciiSlicedString(rax, rbx, no_reg, &runtime);
+    __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
     __ jmp(&set_slice_header, Label::kNear);
     __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(rax, rbx, no_reg, &runtime);
+    __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
     __ bind(&set_slice_header);
-    __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
     __ Integer32ToSmi(rcx, rcx);
     __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
-    __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
     __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
            Immediate(String::kEmptyHashField));
-    __ jmp(&return_rax);
+    __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
+    __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
+    __ IncrementCounter(counters->sub_string_native(), 1);
+    __ ret(kArgumentsSize);
 
     __ bind(&copy_routine);
-  } else {
-    __ bind(&result_longer_than_two);
   }
 
-  // rax: string
-  // rbx: instance type
-  // rcx: result string length
-  // Check for flat ascii string
-  Label non_ascii_flat;
-  __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
+  // rdi: underlying subject string
+  // rbx: instance type of underlying subject string
+  // rdx: adjusted start index (smi)
+  // rcx: length
+  // The subject string can only be an external or sequential string of
+  // either encoding at this point.
+  Label two_byte_sequential, sequential_string;
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ testb(rbx, Immediate(kExternalStringTag));
+  __ j(zero, &sequential_string);
+
+  // Handle external string.
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ testb(rbx, Immediate(kShortExternalStringMask));
+  __ j(not_zero, &runtime);
+  __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  __ bind(&sequential_string);
+  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+  __ testb(rbx, Immediate(kStringEncodingMask));
+  __ j(zero, &two_byte_sequential);
 
   // Allocate the result.
-  __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
+  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
 
   // rax: result string
   // rcx: result string length
-  __ movq(rdx, rsi);  // esi used by following code.
-  // Locate first character of result.
-  __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
-  // Load string argument and locate character of sub string start.
-  __ movq(rsi, Operand(rsp, kStringOffset));
-  __ movq(rbx, Operand(rsp, kFromOffset));
-  {
-    SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
-    __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+  __ movq(r14, rsi);  // rsi used by following code.
+  {  // Locate character of sub string start.
+    SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
+    __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
                         SeqAsciiString::kHeaderSize - kHeapObjectTag));
   }
+  // Locate first character of result.
+  __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
 
   // rax: result string
   // rcx: result length
-  // rdx: original value of rsi
   // rdi: first character of result
   // rsi: character of sub string start
+  // r14: original value of rsi
   StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
-  __ movq(rsi, rdx);  // Restore rsi.
-  Counters* counters = masm->isolate()->counters();
+  __ movq(rsi, r14);  // Restore rsi.
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(kArgumentsSize);
 
-  __ bind(&non_ascii_flat);
-  // rax: string
-  // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
-  // rcx: result string length
-  // Check for sequential two byte string
-  __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
-  __ j(not_equal, &runtime);
-
+  __ bind(&two_byte_sequential);
   // Allocate the result.
-  __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
+  __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
 
   // rax: result string
   // rcx: result string length
-  __ movq(rdx, rsi);  // esi used by following code.
-  // Locate first character of result.
-  __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
-  // Load string argument and locate character of sub string start.
-  __ movq(rsi, Operand(rsp, kStringOffset));
-  __ movq(rbx, Operand(rsp, kFromOffset));
-  {
-    SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
-    __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+  __ movq(r14, rsi);  // rsi used by following code.
+  {  // Locate character of sub string start.
+    SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
+    __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
                         SeqAsciiString::kHeaderSize - kHeapObjectTag));
   }
+  // Locate first character of result.
+  __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
 
   // rax: result string
   // rcx: result length
-  // rdx: original value of rsi
   // rdi: first character of result
   // rsi: character of sub string start
+  // r14: original value of rsi
   StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
-  __ movq(rsi, rdx);  // Restore esi.
-
-  __ bind(&return_rax);
+  __ movq(rsi, r14);  // Restore rsi.
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(kArgumentsSize);
 
@@ -5017,7 +5454,7 @@
   __ movb(scratch, Operand(left, index, times_1, 0));
   __ cmpb(scratch, Operand(right, index, times_1, 0));
   __ j(not_equal, chars_not_equal, near_jump);
-  __ addq(index, Immediate(1));
+  __ incq(index);
   __ j(not_zero, &loop);
 }
 
@@ -5047,7 +5484,7 @@
   // Check that both are sequential ASCII strings.
   __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
 
-  // Inline comparison of ascii strings.
+  // Inline comparison of ASCII strings.
   __ IncrementCounter(counters->string_compare_native(), 1);
   // Drop arguments from the stack
   __ pop(rcx);
@@ -5090,15 +5527,15 @@
   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
 
   Label generic_stub;
-  Label unordered;
+  Label unordered, maybe_undefined1, maybe_undefined2;
   Label miss;
   Condition either_smi = masm->CheckEitherSmi(rax, rdx);
   __ j(either_smi, &generic_stub, Label::kNear);
 
   __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
-  __ j(not_equal, &miss, Label::kNear);
+  __ j(not_equal, &maybe_undefined1, Label::kNear);
   __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
-  __ j(not_equal, &miss, Label::kNear);
+  __ j(not_equal, &maybe_undefined2, Label::kNear);
 
   // Load left and right operand
   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
@@ -5119,11 +5556,25 @@
   __ ret(0);
 
   __ bind(&unordered);
-
   CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
   __ bind(&generic_stub);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 
+  __ bind(&maybe_undefined1);
+  if (Token::IsOrderedRelationalCompareOp(op_)) {
+    __ Cmp(rax, masm->isolate()->factory()->undefined_value());
+    __ j(not_equal, &miss);
+    __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+    __ j(not_equal, &maybe_undefined2, Label::kNear);
+    __ jmp(&unordered);
+  }
+
+  __ bind(&maybe_undefined2);
+  if (Token::IsOrderedRelationalCompareOp(op_)) {
+    __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
+    __ j(equal, &unordered);
+  }
+
   __ bind(&miss);
   GenerateMiss(masm);
 }
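
The maybe_undefined paths above encode a JavaScript rule: for ordered relational operators, undefined converts to NaN, so the comparison must take the unordered (false) path rather than miss to the generic stub. A self-contained illustration of that semantic, with plain C++ doubles standing in for V8 heap numbers:

    #include <cassert>
    #include <cmath>

    int main() {
      // ToNumber(undefined) is NaN, and every ordered comparison against
      // NaN is false -- in both directions -- which is exactly what the
      // &unordered path produces.
      double undefined_as_number = std::nan("");
      assert(!(undefined_as_number < 1.0));
      assert(!(undefined_as_number >= 1.0));
      assert(!(1.0 < undefined_as_number));
      return 0;
    }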
@@ -5174,9 +5625,10 @@
 
 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::STRINGS);
-  ASSERT(GetCondition() == equal);
   Label miss;
 
+  bool equality = Token::IsEqualityOp(op_);
+
   // Registers containing left and right operands respectively.
   Register left = rdx;
   Register right = rax;
@@ -5214,24 +5666,31 @@
 
   // Check that both strings are symbols. If they are, we're done
   // because we already know they are not identical.
-  Label do_compare;
-  STATIC_ASSERT(kSymbolTag != 0);
-  __ and_(tmp1, tmp2);
-  __ testb(tmp1, Immediate(kIsSymbolMask));
-  __ j(zero, &do_compare, Label::kNear);
-  // Make sure rax is non-zero. At this point input operands are
-  // guaranteed to be non-zero.
-  ASSERT(right.is(rax));
-  __ ret(0);
+  if (equality) {
+    Label do_compare;
+    STATIC_ASSERT(kSymbolTag != 0);
+    __ and_(tmp1, tmp2);
+    __ testb(tmp1, Immediate(kIsSymbolMask));
+    __ j(zero, &do_compare, Label::kNear);
+    // Make sure rax is non-zero. At this point input operands are
+    // guaranteed to be non-zero.
+    ASSERT(right.is(rax));
+    __ ret(0);
+    __ bind(&do_compare);
+  }
 
   // Check that both strings are sequential ASCII.
   Label runtime;
-  __ bind(&do_compare);
   __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
 
   // Compare flat ASCII strings. Returns when done.
-  StringCompareStub::GenerateFlatAsciiStringEquals(
-      masm, left, right, tmp1, tmp2);
+  if (equality) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(
+        masm, left, right, tmp1, tmp2);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(
+        masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
+  }
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
@@ -5239,7 +5698,11 @@
   __ push(left);
   __ push(right);
   __ push(tmp1);
-  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+  if (equality) {
+    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+  } else {
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+  }
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -5266,49 +5729,62 @@
 }
 
 
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+  Label miss;
+  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
+  __ j(either_smi, &miss, Label::kNear);
+
+  __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ Cmp(rcx, known_map_);
+  __ j(not_equal, &miss, Label::kNear);
+  __ Cmp(rbx, known_map_);
+  __ j(not_equal, &miss, Label::kNear);
+
+  __ subq(rax, rdx);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
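The subq(rax, rdx) / ret(0) ending of GenerateKnownObjects above exploits the equality IC's convention that a zero result means "equal": identity of two same-map objects is answered with a single subtraction instead of a compare-and-branch. The same idea in plain C++ (the pointer stand-ins are illustrative, not tagged V8 pointers):

    #include <cassert>
    #include <cstdint>

    int main() {
      int object_a = 0, object_b = 0;  // stand-ins for two heap objects
      intptr_t rax = reinterpret_cast<intptr_t>(&object_a);
      intptr_t rdx_same = rax;
      intptr_t rdx_other = reinterpret_cast<intptr_t>(&object_b);
      // Zero difference <=> same object; nonzero <=> not equal.
      assert(rax - rdx_same == 0);
      assert(rax - rdx_other != 0);
      return 0;
    }
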
 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  // Save the registers.
-  __ pop(rcx);
-  __ push(rdx);
-  __ push(rax);
-  __ push(rcx);
+  {
+    // Call the runtime system in a fresh internal frame.
+    ExternalReference miss =
+        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
 
-  // Call the runtime system in a fresh internal frame.
-  ExternalReference miss =
-      ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-  __ EnterInternalFrame();
-  __ push(rdx);
-  __ push(rax);
-  __ Push(Smi::FromInt(op_));
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(rdx);
+    __ push(rax);
+    __ push(rdx);
+    __ push(rax);
+    __ Push(Smi::FromInt(op_));
+    __ CallExternalReference(miss, 3);
 
-  // Compute the entry point of the rewritten stub.
-  __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
-
-  // Restore registers.
-  __ pop(rcx);
-  __ pop(rax);
-  __ pop(rdx);
-  __ push(rcx);
+    // Compute the entry point of the rewritten stub.
+    __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
+    __ pop(rax);
+    __ pop(rdx);
+  }
 
   // Do a tail call to the rewritten stub.
   __ jmp(rdi);
 }
 
 
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register properties,
-    String* name,
-    Register r0) {
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register properties,
+                                                        Handle<String> name,
+                                                        Register r0) {
   // If the names of the slots in the range from 1 to kProbes - 1 for the
   // hash value are not equal to the name and the kProbes-th slot is not used
   // (its name is the undefined value), the hash table is guaranteed not to
   // contain the property. This holds even if some slots represent deleted
   // properties
-  // (their names are the null value).
+  // (their names are the hole value).
   for (int i = 0; i < kInlinedProbes; i++) {
     // r0 points to properties hash.
     // Compute the masked index: (hash + i + i * i) & mask.
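
The masked-index formula is ordinary quadratic probing over a power-of-two table. A small stand-alone demo of the slot sequence it generates (capacity and hash below are made-up values; kInlinedProbes is 4 in this stub):

    #include <cstdio>

    int main() {
      const int kCapacity = 16;        // must be a power of two; demo value
      const int kMask = kCapacity - 1;
      const int kInlinedProbes = 4;    // matches the stub's inlined probes
      int hash = 0x5a3c;               // arbitrary example hash
      for (int i = 0; i < kInlinedProbes; i++) {
        // Same formula as the generated code: (hash + i + i * i) & mask.
        printf("probe %d -> slot %d\n", i, (hash + i + i * i) & kMask);
      }
      return 0;
    }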
@@ -5337,11 +5813,18 @@
     __ Cmp(entity_name, Handle<String>(name));
     __ j(equal, miss);
 
+    Label the_hole;
+    // Check for the hole and skip.
+    __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
+    __ j(equal, &the_hole, Label::kNear);
+
     // Check if the entry name is not a symbol.
     __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
     __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
              Immediate(kIsSymbolMask));
     __ j(zero, miss);
+
+    __ bind(&the_hole);
   }
 
   StringDictionaryLookupStub stub(properties,
@@ -5350,12 +5833,10 @@
                                   StringDictionaryLookupStub::NEGATIVE_LOOKUP);
   __ Push(Handle<Object>(name));
   __ push(Immediate(name->Hash()));
-  MaybeObject* result = masm->TryCallStub(&stub);
-  if (result->IsFailure()) return result;
+  __ CallStub(&stub);
   __ testq(r0, r0);
   __ j(not_zero, miss);
   __ jmp(done);
-  return result;
 }
 
 
@@ -5370,6 +5851,11 @@
                                                         Register name,
                                                         Register r0,
                                                         Register r1) {
+  ASSERT(!elements.is(r0));
+  ASSERT(!elements.is(r1));
+  ASSERT(!name.is(r0));
+  ASSERT(!name.is(r1));
+
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
 
@@ -5412,6 +5898,8 @@
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Stack frame on entry:
   //  esp[0 * kPointerSize]: return address.
   //  esp[1 * kPointerSize]: key's hash.
@@ -5497,6 +5985,369 @@
 }
 
 
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // Used in RegExpExecStub.
+  { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
+  // Used in CompileArrayPushCall.
+  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
+  // Used in CompileStoreGlobal.
+  { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
+  // Used in StoreStubCompiler::CompileStoreField and
+  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
+  // GenerateStoreField calls the stub with two different permutations of
+  // registers.  This is the second.
+  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
+  // StoreIC::GenerateNormal via GenerateDictionaryStore.
+  { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
+  // KeyedStoreIC::GenerateGeneric.
+  { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET },
+  // KeyedStoreStubCompiler::GenerateStoreFastElement.
+  { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET },
+  { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // and ElementsTransitionGenerator::GenerateDoubleToObject
+  { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET },
+  { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // and ElementsTransitionGenerator::GenerateDoubleToObject
+  { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateDoubleToObject
+  { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET },
+  // StoreArrayLiteralElementStub::Generate
+  { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET },
+  // Null termination.
+  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET }
+};
+
+#undef REG
+
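A note on the REG macro above: Register is a plain aggregate in the x64 assembler, so REG(Name) expands to a braced initializer over the register's code, which is what allows kAheadOfTime to be a statically initialized constant table with no constructors run before main(). A stand-alone sketch (the Register layout and code values below are stand-ins, not V8's definitions):

    #include <cstdio>

    // Stand-in for the Register POD from assembler-x64.h (illustrative).
    struct Register {
      int code_;
      bool is(Register other) const { return code_ == other.code_; }
    };

    // Stand-in register codes; the real values come from the assembler.
    enum { kRegister_rbx_Code = 3, kRegister_no_reg_Code = -1 };

    #define REG(Name) { kRegister_ ## Name ## _Code }

    int main() {
      // REG(rbx) expands to { kRegister_rbx_Code }, i.e. { 3 } here, so
      // the whole table is a constant aggregate.
      Register table[] = { REG(rbx), REG(no_reg) };
      printf("%d %d\n", table[0].code_, table[1].code_);
      return 0;
    }
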
+bool RecordWriteStub::IsPregenerated() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode()->set_is_pregenerated(true);
+  StoreBufferOverflowStub stub2(kSaveFPRegs);
+  stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+// Takes its input in 3 registers: address_, value_ and object_.  A pointer
+// to the value has just been written into the object, and now this stub
+// makes sure the GC is informed.  The word in the object where the value
+// has been written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two instructions are generated with labels so as to get the
+  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
+  // forth between a compare instructions (a nop in this position) and the
+  // real branch when we start and stop incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
+  __ jmp(&skip_to_incremental_compacting, Label::kFar);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  masm->set_byte_at(0, kTwoByteNopInstruction);
+  masm->set_byte_at(2, kFiveByteNopInstruction);
+}
+
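Together with the set_byte_at calls, the two label jumps above form a three-state patching protocol over the stub's first seven bytes: byte 0 is either a short jump (0xeb) or a cmp al, imm8 (0x3c) that swallows the jump offset and acts as a two-byte nop, and byte 2 is either a near jump (0xe9) or a cmp eax, imm32 (0x3d) acting as a five-byte nop. The constants and the GetMode/Patch pair live in RecordWriteStub in code-stubs-x64.h below; this stand-alone decoder mirrors that logic (buffer contents are demo values):

    #include <cassert>
    #include <cstdio>

    typedef unsigned char byte;

    const byte kTwoByteNop = 0x3c;   // cmp al, imm8: swallows the rel8 byte
    const byte kTwoByteJmp = 0xeb;   // jmp rel8
    const byte kFiveByteNop = 0x3d;  // cmp eax, imm32: swallows the rel32
    const byte kFiveByteJmp = 0xe9;  // jmp rel32

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    Mode DecodeMode(const byte* code) {
      if (code[0] == kTwoByteJmp) return INCREMENTAL;
      assert(code[0] == kTwoByteNop);
      if (code[2] == kFiveByteJmp) return INCREMENTAL_COMPACTION;
      assert(code[2] == kFiveByteNop);
      return STORE_BUFFER_ONLY;
    }

    int main() {
      // A fresh stub: both slots are nops; the jump offsets stay parked in
      // the cmp immediates, so patching only rewrites the opcode bytes.
      byte prologue[7] = { kTwoByteNop, 0x05, kFiveByteNop, 0, 0, 0, 0 };
      assert(DecodeMode(prologue) == STORE_BUFFER_ONLY);
      prologue[0] = kTwoByteJmp;  // what Patch(stub, INCREMENTAL) does
      assert(DecodeMode(prologue) == INCREMENTAL);
      printf("ok\n");
      return 0;
    }
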
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     not_zero,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ ret(0);
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+#ifdef _WIN64
+  Register arg3 = r8;
+  Register arg2 = rdx;
+  Register arg1 = rcx;
+#else
+  Register arg3 = rdx;
+  Register arg2 = rsi;
+  Register arg1 = rdi;
+#endif
+  Register address =
+      arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
+  ASSERT(!address.is(regs_.object()));
+  ASSERT(!address.is(arg1));
+  __ Move(address, regs_.address());
+  __ Move(arg1, regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    // TODO(gc): Can we just put the address in arg2 from the beginning?
+    __ Move(arg2, address);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ movq(arg2, Operand(address, 0));
+  }
+  __ LoadAddress(arg3, ExternalReference::isolate_address());
+  int argument_count = 3;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_object;
+
+  // Let's look at the color of the object:  If it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(),
+                 regs_.scratch0(),
+                 regs_.scratch1(),
+                 &on_black,
+                 Label::kNear);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     zero,
+                     &ensure_not_white,
+                     Label::kNear);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     zero,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need an extra register for this, so we push the object register
+  // temporarily.
+  __ push(regs_.object());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    &need_incremental_pop_object,
+                    Label::kNear);
+  __ pop(regs_.object());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&need_incremental_pop_object);
+  __ pop(regs_.object());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : element value to store
+  //  -- rbx    : array literal
+  //  -- rdi    : map of array literal
+  //  -- rcx    : element index as smi
+  //  -- rdx    : array literal index in function
+  //  -- rsp[0] : return address
+  // -----------------------------------
+
+  Label element_done;
+  Label double_elements;
+  Label smi_element;
+  Label slow_elements;
+  Label fast_elements;
+
+  __ CheckFastElements(rdi, &double_elements);
+
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+  __ JumpIfSmi(rax, &smi_element);
+  __ CheckFastSmiOnlyElements(rdi, &fast_elements);
+
+  // Storing into the array literal requires an elements transition. Call
+  // into the runtime.
+
+  __ bind(&slow_elements);
+  __ pop(rdi);  // Pop the return address; it is pushed back below so the
+                // tail call returns to the right place.
+  __ push(rbx);
+  __ push(rcx);
+  __ push(rax);
+  __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+  __ push(rdx);
+  __ push(rdi);  // Push the return address back on so that the tail call
+                 // returns to the right place.
+  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+  __ bind(&fast_elements);
+  __ SmiToInteger32(kScratchRegister, rcx);
+  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+  __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
+                           FixedArrayBase::kHeaderSize));
+  __ movq(Operand(rcx, 0), rax);
+  // Update the write barrier for the array store.
+  __ RecordWrite(rbx, rcx, rax,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ ret(0);
+
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
+  __ bind(&smi_element);
+  __ SmiToInteger32(kScratchRegister, rcx);
+  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+  __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
+                       FixedArrayBase::kHeaderSize), rax);
+  __ ret(0);
+
+  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+  __ bind(&double_elements);
+
+  __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
+  __ SmiToInteger32(r11, rcx);
+  __ StoreNumberToDoubleElements(rax,
+                                 r9,
+                                 r11,
+                                 xmm0,
+                                 &slow_elements);
+  __ ret(0);
+}
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 4058118..6a1a18f 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -48,6 +48,8 @@
                                    ArgumentType argument_type)
       : type_(type), argument_type_(argument_type) {}
   void Generate(MacroAssembler* masm);
+  static void GenerateOperation(MacroAssembler* masm,
+                                TranscendentalCache::Type type);
  private:
   TranscendentalCache::Type type_;
   ArgumentType argument_type_;
@@ -55,7 +57,32 @@
   Major MajorKey() { return TranscendentalCache; }
   int MinorKey() { return type_ | argument_type_; }
   Runtime::FunctionId RuntimeFunction();
-  void GenerateOperation(MacroAssembler* masm);
+};
+
+
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+      : save_doubles_(save_fp) { }
+
+  void Generate(MacroAssembler* masm);
+
+  virtual bool IsPregenerated() { return true; }
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  SaveFPRegsMode save_doubles_;
+
+  Major MajorKey() { return StoreBufferOverflow; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
+// Flags that indicate how to generate code for the GenericBinaryOpStub.
+enum GenericBinaryFlags {
+  NO_GENERIC_BINARY_FLAGS = 0,
+  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
 };
 
 
@@ -124,7 +151,7 @@
     return UnaryOpIC::ToState(operand_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_unary_op_type(operand_type_);
   }
 };
@@ -210,7 +237,7 @@
     return BinaryOpIC::ToState(operands_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_binary_op_type(operands_type_);
     code->set_binary_op_result_type(result_type_);
   }
@@ -397,13 +424,12 @@
 
   void Generate(MacroAssembler* masm);
 
-  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register properties,
-      String* name,
-      Register r0);
+  static void GenerateNegativeLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register properties,
+                                     Handle<String> name,
+                                     Register r0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -413,6 +439,8 @@
                                      Register r0,
                                      Register r1);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -425,7 +453,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
+  Major MajorKey() { return StringDictionaryLookup; }
 
   int MinorKey() {
     return DictionaryBits::encode(dictionary_.code()) |
@@ -446,6 +474,246 @@
 };
 
 
+class RecordWriteStub: public CodeStub {
+ public:
+  RecordWriteStub(Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : object_(object),
+        value_(value),
+        address_(address),
+        remembered_set_action_(remembered_set_action),
+        save_fp_regs_mode_(fp_mode),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+  }
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
+  static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
+
+  static const byte kFiveByteNopInstruction = 0x3d;  // Cmpl eax, #imm32.
+  static const byte kFiveByteJumpInstruction = 0xe9;  // Jmp #imm32.
+
+  static Mode GetMode(Code* stub) {
+    byte first_instruction = stub->instruction_start()[0];
+    byte second_instruction = stub->instruction_start()[2];
+
+    if (first_instruction == kTwoByteJumpInstruction) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(first_instruction == kTwoByteNopInstruction);
+
+    if (second_instruction == kFiveByteJumpInstruction) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(second_instruction == kFiveByteNopInstruction);
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteNopInstruction;
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteJumpInstruction;
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteJumpInstruction;
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 7);
+  }
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers, where the third
+  // is always rcx (needed for shift operations).  The input is two registers
+  // that must be preserved and one scratch register provided by the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_orig_(object),
+          address_orig_(address),
+          scratch0_orig_(scratch0),
+          object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
+      if (scratch0.is(rcx)) {
+        scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
+      }
+      if (object.is(rcx)) {
+        object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_);
+      }
+      if (address.is(rcx)) {
+        address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
+      }
+      ASSERT(!AreAliased(scratch0_, object_, address_, rcx));
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!address_orig_.is(object_));
+      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+      // We don't have to save scratch0_orig_ because it was given to us as
+      // a scratch register.  But if we had to switch to a different reg then
+      // we should save the new scratch0_.
+      if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+      if (!rcx.is(scratch0_orig_) &&
+          !rcx.is(object_orig_) &&
+          !rcx.is(address_orig_)) {
+        masm->push(rcx);
+      }
+      masm->push(scratch1_);
+      if (!address_.is(address_orig_)) {
+        masm->push(address_);
+        masm->movq(address_, address_orig_);
+      }
+      if (!object_.is(object_orig_)) {
+        masm->push(object_);
+        masm->movq(object_, object_orig_);
+      }
+    }
+
+    void Restore(MacroAssembler* masm) {
+      // These will have been preserved the entire time, so we just need to move
+      // them back.  Only in one case is the orig_ reg different from the plain
+      // one, since only one of them can alias with rcx.
+      if (!object_.is(object_orig_)) {
+        masm->movq(object_orig_, object_);
+        masm->pop(object_);
+      }
+      if (!address_.is(address_orig_)) {
+        masm->movq(address_orig_, address_);
+        masm->pop(address_);
+      }
+      masm->pop(scratch1_);
+      if (!rcx.is(scratch0_orig_) &&
+          !rcx.is(object_orig_) &&
+          !rcx.is(address_orig_)) {
+        masm->pop(rcx);
+      }
+      if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.
+
+    // The three scratch registers (incl. rcx) will be restored by other means
+    // so we don't bother pushing them here.  Rbx, rbp and r12-r15 are
+    // callee-saved and don't need to be preserved.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx);
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx);
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_orig_;
+    Register address_orig_;
+    Register scratch0_orig_;
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+    // Third scratch register is always rcx.
+
+    Register GetRegThatIsNotRcxOr(Register r1,
+                                  Register r2,
+                                  Register r3) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+        Register candidate = Register::FromAllocationIndex(i);
+        if (candidate.is(rcx)) continue;
+        if (candidate.is(r1)) continue;
+        if (candidate.is(r2)) continue;
+        if (candidate.is(r3)) continue;
+        return candidate;
+      }
+      UNREACHABLE();
+      return no_reg;
+    }
+    friend class RecordWriteStub;
+  };
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+        ValueBits::encode(value_.code()) |
+        AddressBits::encode(address_.code()) |
+        RememberedSetActionBits::encode(remembered_set_action_) |
+        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+  }
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  class ObjectBits: public BitField<int, 0, 4> {};
+  class ValueBits: public BitField<int, 4, 4> {};
+  class AddressBits: public BitField<int, 8, 4> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
+
+  Register object_;
+  Register value_;
+  Register address_;
+  RememberedSetAction remembered_set_action_;
+  SaveFPRegsMode save_fp_regs_mode_;
+  Label slow_;
+  RegisterAllocation regs_;
+};
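
MinorKey above packs the stub's whole configuration into one integer with BitField, which is what lets identical RecordWrite stubs be shared and the pregenerated ones be found again. A minimal model of the packing (the BitField stand-in approximates the template in V8's utils.h):

    #include <cassert>
    #include <cstdint>

    // Approximation of v8::internal::BitField<T, shift, size>.
    template <class T, int shift, int size>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed >> shift) & ((1u << size) - 1));
      }
    };

    // Same layout as RecordWriteStub::MinorKey: three 4-bit register codes.
    typedef BitField<int, 0, 4> ObjectBits;
    typedef BitField<int, 4, 4> ValueBits;
    typedef BitField<int, 8, 4> AddressBits;

    int main() {
      // Demo register codes (e.g. rax = 0, rbx = 3, rdi = 7 on x64).
      uint32_t key = ObjectBits::encode(3)
                   | ValueBits::encode(0)
                   | AddressBits::encode(7);
      assert(ObjectBits::decode(key) == 3);
      assert(ValueBits::decode(key) == 0);
      assert(AddressBits::decode(key) == 7);
      return 0;
    }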
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODE_STUBS_X64_H_
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 507bbd4..a8d39b2 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "codegen.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -38,17 +39,90 @@
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterInternalFrame();
+  masm->EnterFrame(StackFrame::INTERNAL);
+  ASSERT(!masm->has_frame());
+  masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveInternalFrame();
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  ASSERT(masm->has_frame());
+  masm->set_has_frame(false);
 }
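
The Before/AfterCall change reflects the RAII discipline used throughout this patch: a frame set up on scope entry is guaranteed to be torn down on scope exit, and has_frame lets the assembler assert whether calls are currently legal. A mock-based sketch of the FrameScope idea (MasmMock and its members are stand-ins, not V8 API):

    #include <cassert>
    #include <cstdio>

    // Stand-in for the small MacroAssembler surface the scope touches.
    struct MasmMock {
      bool has_frame;
      MasmMock() : has_frame(false) {}
      void EnterFrame() { printf("EnterFrame\n"); has_frame = true; }
      void LeaveFrame() { printf("LeaveFrame\n"); has_frame = false; }
    };

    // RAII sketch of FrameScope: construction builds the internal frame,
    // destruction tears it down, even on early exits from the scope.
    class FrameScopeSketch {
     public:
      explicit FrameScopeSketch(MasmMock* masm) : masm_(masm) {
        masm_->EnterFrame();
      }
      ~FrameScopeSketch() { masm_->LeaveFrame(); }
     private:
      MasmMock* masm_;
    };

    int main() {
      MasmMock masm;
      {
        FrameScopeSketch scope(&masm);
        assert(masm.has_frame);  // code emitted here may safely call out
      }
      assert(!masm.has_frame);   // frame is gone once the scope closes
      return 0;
    }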
 
 
 #define __ masm.
 
+
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
+  size_t actual_size;
+  // Allocate buffer in executable space.
+  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+                                                 &actual_size,
+                                                 true));
+  if (buffer == NULL) {
+    // Fall back to a library function if the buffer cannot be allocated.
+    switch (type) {
+      case TranscendentalCache::SIN: return &sin;
+      case TranscendentalCache::COS: return &cos;
+      case TranscendentalCache::TAN: return &tan;
+      case TranscendentalCache::LOG: return &log;
+      default: UNIMPLEMENTED();
+    }
+  }
+
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+  // xmm0: raw double input.
+  // Move double input into registers.
+  __ push(rbx);
+  __ push(rdi);
+  __ movq(rbx, xmm0);
+  __ push(rbx);
+  __ fld_d(Operand(rsp, 0));
+  TranscendentalCacheStub::GenerateOperation(&masm, type);
+  // The return value is expected to be in xmm0.
+  __ fstp_d(Operand(rsp, 0));
+  __ pop(rbx);
+  __ movq(xmm0, rbx);
+  __ pop(rdi);
+  __ pop(rbx);
+  __ Ret();
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  ASSERT(desc.reloc_size == 0);
+
+  CPU::FlushICache(buffer, actual_size);
+  OS::ProtectCode(buffer, actual_size);
+  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+  size_t actual_size;
+  // Allocate buffer in executable space.
+  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+                                                 &actual_size,
+                                                 true));
+  if (buffer == NULL) return &sqrt;
+
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+  // xmm0: raw double input.
+  // Move double input into registers.
+  __ sqrtsd(xmm0, xmm0);
+  __ Ret();
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  ASSERT(desc.reloc_size == 0);
+
+  CPU::FlushICache(buffer, actual_size);
+  OS::ProtectCode(buffer, actual_size);
+  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
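
Both factories above share one contract: return freshly emitted machine code when an executable buffer is available, and otherwise fall back to the C library, so callers can use the result unconditionally. A caller-side sketch with the JIT branch stubbed out (the mock below models only the fallback contract, not the real code emission):

    #include <cmath>
    #include <cstdio>

    typedef double (*UnaryMathFunction)(double);

    // Library fallback with an unambiguous signature.
    static double SqrtFallback(double x) { return std::sqrt(x); }

    // Models CreateSqrtFunction's contract only: the real factory returns
    // FUNCTION_CAST<UnaryMathFunction>(buffer) pointing at the freshly
    // emitted sqrtsd wrapper when buffer allocation succeeds.
    UnaryMathFunction CreateSqrtFunctionSketch() {
      void* buffer = 0;                // pretend OS::Allocate failed
      if (buffer == 0) return &SqrtFallback;
      return &SqrtFallback;            // (would be the JIT-compiled stub)
    }

    int main() {
      UnaryMathFunction fast_sqrt = CreateSqrtFunctionSketch();
      // Same answer either way; callers never branch on which path ran.
      printf("%.17g\n", fast_sqrt(2.0));
      return 0;
    }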
+
+
 #ifdef _WIN64
 typedef double (*ModuloFunction)(double, double);
 // Define custom fmod implementation.
@@ -139,6 +213,367 @@
 
 #endif
 
+#undef __
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rbx    : target map
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  // Set transitioned map.
+  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+  __ RecordWriteField(rdx,
+                      HeapObject::kMapOffset,
+                      rbx,
+                      rdi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rbx    : target map
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  // The fail label is only reached if a new backing store must be allocated
+  // and new space is exhausted.
+  Label allocated, new_backing_store, only_change_map, done;
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
+  __ j(equal, &only_change_map);
+
+  // Check backing store for COW-ness.  For COW arrays we have to
+  // allocate a new backing store.
+  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
+  __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
+                 Heap::kFixedCOWArrayMapRootIndex);
+  __ j(equal, &new_backing_store);
+  // Check if the backing store is in new-space. If not, we need to allocate
+  // a new one since the old one is in pointer-space.
+  // If in new space, we can reuse the old backing store because it is
+  // the same size.
+  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
+
+  __ movq(r14, r8);  // Destination array equals source array.
+
+  // r8 : source FixedArray
+  // r9 : elements array length
+  // r14: destination FixedDoubleArray
+  // Set backing store's map
+  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+
+  __ bind(&allocated);
+  // Set transitioned map.
+  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+  __ RecordWriteField(rdx,
+                      HeapObject::kMapOffset,
+                      rbx,
+                      rdi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Convert smis to doubles and holes to hole NaNs.  The Array's length
+  // remains unchanged.
+  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
+  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
+
+  Label loop, entry, convert_hole;
+  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+  // r15: the-hole NaN
+  __ jmp(&entry);
+
+  // Allocate new backing store.
+  __ bind(&new_backing_store);
+  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+  __ AllocateInNewSpace(rdi, r14, r11, r15, fail, TAG_OBJECT);
+  // Set backing store's map
+  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+  // Set receiver's backing store.
+  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
+  __ movq(r11, r14);
+  __ RecordWriteField(rdx,
+                      JSObject::kElementsOffset,
+                      r11,
+                      r15,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Set backing store's length.
+  __ Integer32ToSmi(r11, r9);
+  __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
+  __ jmp(&allocated);
+
+  __ bind(&only_change_map);
+  // Set transitioned map.
+  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+  __ RecordWriteField(rdx,
+                      HeapObject::kMapOffset,
+                      rbx,
+                      rdi,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&done);
+
+  // Conversion loop.
+  __ bind(&loop);
+  __ movq(rbx,
+          FieldOperand(r8, r9, times_8, FixedArray::kHeaderSize));
+  // r9 : current element's index
+  // rbx: current element (smi-tagged)
+  __ JumpIfNotSmi(rbx, &convert_hole);
+  __ SmiToInteger32(rbx, rbx);
+  __ cvtlsi2sd(xmm0, rbx);
+  __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
+           xmm0);
+  __ jmp(&entry);
+  __ bind(&convert_hole);
+
+  if (FLAG_debug_code) {
+    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+    __ Assert(equal, "object found in smi-only array");
+  }
+
+  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
+  __ bind(&entry);
+  __ decq(r9);
+  __ j(not_sign, &loop);
+
+  __ bind(&done);
+}
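
The conversion loop relies on a single bit pattern, kHoleNanInt64, doing double duty: it is a quiet NaN, so no genuine element value collides with it, and it survives raw 64-bit moves bit-for-bit, so GenerateDoubleToObject can later find holes with a plain integer compare. A sketch of both properties (the constant's exact value comes from v8globals.h; the pattern below is an assumed quiet NaN for the demo):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      // Assumed quiet-NaN bit pattern standing in for kHoleNanInt64.
      const uint64_t kHoleNan = 0x7FF7FFFFFFFFFFFFull;

      double as_double;
      memcpy(&as_double, &kHoleNan, sizeof(as_double));
      assert(as_double != as_double);  // NaN: no real element equals it

      // Moving the value as a raw 64-bit word (as the stub does with movq)
      // preserves the pattern exactly, so an integer compare finds it.
      uint64_t round_trip;
      memcpy(&round_trip, &as_double, sizeof(round_trip));
      assert(round_trip == kHoleNan);
      return 0;
    }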
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rbx    : target map
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label loop, entry, convert_hole, gc_required, only_change_map;
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
+  __ j(equal, &only_change_map);
+
+  __ push(rax);
+
+  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
+  // r8 : source FixedDoubleArray
+  // r9 : number of elements
+  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+  __ AllocateInNewSpace(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
+  // r11: destination FixedArray
+  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
+  __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
+  __ Integer32ToSmi(r14, r9);
+  __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
+
+  // Prepare for conversion loop.
+  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
+  // rsi: the-hole NaN
+  // rdi: pointer to the-hole
+  __ jmp(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ pop(rax);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ jmp(fail);
+
+  // Box doubles into heap numbers.
+  __ bind(&loop);
+  __ movq(r14, FieldOperand(r8,
+                            r9,
+                            times_pointer_size,
+                            FixedDoubleArray::kHeaderSize));
+  // r9 : current element's index
+  // r14: current element
+  __ cmpq(r14, rsi);
+  __ j(equal, &convert_hole);
+
+  // Non-hole double, copy value into a heap number.
+  __ AllocateHeapNumber(rax, r15, &gc_required);
+  // rax: new heap number
+  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+  __ movq(FieldOperand(r11,
+                       r9,
+                       times_pointer_size,
+                       FixedArray::kHeaderSize),
+          rax);
+  __ movq(r15, r9);
+  __ RecordWriteArray(r11,
+                      rax,
+                      r15,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&entry, Label::kNear);
+
+  // Replace the-hole NaN with the-hole pointer.
+  __ bind(&convert_hole);
+  __ movq(FieldOperand(r11,
+                       r9,
+                       times_pointer_size,
+                       FixedArray::kHeaderSize),
+          rdi);
+
+  __ bind(&entry);
+  __ decq(r9);
+  __ j(not_sign, &loop);
+
+  // Replace receiver's backing store with newly created and filled FixedArray.
+  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
+  __ RecordWriteField(rdx,
+                      JSObject::kElementsOffset,
+                      r11,
+                      r15,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(rax);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+  __ bind(&only_change_map);
+  // Set transitioned map.
+  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+  __ RecordWriteField(rdx,
+                      HeapObject::kMapOffset,
+                      rbx,
+                      rdi,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+                                       Register string,
+                                       Register index,
+                                       Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ testb(result, Immediate(kIsIndirectStringMask));
+  __ j(zero, &check_sequential, Label::kNear);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ testb(result, Immediate(kSlicedNotConsMask));
+  __ j(zero, &cons_string, Label::kNear);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
+  __ addq(index, result);
+  __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded, Label::kNear);
+
+  // Handle cons strings.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
+                 Heap::kEmptyStringRootIndex);
+  __ j(not_equal, call_runtime);
+  __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label seq_string;
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ testb(result, Immediate(kStringRepresentationMask));
+  __ j(zero, &seq_string, Label::kNear);
+
+  // Handle external strings.
+  Label ascii_external, done;
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ testb(result, Immediate(kIsIndirectStringMask));
+    __ Assert(zero, "external string expected, but not found");
+  }
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ testb(result, Immediate(kShortExternalStringTag));
+  __ j(not_zero, call_runtime);
+  // Check encoding.
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ testb(result, Immediate(kStringEncodingMask));
+  __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+  __ j(not_equal, &ascii_external, Label::kNear);
+  // Two-byte string.
+  __ movzxwl(result, Operand(result, index, times_2, 0));
+  __ jmp(&done, Label::kNear);
+  __ bind(&ascii_external);
+  // ASCII string.
+  __ movzxbl(result, Operand(result, index, times_1, 0));
+  __ jmp(&done, Label::kNear);
+
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii;
+  __ bind(&seq_string);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ testb(result, Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii, Label::kNear);
+
+  // Two-byte string.
+  // Load the two-byte character code into the result register.
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ movzxwl(result, FieldOperand(string,
+                                  index,
+                                  times_2,
+                                  SeqTwoByteString::kHeaderSize));
+  __ jmp(&done, Label::kNear);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii);
+  __ movzxbl(result, FieldOperand(string,
+                                  index,
+                                  times_1,
+                                  SeqAsciiString::kHeaderSize));
+  __ bind(&done);
+}
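
Condensed into ordinary C++, the dispatch this generator emits looks roughly like the sketch below. The string model is entirely made up for illustration; the real checks are bit tests on the instance type, and short external strings additionally bail out to the runtime:

    #include <cassert>
    #include <cstdint>
    #include <string>

    // Toy string model: sequential or external payload, ASCII or two-byte,
    // optionally wrapped in a slice or cons indirection.
    struct Str {
      enum Kind { SEQ, EXTERNAL, SLICED, CONS } kind;
      bool two_byte;
      std::string ascii;        // payload when !two_byte
      std::u16string wide;      // payload when two_byte
      Str* parent; int offset;  // SLICED
      Str* first; Str* second;  // CONS
    };

    // Mirrors the generated control flow: unwrap at most one indirection,
    // then read a character from the sequential/external payload.
    uint16_t CharAt(Str* s, int index, bool* call_runtime) {
      *call_runtime = false;
      if (s->kind == Str::SLICED) {
        index += s->offset;      // adjust index, then load from the parent
        s = s->parent;
      } else if (s->kind == Str::CONS) {
        if (!s->second->ascii.empty() || !s->second->wide.empty()) {
          *call_runtime = true;  // non-flat cons string: flatten in runtime
          return 0;
        }
        s = s->first;
      }
      // SEQ and EXTERNAL differ only in where the bytes live; the stub
      // hides that by adjusting the data pointer before the indexed load.
      return s->two_byte ? s->wide[index]
                         : static_cast<uint8_t>(s->ascii[index]);
    }

    int main() {
      Str seq = { Str::SEQ, false, "hello", u"", 0, 0, 0, 0 };
      Str slice = { Str::SLICED, false, "", u"", &seq, 1, 0, 0 };
      bool rt;
      assert(CharAt(&slice, 0, &rt) == 'e' && !rt);
      return 0;
    }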
 
 #undef __
 
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index a0648ce..2e80751 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -69,6 +69,21 @@
 };
 
 
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODEGEN_X64_H_
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index ae5045f..80e22c6 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,7 +27,7 @@
 
 // CPU specific code for x64 independent of OS goes here.
 
-#ifdef __GNUC__
+#if defined(__GNUC__) && !defined(__MINGW64__)
 #include "third_party/valgrind/valgrind.h"
 #endif
 
@@ -41,7 +41,7 @@
 namespace v8 {
 namespace internal {
 
-void CPU::Setup() {
+void CPU::SetUp() {
   CpuFeatures::Probe();
 }
 
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 423e6f2..eec83d9 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -100,65 +100,66 @@
                                           RegList non_object_regs,
                                           bool convert_call_to_jmp) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Store the registers containing live values on the expression stack to
-  // make sure that these are correctly updated during GC. Non object values
-  // are stored as as two smis causing it to be untouched by GC.
-  ASSERT((object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((object_regs & non_object_regs) == 0);
-  for (int i = 0; i < kNumJSCallerSaved; i++) {
-    int r = JSCallerSavedCode(i);
-    Register reg = { r };
-    ASSERT(!reg.is(kScratchRegister));
-    if ((object_regs & (1 << r)) != 0) {
-      __ push(reg);
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as two smis, which the GC leaves untouched.
+    ASSERT((object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((object_regs & non_object_regs) == 0);
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      ASSERT(!reg.is(kScratchRegister));
+      if ((object_regs & (1 << r)) != 0) {
+        __ push(reg);
+      }
+      // Store the 64-bit value as two smis.
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ movq(kScratchRegister, reg);
+        __ Integer32ToSmi(reg, reg);
+        __ push(reg);
+        __ sar(kScratchRegister, Immediate(32));
+        __ Integer32ToSmi(kScratchRegister, kScratchRegister);
+        __ push(kScratchRegister);
+      }
     }
-    // Store the 64-bit value as two smis.
-    if ((non_object_regs & (1 << r)) != 0) {
-      __ movq(kScratchRegister, reg);
-      __ Integer32ToSmi(reg, reg);
-      __ push(reg);
-      __ sar(kScratchRegister, Immediate(32));
-      __ Integer32ToSmi(kScratchRegister, kScratchRegister);
-      __ push(kScratchRegister);
-    }
-  }
 
 #ifdef DEBUG
-  __ RecordComment("// Calling from debug break to runtime - come in - over");
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ Set(rax, 0);  // No arguments (argc == 0).
-  __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
+    __ Set(rax, 0);  // No arguments (argc == 0).
+    __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
 
-  CEntryStub ceb(1);
-  __ CallStub(&ceb);
+    CEntryStub ceb(1);
+    __ CallStub(&ceb);
 
-  // Restore the register values from the expression stack.
-  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
-    int r = JSCallerSavedCode(i);
-    Register reg = { r };
-    if (FLAG_debug_code) {
-      __ Set(reg, kDebugZapValue);
+    // Restore the register values from the expression stack.
+    for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if (FLAG_debug_code) {
+        __ Set(reg, kDebugZapValue);
+      }
+      if ((object_regs & (1 << r)) != 0) {
+        __ pop(reg);
+      }
+      // Reconstruct the 64-bit value from two smis.
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ pop(kScratchRegister);
+        __ SmiToInteger32(kScratchRegister, kScratchRegister);
+        __ shl(kScratchRegister, Immediate(32));
+        __ pop(reg);
+        __ SmiToInteger32(reg, reg);
+        __ or_(reg, kScratchRegister);
+      }
     }
-    if ((object_regs & (1 << r)) != 0) {
-      __ pop(reg);
-    }
-    // Reconstruct the 64-bit value from two smis.
-    if ((non_object_regs & (1 << r)) != 0) {
-      __ pop(kScratchRegister);
-      __ SmiToInteger32(kScratchRegister, kScratchRegister);
-      __ shl(kScratchRegister, Immediate(32));
-      __ pop(reg);
-      __ SmiToInteger32(reg, reg);
-      __ or_(reg, kScratchRegister);
-    }
+
+    // Get rid of the internal frame.
   }
 
-  // Get rid of the internal frame.
-  __ LeaveInternalFrame();
-
   // If this call did not replace a call but patched other code then there will
   // be an unwanted return address left on the stack. Here we get rid of that.
   if (convert_call_to_jmp) {
@@ -228,8 +229,36 @@
 }
 
 
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
   // Register state just before return from JS function (from codegen-x64.cc).
+  // ----------- S t a t e -------------
+  //  -- rax: return value
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, rax.bit(), 0, true);
+}
+
+
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+  // Register state for CallFunctionStub (from code-stubs-x64.cc).
+  // ----------- S t a t e -------------
+  //  -- rdi : function
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, rdi.bit(), 0, false);
+}
+
+
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+  // Register state for CallFunctionStub (from code-stubs-x64.cc).
+  // ----------- S t a t e -------------
+  //  -- rdi : function
+  //  -- rbx: cache cell for call target
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), 0, false);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+  // Register state for CallConstructStub (from code-stubs-x64.cc).
   // rax is the actual number of arguments not encoded as a smi, see comment
   // above IC call.
   // ----------- S t a t e -------------
@@ -240,21 +269,16 @@
 }
 
 
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
-  // Register state just before return from JS function (from codegen-x64.cc).
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+  // Register state for CallConstructStub (from code-stubs-x64.cc).
+  // rax is the actual number of arguments not encoded as a smi, see comment
+  // above IC call.
   // ----------- S t a t e -------------
-  //  -- rax: return value
+  //  -- rax: number of arguments
+  //  -- rbx: cache cell for call target
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, rax.bit(), 0, true);
-}
-
-
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
-  // Register state for stub CallFunction (from CallFunctionStub in ic-x64.cc).
-  // ----------- S t a t e -------------
-  //  No registers used on entry.
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, 0, 0, false);
+  // The number of arguments in rax is not smi encoded.
+  Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), rax.bit(), false);
 }
 
 
@@ -263,9 +287,7 @@
   Label check_codesize;
   __ bind(&check_codesize);
   __ RecordDebugBreakSlot();
-  for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
-    __ nop();
-  }
+  __ Nop(Assembler::kDebugBreakSlotLength);
   ASSERT_EQ(Assembler::kDebugBreakSlotLength,
             masm->SizeOfCodeGeneratedSince(&check_codesize));
 }
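
The FrameScope rewrite above keeps the register save/restore logic intact; the interesting part is the two-smi encoding it preserves. A standalone sketch of that trick, with illustrative helpers rather than V8's MacroAssembler ones: on x64 a smi keeps its 32-bit payload in the upper half of the word, so a raw 64-bit register value can be parked on the stack as two smis that the GC treats as integers, never as pointers.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static uint64_t Integer32ToSmi(uint32_t value) {
      return static_cast<uint64_t>(value) << 32;  // payload in the high 32 bits
    }

    static uint32_t SmiToInteger32(uint64_t smi) {
      return static_cast<uint32_t>(smi >> 32);
    }

    int main() {
      uint64_t raw = 0x1234567890abcdefULL;  // arbitrary non-pointer value
      // Split, mirroring the push/sar/push sequence in the generated code.
      uint64_t low_smi  = Integer32ToSmi(static_cast<uint32_t>(raw));
      uint64_t high_smi = Integer32ToSmi(static_cast<uint32_t>(raw >> 32));
      // Reconstruct, mirroring the pop/shl/or sequence.
      uint64_t rebuilt = (static_cast<uint64_t>(SmiToInteger32(high_smi)) << 32) |
                         SmiToInteger32(low_smi);
      assert(rebuilt == raw);
      printf("0x%016llx round-trips through two smis\n",
             static_cast<unsigned long long>(rebuilt));
      return 0;
    }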
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index f322312..2adf587 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -87,13 +87,19 @@
 #endif
   }
 
+  Isolate* isolate = code->GetIsolate();
 
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
-  DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+  DeoptimizerData* data = isolate->deoptimizer_data();
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;
 
+  // We might be in the middle of incremental marking with compaction.
+  // Tell the collector to treat this code object specially and ignore
+  // all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
 
@@ -105,7 +111,8 @@
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+                                        Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
@@ -131,14 +138,18 @@
   ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
          *(call_target_address - 2) == 0x07 &&  // offset
          *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x90;  // nop
-  *(call_target_address - 2) = 0x90;  // nop
+  *(call_target_address - 3) = 0x66;  // 2 byte nop part 1
+  *(call_target_address - 2) = 0x90;  // 2 byte nop part 2
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_target_address, replacement_code);
 }
 
 
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+                                         Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
@@ -146,13 +157,16 @@
          Assembler::target_address_at(call_target_address));
   // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
   // restore the conditional branch.
-  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
-         *(call_target_address - 2) == 0x90 &&  // nop
+  ASSERT(*(call_target_address - 3) == 0x66 &&  // 2 byte nop part 1
+         *(call_target_address - 2) == 0x90 &&  // 2 byte nop part 2
          *(call_target_address - 1) == 0xe8);   // call
   *(call_target_address - 3) = 0x73;  // jae
   *(call_target_address - 2) = 0x07;  // offset
   Assembler::set_target_address_at(call_target_address,
                                    check_code->entry());
+
+  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_target_address, check_code);
 }
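
A standalone sketch of the stack-check patching changed above: the three bytes before the call's 32-bit target hold either the original "jae +7" guard or, once patched, a nop. The diff switches from two one-byte nops to a single two-byte nop (0x66 0x90), which keeps the patched region a single instruction; the motivation is not stated in the diff itself.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static void PatchStackCheck(uint8_t* site) {
      assert(site[0] == 0x73 && site[1] == 0x07 && site[2] == 0xe8);
      site[0] = 0x66;  // 2 byte nop part 1
      site[1] = 0x90;  // 2 byte nop part 2
    }

    static void RevertStackCheck(uint8_t* site) {
      assert(site[0] == 0x66 && site[1] == 0x90 && site[2] == 0xe8);
      site[0] = 0x73;  // jae
      site[1] = 0x07;  // offset
    }

    int main() {
      uint8_t code[] = {0x73, 0x07, 0xe8, 0, 0, 0, 0};  // jae +7; call rel32
      PatchStackCheck(code);
      RevertStackCheck(code);
      printf("patch/revert round-trips: %02x %02x\n", code[0], code[1]);
      return 0;
    }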
 
 
@@ -192,12 +206,13 @@
   ASSERT(Translation::BEGIN == opcode);
   USE(opcode);
   int count = iterator.Next();
+  iterator.Skip(1);  // Drop JS frame count.
   ASSERT(count == 1);
   USE(count);
 
   opcode = static_cast<Translation::Opcode>(iterator.Next());
   USE(opcode);
-  ASSERT(Translation::FRAME == opcode);
+  ASSERT(Translation::JS_FRAME == opcode);
   unsigned node_id = iterator.Next();
   USE(node_id);
   ASSERT(node_id == ast_id);
@@ -233,9 +248,7 @@
   output_ = new FrameDescription*[1];
   output_[0] = new(output_frame_size) FrameDescription(
       output_frame_size, function_);
-#ifdef DEBUG
-  output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif
+  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
 
   // Clear the incoming parameters in the optimized frame to avoid
   // confusing the garbage collector.
@@ -300,7 +313,7 @@
     output_[0] = input_;
     output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
   } else {
-    // Setup the frame pointer and the context pointer.
+    // Set up the frame pointer and the context pointer.
     output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
     output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
 
@@ -324,13 +337,219 @@
 }
 
 
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
-                                 int frame_index) {
-  // Read the ast node id, function, and frame height for this output frame.
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(iterator->Next());
-  USE(opcode);
-  ASSERT(Translation::FRAME == opcode);
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+                                                 int frame_index) {
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
+  }
+
+  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+  // An arguments adaptor frame cannot be topmost or bottommost.
+  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address of the frame is computed from the previous
+  // frame's top and this frame's size.
+  intptr_t top_address;
+  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = height;
+  unsigned output_offset = output_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+
+  // Read caller's PC from the previous frame.
+  output_offset -= kPointerSize;
+  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+  output_frame->SetFrameSlot(output_offset, callers_pc);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; caller's pc\n",
+           top_address + output_offset, output_offset, callers_pc);
+  }
+
+  // Read caller's FP from the previous frame, and set this frame's FP.
+  output_offset -= kPointerSize;
+  intptr_t value = output_[frame_index - 1]->GetFp();
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  output_frame->SetFp(fp_value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // A marker value is used in place of the context.
+  output_offset -= kPointerSize;
+  intptr_t context = reinterpret_cast<intptr_t>(
+      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  output_frame->SetFrameSlot(output_offset, context);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; context (adaptor sentinel)\n",
+           top_address + output_offset, output_offset, context);
+  }
+
+  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(function);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Number of incoming arguments.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; argc (%d)\n",
+           top_address + output_offset, output_offset, value, height - 1);
+  }
+
+  ASSERT(0 == output_offset);
+
+  Builtins* builtins = isolate_->builtins();
+  Code* adaptor_trampoline =
+      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+  intptr_t pc_value = reinterpret_cast<intptr_t>(
+      adaptor_trampoline->instruction_start() +
+      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+  output_frame->SetPc(pc_value);
+}
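
A worked sketch of the adaptor frame assembled above, assuming x64's kPointerSize of 8 and ArgumentsAdaptorFrameConstants::kFrameSize of 5 * kPointerSize (the four-slot fixed frame plus the length slot, as declared later in this diff in frames-x64.h). The offset counts down from the frame size exactly as output_offset does:

    #include <cassert>
    #include <cstdio>

    int main() {
      const unsigned kPointerSize = 8;
      const unsigned height = 3;  // number of translated parameters
      const unsigned fixed_frame_size = 5 * kPointerSize;
      unsigned output_offset = height * kPointerSize + fixed_frame_size;
      for (unsigned i = 0; i < height; ++i) {
        output_offset -= kPointerSize;
        printf("[top + %2u] parameter %u\n", output_offset, i);
      }
      output_offset -= kPointerSize;
      printf("[top + %2u] caller's pc\n", output_offset);
      output_offset -= kPointerSize;
      printf("[top + %2u] caller's fp (this frame's fp points here)\n",
             output_offset);
      output_offset -= kPointerSize;
      printf("[top + %2u] ARGUMENTS_ADAPTOR sentinel (context slot)\n",
             output_offset);
      output_offset -= kPointerSize;
      printf("[top + %2u] function\n", output_offset);
      output_offset -= kPointerSize;
      printf("[top + %2u] argc, stored as height - 1\n", output_offset);
      assert(output_offset == 0);  // matches ASSERT(0 == output_offset) above
      return 0;
    }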
+
+
+void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
+                                              int frame_index) {
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating construct stub => height=%d\n", height_in_bytes);
+  }
+
+  unsigned fixed_frame_size = 6 * kPointerSize;
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+  output_frame->SetFrameType(StackFrame::CONSTRUCT);
+
+  // A construct stub frame cannot be topmost or bottommost.
+  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address of the frame is computed from the previous
+  // frame's top and this frame's size.
+  intptr_t top_address;
+  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = height;
+  unsigned output_offset = output_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+
+  // Read caller's PC from the previous frame.
+  output_offset -= kPointerSize;
+  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+  output_frame->SetFrameSlot(output_offset, callers_pc);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; caller's pc\n",
+           top_address + output_offset, output_offset, callers_pc);
+  }
+
+  // Read caller's FP from the previous frame, and set this frame's FP.
+  output_offset -= kPointerSize;
+  intptr_t value = output_[frame_index - 1]->GetFp();
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  output_frame->SetFp(fp_value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // The context can be read from the previous frame.
+  output_offset -= kPointerSize;
+  value = output_[frame_index - 1]->GetContext();
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // A marker value is used in place of the function.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; function (construct sentinel)\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Number of incoming arguments.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; argc (%d)\n",
+           top_address + output_offset, output_offset, value, height - 1);
+  }
+
+  // The newly allocated object was passed as receiver in the artificial
+  // constructor stub environment created by HEnvironment::CopyForInlining().
+  output_offset -= kPointerSize;
+  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; allocated receiver\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  ASSERT(0 == output_offset);
+
+  Builtins* builtins = isolate_->builtins();
+  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
+  intptr_t pc = reinterpret_cast<intptr_t>(
+      construct_stub->instruction_start() +
+      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
+  output_frame->SetPc(pc);
+}
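
A minimal sketch of the receiver duplication at the end of the construct stub frame above: the first translated value — the newly allocated object from the artificial environment — lands at the highest offset, and the final write copies it into the last remaining slot at offset zero. Slot indices and values here are stand-ins; the six fixed slots in between are elided.

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned kPointerSize = 8;
      const unsigned height = 2;  // receiver plus one argument
      const unsigned output_frame_size = height * kPointerSize + 6 * kPointerSize;
      const unsigned slot_count = output_frame_size / kPointerSize;
      uint64_t slots[8] = {0};
      assert(slot_count == 8);
      // Parameters fill the highest slots (7 and 6 here); the first translated
      // value -- the allocated receiver -- lands in slot 7.
      slots[slot_count - 1] = 0xdecafbad;
      // ...caller pc, caller fp, context, CONSTRUCT sentinel and argc fill
      // slots 5 down to 1...
      // The final write copies the receiver into slot 0, read back from the
      // frame slot at output_frame_size - kPointerSize:
      slots[0] = slots[(output_frame_size - kPointerSize) / kPointerSize];
      assert(slots[0] == 0xdecafbad);
      return 0;
    }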
+
+
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+                                   int frame_index) {
   int node_id = iterator->Next();
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
@@ -350,9 +569,7 @@
   // Allocate and store the output frame description.
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, function);
-#ifdef DEBUG
-  output_frame->SetKind(Code::FUNCTION);
-#endif
+  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
 
   bool is_bottommost = (0 == frame_index);
   bool is_topmost = (output_count_ - 1 == frame_index);
@@ -440,6 +657,7 @@
     value = reinterpret_cast<intptr_t>(function->context());
   }
   output_frame->SetFrameSlot(output_offset, value);
+  output_frame->SetContext(value);
   if (is_topmost) output_frame->SetRegister(rsi.code(), value);
   if (FLAG_trace_deopt) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
@@ -598,7 +816,10 @@
 
   Isolate* isolate = masm()->isolate();
 
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  }
   // Preserve deoptimizer object in register rax and get the input
   // frame descriptor pointer.
   __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
@@ -644,8 +865,11 @@
   __ PrepareCallCFunction(2);
   __ movq(arg1, rax);
   __ LoadAddress(arg2, ExternalReference::isolate_address());
-  __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 2);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate), 2);
+  }
   __ pop(rax);
 
   // Replace the current frame with the output frames.
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 1b8871f..adeda0b 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -34,6 +34,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "disasm.h"
+#include "lazy-instance.h"
 
 namespace disasm {
 
@@ -109,6 +110,7 @@
   { 0xC3, UNSET_OP_ORDER, "ret" },
   { 0xC9, UNSET_OP_ORDER, "leave" },
   { 0xF4, UNSET_OP_ORDER, "hlt" },
+  { 0xFC, UNSET_OP_ORDER, "cld" },
   { 0xCC, UNSET_OP_ORDER, "int3" },
   { 0x60, UNSET_OP_ORDER, "pushad" },
   { 0x61, UNSET_OP_ORDER, "popad" },
@@ -268,7 +270,8 @@
 }
 
 
-static InstructionTable instruction_table;
+static v8::internal::LazyInstance<InstructionTable>::type instruction_table =
+    LAZY_INSTANCE_INITIALIZER;
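
Wrapping the table in LazyInstance replaces a static object whose constructor would otherwise run during static initialization. The idea, sketched with a C++ function-local static rather than V8's LazyInstance machinery (the motivation is not stated in this diff, but avoiding static initializers is the usual one):

    #include <cstdio>

    struct InstructionTable {
      InstructionTable() { printf("table built on first use\n"); }
      int Get(int b) const { return b; }
    };

    static const InstructionTable& instruction_table() {
      static InstructionTable table;  // constructed on first call
      return table;
    }

    int main() {
      printf("before first use\n");
      return instruction_table().Get(0);
    }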
 
 
 static InstructionDesc cmov_instructions[16] = {
@@ -910,15 +913,19 @@
           switch (modrm_byte) {
             case 0xE0: mnem = "fchs"; break;
             case 0xE1: mnem = "fabs"; break;
+            case 0xE3: mnem = "fninit"; break;
             case 0xE4: mnem = "ftst"; break;
             case 0xE8: mnem = "fld1"; break;
             case 0xEB: mnem = "fldpi"; break;
             case 0xED: mnem = "fldln2"; break;
             case 0xEE: mnem = "fldz"; break;
+            case 0xF0: mnem = "f2xm1"; break;
             case 0xF1: mnem = "fyl2x"; break;
+            case 0xF2: mnem = "fptan"; break;
             case 0xF5: mnem = "fprem1"; break;
             case 0xF7: mnem = "fincstp"; break;
             case 0xF8: mnem = "fprem"; break;
+            case 0xFD: mnem = "fscale"; break;
             case 0xFE: mnem = "fsin"; break;
             case 0xFF: mnem = "fcos"; break;
             default: UnimplementedInstruction();
@@ -1034,7 +1041,18 @@
       }
     } else {
       get_modrm(*current, &mod, &regop, &rm);
-      if (opcode == 0x28) {
+      if (opcode == 0x1f) {
+        current++;
+        if (rm == 4) {  // SIB byte present.
+          current++;
+        }
+        if (mod == 1) {  // Byte displacement.
+          current += 1;
+        } else if (mod == 2) {  // 32-bit displacement.
+          current += 4;
+        }  // else no immediate displacement.
+        AppendToBuffer("nop");
+      } else if (opcode == 0x28) {
         AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
         current += PrintRightXMMOperand(current);
       } else if (opcode == 0x29) {
@@ -1178,7 +1196,7 @@
     int mod, regop, rm;
     get_modrm(*current, &mod, &regop, &rm);
     current++;
-    if (regop == 4) {  // SIB byte present.
+    if (rm == 4) {  // SIB byte present.
       current++;
     }
     if (mod == 1) {  // Byte displacement.
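
Both the 0f 1f multi-byte nop decoder added above and this hunk key the presence of a SIB byte on the r/m field of the ModRM byte; the old code tested the reg/opcode field, skipping a phantom byte in some encodings and missing a real one in others. A standalone check of the rule (simplified: a SIB byte also requires mod != 3, which the disassembler handles in the surrounding code):

    #include <cassert>
    #include <cstdio>
    #include <cstdint>

    static void get_modrm(uint8_t data, int* mod, int* regop, int* rm) {
      *mod = (data >> 6) & 3;
      *regop = (data >> 3) & 7;
      *rm = data & 7;
    }

    int main() {
      int mod, regop, rm;
      get_modrm(0x0c, &mod, &regop, &rm);  // mod=0 regop=1 rm=4: SIB follows
      assert(rm == 4 && regop != 4);       // old test would miss the SIB byte
      get_modrm(0x20, &mod, &regop, &rm);  // mod=0 regop=4 rm=0: no SIB
      assert(regop == 4 && rm != 4);       // old test would skip a phantom byte
      printf("SIB presence is keyed on rm, not regop\n");
      return 0;
    }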
@@ -1322,7 +1340,7 @@
     data++;
   }
 
-  const InstructionDesc& idesc = instruction_table.Get(current);
+  const InstructionDesc& idesc = instruction_table.Get().Get(current);
   byte_size_operand_ = idesc.byte_size_operation;
   switch (idesc.type) {
     case ZERO_OPERANDS_INSTR:
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 7012c76..3e3d63d 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,32 +31,32 @@
 namespace v8 {
 namespace internal {
 
-static const int kNumRegs = 16;
-static const RegList kJSCallerSaved =
+const int kNumRegs = 16;
+const RegList kJSCallerSaved =
     1 << 0 |  // rax
     1 << 1 |  // rcx
     1 << 2 |  // rdx
     1 << 3 |  // rbx - used as a caller-saved register in JavaScript code
     1 << 7;   // rdi - callee function
 
-static const int kNumJSCallerSaved = 5;
+const int kNumJSCallerSaved = 5;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
 // Number of registers for which space is reserved in safepoints.
-static const int kNumSafepointRegisters = 16;
+const int kNumSafepointRegisters = 16;
 
 // ----------------------------------------------------
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset    = 0 * kPointerSize;
-  static const int kContextOffset = 1 * kPointerSize;
-  static const int kFPOffset      = 2 * kPointerSize;
-  static const int kStateOffset   = 3 * kPointerSize;
-  static const int kPCOffset      = 4 * kPointerSize;
+  static const int kNextOffset     = 0 * kPointerSize;
+  static const int kCodeOffset     = 1 * kPointerSize;
+  static const int kStateOffset    = 2 * kPointerSize;
+  static const int kContextOffset  = 3 * kPointerSize;
+  static const int kFPOffset       = 4 * kPointerSize;
 
-  static const int kSize = kPCOffset + kPointerSize;
+  static const int kSize = kFPOffset + kPointerSize;
 };
 
 
@@ -87,6 +87,9 @@
 
 class StandardFrameConstants : public AllStatic {
  public:
+  // Fixed part of the frame consists of return address, caller fp,
+  // context and function.
+  static const int kFixedFrameSize    =  4 * kPointerSize;
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
@@ -112,6 +115,8 @@
 class ArgumentsAdaptorFrameConstants : public AllStatic {
  public:
   static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+  static const int kFrameSize =
+      StandardFrameConstants::kFixedFrameSize + kPointerSize;
 };
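
Worked arithmetic for the two layouts changed in this header, assuming x64's kPointerSize of 8: the stack handler keeps its five-slot total but swaps the pc slot for a code slot and reorders the fields, and the adaptor frame size is the four-slot fixed frame plus the length slot.

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;  // x64
      // New handler layout: next, code, state, context, fp.
      const int kFPOffset = 4 * kPointerSize;
      const int kHandlerSize = kFPOffset + kPointerSize;
      // The old layout ended at kPCOffset = 4 * kPointerSize, so the total
      // stays 40 bytes even though the contents differ.
      const int kFixedFrameSize = 4 * kPointerSize;
      const int kAdaptorFrameSize = kFixedFrameSize + kPointerSize;
      printf("handler: %d bytes, adaptor fixed frame: %d bytes\n",
             kHandlerSize, kAdaptorFrameSize);
      return 0;
    }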
 
 
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 556523f..85c5e75 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,11 +44,6 @@
 #define __ ACCESS_MASM(masm_)
 
 
-static unsigned GetPropertyId(Property* property) {
-  return property->id();
-}
-
-
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -105,26 +100,54 @@
 };
 
 
+int FullCodeGenerator::self_optimization_header_size() {
+  return 20;
+}
+
+
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right, with the
 // return address on top of them.  The actual argument count matches the
 // formal parameter count expected by the function.
 //
 // The live registers are:
-//   o rdi: the JS function object being called (ie, ourselves)
+//   o rdi: the JS function object being called (i.e. ourselves)
 //   o rsi: our context
 //   o rbp: our caller's frame pointer
 //   o rsp: stack pointer (pointing to return address)
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-x64.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
-  ASSERT(info_ == NULL);
-  info_ = info;
-  scope_ = info->scope();
+void FullCodeGenerator::Generate() {
+  CompilationInfo* info = info_;
+  handler_table_ =
+      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
+  // We can optionally optimize based on counters rather than statistical
+  // sampling.
+  if (info->ShouldSelfOptimize()) {
+    if (FLAG_trace_opt_verbose) {
+      PrintF("[adding self-optimization header to %s]\n",
+             *info->function()->debug_name()->ToCString());
+    }
+    has_self_optimization_header_ = true;
+    MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
+        Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
+    JSGlobalPropertyCell* cell;
+    if (maybe_cell->To(&cell)) {
+      __ movq(rax, Handle<JSGlobalPropertyCell>(cell),
+              RelocInfo::EMBEDDED_OBJECT);
+      __ SmiAddConstant(FieldOperand(rax, JSGlobalPropertyCell::kValueOffset),
+                        Smi::FromInt(-1));
+      Handle<Code> compile_stub(
+          isolate()->builtins()->builtin(Builtins::kLazyRecompile));
+      __ j(zero, compile_stub, RelocInfo::CODE_TARGET);
+      ASSERT(masm_->pc_offset() == self_optimization_header_size());
+    }
+  }
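
A standalone sketch of the self-optimization header emitted above: a heap cell seeded with Compiler::kCallsUntilPrimitiveOpt is decremented on every entry, and when it reaches zero control transfers to the lazy-recompile builtin. The seed value and the recompile hook below are illustrative stand-ins.

    #include <cstdio>

    static const int kCallsUntilPrimitiveOpt = 3;  // illustrative seed

    struct Cell { int value; };

    static void LazyRecompile() { printf("-> jump to lazy recompile stub\n"); }

    static void FunctionEntry(Cell* cell) {
      if (--cell->value == 0) {
        LazyRecompile();  // models the j(zero, compile_stub) branch
        return;
      }
      printf("execute full-codegen body (counter now %d)\n", cell->value);
    }

    int main() {
      Cell cell = {kCallsUntilPrimitiveOpt};
      for (int i = 0; i < 3; ++i) FunctionEntry(&cell);
      return 0;
    }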
+
 #ifdef DEBUG
   if (strlen(FLAG_stop_at) > 0 &&
       info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -136,7 +159,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). rcx is zero for method calls and non-zero for
   // function calls.
-  if (info->is_strict_mode() || info->is_native()) {
+  if (!info->is_classic_mode() || info->is_native()) {
     Label ok;
     __ testq(rcx, rcx);
     __ j(zero, &ok, Label::kNear);
@@ -147,6 +170,11 @@
     __ bind(&ok);
   }
 
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   __ push(rbp);  // Caller's frame pointer.
   __ movq(rbp, rsp);
   __ push(rsi);  // Callee's context.
@@ -195,11 +223,9 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ movq(Operand(rsi, context_offset), rax);
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have use a third register to avoid
-        // clobbering rsi.
-        __ movq(rcx, rsi);
-        __ RecordWrite(rcx, context_offset, rax, rbx);
+        // Update the write barrier.  This clobbers rax and rbx.
+        __ RecordWriteContextSlot(
+            rsi, context_offset, rax, rbx, kDontSaveFPRegs);
       }
     }
   }
@@ -226,9 +252,15 @@
     //   function, receiver address, parameter count.
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
-    ArgumentsAccessStub stub(
-        is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
-                         : ArgumentsAccessStub::NEW_NON_STRICT_SLOW);
+    ArgumentsAccessStub::Type type;
+    if (!is_classic_mode()) {
+      type = ArgumentsAccessStub::NEW_STRICT;
+    } else if (function()->has_duplicate_parameters()) {
+      type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+    } else {
+      type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+    }
+    ArgumentsAccessStub stub(type);
     __ CallStub(&stub);
 
     SetVar(arguments, rax, rbx, rdx);
@@ -250,8 +282,11 @@
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        int ignored = 0;
-        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+        VariableProxy* proxy = scope()->function();
+        ASSERT(proxy->var()->mode() == CONST ||
+               proxy->var()->mode() == CONST_HARMONY);
+        ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
+        EmitDeclaration(proxy, proxy->var()->mode(), NULL);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -287,7 +322,8 @@
 }
 
 
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+                                       Label* back_edge_target) {
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
   __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
@@ -377,7 +413,7 @@
 
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -399,7 +435,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -432,7 +468,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -491,7 +527,7 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -555,7 +591,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -638,15 +674,16 @@
   ASSERT(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ movq(location, src);
+
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
     int offset = Context::SlotOffset(var->index());
-    __ RecordWrite(scratch0, offset, src, scratch1);
+    __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
   }
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -657,13 +694,7 @@
 
   Label skip;
   if (should_normalize) __ jmp(&skip, Label::kNear);
-
-  ForwardBailoutStack* current = forward_bailout_stack_;
-  while (current != NULL) {
-    PrepareForBailout(current->expr(), state);
-    current = current->parent();
-  }
-
+  PrepareForBailout(expr, TOS_REG);
   if (should_normalize) {
     __ CompareRoot(rax, Heap::kTrueValueRootIndex);
     Split(equal, if_true, if_false, NULL);
@@ -673,16 +704,17 @@
 
 
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        Variable::Mode mode,
-                                        FunctionLiteral* function,
-                                        int* global_count) {
+                                        VariableMode mode,
+                                        FunctionLiteral* function) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++(*global_count);
+      ++global_count_;
       break;
 
     case Variable::PARAMETER:
@@ -691,7 +723,7 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ movq(StackOperand(variable), result_register());
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         Comment cmnt(masm_, "[ Declaration");
         __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
         __ movq(StackOperand(variable), kScratchRegister);
@@ -715,10 +747,16 @@
         VisitForAccumulatorValue(function);
         __ movq(ContextOperand(rsi, variable->index()), result_register());
         int offset = Context::SlotOffset(variable->index());
-        __ movq(rbx, rsi);
-        __ RecordWrite(rbx, offset, result_register(), rcx);
+        // We know that we have written a function, which is not a smi.
+        __ RecordWriteContextSlot(rsi,
+                                  offset,
+                                  result_register(),
+                                  rcx,
+                                  kDontSaveFPRegs,
+                                  EMIT_REMEMBERED_SET,
+                                  OMIT_SMI_CHECK);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         Comment cmnt(masm_, "[ Declaration");
         __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
         __ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
@@ -731,11 +769,13 @@
       Comment cmnt(masm_, "[ Declaration");
       __ push(rsi);
       __ Push(variable->name());
-      // Declaration nodes are always introduced in one of three modes.
-      ASSERT(mode == Variable::VAR ||
-             mode == Variable::CONST ||
-             mode == Variable::LET);
-      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      // Declaration nodes are always introduced in one of four modes.
+      ASSERT(mode == VAR ||
+             mode == CONST ||
+             mode == CONST_HARMONY ||
+             mode == LET);
+      PropertyAttributes attr =
+          (mode == CONST || mode == CONST_HARMONY) ? READ_ONLY : NONE;
       __ Push(Smi::FromInt(attr));
       // Push initial value, if any.
       // Note: For variables we must not push an initial value (such as
@@ -743,7 +783,7 @@
       // must not destroy the current value.
       if (function != NULL) {
         VisitForStackValue(function);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         __ PushRoot(Heap::kTheHoleValueRootIndex);
       } else {
         __ Push(Smi::FromInt(0));  // Indicates no initial value.
@@ -755,9 +795,6 @@
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
-
-
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(rsi);  // The context is the first argument.
@@ -871,6 +908,8 @@
   __ cmpq(rax, null_value);
   __ j(equal, &exit);
 
+  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
   // Convert the object to a JS object.
   Label convert, done_convert;
   __ JumpIfSmi(rax, &convert);
@@ -882,51 +921,17 @@
   __ bind(&done_convert);
   __ push(rax);
 
+  // Check for proxies.
+  Label call_runtime;
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
+  __ j(below_equal, &call_runtime);
+
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  Label next, call_runtime;
-  Register empty_fixed_array_value = r8;
-  __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
-  Register empty_descriptor_array_value = r9;
-  __ LoadRoot(empty_descriptor_array_value,
-              Heap::kEmptyDescriptorArrayRootIndex);
-  __ movq(rcx, rax);
-  __ bind(&next);
-
-  // Check that there are no elements.  Register rcx contains the
-  // current JS object we've reached through the prototype chain.
-  __ cmpq(empty_fixed_array_value,
-          FieldOperand(rcx, JSObject::kElementsOffset));
-  __ j(not_equal, &call_runtime);
-
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in rbx for the subsequent
-  // prototype load.
-  __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
-  __ JumpIfSmi(rdx, &call_runtime);
-
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors (rdx).  This is the case if the next enumeration
-  // index field does not contain a smi.
-  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
-  __ JumpIfSmi(rdx, &call_runtime);
-
-  // For all objects but the receiver, check that the cache is empty.
-  Label check_prototype;
-  __ cmpq(rcx, rax);
-  __ j(equal, &check_prototype, Label::kNear);
-  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ cmpq(rdx, empty_fixed_array_value);
-  __ j(not_equal, &call_runtime);
-
-  // Load the prototype from the map and loop if non-null.
-  __ bind(&check_prototype);
-  __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
-  __ cmpq(rcx, null_value);
-  __ j(not_equal, &next);
+  __ CheckEnumCache(null_value, &call_runtime);
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
@@ -953,7 +958,7 @@
   __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
   __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
-  // Setup the four remaining stack slots.
+  // Set up the four remaining stack slots.
   __ push(rax);  // Map.
   __ push(rdx);  // Enumeration cache.
   __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
@@ -962,14 +967,33 @@
   __ jmp(&loop);
 
   // We got a fixed array in register rax. Iterate through that.
+  Label non_proxy;
   __ bind(&fixed_array);
-  __ Push(Smi::FromInt(0));  // Map (0) - force slow check.
-  __ push(rax);
+
+  Handle<JSGlobalPropertyCell> cell =
+      isolate()->factory()->NewJSGlobalPropertyCell(
+          Handle<Object>(
+              Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+  RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+  __ LoadHeapObject(rbx, cell);
+  __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+          Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
+
+  __ Move(rbx, Smi::FromInt(1));  // Smi indicates slow check
+  __ movq(rcx, Operand(rsp, 0 * kPointerSize));  // Get enumerated object
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
+  __ j(above, &non_proxy);
+  __ Move(rbx, Smi::FromInt(0));  // Zero indicates proxy
+  __ bind(&non_proxy);
+  __ push(rbx);  // Smi
+  __ push(rax);  // Array
   __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
   __ push(rax);  // Fixed array length (as smi).
   __ Push(Smi::FromInt(0));  // Initial index.
 
   // Generate code for doing the condition check.
+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
   __ movq(rax, Operand(rsp, 0 * kPointerSize));  // Get the current index.
   __ cmpq(rax, Operand(rsp, 1 * kPointerSize));  // Compare to the array length.
@@ -983,17 +1007,22 @@
                             index.scale,
                             FixedArray::kHeaderSize));
 
-  // Get the expected map from the stack or a zero map in the
+  // Get the expected map from the stack or a smi in the
   // permanent slow case into register rdx.
   __ movq(rdx, Operand(rsp, 3 * kPointerSize));
 
   // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
+  // If not, we may have to filter the key.
   Label update_each;
   __ movq(rcx, Operand(rsp, 4 * kPointerSize));
   __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
   __ j(equal, &update_each, Label::kNear);
 
+  // For proxies, no filtering is done.
+  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+  __ Cmp(rdx, Smi::FromInt(0));
+  __ j(equal, &update_each, Label::kNear);
+
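
A sketch of the three-way check above: the frame's cached slot holds either the receiver's map (fast case), smi 1 (slow case: every key must be filtered), or smi 0 (proxy: keys are never filtered). The model below uses untagged small integers where the real code uses smis, so they can never collide with a map address.

    #include <cassert>
    #include <cstdint>

    static bool NeedsFilter(intptr_t cached_slot, intptr_t current_map) {
      if (cached_slot == current_map) return false;  // enum cache still valid
      if (cached_slot == 0) return false;            // proxy: no filtering
      return true;  // stale map or slow marker: runtime filters the key
    }

    int main() {
      int dummy;
      intptr_t map = reinterpret_cast<intptr_t>(&dummy);
      assert(!NeedsFilter(map, map));  // fast case: use the key directly
      assert(NeedsFilter(1, map));     // slow case marker: filter the key
      assert(!NeedsFilter(0, map));    // proxy marker: never filter
      return 0;
    }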
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
   // just skip it.
@@ -1010,7 +1039,7 @@
   __ movq(result_register(), rbx);
   // Perform the assignment as if via '='.
   { EffectContext context(this);
-    EmitAssignment(stmt->each(), stmt->AssignmentId());
+    EmitAssignment(stmt->each());
   }
 
   // Generate code for the body of the loop.
@@ -1021,7 +1050,7 @@
   __ bind(loop_statement.continue_label());
   __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
 
-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &loop);
   __ jmp(&loop);
 
   // Remove the pointers stored on the stack.
@@ -1029,6 +1058,7 @@
   __ addq(rsp, Immediate(5 * kPointerSize));
 
   // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1047,7 +1077,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(info->language_mode());
     __ Push(info);
     __ CallStub(&stub);
   } else {
@@ -1077,7 +1107,7 @@
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
                 Immediate(0));
@@ -1091,7 +1121,7 @@
     // If no outer scope calls eval, we do not need to check more
     // context extensions.  If we have reached an eval scope, we check
     // all extensions from this point.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
     s = s->outer_scope();
   }
 
@@ -1137,7 +1167,7 @@
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
                 Immediate(0));
@@ -1168,16 +1198,23 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+  if (var->mode() == DYNAMIC_GLOBAL) {
     EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ jmp(done);
-  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+  } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == Variable::CONST) {
+    if (local->mode() == CONST ||
+        local->mode() == CONST_HARMONY ||
+        local->mode() == LET) {
       __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
       __ j(not_equal, done);
-      __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+      if (local->mode() == CONST) {
+        __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+      } else {  // LET || CONST_HARMONY
+        __ Push(var->name());
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      }
     }
     __ jmp(done);
   }
@@ -1208,23 +1245,63 @@
     case Variable::LOCAL:
     case Variable::CONTEXT: {
       Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
-      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
-        context()->Plug(var);
-      } else {
-        // Let and const need a read barrier.
-        Label done;
-        GetVar(rax, var);
-        __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
-        __ j(not_equal, &done, Label::kNear);
-        if (var->mode() == Variable::LET) {
-          __ Push(var->name());
-          __ CallRuntime(Runtime::kThrowReferenceError, 1);
-        } else {  // Variable::CONST
-          __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP
+        // always holds.
+        ASSERT(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is in
+        // the source physically located after the initializer of the variable.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        bool skip_init_check;
+        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+          skip_init_check = false;
+        } else {
+          // Check that we always have valid source position.
+          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          skip_init_check = var->mode() != CONST &&
+              var->initializer_position() < proxy->position();
         }
-        __ bind(&done);
-        context()->Plug(rax);
+
+        if (!skip_init_check) {
+          // Let and const need a read barrier.
+          Label done;
+          GetVar(rax, var);
+          __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+          __ j(not_equal, &done, Label::kNear);
+          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            __ Push(var->name());
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+          } else {
+            // Uninitialized const bindings outside of harmony mode are unholed.
+            ASSERT(var->mode() == CONST);
+            __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+          }
+          __ bind(&done);
+          context()->Plug(rax);
+          break;
+        }
       }
+      context()->Plug(var);
       break;
     }
 
@@ -1300,12 +1377,22 @@
 }
 
 
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+  if (expression == NULL) {
+    __ PushRoot(Heap::kNullValueRootIndex);
+  } else {
+    VisitForStackValue(expression);
+  }
+}
+
+
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
+  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(expr->literal_index()));
-  __ Push(expr->constant_properties());
+  __ Push(constant_properties);
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1313,10 +1400,15 @@
       ? ObjectLiteral::kHasFunction
       : ObjectLiteral::kNoFlags;
   __ Push(Smi::FromInt(flags));
+  int properties_count = constant_properties->length() / 2;
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    __ CallStub(&stub);
   }
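
A sketch of the three-way dispatch above for object literals. The maximum cloned property count is a property of FastCloneShallowObjectStub; the limit and flag values used here are illustrative only.

    #include <cstdio>

    static const int kMaximumClonedProperties = 6;  // illustrative limit
    static const int kFastElements = 1;             // stand-in for the flag value

    static const char* ChooseCloner(int depth, int flags, int properties_count) {
      if (depth > 1) return "Runtime::kCreateObjectLiteral";
      if (flags != kFastElements ||
          properties_count > kMaximumClonedProperties) {
        return "Runtime::kCreateObjectLiteralShallow";
      }
      return "FastCloneShallowObjectStub";
    }

    int main() {
      printf("%s\n", ChooseCloner(2, kFastElements, 3));   // nested literal
      printf("%s\n", ChooseCloner(1, kFastElements, 30));  // too many properties
      printf("%s\n", ChooseCloner(1, kFastElements, 3));   // stub fast path
      return 0;
    }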
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1328,6 +1420,7 @@
   // marked expressions, no store code is emitted.
   expr->CalculateEmitStore();
 
+  AccessorTable accessor_table(isolate()->zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
     ObjectLiteral::Property* property = expr->properties()->at(i);
     if (property->IsCompileTimeValue()) continue;
@@ -1350,9 +1443,9 @@
             VisitForAccumulatorValue(value);
             __ Move(rcx, key->handle());
             __ movq(rdx, Operand(rsp, 0));
-            Handle<Code> ic = is_strict_mode()
-                ? isolate()->builtins()->StoreIC_Initialize_Strict()
-                : isolate()->builtins()->StoreIC_Initialize();
+            Handle<Code> ic = is_classic_mode()
+                ? isolate()->builtins()->StoreIC_Initialize()
+                : isolate()->builtins()->StoreIC_Initialize_Strict();
             __ call(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
@@ -1372,19 +1465,28 @@
           __ Drop(3);
         }
         break;
-      case ObjectLiteral::Property::SETTER:
       case ObjectLiteral::Property::GETTER:
-        __ push(Operand(rsp, 0));  // Duplicate receiver.
-        VisitForStackValue(key);
-        __ Push(property->kind() == ObjectLiteral::Property::SETTER ?
-                Smi::FromInt(1) :
-                Smi::FromInt(0));
-        VisitForStackValue(value);
-        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        accessor_table.lookup(key)->second->getter = value;
+        break;
+      case ObjectLiteral::Property::SETTER:
+        accessor_table.lookup(key)->second->setter = value;
         break;
     }
   }
 
+  // Emit code to define accessors, using only a single call to the runtime for
+  // each pair of corresponding getters and setters.
+  for (AccessorTable::Iterator it = accessor_table.begin();
+       it != accessor_table.end();
+       ++it) {
+    __ push(Operand(rsp, 0));  // Duplicate receiver.
+    VisitForStackValue(it->first);
+    EmitAccessor(it->second->getter);
+    EmitAccessor(it->second->setter);
+    __ Push(Smi::FromInt(NONE));
+    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+  }
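
A standalone sketch of the accessor pairing above: literal properties are scanned once, getters and setters for the same key are merged into one table entry, and a single runtime call defines both halves. Stand-in types; V8 uses an AccessorTable keyed on literal keys and pushes a null for a missing half.

    #include <cstdio>
    #include <map>
    #include <string>

    struct Accessors {
      const char* getter = nullptr;
      const char* setter = nullptr;
    };

    int main() {
      std::map<std::string, Accessors> table;
      table["x"].getter = "get_x";  // ObjectLiteral::Property::GETTER
      table["x"].setter = "set_x";  // ObjectLiteral::Property::SETTER
      table["y"].setter = "set_y";
      for (const auto& it : table) {
        // One DefineOrRedefineAccessorProperty call per key.
        printf("define %s: getter=%s setter=%s\n", it.first.c_str(),
               it.second.getter ? it.second.getter : "null",
               it.second.setter ? it.second.setter : "null");
      }
      return 0;
    }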
+
   if (expr->has_function()) {
     ASSERT(result_saved);
     __ push(Operand(rsp, 0));
@@ -1404,24 +1506,42 @@
 
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
+  Handle<FixedArray> constant_elements = expr->constant_elements();
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(constant_elements->get(1)));
 
   __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(expr->literal_index()));
-  __ Push(expr->constant_elements());
-  if (expr->constant_elements()->map() ==
-      isolate()->heap()->fixed_cow_array_map()) {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    __ CallStub(&stub);
+  __ Push(constant_elements);
+  Heap* heap = isolate()->heap();
+  if (has_constant_fast_elements &&
+      constant_elements_values->map() == heap->fixed_cow_array_map()) {
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
     __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+        length);
+    __ CallStub(&stub);
   } else if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           FLAG_smi_only_arrays);
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
+    FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
+        ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+        : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
     __ CallStub(&stub);
   }
 
@@ -1444,14 +1564,28 @@
     }
     VisitForAccumulatorValue(subexpr);
 
-    // Store the subexpression value in the array's elements.
-    __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
-    __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
-    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-    __ movq(FieldOperand(rbx, offset), result_register());
-
-    // Update the write barrier for the array store.
-    __ RecordWrite(rbx, offset, result_register(), rcx);
+    if (constant_elements_kind == FAST_ELEMENTS) {
+      // Fast-case array literals with an ElementsKind of FAST_ELEMENTS cannot
+      // transition, so there is no need to call the runtime stub.
+      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+      __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
+      __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+      // Store the subexpression value in the array's elements.
+      __ movq(FieldOperand(rbx, offset), result_register());
+      // Update the write barrier for the array store.
+      __ RecordWriteField(rbx, offset, result_register(), rcx,
+                          kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          INLINE_SMI_CHECK);
+    } else {
+      // Store the subexpression value in the array's elements.
+      __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
+      __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
+      __ Move(rcx, Smi::FromInt(i));
+      __ Move(rdx, Smi::FromInt(expr->literal_index()));
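+      // The stub stores the value and also handles any elements-kind
+      // transition the store may require.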
+      StoreArrayLiteralElementStub stub;
+      __ CallStub(&stub);
+    }
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
@@ -1582,14 +1716,14 @@
   Literal* key = prop->key()->AsLiteral();
   __ Move(rcx, key->handle());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1666,7 +1800,7 @@
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
@@ -1698,9 +1832,9 @@
       __ movq(rdx, rax);
       __ pop(rax);  // Restore value.
       __ Move(rcx, prop->key()->AsLiteral()->handle());
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
       __ call(ic);
       break;
     }
@@ -1711,14 +1845,13 @@
       __ movq(rcx, rax);
       __ pop(rdx);
       __ pop(rax);  // Restore value.
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
       __ call(ic);
       break;
     }
   }
-  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
   context()->Plug(rax);
 }
 
@@ -1729,9 +1862,9 @@
     // Global var, const, or let.
     __ Move(rcx, var->name());
     __ movq(rdx, GlobalObjectOperand());
-    Handle<Code> ic = is_strict_mode()
-        ? isolate()->builtins()->StoreIC_Initialize_Strict()
-        : isolate()->builtins()->StoreIC_Initialize();
+    Handle<Code> ic = is_classic_mode()
+        ? isolate()->builtins()->StoreIC_Initialize()
+        : isolate()->builtins()->StoreIC_Initialize_Strict();
     __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
   } else if (op == Token::INIT_CONST) {
     // Const initializers need a write barrier.
@@ -1756,13 +1889,13 @@
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
 
-  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+  } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
     if (var->IsLookupSlot()) {
       __ push(rax);  // Value.
       __ push(rsi);  // Context.
       __ Push(var->name());
-      __ Push(Smi::FromInt(strict_mode_flag()));
+      __ Push(Smi::FromInt(language_mode()));
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     } else {
       ASSERT(var->IsStackAllocated() || var->IsContextSlot());
@@ -1777,12 +1910,14 @@
       __ movq(location, rax);
       if (var->IsContextSlot()) {
         __ movq(rdx, rax);
-        __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
+        __ RecordWriteContextSlot(
+            rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
       }
     }
 
-  } else if (var->mode() != Variable::CONST) {
-    // Assignment to var or initializing assignment to let.
+  } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+    // Assignment to var or initializing assignment to let/const
+    // in harmony mode.
     if (var->IsStackAllocated() || var->IsContextSlot()) {
       MemOperand location = VarOperand(var, rcx);
       if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1795,14 +1930,15 @@
       __ movq(location, rax);
       if (var->IsContextSlot()) {
         __ movq(rdx, rax);
-        __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
+        __ RecordWriteContextSlot(
+            rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
       }
     } else {
       ASSERT(var->IsLookupSlot());
       __ push(rax);  // Value.
       __ push(rsi);  // Context.
       __ Push(var->name());
-      __ Push(Smi::FromInt(strict_mode_flag()));
+      __ Push(Smi::FromInt(language_mode()));
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
   }
@@ -1834,9 +1970,9 @@
   } else {
     __ pop(rdx);
   }
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->StoreIC_Initialize_Strict()
-      : isolate()->builtins()->StoreIC_Initialize();
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->StoreIC_Initialize()
+      : isolate()->builtins()->StoreIC_Initialize_Strict();
   __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -1874,9 +2010,9 @@
   }
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize()
+      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
   __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -1981,6 +2117,7 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   CallFunctionStub stub(arg_count, flags);
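+  // The stub expects the function to call in rdi; it was pushed below the
+  // arguments.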
+  __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -1990,8 +2127,7 @@
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
-                                                      int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ push(Operand(rsp, arg_count * kPointerSize));
@@ -2002,17 +2138,14 @@
   // Push the receiver of the enclosing function and do runtime call.
   __ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
 
-  // Push the strict mode flag. In harmony mode every eval call
-  // is a strict mode eval call.
-  StrictModeFlag strict_mode = strict_mode_flag();
-  if (FLAG_harmony_block_scoping) {
-    strict_mode = kStrictMode;
-  }
-  __ Push(Smi::FromInt(strict_mode));
+  // Push the language mode.
+  __ Push(Smi::FromInt(language_mode()));
 
-  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
-                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
-                 : Runtime::kResolvePossiblyDirectEval, 4);
+  // Push the start position of the scope the call resides in.
+  __ Push(Smi::FromInt(scope()->start_position()));
+
+  // Do the runtime call.
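+  // The five arguments are the function, the first argument (or undefined),
+  // the enclosing receiver, the language mode and the scope start position.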
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
 }
 
 
@@ -2043,27 +2176,10 @@
         VisitForStackValue(args->at(i));
       }
 
-      // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly in
-      // generated code. If we succeed, there is no need to perform a
-      // context lookup in the runtime system.
-      Label done;
-      Variable* var = proxy->var();
-      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
-        Label slow;
-        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
-        // Push the function and resolve eval.
-        __ push(rax);
-        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
-        __ jmp(&done);
-        __ bind(&slow);
-      }
-
       // Push a copy of the function (found below the arguments) and resolve
       // eval.
       __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
-      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      __ bind(&done);
+      EmitResolvePossiblyDirectEval(arg_count);
 
       // The runtime call returns a pair of values in rax (function) and
       // rdx (receiver). Touch up the stack with the right values.
@@ -2073,6 +2189,7 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
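+    // As above, the stub expects the function to call in rdi.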
+    __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
@@ -2175,14 +2292,29 @@
   __ Set(rax, arg_count);
   __ movq(rdi, Operand(rsp, arg_count * kPointerSize));
 
-  Handle<Code> construct_builtin =
-      isolate()->builtins()->JSConstructCall();
-  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+  // Record call targets in unoptimized code, but not in the snapshot.
+  CallFunctionFlags flags;
+  if (!Serializer::enabled()) {
+    flags = RECORD_CALL_TARGET;
+    Handle<Object> uninitialized =
+        TypeFeedbackCells::UninitializedSentinel(isolate());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+    RecordTypeFeedbackCell(expr->id(), cell);
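+    // The stub reads the type feedback cell from rbx when recording call
+    // targets.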
+    __ Move(rbx, cell);
+  } else {
+    flags = NO_CALL_FUNCTION_FLAGS;
+  }
+
+  CallConstructStub stub(flags);
+  __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   context()->Plug(rax);
 }
 
 
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2194,7 +2326,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ JumpIfSmi(rax, if_true);
   __ jmp(if_false);
 
@@ -2202,7 +2334,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2214,7 +2347,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Condition non_negative_smi = masm()->CheckNonNegativeSmi(rax);
   Split(non_negative_smi, if_true, if_false, fall_through);
 
@@ -2222,7 +2355,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2246,14 +2380,15 @@
   __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ j(below, if_false);
   __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2267,14 +2402,15 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2290,7 +2426,7 @@
   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
   __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(not_zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2298,7 +2434,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2374,12 +2511,13 @@
          Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2393,14 +2531,15 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2414,14 +2553,15 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2435,7 +2575,7 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2443,8 +2583,8 @@
 
 
 
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2467,14 +2607,15 @@
   __ bind(&check_frame_marker);
   __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
          Smi::FromInt(StackFrame::CONSTRUCT));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2490,14 +2631,15 @@
 
   __ pop(rbx);
   __ cmpq(rax, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in rdx and the formal
@@ -2511,8 +2653,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label exit;
   // Get the number of formal parameters.
@@ -2534,7 +2676,8 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2545,20 +2688,24 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, one at each end of the
+  // type range for JS object types; this saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
   // Map is now in rax.
   __ j(below, &null);
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ j(equal, &function);
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-  __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
-  __ j(above_equal, &function);
+  __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ j(equal, &function);
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
 
-  // Check if the constructor in the map is a function.
+  // Check if the constructor in the map is a JS function.
   __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
   __ j(not_equal, &non_function_constructor);
@@ -2590,7 +2737,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2598,6 +2745,7 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
@@ -2610,8 +2758,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
@@ -2630,9 +2778,12 @@
   // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
   __ PrepareCallCFunction(1);
 #ifdef _WIN64
-  __ LoadAddress(rcx, ExternalReference::isolate_address());
+  __ movq(rcx, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
 #else
-  __ LoadAddress(rdi, ExternalReference::isolate_address());
+  __ movq(rdi, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+  __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
 #endif
   __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
@@ -2652,9 +2803,10 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2664,9 +2816,10 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2677,7 +2830,8 @@
 }
 
 
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -2695,18 +2849,68 @@
 }
 
 
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 2);
+  ASSERT_NE(NULL, args->at(1)->AsLiteral());
+  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label runtime, done;
+  Register object = rax;
+  Register result = rax;
+  Register scratch = rcx;
+
+#ifdef DEBUG
+  __ AbortIfSmi(object);
+  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
+  __ Assert(equal, "Trying to get date field from non-date.");
+#endif
+
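+  // Field 0 is the date value itself and can always be read directly. Other
+  // cached fields are only valid while the date cache stamp matches; if the
+  // stamp is stale, fall back to a C call to recompute the field.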
+  if (index->value() == 0) {
+    __ movq(result, FieldOperand(object, JSDate::kValueOffset));
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ movq(scratch, stamp);
+      __ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+      __ j(not_equal, &runtime, Label::kNear);
+      __ movq(result, FieldOperand(object, JSDate::kValueOffset +
+                                           kPointerSize * index->value()));
+      __ jmp(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2);
+#ifdef _WIN64
+    __ movq(rcx, object);
+    __ movq(rdx, index, RelocInfo::NONE);
+#else
+    __ movq(rdi, object);
+    __ movq(rsi, index, RelocInfo::NONE);
+#endif
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    __ bind(&done);
+  }
+  context()->Plug(rax);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  MathPowStub stub;
+  MathPowStub stub(MathPowStub::ON_STACK);
   __ CallStub(&stub);
   context()->Plug(rax);
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
@@ -2726,14 +2930,15 @@
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
   __ movq(rdx, rax);
-  __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
+  __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(rax);
 }
 
 
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
@@ -2745,7 +2950,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2763,7 +2969,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2771,7 +2978,6 @@
 
   Register object = rbx;
   Register index = rax;
-  Register scratch = rcx;
   Register result = rdx;
 
   __ pop(object);
@@ -2781,7 +2987,6 @@
   Label done;
   StringCharCodeAtGenerator generator(object,
                                       index,
-                                      scratch,
                                       result,
                                       &need_conversion,
                                       &need_conversion,
@@ -2810,7 +3015,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2818,8 +3024,7 @@
 
   Register object = rbx;
   Register index = rax;
-  Register scratch1 = rcx;
-  Register scratch2 = rdx;
+  Register scratch = rdx;
   Register result = rax;
 
   __ pop(object);
@@ -2829,8 +3034,7 @@
   Label done;
   StringCharAtGenerator generator(object,
                                   index,
-                                  scratch1,
-                                  scratch2,
+                                  scratch,
                                   result,
                                   &need_conversion,
                                   &need_conversion,
@@ -2859,7 +3063,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -2871,7 +3076,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -2883,10 +3089,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2894,10 +3101,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2905,10 +3113,23 @@
 }
 
 
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallStub(&stub);
+  context()->Plug(rax);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2916,8 +3137,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
   // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -2925,7 +3147,8 @@
 }
 
 
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -2934,18 +3157,31 @@
   }
   VisitForAccumulatorValue(args->last());  // Function.
 
+  // Check for proxy.
+  Label proxy, done;
+  __ CmpObjectType(rax, JS_FUNCTION_PROXY_TYPE, rbx);
+  __ j(equal, &proxy);
+
   // InvokeFunction requires the function in rdi. Move it in there.
   __ movq(rdi, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(rdi, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ jmp(&done);
+
+  __ bind(&proxy);
+  __ push(rax);
+  __ CallRuntime(Runtime::kCall, args->length());
+  __ bind(&done);
+
   context()->Plug(rax);
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2955,7 +3191,8 @@
 }
 
 
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3010,14 +3247,33 @@
   __ movq(Operand(index_2, 0), object);
   __ movq(Operand(index_1, 0), temp);
 
-  Label new_space;
-  __ InNewSpace(elements, temp, equal, &new_space);
+  Label no_remembered_set;
+  __ CheckPageFlag(elements,
+                   temp,
+                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                   not_zero,
+                   &no_remembered_set,
+                   Label::kNear);
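+  // Pages that are scanned wholesale on scavenge need no individual
+  // remembered-set entries.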
+  // Possible optimization: check that both values are smis by OR-ing them
+  // together and testing the result against the smi tag mask.
 
-  __ movq(object, elements);
-  __ RecordWriteHelper(object, index_1, temp);
-  __ RecordWriteHelper(elements, index_2, temp);
+  // We are swapping two objects in an array and the incremental marker never
+  // pauses in the middle of scanning a single object.  Therefore the
+  // incremental marker is not disturbed, so we don't need to call the
+  // RecordWrite stub that notifies the incremental marker.
+  __ RememberedSetHelper(elements,
+                         index_1,
+                         temp,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
+  __ RememberedSetHelper(elements,
+                         index_2,
+                         temp,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
 
-  __ bind(&new_space);
+  __ bind(&no_remembered_set);
+
   // We are done. Drop elements from the stack, and return undefined.
   __ addq(rsp, Immediate(3 * kPointerSize));
   __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3031,7 +3287,8 @@
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3087,7 +3344,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   Register right = rax;
@@ -3125,7 +3383,8 @@
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -3139,7 +3398,7 @@
 
   __ testl(FieldOperand(rax, String::kHashFieldOffset),
            Immediate(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ j(zero, if_true);
   __ jmp(if_false);
 
@@ -3147,7 +3406,8 @@
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3163,10 +3423,11 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Label bailout, return_result, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   // We will leave the separator on the stack until the end of the function.
   VisitForStackValue(args->at(1));
@@ -3352,7 +3613,7 @@
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Get the separator ascii character value.
+  // Get the separator ASCII character value.
   // Register "string" holds the separator.
   __ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ Set(index, 0);
@@ -3496,14 +3757,16 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ Push(Smi::FromInt(strict_mode_flag()));
+        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+            ? kNonStrictMode : kStrictMode;
+        __ Push(Smi::FromInt(strict_mode_flag));
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
         context()->Plug(rax);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
           __ push(GlobalObjectOperand());
           __ Push(var->name());
@@ -3545,17 +3808,41 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
       } else {
-        Label materialize_true, materialize_false;
-        Label* if_true = NULL;
-        Label* if_false = NULL;
-        Label* fall_through = NULL;
-        // Notice that the labels are swapped.
-        context()->PrepareTest(&materialize_true, &materialize_false,
-                               &if_false, &if_true, &fall_through);
-        if (context()->IsTest()) ForwardBailoutToChild(expr);
-        VisitForControl(expr->expression(), if_true, if_false, fall_through);
-        context()->Plug(if_false, if_true);  // Labels swapped.
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        if (context()->IsAccumulatorValue()) {
+          __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+        } else {
+          __ PushRoot(Heap::kTrueValueRootIndex);
+        }
+        __ jmp(&done, Label::kNear);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        if (context()->IsAccumulatorValue()) {
+          __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+        } else {
+          __ PushRoot(Heap::kFalseValueRootIndex);
+        }
+        __ bind(&done);
       }
       break;
     }
@@ -3760,9 +4047,9 @@
     case NAMED_PROPERTY: {
       __ Move(rcx, prop->key()->AsLiteral()->handle());
       __ pop(rdx);
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3777,9 +4064,9 @@
     case KEYED_PROPERTY: {
       __ pop(rcx);
       __ pop(rdx);
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3827,20 +4114,25 @@
     context()->Plug(rax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInCurrentContext(expr);
+    VisitInDuplicateContext(expr);
   }
 }
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check,
-                                                 Label* if_true,
-                                                 Label* if_false,
-                                                 Label* fall_through) {
+                                                 Expression* sub_expr,
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(expr);
+    VisitForTypeofValue(sub_expr);
   }
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(rax, if_true);
@@ -3875,9 +4167,11 @@
     Split(not_zero, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(rax, if_false);
-    STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx);
-    Split(above_equal, if_true, if_false, fall_through);
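+    // typeof returns "function" for both JS functions and function proxies.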
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
+    __ j(equal, if_true);
+    __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
+    Split(equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(rax, if_false);
     if (!FLAG_harmony_typeof) {
@@ -3895,18 +4189,7 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
-                                                    Label* if_true,
-                                                    Label* if_false,
-                                                    Label* fall_through) {
-  VisitForAccumulatorValue(expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  Split(equal, if_true, if_false, fall_through);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -3914,6 +4197,10 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
   Label materialize_true, materialize_false;
@@ -3923,20 +4210,13 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
-    context()->Plug(if_true, if_false);
-    return;
-  }
-
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(rax, Heap::kTrueValueRootIndex);
       Split(equal, if_true, if_false, fall_through);
       break;
@@ -3945,7 +4225,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       __ testq(rax, rax);
        // The stub returns 0 for true.
       Split(zero, if_true, if_false, fall_through);
@@ -3959,33 +4239,25 @@
         case Token::EQ_STRICT:
         case Token::EQ:
           cc = equal;
-          __ pop(rdx);
           break;
         case Token::LT:
           cc = less;
-          __ pop(rdx);
           break;
         case Token::GT:
-          // Reverse left and right sizes to obtain ECMA-262 conversion order.
-          cc = less;
-          __ movq(rdx, result_register());
-          __ pop(rax);
+          cc = greater;
          break;
         case Token::LTE:
-          // Reverse left and right sizes to obtain ECMA-262 conversion order.
-          cc = greater_equal;
-          __ movq(rdx, result_register());
-          __ pop(rax);
+          cc = less_equal;
           break;
         case Token::GTE:
           cc = greater_equal;
-          __ pop(rdx);
           break;
         case Token::IN:
         case Token::INSTANCEOF:
         default:
           UNREACHABLE();
       }
+      __ pop(rdx);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4005,7 +4277,7 @@
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
 
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       __ testq(rax, rax);
       Split(cc, if_true, if_false, fall_through);
     }
@@ -4017,8 +4289,9 @@
 }
 
 
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
-  Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4026,14 +4299,20 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(expr->expression());
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ CompareRoot(rax, Heap::kNullValueRootIndex);
-  if (expr->is_strict()) {
+  VisitForAccumulatorValue(sub_expr);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Heap::RootListIndex nil_value = nil == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ CompareRoot(rax, nil_value);
+  if (expr->op() == Token::EQ_STRICT) {
     Split(equal, if_true, if_false, fall_through);
   } else {
+    Heap::RootListIndex other_nil_value = nil == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     __ j(equal, if_true);
-    __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+    __ CompareRoot(rax, other_nil_value);
     __ j(equal, if_true);
     __ JumpIfSmi(rax, if_false);
     // It can be an undetectable object.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 9d55594..0632ce4 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -221,7 +221,7 @@
 
   // Update write barrier. Make sure not to clobber the value.
   __ movq(scratch0, value);
-  __ RecordWrite(elements, scratch1, scratch0);
+  __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
 }
 
 
@@ -462,30 +462,58 @@
   __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
   __ shr(rdi, Immediate(String::kHashShift));
   __ xor_(rcx, rdi);
-  __ and_(rcx, Immediate(KeyedLookupCache::kCapacityMask));
+  int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
+  __ and_(rcx, Immediate(mask));
 
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
   ExternalReference cache_keys
       = ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-  __ movq(rdi, rcx);
-  __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
-  __ LoadAddress(kScratchRegister, cache_keys);
-  __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
+
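+  // Probe each entry in the bucket in turn; an entry is a (map, symbol)
+  // pair occupying two consecutive pointers in the cache keys array.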
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    __ movq(rdi, rcx);
+    __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
+    __ LoadAddress(kScratchRegister, cache_keys);
+    int off = kPointerSize * i * 2;
+    __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+    __ j(not_equal, &try_next_entry);
+    __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+    __ j(equal, &hit_on_nth_entry[i]);
+    __ bind(&try_next_entry);
+  }
+
+  int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
+  __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
   __ j(not_equal, &slow);
-  __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize));
+  __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
   __ j(not_equal, &slow);
 
   // Get field offset, which is a 32-bit integer.
   ExternalReference cache_field_offsets
       = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-  __ LoadAddress(kScratchRegister, cache_field_offsets);
-  __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
-  __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
-  __ subq(rdi, rcx);
-  __ j(above_equal, &property_array_property);
+
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    if (i != 0) {
+      __ addl(rcx, Immediate(i));
+    }
+    __ LoadAddress(kScratchRegister, cache_field_offsets);
+    __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
+    __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+    __ subq(rdi, rcx);
+    __ j(above_equal, &property_array_property);
+    if (i != 0) {
+      __ jmp(&load_in_object_property);
+    }
+  }
 
   // Load in-object property.
+  __ bind(&load_in_object_property);
   __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
   __ addq(rcx, rdi);
   __ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
@@ -531,14 +559,12 @@
 
   Register receiver = rdx;
   Register index = rax;
-  Register scratch1 = rbx;
-  Register scratch2 = rcx;
+  Register scratch = rcx;
   Register result = rax;
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
-                                          scratch1,
-                                          scratch2,
+                                          scratch,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -606,45 +632,42 @@
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
-  Label slow, slow_with_tagged_index, fast, array, extra;
+  Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double;
+  Label fast_object_with_map_check, fast_object_without_map_check;
+  Label fast_double_with_map_check, fast_double_without_map_check;
+  Label transition_smi_elements, finish_object_store, non_double_value;
+  Label transition_double_elements;
 
   // Check that the object isn't a smi.
   __ JumpIfSmi(rdx, &slow_with_tagged_index);
   // Get the map from the receiver.
-  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+  __ testb(FieldOperand(r9, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow_with_tagged_index);
   // Check that the key is a smi.
   __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
   __ SmiToInteger32(rcx, rcx);
 
-  __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
+  __ CmpInstanceType(r9, JS_ARRAY_TYPE);
   __ j(equal, &array);
   // Check that the object is some kind of JSObject.
-  __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
+  __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
   __ j(below, &slow);
-  __ CmpInstanceType(rbx, JS_PROXY_TYPE);
-  __ j(equal, &slow);
-  __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE);
-  __ j(equal, &slow);
 
   // Object case: Check key against length in the elements array.
   // rax: value
   // rdx: JSObject
   // rcx: index
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  // Check that the object is in fast mode and writable.
-  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &slow);
+  // Check array bounds.
   __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
   // rax: value
   // rbx: FixedArray
   // rcx: index
-  __ j(above, &fast);
+  __ j(above, &fast_object_with_map_check);
 
   // Slow case: call runtime.
   __ bind(&slow);
@@ -666,9 +689,20 @@
   __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
   __ j(below_equal, &slow);
   // Increment index to get new length.
+  __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &check_extra_double);
   __ leal(rdi, Operand(rcx, 1));
   __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
-  __ jmp(&fast);
+  __ jmp(&fast_object_without_map_check);
+
+  __ bind(&check_extra_double);
+  // rdi: elements array's map
+  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+  __ j(not_equal, &slow);
+  __ leal(rdi, Operand(rcx, 1));
+  __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+  __ jmp(&fast_double_without_map_check);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
@@ -678,9 +712,6 @@
   // rdx: receiver (a JSArray)
   // rcx: index
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &slow);
 
   // Check the key against the length in the array, compute the
   // address to store into and fall through to fast case.
@@ -688,30 +719,100 @@
   __ j(below_equal, &extra);
 
   // Fast case: Do the store.
-  __ bind(&fast);
+  __ bind(&fast_object_with_map_check);
   // rax: value
   // rbx: receiver's elements array (a FixedArray)
   // rcx: index
+  // rdx: receiver (a JSArray)
+  __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &fast_double_with_map_check);
+  __ bind(&fast_object_without_map_check);
+  // Smi stores don't require further checks.
   Label non_smi_value;
+  __ JumpIfNotSmi(rax, &non_smi_value);
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
   __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
           rax);
-  __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear);
   __ ret(0);
+
   __ bind(&non_smi_value);
-  // Slow case that needs to retain rcx for use by RecordWrite.
-  // Update write barrier for the elements array address.
-  __ movq(rdx, rax);
-  __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
+  // Writing a non-smi, so check whether the array allows non-smi elements.
+  // r9: receiver's map
+  __ CheckFastObjectElements(r9, &transition_smi_elements);
+  __ bind(&finish_object_store);
+  __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+          rax);
+  __ movq(rdx, rax);  // Preserve the value which is returned.
+  __ RecordWriteArray(
+      rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   __ ret(0);
+
+  __ bind(&fast_double_with_map_check);
+  // Check for fast double array case. If this fails, call through to the
+  // runtime.
+  // rdi: elements array's map
+  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+  __ j(not_equal, &slow);
+  __ bind(&fast_double_without_map_check);
+  // If the value is a number, store it as a double in the FastDoubleElements
+  // array.
+  __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
+                                 &transition_double_elements);
+  __ ret(0);
+
+  __ bind(&transition_smi_elements);
+  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+
+  // Transition the array appropriately depending on the value type.
+  __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
+  __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &non_double_value);
+
+  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_DOUBLE_ELEMENTS,
+                                         rbx,
+                                         rdi,
+                                         &slow);
+  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ jmp(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         rbx,
+                                         rdi,
+                                         &slow);
+  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         rbx,
+                                         rdi,
+                                         &slow);
+  ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
 }
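
// A standalone sketch (stand-in enum and helper names, not the V8
// declarations) of the one-way elements-kind lattice the generic store stub
// above implements: a smi-only array moves to double storage when a heap
// number is written and to generic object storage when any other heap
// object is written; a double array moves to object storage for non-number
// values; smi stores never force a transition.
enum KindSketch { kSmiOnlyKind, kDoubleKind, kObjectKind };
enum ValueSketch { kSmiValue, kHeapNumberValue, kOtherHeapValue };

static KindSketch KindAfterStore(KindSketch kind, ValueSketch value) {
  if (value == kSmiValue) return kind;  // smis fit in every backing store
  if (kind == kSmiOnlyKind) {
    return value == kHeapNumberValue ? kDoubleKind : kObjectKind;
  }
  if (kind == kDoubleKind && value != kHeapNumberValue) return kObjectKind;
  return kind;  // the store proceeds without a transition
}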
 
 
 // The generated code does not accept smi keys.
 // The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                          int argc,
-                                          Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                               int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rdx                      : receiver
@@ -721,7 +822,7 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          MONOMORPHIC,
-                                         extra_ic_state,
+                                         extra_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
@@ -794,7 +895,7 @@
 
 
 // The generated code falls through if the call should be handled by runtime.
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   // rcx                    : function name
   // rsp[0]                 : return address
@@ -821,10 +922,10 @@
 }
 
 
-static void GenerateCallMiss(MacroAssembler* masm,
-                             int argc,
-                             IC::UtilityId id,
-                             Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+                              int argc,
+                              IC::UtilityId id,
+                              Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rsp[0]                   : return address
@@ -846,21 +947,22 @@
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the receiver and the name of the function.
-  __ push(rdx);
-  __ push(rcx);
+    // Push the receiver and the name of the function.
+    __ push(rdx);
+    __ push(rcx);
 
-  // Call the entry.
-  CEntryStub stub(1);
-  __ Set(rax, 2);
-  __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
-  __ CallStub(&stub);
+    // Call the entry.
+    CEntryStub stub(1);
+    __ Set(rax, 2);
+    __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
+    __ CallStub(&stub);
 
-  // Move result to rdi and exit the internal frame.
-  __ movq(rdi, rax);
-  __ LeaveInternalFrame();
+    // Move result to rdi and exit the internal frame.
+    __ movq(rdi, rax);
+  }
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
@@ -881,7 +983,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   ParameterCount actual(argc);
@@ -913,39 +1015,6 @@
 }
 
 
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  // rcx                      : function name
-  // rsp[0]                   : return address
-  // rsp[8]                   : argument argc
-  // rsp[16]                  : argument argc - 1
-  // ...
-  // rsp[argc * 8]            : argument 1
-  // rsp[(argc + 1) * 8]      : argument 0 = receiver
-  // -----------------------------------
-
-  GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm,
-                          int argc,
-                          Code::ExtraICState extra_ic_state) {
-  // ----------- S t a t e -------------
-  // rcx                      : function name
-  // rsp[0]                   : return address
-  // rsp[8]                   : argument argc
-  // rsp[16]                  : argument argc - 1
-  // ...
-  // rsp[argc * 8]            : argument 1
-  // rsp[(argc + 1) * 8]      : argument 0 = receiver
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
-}
-
-
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   // rcx                      : function name
@@ -1002,13 +1071,14 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-  __ EnterInternalFrame();
-  __ push(rcx);  // save the key
-  __ push(rdx);  // pass the receiver
-  __ push(rcx);  // pass the key
-  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-  __ pop(rcx);  // restore the key
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(rcx);  // save the key
+    __ push(rdx);  // pass the receiver
+    __ push(rcx);  // pass the key
+    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+    __ pop(rcx);  // restore the key
+  }
   __ movq(rdi, rax);
   __ jmp(&do_call);
 
@@ -1072,27 +1142,12 @@
   __ JumpIfSmi(rcx, &miss);
   Condition cond = masm->IsObjectStringType(rcx, rax, rax);
   __ j(NegateCondition(cond), &miss);
-  GenerateCallNormal(masm, argc);
+  CallICBase::GenerateNormal(masm, argc);
   __ bind(&miss);
   GenerateMiss(masm, argc);
 }
 
 
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  // rcx                      : function name
-  // rsp[0]                   : return address
-  // rsp[8]                   : argument argc
-  // rsp[16]                  : argument argc - 1
-  // ...
-  // rsp[argc * 8]            : argument 1
-  // rsp[(argc + 1) * 8]      : argument 0 = receiver
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
 static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                              Register object,
                                              Register key,
@@ -1212,7 +1267,12 @@
   __ movq(mapped_location, rax);
   __ lea(r9, mapped_location);
   __ movq(r8, rax);
-  __ RecordWrite(rbx, r9, r8);
+  __ RecordWrite(rbx,
+                 r9,
+                 r8,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 INLINE_SMI_CHECK);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in rbx.
@@ -1221,7 +1281,12 @@
   __ movq(unmapped_location, rax);
   __ lea(r9, unmapped_location);
   __ movq(r8, rax);
-  __ RecordWrite(rbx, r9, r8);
+  __ RecordWrite(rbx,
+                 r9,
+                 r8,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 INLINE_SMI_CHECK);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
@@ -1408,11 +1473,10 @@
   //  -- rsp[0] : return address
   // -----------------------------------
   //
-  // This accepts as a receiver anything JSObject::SetElementsLength accepts
-  // (currently anything except for external and pixel arrays which means
-  // anything with elements of FixedArray type.), but currently is restricted
-  // to JSArray.
-  // Value must be a number, but only smis are accepted as the most common case.
+  // This accepts as a receiver anything JSArray::SetElementsLength accepts
+  // (currently anything except for external arrays, which means anything with
+  // elements of FixedArray type).  Value must be a number, but only smis are
+  // accepted as the most common case.
 
   Label miss;
 
@@ -1434,6 +1498,13 @@
   __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
   __ j(not_equal, &miss);
 
+  // Check that the array has fast properties, otherwise the length
+  // property might have been redefined.
+  __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+  __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
+                 Heap::kHashTableMapRootIndex);
+  __ j(equal, &miss);
+
   // Check that value is a smi.
   __ JumpIfNotSmi(value, &miss);
 
@@ -1562,6 +1633,51 @@
 }
 
 
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rbx     : target map
+  //  -- rdx     : receiver
+  //  -- rsp[0]  : return address
+  // -----------------------------------
+  // Must return the modified receiver in rax.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+    __ movq(rax, rdx);
+    __ Ret();
+    __ bind(&fail);
+  }
+
+  __ pop(rbx);   // Pop the return address.
+  __ push(rdx);  // The receiver is the runtime call's only argument.
+  __ push(rbx);  // Put the return address back on top for the tail call.
+  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rbx     : target map
+  //  -- rdx     : receiver
+  //  -- rsp[0]  : return address
+  // -----------------------------------
+  // Must return the modified receiver in rax.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+    __ movq(rax, rdx);
+    __ Ret();
+    __ bind(&fail);
+  }
+
+  __ pop(rbx);   // Pop the return address.
+  __ push(rdx);  // The receiver is the runtime call's only argument.
+  __ push(rbx);  // Put the return address back on top for the tail call.
+  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
 #undef __
 
 
@@ -1573,11 +1689,9 @@
     case Token::LT:
       return less;
     case Token::GT:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return less;
+      return greater;
     case Token::LTE:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return greater_equal;
+      return less_equal;
     case Token::GTE:
       return greater_equal;
     default:
@@ -1609,6 +1723,9 @@
     rewritten = stub.GetCode();
   } else {
     ICCompareStub stub(op_, state);
+    if (state == KNOWN_OBJECTS) {
+      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+    }
     rewritten = stub.GetCode();
   }
   set_target(*rewritten);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index b82dc54..2ba2c57 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -67,9 +67,15 @@
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
-  HPhase phase("Code generation", chunk());
+  HPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -133,7 +139,7 @@
   // when called as functions (without an explicit receiver
   // object). rcx is zero for method calls and non-zero for function
   // calls.
-  if (info_->is_strict_mode() || info_->is_native()) {
+  if (!info_->is_classic_mode() || info_->is_native()) {
     Label ok;
     __ testq(rcx, rcx);
     __ j(zero, &ok, Label::kNear);
@@ -205,11 +211,8 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ movq(Operand(rsi, context_offset), rax);
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have use a third register to avoid
-        // clobbering rsi.
-        __ movq(rcx, rsi);
-        __ RecordWrite(rcx, context_offset, rax, rbx);
+        // Update the write barrier. This clobbers rax and rbx.
+        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
       }
     }
     Comment(";;; End allocate local context");
@@ -260,6 +263,9 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      Comment(";;; Deferred code @%d: %s.",
+              code->instruction_index(),
+              code->instr()->Mnemonic());
       code->Generate();
       __ jmp(code->exit());
     }
@@ -322,6 +328,12 @@
 }
 
 
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  return value->Number();
+}
+
+
 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
   Handle<Object> literal = chunk_->LookupLiteral(op);
   ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
@@ -356,7 +368,19 @@
 
   WriteTranslation(environment->outer(), translation);
   int closure_id = DefineDeoptimizationLiteral(environment->closure());
-  translation->BeginFrame(environment->ast_id(), closure_id, height);
+  switch (environment->frame_type()) {
+    case JS_FUNCTION:
+      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+      break;
+    case JS_CONSTRUCT:
+      translation->BeginConstructStubFrame(closure_id, translation_size);
+      break;
+    case ARGUMENTS_ADAPTOR:
+      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+      break;
+    default:
+      UNREACHABLE();
+  }
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
     // spilled_registers_ and spilled_double_registers_ are either
@@ -492,10 +516,14 @@
     // |>------------  translation_size ------------<|
 
     int frame_count = 0;
+    int jsframe_count = 0;
     for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
       ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
     }
-    Translation translation(&translations_, frame_count);
+    Translation translation(&translations_, frame_count, jsframe_count);
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
     int pc_offset = masm()->pc_offset();
@@ -512,7 +540,6 @@
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
   Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
-  ASSERT(entry != NULL);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -535,7 +562,6 @@
 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
-  ASSERT(FLAG_deopt);
   Handle<DeoptimizationInputData> data =
       factory()->NewDeoptimizationInputData(length, TENURED);
 
@@ -611,7 +637,7 @@
     Safepoint::DeoptMode deopt_mode) {
   ASSERT(kind == expected_safepoint_kind_);
 
-  const ZoneList<LOperand*>* operands = pointers->operands();
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
 
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deopt_mode);
@@ -973,11 +999,11 @@
         DeoptimizeIf(no_condition, instr->environment());
       }
     } else if (right->IsStackSlot()) {
-      __ or_(kScratchRegister, ToOperand(right));
+      __ orl(kScratchRegister, ToOperand(right));
       DeoptimizeIf(sign, instr->environment());
     } else {
       // Test the non-zero operand for negative sign.
-      __ or_(kScratchRegister, ToRegister(right));
+      __ orl(kScratchRegister, ToRegister(right));
       DeoptimizeIf(sign, instr->environment());
     }
     __ bind(&done);
@@ -1142,8 +1168,13 @@
 
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
-  ASSERT(instr->result()->IsRegister());
-  __ Move(ToRegister(instr->result()), instr->value());
+  Handle<Object> value = instr->value();
+  if (value->IsSmi()) {
+    __ Move(ToRegister(instr->result()), value);
+  } else {
+    __ LoadHeapObject(ToRegister(instr->result()),
+                      Handle<HeapObject>::cast(value));
+  }
 }
 
 
@@ -1192,6 +1223,49 @@
 }
 
 
+void LCodeGen::DoDateField(LDateField* instr) {
+  Register object = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Smi* index = instr->index();
+  Label runtime, done;
+  ASSERT(object.is(result));
+  ASSERT(object.is(rax));
+
+#ifdef DEBUG
+  __ AbortIfSmi(object);
+  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
+  __ Assert(equal, "Trying to get date field from non-date.");
+#endif
+
+  if (index->value() == 0) {
+    __ movq(result, FieldOperand(object, JSDate::kValueOffset));
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ movq(kScratchRegister, stamp);
+      __ cmpq(kScratchRegister, FieldOperand(object,
+                                             JSDate::kCacheStampOffset));
+      __ j(not_equal, &runtime, Label::kNear);
+      __ movq(result, FieldOperand(object, JSDate::kValueOffset +
+                                           kPointerSize * index->value()));
+      __ jmp(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2);
+#ifdef _WIN64
+    __ movq(rcx, object);
+    __ movq(rdx, index, RelocInfo::NONE);
+#else
+    __ movq(rdi, object);
+    __ movq(rsi, index, RelocInfo::NONE);
+#endif
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    __ bind(&done);
+  }
+}
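
// A minimal sketch, with assumed stand-in types, of the stamp protocol the
// code above compiles: cached fields on a date object are valid only while
// the object's recorded stamp matches the date cache's current stamp;
// otherwise the slow C function recomputes the field (and refills the
// cache).
struct DateCacheSketch { int stamp; };
struct DateObjectSketch {
  int cache_stamp;
  double cached_fields[8];
};

static double GetDateFieldSketch(const DateObjectSketch& date,
                                 const DateCacheSketch& cache, int index,
                                 double (*slow_path)(const DateObjectSketch&,
                                                     int)) {
  if (date.cache_stamp == cache.stamp) return date.cached_fields[index];
  return slow_path(date, index);
}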
+
+
 void LCodeGen::DoBitNotI(LBitNotI* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->Equals(instr->result()));
@@ -1457,39 +1531,51 @@
 }
 
 
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
-  if (right->IsConstantOperand()) {
-    int32_t value = ToInteger32(LConstantOperand::cast(right));
-    if (left->IsRegister()) {
-      __ cmpl(ToRegister(left), Immediate(value));
-    } else {
-      __ cmpl(ToOperand(left), Immediate(value));
-    }
-  } else if (right->IsRegister()) {
-    __ cmpl(ToRegister(left), ToRegister(right));
-  } else {
-    __ cmpl(ToRegister(left), ToOperand(right));
-  }
-}
-
-
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-
-  if (instr->is_double()) {
-    // Don't base result on EFLAGS when a NaN is involved. Instead
-    // jump to the false block.
-    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
-  } else {
-    EmitCmpI(left, right);
-  }
-
   Condition cc = TokenToCondition(instr->op(), instr->is_double());
-  EmitBranch(true_block, false_block, cc);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block =
+      EvalComparison(instr->op(), left_val, right_val) ? true_block
+                                                       : false_block;
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Don't base result on EFLAGS when a NaN is involved. Instead
+      // jump to the false block.
+      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+      __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+    } else {
+      int32_t value;
+      if (right->IsConstantOperand()) {
+        value = ToInteger32(LConstantOperand::cast(right));
+        __ cmpl(ToRegister(left), Immediate(value));
+      } else if (left->IsConstantOperand()) {
+        value = ToInteger32(LConstantOperand::cast(left));
+        if (right->IsRegister()) {
+          __ cmpl(ToRegister(right), Immediate(value));
+        } else {
+          __ cmpl(ToOperand(right), Immediate(value));
+        }
+        // We transposed the operands. Reverse the condition.
+        cc = ReverseCondition(cc);
+      } else {
+        if (right->IsRegister()) {
+          __ cmpl(ToRegister(left), ToRegister(right));
+        } else {
+          __ cmpl(ToRegister(left), ToOperand(right));
+        }
+      }
+    }
+    EmitBranch(true_block, false_block, cc);
+  }
 }
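
// The transposed-operand path above relies on reversing, not negating, the
// condition: "imm OP reg" becomes "reg OP' imm", where OP' swaps the
// operand roles. A standalone sketch of that mapping (stand-in enum, not
// the assembler's Condition type):
enum CondSketch { kLess, kLessEqual, kGreater, kGreaterEqual, kEqual };

static CondSketch ReverseConditionSketch(CondSketch cc) {
  switch (cc) {
    case kLess:         return kGreater;       // a < b   <=>  b > a
    case kLessEqual:    return kGreaterEqual;  // a <= b  <=>  b >= a
    case kGreater:      return kLess;
    case kGreaterEqual: return kLessEqual;
    default:            return cc;             // equality is symmetric
  }
}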
 
 
@@ -1514,30 +1600,33 @@
 }
 
 
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
   Register reg = ToRegister(instr->InputAt(0));
-
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
+  // If the expression is known to be untagged or a smi, then it's definitely
+  // not null, and it can't be an undetectable object.
   if (instr->hydrogen()->representation().IsSpecialization() ||
       instr->hydrogen()->type().IsSmi()) {
-    // If the expression is known to untagged or smi, then it's definitely
-    // not null, and it can't be a an undetectable object.
-    // Jump directly to the false block.
     EmitGoto(false_block);
     return;
   }
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-
-  __ CompareRoot(reg, Heap::kNullValueRootIndex);
-  if (instr->is_strict()) {
+  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ CompareRoot(reg, nil_value);
+  if (instr->kind() == kStrictEquality) {
     EmitBranch(true_block, false_block, equal);
   } else {
+    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ j(equal, true_label);
-    __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+    __ CompareRoot(reg, other_nil_value);
     __ j(equal, true_label);
     __ JumpIfSmi(reg, false_label);
     // Check for undetectable objects by looking in the bit field in
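
// For reference, a standalone sketch (stand-in booleans, not V8 code) of
// the semantics being compiled: strict equality matches only the named nil
// value itself, while non-strict equality treats null, undefined, and
// undetectable objects as mutually equal.
static bool CompareNilSketch(bool strict, bool matches_nil,
                             bool matches_other_nil, bool is_undetectable) {
  if (strict) return matches_nil;
  return matches_nil || matches_other_nil || is_undetectable;
}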
@@ -1590,6 +1679,30 @@
 }
 
 
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string) {
+  __ JumpIfSmi(input, is_not_string);
+  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+  return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond = EmitIsString(reg, temp, false_label);
+
+  EmitBranch(true_block, false_block, true_cond);
+}
+
+
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1621,6 +1734,21 @@
 }
 
 
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = TokenToCondition(op, false);
+  __ testq(rax, rax);
+
+  EmitBranch(true_block, false_block, condition);
+}
+
+
 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -1684,35 +1812,48 @@
 
 
 // Branches to a label or falls through with the answer in the z flag.
-// Trashes the temp register and possibly input (if it and temp are aliased).
+// Trashes the temp register.
 void LCodeGen::EmitClassOfTest(Label* is_true,
                                Label* is_false,
                                Handle<String> class_name,
                                Register input,
-                               Register temp) {
-  __ JumpIfSmi(input, is_false);
-  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
-  __ j(below, is_false);
+                               Register temp,
+                               Register temp2) {
+  ASSERT(!input.is(temp));
+  ASSERT(!input.is(temp2));
+  ASSERT(!temp.is(temp2));
 
-  // Map is now in temp.
-  // Functions have class 'Function'.
-  __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+  __ JumpIfSmi(input, is_false);
+
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    __ j(above_equal, is_true);
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+    __ j(below, is_false);
+    __ j(equal, is_true);
+    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
+    __ j(equal, is_true);
   } else {
-    __ j(above_equal, is_false);
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do an unsigned compare with the width of the type range.
+    __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+    __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
+    __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                             FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ j(above, is_false);
   }
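
// The subtract-then-compare above is the classic single-branch range check:
// after subtracting the lower bound, one unsigned ("above") compare against
// the width of the range tests both bounds at once, because an input below
// the lower bound wraps around to a large unsigned value. A standalone
// illustration with stand-in parameters:
static bool InTypeRangeSketch(unsigned type, unsigned lo, unsigned hi) {
  return (type - lo) <= (hi - lo);  // unsigned wrap also rejects type < lo
}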
 
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   // Check if the constructor in the map is a function.
   __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
   if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -1741,6 +1882,7 @@
 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
+  Register temp2 = ToRegister(instr->TempAt(1));
   Handle<String> class_name = instr->hydrogen()->class_name();
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1749,7 +1891,7 @@
   Label* true_label = chunk_->GetAssemblyLabel(true_block);
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  EmitClassOfTest(true_label, false_label, class_name, input, temp);
+  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
 
   EmitBranch(true_block, false_block, equal);
 }
@@ -1790,9 +1932,8 @@
     virtual void Generate() {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-
+    virtual LInstruction* instr() { return instr_; }
     Label* map_check() { return &map_check_; }
-
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
@@ -1816,9 +1957,10 @@
   Register map = ToRegister(instr->TempAt(0));
   __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
   __ bind(deferred->map_check());  // Label for calculating code patching.
-  __ movq(kScratchRegister, factory()->the_hole_value(),
-          RelocInfo::EMBEDDED_OBJECT);
-  __ cmpq(map, kScratchRegister);  // Patched to cached map.
+  Handle<JSGlobalPropertyCell> cache_cell =
+      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+  __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+  __ cmpq(map, Operand(kScratchRegister, 0));
   __ j(not_equal, &cache_miss, Label::kNear);
   // Patched to load either true or false.
   __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
@@ -1856,7 +1998,7 @@
     InstanceofStub stub(flags);
 
     __ push(ToRegister(instr->InputAt(0)));
-    __ Push(instr->function());
+    __ PushHeapObject(instr->function());
 
     static const int kAdditionalDelta = 10;
     int delta =
@@ -1900,9 +2042,6 @@
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = TokenToCondition(op, false);
-  if (op == Token::GT || op == Token::LTE) {
-    condition = ReverseCondition(condition);
-  }
   Label true_value, done;
   __ testq(rax, rax);
   __ j(condition, &true_value, Label::kNear);
@@ -1929,14 +2068,8 @@
 
 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
-  if (result.is(rax)) {
-    __ load_rax(instr->hydrogen()->cell().location(),
-                RelocInfo::GLOBAL_PROPERTY_CELL);
-  } else {
-    __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
-    __ movq(result, Operand(result, 0));
-  }
-  if (instr->hydrogen()->check_hole_value()) {
+  __ LoadGlobalCell(result, instr->hydrogen()->cell());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
     DeoptimizeIf(equal, instr->environment());
   }
@@ -1956,25 +2089,28 @@
 
 
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
-  Register value = ToRegister(instr->InputAt(0));
-  Register temp = ToRegister(instr->TempAt(0));
-  ASSERT(!value.is(temp));
-  bool check_hole = instr->hydrogen()->check_hole_value();
-  if (!check_hole && value.is(rax)) {
-    __ store_rax(instr->hydrogen()->cell().location(),
-                 RelocInfo::GLOBAL_PROPERTY_CELL);
-    return;
-  }
+  Register value = ToRegister(instr->value());
+  Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
+
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
   // to update the property details in the property dictionary to mark
   // it as no longer deleted. We deoptimize in that case.
-  __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
-  if (check_hole) {
-    __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    // We have a temp because CompareRoot might clobber kScratchRegister.
+    Register cell = ToRegister(instr->TempAt(0));
+    ASSERT(!value.is(cell));
+    __ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+    __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
     DeoptimizeIf(equal, instr->environment());
+    // Store the value.
+    __ movq(Operand(cell, 0), value);
+  } else {
+    // Store the value.
+    __ movq(kScratchRegister, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+    __ movq(Operand(kScratchRegister, 0), value);
   }
-  __ movq(Operand(temp, 0), value);
+  // Cells are always rescanned, so no write barrier here.
 }
 
 
@@ -1983,7 +2119,7 @@
   ASSERT(ToRegister(instr->value()).is(rax));
 
   __ Move(rcx, instr->name());
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -1994,18 +2130,53 @@
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ movq(result, ContextOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(equal, instr->environment());
+    } else {
+      Label is_not_hole;
+      __ j(not_equal, &is_not_hole, Label::kNear);
+      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+      __ bind(&is_not_hole);
+    }
+  }
 }
 
 
 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
-  __ movq(ContextOperand(context, instr->slot_index()), value);
-  if (instr->needs_write_barrier()) {
+
+  Operand target = ContextOperand(context, instr->slot_index());
+
+  Label skip_assignment;
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(equal, instr->environment());
+    } else {
+      __ j(not_equal, &skip_assignment);
+    }
+  }
+  __ movq(target, value);
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     int offset = Context::SlotOffset(instr->slot_index());
     Register scratch = ToRegister(instr->TempAt(0));
-    __ RecordWrite(context, offset, value, scratch);
+    __ RecordWriteContextSlot(context,
+                              offset,
+                              value,
+                              scratch,
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
   }
+
+  __ bind(&skip_assignment);
 }
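
// A standalone sketch (assumed smi tagging with the low bit clear, not the
// V8 barrier itself) of why the smi check can be omitted: smis are tagged
// immediates, not heap pointers, so a smi store never creates a reference
// the garbage collector must track. When the type analysis proves the value
// is a heap object, the dynamic check is dead and OMIT_SMI_CHECK elides it.
#include <stdint.h>
static inline bool IsSmiTaggedSketch(uintptr_t value) {
  return (value & 1) == 0;
}

static void RecordWriteSketch(uintptr_t* slot, uintptr_t value,
                              bool check_smi) {
  *slot = value;
  if (check_smi && IsSmiTaggedSketch(value)) return;  // nothing to track
  // ... a real barrier would remember |slot| for the incremental marker
  // and the store buffer here ...
}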
 
 
@@ -2025,9 +2196,9 @@
                                                Register object,
                                                Handle<Map> type,
                                                Handle<String> name) {
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsProperty() &&
+  ASSERT(lookup.IsFound() &&
          (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
   if (lookup.type() == FIELD) {
     int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2043,7 +2214,7 @@
     }
   } else {
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    LoadHeapObject(result, Handle<HeapObject>::cast(function));
+    __ LoadHeapObject(result, function);
   }
 }
 
@@ -2223,17 +2394,15 @@
     LLoadKeyedFastDoubleElement* instr) {
   XMMRegister result(ToDoubleRegister(instr->result()));
 
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
-        sizeof(kHoleNanLower32);
-    Operand hole_check_operand = BuildFastArrayOperand(
-        instr->elements(),
-        instr->key(),
-        FAST_DOUBLE_ELEMENTS,
-        offset);
-    __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr->environment());
-  }
+  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+      sizeof(kHoleNanLower32);
+  Operand hole_check_operand = BuildFastArrayOperand(
+      instr->elements(),
+      instr->key(),
+      FAST_DOUBLE_ELEMENTS,
+      offset);
+  __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+  DeoptimizeIf(equal, instr->environment());
 
   Operand double_load_operand = BuildFastArrayOperand(
       instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2305,6 +2474,7 @@
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -2373,14 +2543,9 @@
 }
 
 
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   Register receiver = ToRegister(instr->receiver());
   Register function = ToRegister(instr->function());
-  Register length = ToRegister(instr->length());
-  Register elements = ToRegister(instr->elements());
-  ASSERT(receiver.is(rax));  // Used for parameter count.
-  ASSERT(function.is(rdi));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(rax));
 
   // If the receiver is null or undefined, we have to pass the global
   // object as a receiver to normal functions. Values have to be
@@ -2423,6 +2588,17 @@
   __ movq(receiver,
           FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  ASSERT(receiver.is(rax));  // Used for parameter count.
+  ASSERT(function.is(rdi));  // Required by InvokeFunction.
+  ASSERT(ToRegister(instr->result()).is(rax));
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.
@@ -2451,7 +2627,7 @@
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
       this, pointers, Safepoint::kLazyDeopt);
-  v8::internal::ParameterCount actual(rax);
+  ParameterCount actual(rax);
   __ InvokeFunction(function, actual, CALL_FUNCTION,
                     safepoint_generator, CALL_AS_METHOD);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2466,7 +2642,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadHeapObject(result, instr->hydrogen()->closure());
 }
 
 
@@ -2484,6 +2660,14 @@
 }
 
 
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  __ push(rsi);  // The context is the first argument.
+  __ PushHeapObject(instr->hydrogen()->pairs());
+  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register result = ToRegister(instr->result());
   __ movq(result, GlobalObjectOperand());
@@ -2501,34 +2685,47 @@
                                  int arity,
                                  LInstruction* instr,
                                  CallKind call_kind) {
-  // Change context if needed.
-  bool change_context =
-      (info()->closure()->context() != function->context()) ||
-      scope()->contains_with() ||
-      (scope()->num_heap_slots() > 0);
-  if (change_context) {
-    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-  }
-
-  // Set rax to arguments count if adaption is not needed. Assumes that rax
-  // is available to write to at this point.
-  if (!function->NeedsArgumentsAdaption()) {
-    __ Set(rax, arity);
-  }
+  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+      function->shared()->formal_parameter_count() == arity;
 
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  // Invoke function.
-  __ SetCallKind(rcx, call_kind);
-  if (*function == *info()->closure()) {
-    __ CallSelf();
-  } else {
-    __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-  }
+  if (can_invoke_directly) {
+    __ LoadHeapObject(rdi, function);
 
-  // Setup deoptimization.
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+    // Change context if needed.
+    bool change_context =
+        (info()->closure()->context() != function->context()) ||
+        scope()->contains_with() ||
+        (scope()->num_heap_slots() > 0);
+    if (change_context) {
+      __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+    }
+
+    // Set rax to arguments count if adaption is not needed. Assumes that rax
+    // is available to write to at this point.
+    if (!function->NeedsArgumentsAdaption()) {
+      __ Set(rax, arity);
+    }
+
+    // Invoke function.
+    __ SetCallKind(rcx, call_kind);
+    if (*function == *info()->closure()) {
+      __ CallSelf();
+    } else {
+      __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+    }
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+  } else {
+    // We need to adapt arguments.
+    SafepointGenerator generator(
+        this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+  }
 
   // Restore context.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2537,7 +2734,6 @@
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
-  __ Move(rdi, instr->function());
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
@@ -2618,6 +2814,7 @@
     virtual void Generate() {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LUnaryMathOperation* instr_;
   };
@@ -2751,65 +2948,158 @@
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done, sqrt;
+  // Check base for -Infinity.  According to IEEE-754, double-precision
+  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
+  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
+  __ movq(xmm_scratch, kScratchRegister);
+  __ ucomisd(xmm_scratch, input_reg);
+  // Comparing -Infinity with NaN results in "unordered", which sets the
+  // zero flag as if both were equal.  However, it also sets the carry flag.
+  __ j(not_equal, &sqrt, Label::kNear);
+  __ j(carry, &sqrt, Label::kNear);
+  // If input is -Infinity, return Infinity.
+  __ xorps(input_reg, input_reg);
+  __ subsd(input_reg, xmm_scratch);
+  __ jmp(&done, Label::kNear);
+
+  // Square root.
+  __ bind(&sqrt);
   __ xorps(xmm_scratch, xmm_scratch);
   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   __ sqrtsd(input_reg, input_reg);
+  __ bind(&done);
 }
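
// The corner cases handled above can be sanity-checked against the C
// library, which follows the same IEEE-754 rules (a standalone sketch, not
// V8 code):
#include <cmath>
#include <cassert>
static void CheckPowHalfCasesSketch() {
  assert(std::pow(-INFINITY, 0.5) == INFINITY);  // pow's special case
  assert(std::isnan(std::sqrt(-INFINITY)));      // sqrt of a negative
  // sqrt(-0.0) is -0.0 under IEEE-754; adding +0.0 first normalizes the
  // sign, which is exactly what the xorps/addsd pair above achieves.
  assert(!std::signbit(std::sqrt(-0.0 + 0.0)));
}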
 
 
 void LCodeGen::DoPower(LPower* instr) {
-  LOperand* left = instr->InputAt(0);
-  XMMRegister left_reg = ToDoubleRegister(left);
-  ASSERT(!left_reg.is(xmm1));
-  LOperand* right = instr->InputAt(1);
-  XMMRegister result_reg = ToDoubleRegister(instr->result());
   Representation exponent_type = instr->hydrogen()->right()->representation();
-  if (exponent_type.IsDouble()) {
-    __ PrepareCallCFunction(2);
-    // Move arguments to correct registers
-    __ movaps(xmm0, left_reg);
-    ASSERT(ToDoubleRegister(right).is(xmm1));
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 2);
-  } else if (exponent_type.IsInteger32()) {
-    __ PrepareCallCFunction(2);
-    // Move arguments to correct registers: xmm0 and edi (not rdi).
-    // On Windows, the registers are xmm0 and edx.
-    __ movaps(xmm0, left_reg);
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+
+  // Choose a register conforming to the calling convention (used when
+  // bailing out).
 #ifdef _WIN64
-    ASSERT(ToRegister(right).is(rdx));
+  Register exponent = rdx;
 #else
-    ASSERT(ToRegister(right).is(rdi));
+  Register exponent = rdi;
 #endif
-    __ CallCFunction(
-        ExternalReference::power_double_int_function(isolate()), 2);
-  } else {
-    ASSERT(exponent_type.IsTagged());
-    Register right_reg = ToRegister(right);
+  ASSERT(!instr->InputAt(1)->IsRegister() ||
+         ToRegister(instr->InputAt(1)).is(exponent));
+  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+         ToDoubleRegister(instr->InputAt(1)).is(xmm1));
+  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
 
-    Label non_smi, call;
-    __ JumpIfNotSmi(right_reg, &non_smi);
-    __ SmiToInteger32(right_reg, right_reg);
-    __ cvtlsi2sd(xmm1, right_reg);
-    __ jmp(&call);
-
-    __ bind(&non_smi);
-    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , kScratchRegister);
+  if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(exponent, &no_deopt);
+    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
     DeoptimizeIf(not_equal, instr->environment());
-    __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset));
-
-    __ bind(&call);
-    __ PrepareCallCFunction(2);
-    // Move arguments to correct registers xmm0 and xmm1.
-    __ movaps(xmm0, left_reg);
-    // Right argument is already in xmm1.
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 2);
+    __ bind(&no_deopt);
+    MathPowStub stub(MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    ASSERT(exponent_type.IsDouble());
+    MathPowStub stub(MathPowStub::DOUBLE);
+    __ CallStub(&stub);
   }
-  // Return value is in xmm0.
-  __ movaps(result_reg, xmm0);
-  // Restore context register.
+}
+
+
+void LCodeGen::DoRandom(LRandom* instr) {
+  class DeferredDoRandom: public LDeferredCode {
+   public:
+    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LRandom* instr_;
+  };
+
+  DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+
+  // Having marked this instruction as a call, we can use any registers.
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+
+  // Choose the right register for the first argument depending on
+  // the calling convention.
+#ifdef _WIN64
+  ASSERT(ToRegister(instr->InputAt(0)).is(rcx));
+  Register global_object = rcx;
+#else
+  ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
+  Register global_object = rdi;
+#endif
+
+  static const int kSeedSize = sizeof(uint32_t);
+  STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
+
+  __ movq(global_object,
+          FieldOperand(global_object, GlobalObject::kGlobalContextOffset));
+  static const int kRandomSeedOffset =
+      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
+  __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
+  // rbx: FixedArray of the global context's random seeds
+
+  // Load state[0].
+  __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
+  // If state[0] == 0, call runtime to initialize seeds.
+  __ testl(rax, rax);
+  __ j(zero, deferred->entry());
+  // Load state[1].
+  __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
+
+  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
+  // Only operate on the lower 32 bits of rax.
+  __ movl(rdx, rax);
+  __ andl(rdx, Immediate(0xFFFF));
+  __ imull(rdx, rdx, Immediate(18273));
+  __ shrl(rax, Immediate(16));
+  __ addl(rax, rdx);
+  // Save state[0].
+  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
+
+  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
+  __ movl(rdx, rcx);
+  __ andl(rdx, Immediate(0xFFFF));
+  __ imull(rdx, rdx, Immediate(36969));
+  __ shrl(rcx, Immediate(16));
+  __ addl(rcx, rdx);
+  // Save state[1].
+  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
+
+  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
+  __ shll(rax, Immediate(14));
+  __ andl(rcx, Immediate(0x3FFFF));
+  __ addl(rax, rcx);
+
+  __ bind(deferred->exit());
+  // Convert 32 random bits in rax to 0.(32 random bits) in a double
+  // by computing:
+  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
+  __ movl(rcx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
+  __ movd(xmm2, rcx);
+  __ movd(xmm1, rax);
+  __ cvtss2sd(xmm2, xmm2);
+  __ xorps(xmm1, xmm2);
+  __ subsd(xmm1, xmm2);
+}
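
// A self-contained sketch of the generator above: two 16-bit
// multiply-with-carry streams combined into 32 random bits, plus the bit
// trick that turns those bits into a double in [0, 1) without an
// int-to-double conversion (stand-in function name, not V8 code):
#include <stdint.h>
#include <string.h>
static double NextRandomSketch(uint32_t state[2]) {
  state[0] = 18273u * (state[0] & 0xFFFFu) + (state[0] >> 16);
  state[1] = 36969u * (state[1] & 0xFFFFu) + (state[1] >> 16);
  uint32_t bits = (state[0] << 14) + (state[1] & 0x3FFFFu);
  // Splice the random bits into the low mantissa of the double 2^20
  // (exponent field 0x413): the value is 2^20 + bits * 2^-32, so
  // subtracting 2^20 leaves 0.(32 random bits).
  uint64_t repr = (UINT64_C(0x413) << 52) | bits;
  double d;
  memcpy(&d, &repr, sizeof(d));
  return d - 1048576.0;  // 1048576.0 == 2^20
}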
+
+
+void LCodeGen::DoDeferredRandom(LRandom* instr) {
+  __ PrepareCallCFunction(1);
+  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  // Return value is in rax.
 }
 
 
@@ -2821,6 +3111,14 @@
 }
 
 
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -2860,6 +3158,9 @@
     case kMathSin:
       DoMathSin(instr);
       break;
+    case kMathTan:
+      DoMathTan(instr);
+      break;
     case kMathLog:
       DoMathLog(instr);
       break;
@@ -2909,13 +3210,13 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(rdi));
   ASSERT(ToRegister(instr->result()).is(rax));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ Drop(1);
 }
 
 
@@ -2933,7 +3234,6 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
-  __ Move(rdi, instr->target());
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
@@ -2942,9 +3242,9 @@
   ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
   ASSERT(ToRegister(instr->result()).is(rax));
 
-  Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
+  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   __ Set(rax, instr->arity());
-  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
 }
 
 
@@ -2963,21 +3263,36 @@
   }
 
   // Do the store.
+  HType type = instr->hydrogen()->value()->type();
+  SmiCheck check_needed =
+      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (instr->is_in_object()) {
     __ movq(FieldOperand(object, offset), value);
-    if (instr->needs_write_barrier()) {
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
       Register temp = ToRegister(instr->TempAt(0));
       // Update the write barrier for the object for in-object properties.
-      __ RecordWrite(object, offset, value, temp);
+      __ RecordWriteField(object,
+                          offset,
+                          value,
+                          temp,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
     }
   } else {
     Register temp = ToRegister(instr->TempAt(0));
     __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
     __ movq(FieldOperand(temp, offset), value);
-    if (instr->needs_write_barrier()) {
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
-      __ RecordWrite(temp, offset, value, object);
+      __ RecordWriteField(temp,
+                          offset,
+                          value,
+                          object,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
     }
   }
 }
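
The new SmiCheck argument exists because the remembered set only tracks heap
pointers. A minimal sketch of the filter, assuming V8's standard tagging where
smis carry a clear low bit (the helper name is illustrative):

    #include <cstdint>

    enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

    // Smis never need a write-barrier entry; when the hydrogen type already
    // proves the stored value is a heap object, the inline smi test is
    // omitted entirely.
    bool MayNeedWriteBarrier(uintptr_t value_word, SmiCheck check) {
      if (check == INLINE_SMI_CHECK && (value_word & 1) == 0) {
        return false;  // tagged smi: no heap pointer was stored
      }
      return true;
    }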
@@ -2988,7 +3303,7 @@
   ASSERT(ToRegister(instr->value()).is(rax));
 
   __ Move(rcx, instr->hydrogen()->name());
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3025,6 +3340,7 @@
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3036,17 +3352,25 @@
 
 
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  if (instr->index()->IsConstantOperand()) {
-    if (instr->length()->IsRegister()) {
-      __ cmpq(ToRegister(instr->length()),
+  if (instr->length()->IsRegister()) {
+    Register reg = ToRegister(instr->length());
+    if (FLAG_debug_code) {
+      __ AbortIfNotZeroExtended(reg);
+    }
+    if (instr->index()->IsConstantOperand()) {
+      __ cmpq(reg,
               Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
     } else {
-      __ cmpq(ToOperand(instr->length()),
-              Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
+      Register reg2 = ToRegister(instr->index());
+      if (FLAG_debug_code) {
+        __ AbortIfNotZeroExtended(reg2);
+      }
+      __ cmpq(reg, reg2);
     }
   } else {
-    if (instr->length()->IsRegister()) {
-      __ cmpq(ToRegister(instr->length()), ToRegister(instr->index()));
+    if (instr->index()->IsConstantOperand()) {
+      __ cmpq(ToOperand(instr->length()),
+              Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
     } else {
       __ cmpq(ToOperand(instr->length()), ToRegister(instr->index()));
     }
@@ -3076,12 +3400,20 @@
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     __ lea(key, FieldOperand(elements,
                              key,
                              times_pointer_size,
                              FixedArray::kHeaderSize));
-    __ RecordWrite(elements, key, value);
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed);
   }
 }
 
@@ -3110,13 +3442,54 @@
   ASSERT(ToRegister(instr->key()).is(rcx));
   ASSERT(ToRegister(instr->value()).is(rax));
 
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register new_map_reg = ToRegister(instr->new_map_reg());
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = from_map->elements_kind();
+  ElementsKind to_kind = to_map->elements_kind();
+
+  Label not_applicable;
+  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+  __ j(not_equal, &not_applicable);
+  __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+    // Write barrier.
+    ASSERT_NE(instr->temp_reg(), NULL);
+    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+                        ToRegister(instr->temp_reg()), kDontSaveFPRegs);
+  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+      to_kind == FAST_DOUBLE_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(rdx));
+    ASSERT(new_map_reg.is(rbx));
+    __ movq(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+             RelocInfo::CODE_TARGET, instr);
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(rdx));
+    ASSERT(new_map_reg.is(rbx));
+    __ movq(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+             RelocInfo::CODE_TARGET, instr);
+  } else {
+    UNREACHABLE();
+  }
+  __ bind(&not_applicable);
+}
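
The three supported transitions differ in cost: smi-only to tagged needs only
a map swap, while either direction involving doubles must re-box every
element, hence the stub calls. A hedged sketch of the dispatch (the enum and
strategy names are illustrative):

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS
    };
    enum TransitionStrategy {
      SWAP_MAP_IN_PLACE,      // smis are already valid tagged values
      SMI_TO_DOUBLE_STUB,     // re-box each smi as an unboxed double
      DOUBLE_TO_OBJECT_STUB,  // re-box each double as a heap number
      NOT_SUPPORTED           // UNREACHABLE() in the generated code
    };

    TransitionStrategy Pick(ElementsKind from, ElementsKind to) {
      if (from == FAST_SMI_ONLY_ELEMENTS && to == FAST_ELEMENTS)
        return SWAP_MAP_IN_PLACE;
      if (from == FAST_SMI_ONLY_ELEMENTS && to == FAST_DOUBLE_ELEMENTS)
        return SMI_TO_DOUBLE_STUB;
      if (from == FAST_DOUBLE_ELEMENTS && to == FAST_ELEMENTS)
        return DOUBLE_TO_OBJECT_STUB;
      return NOT_SUPPORTED;
    }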
+
+
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   EmitPushTaggedOperand(instr->left());
   EmitPushTaggedOperand(instr->right());
@@ -3131,85 +3504,19 @@
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
 
-  Register string = ToRegister(instr->string());
-  Register index = ToRegister(instr->index());
-  Register result = ToRegister(instr->result());
-
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  // Fetch the instance type of the receiver into result register.
-  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // We need special handling for indirect strings.
-  Label check_sequential;
-  __ testb(result, Immediate(kIsIndirectStringMask));
-  __ j(zero, &check_sequential, Label::kNear);
-
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ testb(result, Immediate(kSlicedNotConsMask));
-  __ j(zero, &cons_string, Label::kNear);
-
-  // Handle slices.
-  Label indirect_string_loaded;
-  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
-  __ addq(index, result);
-  __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded, Label::kNear);
-
-  // Handle conses.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ bind(&cons_string);
-  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
-                 Heap::kEmptyStringRootIndex);
-  __ j(not_equal, deferred->entry());
-  __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // Check whether the string is sequential. The only non-sequential
-  // shapes we support have just been unwrapped above.
-  __ bind(&check_sequential);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(result, Immediate(kStringRepresentationMask));
-  __ j(not_zero, deferred->entry());
-
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii_string;
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ testb(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string, Label::kNear);
-
-  // Two-byte string.
-  // Load the two-byte character code into the result register.
-  Label done;
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ movzxwl(result, FieldOperand(string,
-                                  index,
-                                  times_2,
-                                  SeqTwoByteString::kHeaderSize));
-  __ jmp(&done, Label::kNear);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ movzxbl(result, FieldOperand(string,
-                                  index,
-                                  times_1,
-                                  SeqAsciiString::kHeaderSize));
-  __ bind(&done);
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
   __ bind(deferred->exit());
 }
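
The deleted inline sequence lives on in StringCharLoadGenerator: unwrap sliced
strings and flat cons strings, bail out to the deferred path for anything that
needs flattening, then index the sequential payload. A simplified C++ sketch
under assumed string shapes (the struct and its fields are illustrative, not
V8's):

    #include <string>

    struct Str {
      enum Kind { SEQ, CONS, SLICE } kind;
      std::string data;       // SEQ payload (one byte per char for brevity)
      Str* first = nullptr;   // CONS left child
      Str* second = nullptr;  // CONS right child; empty data => "flat" cons
      Str* parent = nullptr;  // SLICE backing string
      int offset = 0;         // SLICE start within the parent
    };

    // Returns the char code, or -1 when the deferred path is needed (a cons
    // whose right side is non-empty must be flattened by the runtime first).
    int CharCodeAt(Str* s, int index) {
      for (;;) {
        if (s->kind == Str::SLICE) {
          index += s->offset;
          s = s->parent;
        } else if (s->kind == Str::CONS) {
          if (!s->second->data.empty()) return -1;
          s = s->first;
        } else {
          return static_cast<unsigned char>(s->data[index]);
        }
      }
    }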
 
@@ -3251,6 +3558,7 @@
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -3327,6 +3635,7 @@
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -3385,6 +3694,7 @@
 void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 XMMRegister result_reg,
                                 bool deoptimize_on_undefined,
+                                bool deoptimize_on_minus_zero,
                                 LEnvironment* env) {
   Label load_smi, done;
 
@@ -3412,6 +3722,15 @@
   }
   // Heap number to XMM conversion.
   __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  if (deoptimize_on_minus_zero) {
+    XMMRegister xmm_scratch = xmm0;
+    __ xorps(xmm_scratch, xmm_scratch);
+    __ ucomisd(xmm_scratch, result_reg);
+    __ j(not_equal, &done, Label::kNear);
+    __ movmskpd(kScratchRegister, result_reg);
+    __ testq(kScratchRegister, Immediate(1));
+    DeoptimizeIf(not_zero, env);
+  }
   __ jmp(&done, Label::kNear);
 
   // Smi to XMM conversion
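
The extra minus-zero check is needed because IEEE 754 comparison cannot see
the sign of zero: ucomisd reports -0.0 == +0.0, so the sign bit is read out of
the raw bit pattern instead. A portable C++ equivalent of the sequence:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double value) {
      if (value != 0.0) return false;  // ucomisd: not any zero, nothing to do
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return (bits >> 63) != 0;        // movmskpd + testq: probe the sign bit
    }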
@@ -3422,16 +3741,6 @@
 }
 
 
-class DeferredTaggedToI: public LDeferredCode {
- public:
-  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-      : LDeferredCode(codegen), instr_(instr) { }
-  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
-  LTaggedToI* instr_;
-};
-
-
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Label done, heap_number;
   Register input_reg = ToRegister(instr->InputAt(0));
@@ -3480,6 +3789,16 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
@@ -3503,6 +3822,7 @@
 
   EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
+                   instr->hydrogen()->deoptimize_on_minus_zero(),
                    instr->environment());
 }
 
@@ -3608,20 +3928,37 @@
 
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  ASSERT(instr->InputAt(0)->IsRegister());
-  Register reg = ToRegister(instr->InputAt(0));
-  __ Cmp(reg, instr->hydrogen()->target());
+  Register reg = ToRegister(instr->value());
+  Handle<JSFunction> target = instr->hydrogen()->target();
+  if (isolate()->heap()->InNewSpace(*target)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(target);
+    __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+    __ cmpq(reg, Operand(kScratchRegister, 0));
+  } else {
+    __ Cmp(reg, target);
+  }
   DeoptimizeIf(not_equal, instr->environment());
 }
 
 
+void LCodeGen::DoCheckMapCommon(Register reg,
+                                Handle<Map> map,
+                                CompareMapMode mode,
+                                LEnvironment* env) {
+  Label success;
+  __ CompareMap(reg, map, &success, mode);
+  DeoptimizeIf(not_equal, env);
+  __ bind(&success);
+}
+
+
 void LCodeGen::DoCheckMap(LCheckMap* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
-         instr->hydrogen()->map());
-  DeoptimizeIf(not_equal, instr->environment());
+  Handle<Map> map = instr->hydrogen()->map();
+  DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
 }
 
 
@@ -3676,18 +4013,6 @@
 }
 
 
-void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
-  if (heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        factory()->NewJSGlobalPropertyCell(object);
-    __ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
-    __ movq(result, Operand(result, 0));
-  } else {
-    __ Move(result, object);
-  }
-}
-
-
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   Register reg = ToRegister(instr->TempAt(0));
 
@@ -3695,32 +4020,139 @@
   Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  LoadHeapObject(reg, current_prototype);
+  __ LoadHeapObject(reg, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
-    __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
-           Handle<Map>(current_prototype->map()));
-    DeoptimizeIf(not_equal, instr->environment());
+    DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
+                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    LoadHeapObject(reg, current_prototype);
+    __ LoadHeapObject(reg, current_prototype);
   }
 
   // Check the holder map.
-  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
-         Handle<Map>(current_prototype->map()));
-  DeoptimizeIf(not_equal, instr->environment());
+  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
+                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+}
+
+
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+  class DeferredAllocateObject: public LDeferredCode {
+   public:
+    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocateObject* instr_;
+  };
+
+  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->TempAt(0));
+  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+  Handle<Map> initial_map(constructor->initial_map());
+  int instance_size = initial_map->instance_size();
+  ASSERT(initial_map->pre_allocated_property_fields() +
+         initial_map->unused_property_fields() -
+         initial_map->inobject_properties() == 0);
+
+  // Allocate memory for the object.  The initial map might change when
+  // the constructor's prototype changes, but instance size and property
+  // counts remain unchanged (if slack tracking finished).
+  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
+  __ AllocateInNewSpace(instance_size,
+                        result,
+                        no_reg,
+                        scratch,
+                        deferred->entry(),
+                        TAG_OBJECT);
+
+  // Load the initial map.
+  Register map = scratch;
+  __ LoadHeapObject(scratch, constructor);
+  __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
+
+  if (FLAG_debug_code) {
+    __ AbortIfSmi(map);
+    __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
+            Immediate(instance_size >> kPointerSizeLog2));
+    __ Assert(equal, "Unexpected instance size");
+    __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
+            Immediate(initial_map->pre_allocated_property_fields()));
+    __ Assert(equal, "Unexpected pre-allocated property fields count");
+    __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
+            Immediate(initial_map->unused_property_fields()));
+    __ Assert(equal, "Unexpected unused property fields count");
+    __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
+            Immediate(initial_map->inobject_properties()));
+    __ Assert(equal, "Unexpected in-object property fields count");
+  }
+
+  // Initialize map and fields of the newly allocated object.
+  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+  __ movq(FieldOperand(result, JSObject::kMapOffset), map);
+  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+  __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
+  __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+  if (initial_map->inobject_properties() != 0) {
+    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+    for (int i = 0; i < initial_map->inobject_properties(); i++) {
+      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+      __ movq(FieldOperand(result, property_offset), scratch);
+    }
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+  Register result = ToRegister(instr->result());
+  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Set(result, 0);
+
+  PushSafepointRegistersScope scope(this);
+  __ PushHeapObject(constructor);
+  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
+  __ StoreToSafepointRegisterSlot(result, rax);
 }
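
AllocateInNewSpace is a bump-pointer fast path; only when the request would
cross the space limit does control reach this deferred code and call
Runtime::kNewObject. A minimal sketch of that split (the struct and field
names are assumptions of the sketch):

    #include <cstddef>
    #include <cstdint>

    struct NewSpace {
      uint8_t* top;    // next free byte
      uint8_t* limit;  // end of the currently usable region
    };

    // Fast path: bump the allocation top. Failure routes to deferred code.
    void* TryAllocate(NewSpace* space, std::size_t size, bool* needs_runtime) {
      if (static_cast<std::size_t>(space->limit - space->top) < size) {
        *needs_runtime = true;  // deferred->entry(): let the runtime allocate
        return nullptr;
      }
      void* result = space->top;
      space->top += size;
      *needs_runtime = false;
      return result;
    }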
 
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
-  // Setup the parameters to the stub/runtime call.
+  Heap* heap = isolate()->heap();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate_elements_kind();
+
+  // Deopt if the array literal boilerplate ElementsKind is of a type
+  // different from the expected one. The check isn't necessary if the
+  // boilerplate has already been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
+    __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+    // Load the map's "bit field 2".
+    __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ and_(rbx, Immediate(Map::kElementsKindMask));
+    __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
+                           Map::kElementsKindShift));
+    DeoptimizeIf(not_equal, instr->environment());
+  }
+
+  // Set up the parameters to the stub/runtime call.
   __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
-  __ Push(instr->hydrogen()->constant_elements());
+  // The boilerplate already exists; constant elements are never accessed.
+  // Pass an empty fixed array.
+  __ Push(Handle<FixedArray>(heap->empty_fixed_array()));
 
   // Pick the right runtime function or stub to call.
   int length = instr->hydrogen()->length();
@@ -3736,26 +4168,160 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
 
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
-  // Setup the parameters to the stub/runtime call.
-  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-  __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
-  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
-  __ Push(instr->hydrogen()->constant_properties());
-  __ Push(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0));
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+                            Register result,
+                            Register source,
+                            int* offset) {
+  ASSERT(!source.is(rcx));
+  ASSERT(!result.is(rcx));
 
-  // Pick the right runtime function to call.
+  // Only elements backing stores for non-COW arrays need to be copied.
+  Handle<FixedArrayBase> elements(object->elements());
+  bool has_elements = elements->length() > 0 &&
+      elements->map() != isolate()->heap()->fixed_cow_array_map();
+
+  // Increase the offset so that subsequent objects end up right after
+  // this object and its backing store.
+  int object_offset = *offset;
+  int object_size = object->map()->instance_size();
+  int elements_offset = *offset + object_size;
+  int elements_size = has_elements ? elements->Size() : 0;
+  *offset += object_size + elements_size;
+
+  // Copy object header.
+  ASSERT(object->properties()->length() == 0);
+  int inobject_properties = object->map()->inobject_properties();
+  int header_size = object_size - inobject_properties * kPointerSize;
+  for (int i = 0; i < header_size; i += kPointerSize) {
+    if (has_elements && i == JSObject::kElementsOffset) {
+      __ lea(rcx, Operand(result, elements_offset));
+    } else {
+      __ movq(rcx, FieldOperand(source, i));
+    }
+    __ movq(FieldOperand(result, object_offset + i), rcx);
+  }
+
+  // Copy in-object properties.
+  for (int i = 0; i < inobject_properties; i++) {
+    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
+    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      __ lea(rcx, Operand(result, *offset));
+      __ movq(FieldOperand(result, total_offset), rcx);
+      __ LoadHeapObject(source, value_object);
+      EmitDeepCopy(value_object, result, source, offset);
+    } else if (value->IsHeapObject()) {
+      __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
+      __ movq(FieldOperand(result, total_offset), rcx);
+    } else {
+      __ movq(rcx, value, RelocInfo::NONE);
+      __ movq(FieldOperand(result, total_offset), rcx);
+    }
+  }
+
+  if (has_elements) {
+    // Copy elements backing store header.
+    __ LoadHeapObject(source, elements);
+    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
+      __ movq(rcx, FieldOperand(source, i));
+      __ movq(FieldOperand(result, elements_offset + i), rcx);
+    }
+
+    // Copy elements backing store content.
+    int elements_length = elements->length();
+    if (elements->IsFixedDoubleArray()) {
+      Handle<FixedDoubleArray> double_array =
+          Handle<FixedDoubleArray>::cast(elements);
+      for (int i = 0; i < elements_length; i++) {
+        int64_t value = double_array->get_representation(i);
+        int total_offset =
+            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+        __ movq(rcx, value, RelocInfo::NONE);
+        __ movq(FieldOperand(result, total_offset), rcx);
+      }
+    } else if (elements->IsFixedArray()) {
+      for (int i = 0; i < elements_length; i++) {
+        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+        Handle<Object> value = JSObject::GetElement(object, i);
+        if (value->IsJSObject()) {
+          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+          __ lea(rcx, Operand(result, *offset));
+          __ movq(FieldOperand(result, total_offset), rcx);
+          __ LoadHeapObject(source, value_object);
+          EmitDeepCopy(value_object, result, source, offset);
+        } else if (value->IsHeapObject()) {
+          __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
+          __ movq(FieldOperand(result, total_offset), rcx);
+        } else {
+          __ movq(rcx, value, RelocInfo::NONE);
+          __ movq(FieldOperand(result, total_offset), rcx);
+        }
+      }
+    } else {
+      UNREACHABLE();
+    }
+  }
+}
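
The offset bookkeeping is what makes the single allocation in DoFastLiteral
(below) work: each object reserves its header-plus-elements range before
recursing, so nested boilerplate objects land back to back and the final
ASSERT_EQ(size, offset) holds. A toy C++ model of the placement (sizes and
structure are illustrative):

    #include <cstdio>
    #include <vector>

    struct Boilerplate {
      int object_size;    // header + in-object properties
      int elements_size;  // non-COW backing store; 0 if shared or empty
      std::vector<Boilerplate> children;
    };

    void PlaceDeep(const Boilerplate& o, int* offset) {
      int object_at = *offset;
      // Reserve this object and its backing store before recursing, so the
      // children are laid out directly after them.
      *offset += o.object_size + o.elements_size;
      std::printf("object@%d elements@%d\n", object_at,
                  object_at + o.object_size);
      for (const Boilerplate& child : o.children) PlaceDeep(child, offset);
    }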
+
+
+void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
+  int size = instr->hydrogen()->total_size();
+
+  // Allocate all objects that are part of the literal in one big
+  // allocation. This avoids multiple limit checks.
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ Push(Smi::FromInt(size));
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+  __ bind(&allocated);
+  int offset = 0;
+  __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
+  EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
+  ASSERT_EQ(size, offset);
+}
+
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+  Handle<FixedArray> literals(instr->environment()->closure()->literals());
+  Handle<FixedArray> constant_properties =
+      instr->hydrogen()->constant_properties();
+
+  // Set up the parameters to the stub/runtime call.
+  __ PushHeapObject(literals);
+  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+  __ Push(constant_properties);
+  int flags = instr->hydrogen()->fast_elements()
+      ? ObjectLiteral::kFastElements
+      : ObjectLiteral::kNoFlags;
+  flags |= instr->hydrogen()->has_function()
+      ? ObjectLiteral::kHasFunction
+      : ObjectLiteral::kNoFlags;
+  __ Push(Smi::FromInt(flags));
+
+  // Pick the right runtime function or stub to call.
+  int properties_count = constant_properties->length() / 2;
   if (instr->hydrogen()->depth() > 1) {
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
@@ -3825,8 +4391,7 @@
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && shared_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(shared_info->language_mode());
     __ Push(shared_info);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
@@ -3850,7 +4415,12 @@
 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
   ASSERT(!operand->IsDoubleRegister());
   if (operand->IsConstantOperand()) {
-    __ Push(ToHandle(LConstantOperand::cast(operand)));
+    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+    if (object->IsSmi()) {
+      __ Push(Handle<Smi>::cast(object));
+    } else {
+      __ PushHeapObject(Handle<HeapObject>::cast(object));
+    }
   } else if (operand->IsRegister()) {
     __ push(ToRegister(operand));
   } else {
@@ -3866,12 +4436,11 @@
   Label* true_label = chunk_->GetAssemblyLabel(true_block);
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  Condition final_branch_condition = EmitTypeofIs(true_label,
-                                                  false_label,
-                                                  input,
-                                                  instr->type_literal());
-
-  EmitBranch(true_block, false_block, final_branch_condition);
+  Condition final_branch_condition =
+      EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+  if (final_branch_condition != no_condition) {
+    EmitBranch(true_block, false_block, final_branch_condition);
+  }
 }
 
 
@@ -3916,9 +4485,12 @@
     final_branch_condition = not_zero;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ JumpIfSmi(input, false_label);
-    __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
-    final_branch_condition = above_equal;
+    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+    __ j(equal, true_label);
+    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+    final_branch_condition = equal;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@@ -3936,7 +4508,6 @@
     final_branch_condition = zero;
 
   } else {
-    final_branch_condition = never;
     __ jmp(false_label);
   }
 
@@ -3978,11 +4549,7 @@
   int current_pc = masm()->pc_offset();
   if (current_pc < last_lazy_deopt_pc_ + space_needed) {
     int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
-    while (padding_size > 0) {
-      int nop_size = padding_size > 9 ? 9 : padding_size;
-      __ nop(nop_size);
-      padding_size -= nop_size;
-    }
+    __ Nop(padding_size);
   }
 }
 
@@ -4051,6 +4618,7 @@
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStackCheck* instr_;
   };
@@ -4106,6 +4674,88 @@
   osr_pc_offset_ = masm()->pc_offset();
 }
 
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  DeoptimizeIf(equal, instr->environment());
+
+  Register null_value = rdi;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ cmpq(rax, null_value);
+  DeoptimizeIf(equal, instr->environment());
+
+  Condition cc = masm()->CheckSmi(rax);
+  DeoptimizeIf(cc, instr->environment());
+
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
+  DeoptimizeIf(below_equal, instr->environment());
+
+  Label use_cache, call_runtime;
+  __ CheckEnumCache(null_value, &call_runtime);
+
+  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+  __ jmp(&use_cache, Label::kNear);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(rax);
+  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kMetaMapRootIndex);
+  DeoptimizeIf(not_equal, instr->environment());
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  __ LoadInstanceDescriptors(map, result);
+  __ movq(result,
+          FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
+  __ movq(result,
+          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+  Condition cc = masm()->CheckSmi(result);
+  DeoptimizeIf(cc, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  __ cmpq(ToRegister(instr->map()),
+          FieldOperand(object, HeapObject::kMapOffset));
+  DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+
+  Label out_of_object, done;
+  __ SmiToInteger32(index, index);
+  __ cmpl(index, Immediate(0));
+  __ j(less, &out_of_object);
+  __ movq(object, FieldOperand(object,
+                               index,
+                               times_pointer_size,
+                               JSObject::kHeaderSize));
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&out_of_object);
+  __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
+  __ negl(index);
+  // Index is now equal to the out-of-object property index plus 1.
+  __ movq(object, FieldOperand(object,
+                               index,
+                               times_pointer_size,
+                               FixedArray::kHeaderSize - kPointerSize));
+  __ bind(&done);
+}
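
The smi index encodes where the property lives: a non-negative value selects
an in-object slot, while a negative value selects slot (-index - 1) of the
out-of-object properties array, which is exactly what the negl plus the
one-slot bias on FixedArray::kHeaderSize computes. Sketch (the container
layout is assumed for illustration):

    #include <vector>

    struct FakeObject {
      std::vector<int> in_object;   // fields stored in the object body
      std::vector<int> properties;  // overflow backing store
    };

    int LoadFieldByIndex(const FakeObject& o, int index) {
      if (index >= 0) return o.in_object[index];
      return o.properties[-index - 1];  // negl, then one slot of bias
    }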
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 43c045f..f5045b6 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -78,6 +78,7 @@
   XMMRegister ToDoubleRegister(LOperand* op) const;
   bool IsInteger32Constant(LConstantOperand* op) const;
   int ToInteger32(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
   bool IsTaggedConstant(LConstantOperand* op) const;
   Handle<Object> ToHandle(LConstantOperand* op) const;
   Operand ToOperand(LOperand* op) const;
@@ -96,12 +97,17 @@
   void DoDeferredTaggedToI(LTaggedToI* instr);
   void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
   void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredRandom(LRandom* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocateObject(LAllocateObject* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
 
-  // Parallel move support.
+  void DoCheckMapCommon(Register reg, Handle<Map> map,
+                        CompareMapMode mode, LEnvironment* env);
+
+  // Parallel move support.
   void DoParallelMove(LParallelMove* move);
   void DoGap(LGap* instr);
 
@@ -126,8 +132,8 @@
   bool is_done() const { return status_ == DONE; }
   bool is_aborted() const { return status_ == ABORTED; }
 
-  int strict_mode_flag() const {
-    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+  StrictModeFlag strict_mode_flag() const {
+    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -140,7 +146,8 @@
                        Label* if_false,
                        Handle<String> class_name,
                        Register input,
-                       Register temporary);
+                       Register temporary,
+                       Register scratch);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
   int GetParameterCount() const { return scope()->num_parameters(); }
@@ -189,15 +196,13 @@
                                int argc,
                                LInstruction* instr);
 
-
   // Generate a direct call to a known function.  Expects the function
-  // to be in edi.
+  // to be in rdi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
                          LInstruction* instr,
                          CallKind call_kind);
 
-  void LoadHeapObject(Register result, Handle<HeapObject> object);
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode,
@@ -230,6 +235,7 @@
   void DoMathSqrt(LUnaryMathOperation* instr);
   void DoMathPowHalf(LUnaryMathOperation* instr);
   void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathTan(LUnaryMathOperation* instr);
   void DoMathCos(LUnaryMathOperation* instr);
   void DoMathSin(LUnaryMathOperation* instr);
 
@@ -248,17 +254,19 @@
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
-  void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
                         XMMRegister result,
                         bool deoptimize_on_undefined,
+                        bool deoptimize_on_minus_zero,
                         LEnvironment* env);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(Label* true_label, Label* false_label,
-                         Register input, Handle<String> type_name);
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name);
 
   // Emits optimized code for %_IsObject(x).  Preserves input register.
   // Returns the condition on which a final split to
@@ -267,6 +275,13 @@
                          Label* is_not_object,
                          Label* is_object);
 
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string);
+
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp);
@@ -280,6 +295,13 @@
   // register, or a stack slot operand.
   void EmitPushTaggedOperand(LOperand* operand);
 
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset);
+
   struct JumpTableEntry {
     explicit inline JumpTableEntry(Address entry)
         : label(),
@@ -346,16 +368,20 @@
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen), external_exit_(NULL) {
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
     codegen->AddDeferredCode(this);
   }
 
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
 
-  void SetExit(Label *exit) { external_exit_ = exit; }
+  void SetExit(Label* exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
 
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -366,6 +392,7 @@
   Label entry_;
   Label exit_;
   Label* external_exit_;
+  int instruction_index_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index c3c617c..877ea8c 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -198,16 +198,18 @@
       if (cgen_->IsInteger32Constant(constant_source)) {
         __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
       } else {
-        __ Move(dst, cgen_->ToHandle(constant_source));
+        __ LoadObject(dst, cgen_->ToHandle(constant_source));
       }
     } else {
       ASSERT(destination->IsStackSlot());
       Operand dst = cgen_->ToOperand(destination);
       if (cgen_->IsInteger32Constant(constant_source)) {
-        // Allow top 32 bits of an untagged Integer32 to be arbitrary.
-        __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+        // Zero the top 32 bits of a 64-bit spill slot that holds a 32-bit
+        // untagged value.
+        __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
       } else {
-        __ Move(dst, cgen_->ToHandle(constant_source));
+        __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source));
+        __ movq(dst, kScratchRegister);
       }
     }
 
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 5fc5646..593e778 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -214,10 +214,11 @@
 }
 
 
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
@@ -229,6 +230,13 @@
 }
 
 
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   InputAt(0)->PrintTo(stream);
@@ -243,6 +251,14 @@
 }
 
 
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  InputAt(0)->PrintTo(stream);
+  InputAt(1)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -366,7 +382,7 @@
 
 
 void LChunk::MarkEmptyBlocks() {
-  HPhase phase("Mark empty blocks", this);
+  HPhase phase("L_Mark empty blocks", this);
   for (int i = 0; i < graph()->blocks()->length(); ++i) {
     HBasicBlock* block = graph()->blocks()->at(i);
     int first = block->first_instruction_index();
@@ -446,8 +462,14 @@
 }
 
 
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LInstructionGap* gap = new LInstructionGap(block);
+  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -522,8 +544,8 @@
 
 LChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
-  chunk_ = new LChunk(info(), graph());
-  HPhase phase("Building chunk", chunk_);
+  chunk_ = new(zone()) LChunk(info(), graph());
+  HPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
   for (int i = 0; i < blocks->length(); i++) {
@@ -552,20 +574,15 @@
 }
 
 
-LRegister* LChunkBuilder::ToOperand(Register reg) {
-  return LRegister::Create(Register::ToAllocationIndex(reg));
-}
-
-
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
-  return new LUnallocated(LUnallocated::FIXED_REGISTER,
-                          Register::ToAllocationIndex(reg));
+  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+                                  Register::ToAllocationIndex(reg));
 }
 
 
 LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
-  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-                          XMMRegister::ToAllocationIndex(reg));
+  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                                  XMMRegister::ToAllocationIndex(reg));
 }
 
 
@@ -580,29 +597,29 @@
 
 
 LOperand* LChunkBuilder::UseRegister(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
   return Use(value,
-             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
                               LUnallocated::USED_AT_START));
 }
 
 
 LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::Use(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::NONE));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
 }
 
 
 LOperand* LChunkBuilder::UseAtStart(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::NONE,
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
                                      LUnallocated::USED_AT_START));
 }
 
@@ -638,7 +655,7 @@
 LOperand* LChunkBuilder::UseAny(HValue* value) {
   return value->IsConstant()
       ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      :  Use(value, new LUnallocated(LUnallocated::ANY));
+      :  Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
 }
 
 
@@ -647,7 +664,7 @@
     HInstruction* instr = HInstruction::cast(value);
     VisitInstruction(instr);
   }
-  allocator_->RecordUse(value, operand);
+  operand->set_virtual_register(value->id());
   return operand;
 }
 
@@ -655,22 +672,17 @@
 template<int I, int T>
 LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
                                     LUnallocated* result) {
-  allocator_->RecordDefinition(current_instruction_, result);
+  result->set_virtual_register(current_instruction_->id());
   instr->set_result(result);
   return instr;
 }
 
 
 template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::NONE));
-}
-
-
-template<int I, int T>
 LInstruction* LChunkBuilder::DefineAsRegister(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
@@ -678,14 +690,16 @@
 LInstruction* LChunkBuilder::DefineAsSpilled(
     LTemplateInstruction<1, I, T>* instr,
     int index) {
-  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
 }
 
 
 template<int I, int T>
 LInstruction* LChunkBuilder::DefineSameAsFirst(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
 }
 
 
@@ -706,7 +720,9 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  instr->set_environment(CreateEnvironment(hydrogen_env));
+  int argument_index_accumulator = 0;
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator));
   return instr;
 }
 
@@ -736,7 +752,7 @@
   instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
-  if (hinstr->HasSideEffects()) {
+  if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
     instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -748,7 +764,8 @@
   // Thus we still need to attach environment to this call even if
   // call sequence can not deoptimize eagerly.
   bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
@@ -765,66 +782,46 @@
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
-  instr->set_pointer_map(new LPointerMap(position_));
+  instr->set_pointer_map(new(zone()) LPointerMap(position_));
   return instr;
 }
 
 
 LUnallocated* LChunkBuilder::TempRegister() {
-  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
-  allocator_->RecordTemporary(operand);
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  operand->set_virtual_register(allocator_->GetVirtualRegister());
+  if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(Register reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  allocator_->RecordTemporary(operand);
+  ASSERT(operand->HasFixedPolicy());
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  allocator_->RecordTemporary(operand);
+  ASSERT(operand->HasFixedPolicy());
   return operand;
 }
 
 
 LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
-  return new LLabel(instr->block());
+  return new(zone()) LLabel(instr->block());
 }
 
 
 LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new LDeoptimize);
+  return AssignEnvironment(new(zone()) LDeoptimize);
 }
 
 
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
-  return AssignEnvironment(new LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
-                                   HBitwiseBinaryOperation* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineSameAsFirst(new LBitI(op, left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* left = UseFixed(instr->left(), rdx);
-    LOperand* right = UseFixed(instr->right(), rax);
-    LArithmeticT* result = new LArithmeticT(op, left, right);
-    return MarkAsCall(DefineFixed(result, rax), instr);
-  }
+  return AssignEnvironment(new(zone()) LDeoptimize);
 }
 
 
@@ -836,7 +833,7 @@
 
     LOperand* left = UseFixed(instr->left(), rdx);
     LOperand* right = UseFixed(instr->right(), rax);
-    LArithmeticT* result = new LArithmeticT(op, left, right);
+    LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
     return MarkAsCall(DefineFixed(result, rax), instr);
   }
 
@@ -870,7 +867,7 @@
   }
 
   LInstruction* result =
-      DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+      DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
   return does_deopt ? AssignEnvironment(result) : result;
 }
 
@@ -883,7 +880,7 @@
   ASSERT(op != Token::MOD);
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  LArithmeticD* result = new LArithmeticD(op, left, right);
+  LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
   return DefineSameAsFirst(result);
 }
 
@@ -901,7 +898,8 @@
   ASSERT(right->representation().IsTagged());
   LOperand* left_operand = UseFixed(left, rdx);
   LOperand* right_operand = UseFixed(right, rax);
-  LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
+  LArithmeticT* result =
+      new(zone()) LArithmeticT(op, left_operand, right_operand);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -989,20 +987,26 @@
 }
 
 
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+    HEnvironment* hydrogen_env,
+    int* argument_index_accumulator) {
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
   int ast_id = hydrogen_env->ast_id();
-  ASSERT(ast_id != AstNode::kNoNumber);
+  ASSERT(ast_id != AstNode::kNoNumber ||
+         hydrogen_env->frame_type() != JS_FUNCTION);
   int value_count = hydrogen_env->length();
-  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
-                                          ast_id,
-                                          hydrogen_env->parameter_count(),
-                                          argument_count_,
-                                          value_count,
-                                          outer);
-  int argument_index = 0;
+  LEnvironment* result = new(zone()) LEnvironment(
+      hydrogen_env->closure(),
+      hydrogen_env->frame_type(),
+      ast_id,
+      hydrogen_env->parameter_count(),
+      argument_count_,
+      value_count,
+      outer);
+  int argument_index = *argument_index_accumulator;
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1011,57 +1015,70 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new LArgument(argument_index++);
+      op = new(zone()) LArgument(argument_index++);
     } else {
       op = UseAny(value);
     }
     result->AddValue(op, value->representation());
   }
 
+  if (hydrogen_env->frame_type() == JS_FUNCTION) {
+    *argument_index_accumulator = argument_index;
+  }
+
   return result;
 }
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  return new LGoto(instr->FirstSuccessor()->block_id());
+  return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
 }
 
 
 LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
-  HValue* v = instr->value();
-  if (v->EmitAtUses()) {
-    ASSERT(v->IsConstant());
-    ASSERT(!v->representation().IsDouble());
-    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+  HValue* value = instr->value();
+  if (value->EmitAtUses()) {
+    ASSERT(value->IsConstant());
+    ASSERT(!value->representation().IsDouble());
+    HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
         ? instr->FirstSuccessor()
         : instr->SecondSuccessor();
-    return new LGoto(successor->block_id());
+    return new(zone()) LGoto(successor->block_id());
   }
-  return AssignEnvironment(new LBranch(UseRegister(v)));
+
+  LBranch* result = new(zone()) LBranch(UseRegister(value));
+  // Tagged values that are not known smis or booleans require a
+  // deoptimization environment.
+  Representation rep = value->representation();
+  HType type = value->type();
+  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+    return AssignEnvironment(result);
+  }
+  return result;
 }
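// [Editor's note -- illustrative sketch, not part of the merged diff.]
// DoBranch now attaches a deoptimization environment only when one may
// actually be needed: an untagged input, or a tagged input statically known
// to be a smi or a boolean, can be tested for truthiness without bailing
// out. The condition as a plain predicate:
static bool BranchNeedsEnvironment(bool is_tagged,
                                   bool is_known_smi,
                                   bool is_known_boolean) {
  return is_tagged && !is_known_smi && !is_known_boolean;
}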
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
-  return new LCmpMapAndBranch(value);
+  return new(zone()) LCmpMapAndBranch(value);
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
-  return DefineAsRegister(new LArgumentsLength(Use(length->value())));
+  return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
-  return DefineAsRegister(new LArgumentsElements);
+  return DefineAsRegister(new(zone()) LArgumentsElements);
 }
 
 
 LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
   LOperand* left = UseFixed(instr->left(), rax);
   LOperand* right = UseFixed(instr->right(), rdx);
-  LInstanceOf* result = new LInstanceOf(left, right);
+  LInstanceOf* result = new(zone()) LInstanceOf(left, right);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -1069,18 +1086,26 @@
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new LInstanceOfKnownGlobal(UseFixed(instr->left(), rax),
-                                 FixedTemp(rdi));
+      new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), rax),
+                                         FixedTemp(rdi));
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegister(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
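// [Editor's note -- illustrative sketch, not part of the merged diff.] The
// new LWrapReceiver models the sloppy-mode receiver fixup ahead of
// apply-style calls: null/undefined receivers are replaced by the global
// receiver and other non-object receivers are boxed; the slow cases bail
// out, which is why the instruction gets AssignEnvironment. The decision,
// roughly:
static bool ReceiverNeedsWrapping(bool is_null_or_undefined,
                                  bool is_spec_object) {
  // null/undefined -> global receiver; remaining primitives -> wrapper.
  return is_null_or_undefined || !is_spec_object;
}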
+
+
 LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
   LOperand* function = UseFixed(instr->function(), rdi);
   LOperand* receiver = UseFixed(instr->receiver(), rax);
   LOperand* length = UseFixed(instr->length(), rbx);
   LOperand* elements = UseFixed(instr->elements(), rcx);
-  LApplyArguments* result = new LApplyArguments(function,
-                                                receiver,
-                                                length,
-                                                elements);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements);
@@ -1091,61 +1116,68 @@
 LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
   ++argument_count_;
   LOperand* argument = UseOrConstant(instr->argument());
-  return new LPushArgument(argument);
+  return new(zone()) LPushArgument(argument);
 }
 
 
 LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
 }
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
 }
 
 
 LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LOuterContext(context));
+  return DefineAsRegister(new(zone()) LOuterContext(context));
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  return MarkAsCall(new(zone()) LDeclareGlobals, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
-  return DefineAsRegister(new LGlobalObject);
+  return DefineAsRegister(new(zone()) LGlobalObject);
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
   LOperand* global_object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LGlobalReceiver(global_object));
+  return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
 }
 
 
 LInstruction* LChunkBuilder::DoCallConstantFunction(
     HCallConstantFunction* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallConstantFunction, rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
   LOperand* function = UseFixed(instr->function(), rdi);
   argument_count_ -= instr->argument_count();
-  LInvokeFunction* result = new LInvokeFunction(function);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(function);
   return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
 
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
-  if (op == kMathLog || op == kMathSin || op == kMathCos) {
+  if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
     LOperand* input = UseFixedDouble(instr->value(), xmm1);
-    LUnaryMathOperation* result = new LUnaryMathOperation(input);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
-    LUnaryMathOperation* result = new LUnaryMathOperation(input);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
     switch (op) {
       case kMathAbs:
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1169,47 +1201,48 @@
   ASSERT(instr->key()->representation().IsTagged());
   LOperand* key = UseFixed(instr->key(), rcx);
   argument_count_ -= instr->argument_count();
-  LCallKeyed* result = new LCallKeyed(key);
+  LCallKeyed* result = new(zone()) LCallKeyed(key);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallNamed, rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallNamed, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallGlobal, rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallKnownGlobal, rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
   LOperand* constructor = UseFixed(instr->constructor(), rdi);
   argument_count_ -= instr->argument_count();
-  LCallNew* result = new LCallNew(constructor);
+  LCallNew* result = new(zone()) LCallNew(constructor);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), rdi);
   argument_count_ -= instr->argument_count();
-  LCallFunction* result = new LCallFunction();
+  LCallFunction* result = new(zone()) LCallFunction(function);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallRuntime, rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, rax), instr);
 }
 
 
@@ -1228,8 +1261,24 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
-  return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineSameAsFirst(new(zone()) LBitI(left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), rdx);
+    LOperand* right = UseFixed(instr->right(), rax);
+    LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
+    return MarkAsCall(DefineFixed(result, rax), instr);
+  }
 }
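// [Editor's note -- illustrative sketch, not part of the merged diff.] The
// per-operator DoBitAnd/DoBitOr/DoBitXor builders collapse into one
// DoBitwise: int32 inputs become an LBitI (with the "least constant" side
// forced into a register so a constant can fold into the instruction),
// while tagged inputs fall back to the generic stub call in fixed rdx/rax.
// The int32 path computes, in effect:
static int BitwiseInt32(char op, int left, int right) {
  switch (op) {
    case '&': return left & right;
    case '|': return left | right;
    default:  return left ^ right;  // '^'
  }
}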
 
 
@@ -1237,21 +1286,11 @@
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
   LOperand* input = UseRegisterAtStart(instr->value());
-  LBitNotI* result = new LBitNotI(input);
+  LBitNotI* result = new(zone()) LBitNotI(input);
   return DefineSameAsFirst(result);
 }
 
 
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
-  return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
-  return DoBit(Token::BIT_XOR, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
@@ -1261,7 +1300,7 @@
     LOperand* temp = FixedTemp(rdx);
     LOperand* dividend = UseFixed(instr->left(), rax);
     LOperand* divisor = UseRegister(instr->right());
-    LDivI* result = new LDivI(dividend, divisor, temp);
+    LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
     return AssignEnvironment(DefineFixed(result, rax));
   } else {
     ASSERT(instr->representation().IsTagged());
@@ -1279,7 +1318,8 @@
     if (instr->HasPowerOf2Divisor()) {
       ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
       LOperand* value = UseRegisterAtStart(instr->left());
-      LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
+      LModI* mod =
+          new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
       result = DefineSameAsFirst(mod);
     } else {
       // The temporary operand is necessary to ensure that right is not
@@ -1287,7 +1327,7 @@
       LOperand* temp = FixedTemp(rdx);
       LOperand* value = UseFixed(instr->left(), rax);
       LOperand* divisor = UseRegister(instr->right());
-      LModI* mod = new LModI(value, divisor, temp);
+      LModI* mod = new(zone()) LModI(value, divisor, temp);
       result = DefineFixed(mod, rdx);
     }
 
@@ -1304,7 +1344,7 @@
     // TODO(fschneider): Allow any register as input registers.
     LOperand* left = UseFixedDouble(instr->left(), xmm2);
     LOperand* right = UseFixedDouble(instr->right(), xmm1);
-    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   }
 }
@@ -1316,8 +1356,12 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
     LOperand* right = UseOrConstant(instr->MostConstantOperand());
-    LMulI* mul = new LMulI(left, right);
-    return AssignEnvironment(DefineSameAsFirst(mul));
+    LMulI* mul = new(zone()) LMulI(left, right);
+    if (instr->CheckFlag(HValue::kCanOverflow) ||
+        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      AssignEnvironment(mul);
+    }
+    return DefineSameAsFirst(mul);
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MUL, instr);
   } else {
@@ -1333,7 +1377,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    LSubI* sub = new LSubI(left, right);
+    LSubI* sub = new(zone()) LSubI(left, right);
     LInstruction* result = DefineSameAsFirst(sub);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1354,7 +1398,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
     LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    LAddI* add = new LAddI(left, right);
+    LAddI* add = new(zone()) LAddI(left, right);
     LInstruction* result = DefineSameAsFirst(add);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1384,20 +1428,31 @@
 #else
       UseFixed(instr->right(), rdi);
 #endif
-  LPower* result = new LPower(left, right);
-  return MarkAsCall(DefineFixedDouble(result, xmm1), instr,
+  LPower* result = new(zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
                     CAN_DEOPTIMIZE_EAGERLY);
 }
 
 
+LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->global_object()->representation().IsTagged());
+#ifdef _WIN64
+  LOperand* global_object = UseFixed(instr->global_object(), rcx);
+#else
+  LOperand* global_object = UseFixed(instr->global_object(), rdi);
+#endif
+  LRandom* result = new(zone()) LRandom(global_object);
+  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
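// [Editor's note -- illustrative sketch, not part of the merged diff.] The
// #ifdef in DoRandom (and in DoPower above) mirrors the two x64 C calling
// conventions: Win64 passes the first integer argument in rcx, while the
// System V AMD64 ABI used on Linux and macOS passes it in rdi, so the fixed
// register chosen for the runtime call differs per platform.
static const char* FirstIntegerArgumentRegister() {
#ifdef _WIN64
  return "rcx";  // Win64 order: rcx, rdx, r8, r9
#else
  return "rdi";  // System V order: rdi, rsi, rdx, rcx, r8, r9
#endif
}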
+
+
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  Token::Value op = instr->token();
   ASSERT(instr->left()->representation().IsTagged());
   ASSERT(instr->right()->representation().IsTagged());
-  bool reversed = (op == Token::GT || op == Token::LTE);
-  LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
-  LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
-  LCmpT* result = new LCmpT(left, right);
+  LOperand* left = UseFixed(instr->left(), rdx);
+  LOperand* right = UseFixed(instr->right(), rax);
+  LCmpT* result = new(zone()) LCmpT(left, right);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -1408,16 +1463,23 @@
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    return new LCmpIDAndBranch(left, right);
+    return new(zone()) LCmpIDAndBranch(left, right);
   } else {
     ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
-    LOperand* left = UseRegisterAtStart(instr->left());
-    LOperand* right = UseRegisterAtStart(instr->right());
-    return new LCmpIDAndBranch(left, right);
+    LOperand* left;
+    LOperand* right;
+    if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
+      left = UseRegisterOrConstantAtStart(instr->left());
+      right = UseRegisterOrConstantAtStart(instr->right());
+    } else {
+      left = UseRegisterAtStart(instr->left());
+      right = UseRegisterAtStart(instr->right());
+    }
+    return new(zone()) LCmpIDAndBranch(left, right);
   }
 }
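// [Editor's note -- illustrative sketch, not part of the merged diff.] The
// double-compare case now leaves both operands unallocated when both are
// constants, since such a comparison can be resolved without registers;
// mixed inputs still need registers at the start of the instruction:
enum ExampleUsePolicy { USE_REGISTER, USE_REGISTER_OR_CONSTANT };

static ExampleUsePolicy ChoosePolicy(bool left_is_constant,
                                     bool right_is_constant) {
  return (left_is_constant && right_is_constant) ? USE_REGISTER_OR_CONSTANT
                                                 : USE_REGISTER;
}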
 
@@ -1426,47 +1488,72 @@
     HCompareObjectEqAndBranch* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  return new LCmpObjectEqAndBranch(left, right);
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
 }
 
 
 LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
     HCompareConstantEqAndBranch* instr) {
-  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LCmpConstantEqAndBranch(value);
 }
 
 
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* temp = instr->is_strict() ? NULL : TempRegister();
-  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
+  LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
+  return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
+  return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(value, temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsSmiAndBranch(Use(instr->value()));
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
-                                      TempRegister());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsUndetectableAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* left = UseFixed(instr->left(), rdx);
+  LOperand* right = UseFixed(instr->right(), rax);
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(left, right);
+
+  return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LHasInstanceTypeAndBranch(value);
 }
 
 
@@ -1475,54 +1562,64 @@
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
-  return DefineAsRegister(new LGetCachedArrayIndex(value));
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
 }
 
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LHasCachedArrayIndexAndBranch(value);
 }
 
 
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
-  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
-                                   TempRegister());
+  LOperand* value = UseRegister(instr->value());
+  return new(zone()) LClassOfTestAndBranch(value,
+                                           TempRegister(),
+                                           TempRegister());
 }
 
 
 LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LJSArrayLength(array));
+  return DefineAsRegister(new(zone()) LJSArrayLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
     HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LFixedArrayBaseLength(array));
+  return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
   LOperand* object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LElementsKind(object));
+  return DefineAsRegister(new(zone()) LElementsKind(object));
 }
 
 
 LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
   LOperand* object = UseRegister(instr->value());
-  LValueOf* result = new LValueOf(object);
-  return AssignEnvironment(DefineSameAsFirst(result));
+  LValueOf* result = new(zone()) LValueOf(object);
+  return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+  LOperand* object = UseFixed(instr->value(), rax);
+  LDateField* result = new(zone()) LDateField(object, instr->index());
+  return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  return AssignEnvironment(new LBoundsCheck(
-      UseRegisterOrConstantAtStart(instr->index()),
-      Use(instr->length())));
+  LOperand* value = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = Use(instr->length());
+  return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
 }
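// [Editor's note -- illustrative sketch, not part of the merged diff.]
// LBoundsCheck deoptimizes rather than branching, which is why it carries an
// environment but defines no result; semantically it guards the classic
// unsigned-compare bounds idiom:
static bool IndexInBounds(int index, int length) {
  // A single unsigned compare also rejects negative indices.
  return static_cast<unsigned>(index) < static_cast<unsigned>(length);
}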
 
 
@@ -1535,7 +1632,7 @@
 
 LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
   LOperand* value = UseFixed(instr->value(), rax);
-  return MarkAsCall(new LThrow(value), instr);
+  return MarkAsCall(new(zone()) LThrow(value), instr);
 }
 
 
@@ -1558,7 +1655,7 @@
   if (from.IsTagged()) {
     if (to.IsDouble()) {
       LOperand* value = UseRegister(instr->value());
-      LNumberUntagD* res = new LNumberUntagD(value);
+      LNumberUntagD* res = new(zone()) LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
     } else {
       ASSERT(to.IsInteger32());
@@ -1567,10 +1664,10 @@
       if (needs_check) {
         bool truncating = instr->CanTruncateToInt32();
         LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
-        LTaggedToI* res = new LTaggedToI(value, xmm_temp);
+        LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
         return AssignEnvironment(DefineSameAsFirst(res));
       } else {
-        return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+        return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
       }
     }
   } else if (from.IsDouble()) {
@@ -1580,26 +1677,27 @@
 
       // Make sure that temp and result_temp are different registers.
       LUnallocated* result_temp = TempRegister();
-      LNumberTagD* result = new LNumberTagD(value, temp);
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
       return AssignPointerMap(Define(result, result_temp));
     } else {
       ASSERT(to.IsInteger32());
       LOperand* value = UseRegister(instr->value());
-      return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
+      return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
     }
   } else if (from.IsInteger32()) {
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
       if (val->HasRange() && val->range()->IsInSmiRange()) {
-        return DefineSameAsFirst(new LSmiTag(value));
+        return DefineSameAsFirst(new(zone()) LSmiTag(value));
       } else {
-        LNumberTagI* result = new LNumberTagI(value);
+        LNumberTagI* result = new(zone()) LNumberTagI(value);
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
       }
     } else {
       ASSERT(to.IsDouble());
-      return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+      LOperand* value = Use(instr->value());
+      return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
     }
   }
   UNREACHABLE();
@@ -1609,39 +1707,39 @@
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new LCheckNonSmi(value));
+  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LCheckInstanceType* result = new LCheckInstanceType(value);
+  LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
   LOperand* temp = TempRegister();
-  LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
+  LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new LCheckSmi(value));
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new LCheckFunction(value));
+  return AssignEnvironment(new(zone()) LCheckFunction(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LCheckMap* result = new LCheckMap(value);
+  LCheckMap* result = new(zone()) LCheckMap(value);
   return AssignEnvironment(result);
 }
 
@@ -1651,62 +1749,36 @@
   Representation input_rep = value->representation();
   LOperand* reg = UseRegister(value);
   if (input_rep.IsDouble()) {
-    return DefineAsRegister(new LClampDToUint8(reg,
-                                               TempRegister()));
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg,
+                                                       TempRegister()));
   } else if (input_rep.IsInteger32()) {
-    return DefineSameAsFirst(new LClampIToUint8(reg));
+    return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
   } else {
     ASSERT(input_rep.IsTagged());
     // Register allocator doesn't (yet) support allocation of double
     // temps. Reserve xmm1 explicitly.
-    LClampTToUint8* result = new LClampTToUint8(reg,
-                                                TempRegister(),
-                                                FixedTemp(xmm1));
+    LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
+                                                        TempRegister(),
+                                                        FixedTemp(xmm1));
     return AssignEnvironment(DefineSameAsFirst(result));
   }
 }
 
 
-LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
-  HValue* value = instr->value();
-  Representation input_rep = value->representation();
-  LOperand* reg = UseRegister(value);
-  if (input_rep.IsDouble()) {
-    return AssignEnvironment(DefineAsRegister(new LDoubleToI(reg)));
-  } else if (input_rep.IsInteger32()) {
-    // Canonicalization should already have removed the hydrogen instruction in
-    // this case, since it is a noop.
-    UNREACHABLE();
-    return NULL;
-  } else {
-    ASSERT(input_rep.IsTagged());
-    LOperand* reg = UseRegister(value);
-    // Register allocator doesn't (yet) support allocation of double
-    // temps. Reserve xmm1 explicitly.
-    LOperand* xmm_temp =
-        CpuFeatures::IsSupported(SSE3)
-        ? NULL
-        : FixedTemp(xmm1);
-    return AssignEnvironment(
-        DefineSameAsFirst(new LTaggedToI(reg, xmm_temp)));
-  }
-}
-
-
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
-  return new LReturn(UseFixed(instr->value(), rax));
+  return new(zone()) LReturn(UseFixed(instr->value(), rax));
 }
 
 
 LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
   Representation r = instr->representation();
   if (r.IsInteger32()) {
-    return DefineAsRegister(new LConstantI);
+    return DefineAsRegister(new(zone()) LConstantI);
   } else if (r.IsDouble()) {
     LOperand* temp = TempRegister();
-    return DefineAsRegister(new LConstantD(temp));
+    return DefineAsRegister(new(zone()) LConstantD(temp));
   } else if (r.IsTagged()) {
-    return DefineAsRegister(new LConstantT);
+    return DefineAsRegister(new(zone()) LConstantT);
   } else {
     UNREACHABLE();
     return NULL;
@@ -1715,8 +1787,8 @@
 
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
-  LLoadGlobalCell* result = new LLoadGlobalCell;
-  return instr->check_hole_value()
+  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
+  return instr->RequiresHoleCheck()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@@ -1724,29 +1796,35 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* global_object = UseFixed(instr->global_object(), rax);
-  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+  LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
-  LStoreGlobalCell* result =
-      new LStoreGlobalCell(UseRegister(instr->value()), TempRegister());
-  return instr->check_hole_value() ? AssignEnvironment(result) : result;
+  LOperand* value = UseRegister(instr->value());
+  // Use a temp to avoid reloading the cell value address in the case where
+  // we perform a hole check.
+  return instr->RequiresHoleCheck()
+      ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
+      : new(zone()) LStoreGlobalCell(value, NULL);
 }
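// [Editor's note -- illustrative sketch, not part of the merged diff.] The
// temp register keeps the cell address live across the hole check so it is
// not reloaded for the store; without a hole check neither the temp nor the
// environment is needed. Shape of the checked store, with abort() standing
// in for deoptimization:
#include <cstdlib>

static void StoreCellChecked(int* cell, int value, int the_hole) {
  if (*cell == the_hole) abort();  // deoptimize: the cell was deleted
  *cell = value;                   // same address, loaded only once
}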
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
   LOperand* global_object = UseFixed(instr->global_object(), rdx);
   LOperand* value = UseFixed(instr->value(), rax);
-  LStoreGlobalGeneric* result =  new LStoreGlobalGeneric(global_object, value);
+  LStoreGlobalGeneric* result = new(zone()) LStoreGlobalGeneric(global_object,
+                                                                value);
   return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadContextSlot(context));
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
@@ -1763,14 +1841,15 @@
     value = UseRegister(instr->value());
     temp = NULL;
   }
-  return new LStoreContextSlot(context, value, temp);
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
   ASSERT(instr->representation().IsTagged());
   LOperand* obj = UseRegisterAtStart(instr->object());
-  return DefineAsRegister(new LLoadNamedField(obj));
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
 }
 
 
@@ -1779,11 +1858,13 @@
   ASSERT(instr->representation().IsTagged());
   if (instr->need_generic()) {
     LOperand* obj = UseFixed(instr->object(), rax);
-    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+    LLoadNamedFieldPolymorphic* result =
+        new(zone()) LLoadNamedFieldPolymorphic(obj);
     return MarkAsCall(DefineFixed(result, rax), instr);
   } else {
     LOperand* obj = UseRegisterAtStart(instr->object());
-    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+    LLoadNamedFieldPolymorphic* result =
+        new(zone()) LLoadNamedFieldPolymorphic(obj);
     return AssignEnvironment(DefineAsRegister(result));
   }
 }
@@ -1791,7 +1872,7 @@
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object = UseFixed(instr->object(), rax);
-  LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
+  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(object);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -1799,20 +1880,20 @@
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
-      new LLoadFunctionPrototype(UseRegister(instr->function()))));
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadElements(input));
+  return DefineAsRegister(new(zone()) LLoadElements(input));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
     HLoadExternalArrayPointer* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadExternalArrayPointer(input));
+  return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
 }
 
 
@@ -1822,8 +1903,9 @@
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* obj = UseRegisterAtStart(instr->object());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
-  return AssignEnvironment(DefineAsRegister(result));
+  LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
+  if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+  return DefineAsRegister(result);
 }
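// [Editor's note -- illustrative sketch, not part of the merged diff.] Fast
// keyed loads now take an environment only when the array may contain the
// hole; AssignEnvironment mutates `result` in place, so its return value can
// be ignored here. The guarded load, with abort() standing in for a deopt:
#include <cstdlib>

static int LoadElementChecked(const int* elements, int key,
                              int the_hole, bool requires_hole_check) {
  int value = elements[key];
  if (requires_hole_check && value == the_hole) abort();  // deoptimize
  return value;
}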
 
 
@@ -1834,7 +1916,7 @@
   LOperand* elements = UseRegisterAtStart(instr->elements());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LLoadKeyedFastDoubleElement* result =
-      new LLoadKeyedFastDoubleElement(elements, key);
+      new(zone()) LLoadKeyedFastDoubleElement(elements, key);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
@@ -1842,19 +1924,18 @@
 LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
     HLoadKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
-  Representation representation(instr->representation());
   ASSERT(
-      (representation.IsInteger32() &&
+      (instr->representation().IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
        (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (representation.IsDouble() &&
+      (instr->representation().IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* external_pointer = UseRegister(instr->external_pointer());
   LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
-      new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt, make sure it
   // has an environment.
@@ -1867,7 +1948,7 @@
   LOperand* object = UseFixed(instr->object(), rdx);
   LOperand* key = UseFixed(instr->key(), rax);
 
-  LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key);
+  LLoadKeyedGeneric* result = new(zone()) LLoadKeyedGeneric(object, key);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -1886,8 +1967,7 @@
   LOperand* key = needs_write_barrier
       ? UseTempRegister(instr->key())
       : UseRegisterOrConstantAtStart(instr->key());
-
-  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+  return new(zone()) LStoreKeyedFastElement(obj, key, val);
 }
 
 
@@ -1901,19 +1981,18 @@
   LOperand* val = UseTempRegister(instr->value());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
 
-  return new LStoreKeyedFastDoubleElement(elements, key, val);
+  return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
     HStoreKeyedSpecializedArrayElement* instr) {
-  Representation representation(instr->value()->representation());
   ElementsKind elements_kind = instr->elements_kind();
   ASSERT(
-      (representation.IsInteger32() &&
+      (instr->value()->representation().IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
        (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (representation.IsDouble() &&
+      (instr->value()->representation().IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->external_pointer()->representation().IsExternal());
@@ -1928,9 +2007,9 @@
       : UseRegister(instr->value());
   LOperand* key = UseRegisterOrConstant(instr->key());
 
-  return new LStoreKeyedSpecializedArrayElement(external_pointer,
-                                                key,
-                                                val);
+  return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+                                                        key,
+                                                        val);
 }
 
 
@@ -1943,11 +2022,35 @@
   ASSERT(instr->key()->representation().IsTagged());
   ASSERT(instr->value()->representation().IsTagged());
 
-  LStoreKeyedGeneric* result = new LStoreKeyedGeneric(object, key, value);
+  LStoreKeyedGeneric* result =
+      new(zone()) LStoreKeyedGeneric(object, key, value);
   return MarkAsCall(result, instr);
 }
 
 
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LOperand* temp_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
+    return DefineSameAsFirst(result);
+  } else {
+    LOperand* object = UseFixed(instr->object(), rax);
+    LOperand* fixed_object_reg = FixedTemp(rdx);
+    LOperand* new_map_reg = FixedTemp(rbx);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object,
+                                            new_map_reg,
+                                            fixed_object_reg);
+    return MarkAsCall(DefineFixed(result, rax), instr);
+  }
+}
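// [Editor's note -- illustrative sketch, not part of the merged diff.] A
// FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS transition only rewrites the map
// word (smis are already valid tagged values, so the backing store can be
// reused), which is why it is emitted inline with two temps; other
// transitions may have to copy or convert the elements and therefore go
// through a runtime call with fixed registers. Hypothetical analogue:
struct ExampleHeapObject {
  const void* map;  // describes, among other things, the elements kind
  void* elements;   // backing store
};

static void TransitionSmiOnlyToFast(ExampleHeapObject* object,
                                    const void* fast_elements_map) {
  object->map = fast_elements_map;  // elements pointer is left untouched
}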
+
+
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool needs_write_barrier = instr->NeedsWriteBarrier();
 
@@ -1964,7 +2067,7 @@
   LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
       ? TempRegister() : NULL;
 
-  return new LStoreNamedField(obj, val, temp);
+  return new(zone()) LStoreNamedField(obj, val, temp);
 }
 
 
@@ -1972,7 +2075,7 @@
   LOperand* object = UseFixed(instr->object(), rdx);
   LOperand* value = UseFixed(instr->value(), rax);
 
-  LStoreNamedGeneric* result = new LStoreNamedGeneric(object, value);
+  LStoreNamedGeneric* result = new(zone()) LStoreNamedGeneric(object, value);
   return MarkAsCall(result, instr);
 }
 
@@ -1980,55 +2083,67 @@
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* left = UseOrConstantAtStart(instr->left());
   LOperand* right = UseOrConstantAtStart(instr->right());
-  return MarkAsCall(DefineFixed(new LStringAdd(left, right), rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), rax),
+                    instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* string = UseTempRegister(instr->string());
   LOperand* index = UseTempRegister(instr->index());
-  LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+  LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
 
 
 LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
   LOperand* char_code = UseRegister(instr->value());
-  LStringCharFromCode* result = new LStringCharFromCode(char_code);
+  LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
   return AssignPointerMap(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
   LOperand* string = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LStringLength(string));
+  return DefineAsRegister(new(zone()) LStringLength(string));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+  LAllocateObject* result = new(zone()) LAllocateObject(TempRegister());
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
+  return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LArrayLiteral, rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LObjectLiteral, rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LRegExpLiteral, rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LFunctionLiteral, rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
-  LDeleteProperty* result =
-      new LDeleteProperty(UseAtStart(instr->object()),
-                          UseOrConstantAtStart(instr->key()));
+  LOperand* object = UseAtStart(instr->object());
+  LOperand* key = UseOrConstantAtStart(instr->key());
+  LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -2036,13 +2151,13 @@
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
-  return AssignEnvironment(new LOsrEntry);
+  return AssignEnvironment(new(zone()) LOsrEntry);
 }
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new LParameter, spill_index);
+  return DefineAsSpilled(new(zone()) LParameter, spill_index);
 }
 
 
@@ -2052,13 +2167,13 @@
     Abort("Too many spill slots needed for OSR");
     spill_index = 0;
   }
-  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
 }
 
 
 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallStub, rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallStub, rax), instr);
 }
 
 
@@ -2075,32 +2190,33 @@
   LOperand* arguments = UseRegister(instr->arguments());
   LOperand* length = UseTempRegister(instr->length());
   LOperand* index = Use(instr->index());
-  LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+  LAccessArgumentsAt* result =
+      new(zone()) LAccessArgumentsAt(arguments, length, index);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
   LOperand* object = UseFixed(instr->value(), rax);
-  LToFastProperties* result = new LToFastProperties(object);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
-  LTypeof* result = new LTypeof(UseAtStart(instr->value()));
+  LTypeof* result = new(zone()) LTypeof(UseAtStart(instr->value()));
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
-  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
     HIsConstructCallAndBranch* instr) {
-  return new LIsConstructCallAndBranch(TempRegister());
+  return new(zone()) LIsConstructCallAndBranch(TempRegister());
 }
 
 
@@ -2123,7 +2239,7 @@
   // If there is an instruction pending deoptimization environment create a
   // lazy bailout instruction to capture the environment.
   if (pending_deoptimization_ast_id_ == instr->ast_id()) {
-    LLazyBailout* lazy_bailout = new LLazyBailout;
+    LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
     LInstruction* result = AssignEnvironment(lazy_bailout);
     instruction_pending_deoptimization_environment_->
         set_deoptimization_environment(result->environment());
@@ -2137,10 +2253,10 @@
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
   if (instr->is_function_entry()) {
-    return MarkAsCall(new LStackCheck, instr);
+    return MarkAsCall(new(zone()) LStackCheck, instr);
   } else {
     ASSERT(instr->is_backwards_branch());
-    return AssignEnvironment(AssignPointerMap(new LStackCheck));
+    return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
   }
 }
 
@@ -2149,9 +2265,14 @@
   HEnvironment* outer = current_block_->last_environment();
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
                                                instr->function(),
                                                undefined,
-                                               instr->call_kind());
+                                               instr->call_kind(),
+                                               instr->is_construct());
+  if (instr->arguments() != NULL) {
+    inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+  }
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2159,7 +2280,8 @@
 
 
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
-  HEnvironment* outer = current_block_->last_environment()->outer();
+  HEnvironment* outer = current_block_->last_environment()->
+      DiscardInlined(false);
   current_block_->UpdateEnvironment(outer);
   return NULL;
 }
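// [Editor's note -- illustrative sketch, not part of the merged diff.]
// Enter/LeaveInlined treat environments like a stack: entering copies the
// caller environment extended for the callee (now including the argument
// count, the construct-call flag, and an optional arguments-object binding),
// and leaving discards frames back to the caller. DiscardInlined differs
// from the old plain outer() in that it can also skip the synthetic
// non-function frames introduced for inlining; simplified:
struct ExampleFrame {
  ExampleFrame* outer;
  bool is_js_function;
};

static ExampleFrame* DiscardInlinedFrames(ExampleFrame* env) {
  ExampleFrame* e = env->outer;
  while (e != NULL && !e->is_js_function) e = e->outer;  // skip helper frames
  return e;
}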
@@ -2168,11 +2290,39 @@
 LInstruction* LChunkBuilder::DoIn(HIn* instr) {
   LOperand* key = UseOrConstantAtStart(instr->key());
   LOperand* object = UseOrConstantAtStart(instr->object());
-  LIn* result = new LIn(key, object);
+  LIn* result = new(zone()) LIn(key, object);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* object = UseFixed(instr->enumerable(), rax);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+  return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseTempRegister(instr->index());
+  return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index d169bf6..2d8fd2e 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,6 +49,7 @@
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
+  V(AllocateObject)                             \
   V(ApplyArguments)                             \
   V(ArgumentsElements)                          \
   V(ArgumentsLength)                            \
@@ -87,11 +88,13 @@
   V(ConstantI)                                  \
   V(ConstantT)                                  \
   V(Context)                                    \
+  V(DeclareGlobals)                             \
   V(DeleteProperty)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
   V(ElementsKind)                               \
+  V(FastLiteral)                                \
   V(FixedArrayBaseLength)                       \
   V(FunctionLiteral)                            \
   V(GetCachedArrayIndex)                        \
@@ -107,10 +110,12 @@
   V(Integer32ToDouble)                          \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
-  V(IsNullAndBranch)                            \
+  V(IsNilAndBranch)                             \
   V(IsObjectAndBranch)                          \
+  V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
+  V(StringCompareAndBranch)                     \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -138,6 +143,7 @@
   V(Parameter)                                  \
   V(Power)                                      \
   V(PushArgument)                               \
+  V(Random)                                     \
   V(RegExpLiteral)                              \
   V(Return)                                     \
   V(ShiftI)                                     \
@@ -162,11 +168,18 @@
   V(ThisFunction)                               \
   V(Throw)                                      \
   V(ToFastProperties)                           \
+  V(TransitionElementsKind)                     \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
   V(UnknownOSRValue)                            \
-  V(ValueOf)
+  V(ValueOf)                                    \
+  V(ForInPrepareMap)                            \
+  V(ForInCacheArray)                            \
+  V(CheckMapValue)                              \
+  V(LoadFieldByIndex)                           \
+  V(DateField)                                  \
+  V(WrapReceiver)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -457,6 +470,20 @@
 };
 
 
+class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+
 class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
  public:
   LApplyArguments(LOperand* function,
@@ -609,17 +636,18 @@
 };
 
 
-class LIsNullAndBranch: public LControlInstruction<1, 1> {
+class LIsNilAndBranch: public LControlInstruction<1, 1> {
  public:
-  LIsNullAndBranch(LOperand* value, LOperand* temp) {
+  LIsNilAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
 
-  bool is_strict() const { return hydrogen()->is_strict(); }
+  EqualityKind kind() const { return hydrogen()->kind(); }
+  NilValue nil() const { return hydrogen()->nil(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -638,6 +666,20 @@
 };
 
 
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+  explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -666,6 +708,23 @@
 };
 
 
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+  explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
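// [Editor's note -- illustrative sketch, not part of the merged diff.] The
// template arguments on LControlInstruction<I, T> (and LTemplateInstruction
// elsewhere in this header) encode the instruction shape: I value inputs and
// T temp registers. <2, 0> here means two inputs (left/right, fixed to
// rdx/rax by the builder) and no temps, since the comparison itself is done
// by a stub call. Stripped-down version of the pattern:
template <int kInputs, int kTemps>
struct ExampleInstructionShape {
  void* inputs[kInputs == 0 ? 1 : kInputs];
  void* temps[kTemps == 0 ? 1 : kTemps];
};

typedef ExampleInstructionShape<2, 0> StringCompareShape;  // 2 inputs, 0 temps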
+
+
 class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -705,11 +764,12 @@
 };
 
 
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
  public:
-  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
     inputs_[0] = value;
     temps_[0] = temp;
+    temps_[1] = temp2;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
@@ -790,18 +850,15 @@
 
 class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
+  LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  Token::Value op() const { return op_; }
+  Token::Value op() const { return hydrogen()->op(); }
 
   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
-  Token::Value op_;
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
 };
 
 
@@ -946,6 +1003,22 @@
 };
 
 
+class LDateField: public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDateField(LOperand* date, Smi* index) : index_(index) {
+    inputs_[0] = date;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+  DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+  Smi* index() const { return index_; }
+
+ private:
+  Smi* index_;
+};
+
+
 class LThrow: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LThrow(LOperand* value) {
@@ -990,6 +1063,17 @@
 };
 
 
+class LRandom: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LRandom(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Random, "random")
+  DECLARE_HYDROGEN_ACCESSOR(Random)
+};
+
+
 class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1206,6 +1290,8 @@
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
   DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+
+  LOperand* value() { return inputs_[0]; }
 };
 
 
@@ -1223,7 +1309,7 @@
   LOperand* global_object() { return InputAt(0); }
   Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() { return InputAt(1); }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 
@@ -1257,7 +1343,6 @@
   LOperand* context() { return InputAt(0); }
   LOperand* value() { return InputAt(1); }
   int slot_index() { return hydrogen()->slot_index(); }
-  int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1274,7 +1359,9 @@
 
 
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
@@ -1296,6 +1383,13 @@
 };
 
 
+class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
 class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
@@ -1372,14 +1466,17 @@
 };
 
 
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
  public:
-  LCallFunction() {}
+  explicit LCallFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
 
   DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
   DECLARE_HYDROGEN_ACCESSOR(CallFunction)
 
-  int arity() const { return hydrogen()->argument_count() - 2; }
+  LOperand* function() { return inputs_[0]; }
+  int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
@@ -1548,7 +1645,6 @@
   Handle<Object> name() const { return hydrogen()->name(); }
   bool is_in_object() { return hydrogen()->is_in_object(); }
   int offset() { return hydrogen()->offset(); }
-  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
   Handle<Map> transition() const { return hydrogen()->transition(); }
 };
 
@@ -1568,7 +1664,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* value() { return inputs_[1]; }
   Handle<Object> name() const { return hydrogen()->name(); }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 
@@ -1653,7 +1749,31 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* new_map_temp,
+                          LOperand* temp_reg) {
+    inputs_[0] = object;
+    temps_[0] = new_map_temp;
+    temps_[1] = temp_reg;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_reg() { return temps_[0]; }
+  LOperand* temp_reg() { return temps_[1]; }
+  Handle<Map> original_map() { return hydrogen()->original_map(); }
+  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
 };
 
 
@@ -1719,6 +1839,8 @@
     inputs_[0] = value;
   }
 
+  LOperand* value() { return InputAt(0); }
+
   DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
   DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
 };
@@ -1821,6 +1943,24 @@
 };
 
 
+class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
+ public:
+  explicit LAllocateObject(LOperand* temp) {
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
+};
+
+
+class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
+};
+
+
 class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
@@ -1948,6 +2088,62 @@
 };
 
 
+class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInPrepareMap(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
 class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
@@ -2021,6 +2217,7 @@
       : chunk_(NULL),
         info_(info),
         graph_(graph),
+        zone_(graph->isolate()->zone()),
         status_(UNUSED),
         current_instruction_(NULL),
         current_block_(NULL),
@@ -2050,6 +2247,7 @@
   LChunk* chunk() const { return chunk_; }
   CompilationInfo* info() const { return info_; }
   HGraph* graph() const { return graph_; }
+  Zone* zone() const { return zone_; }
 
   bool is_unused() const { return status_ == UNUSED; }
   bool is_building() const { return status_ == BUILDING; }
@@ -2059,7 +2257,6 @@
   void Abort(const char* format, ...);
 
   // Methods for getting operands for Use / Define / Temp.
-  LRegister* ToOperand(Register reg);
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(XMMRegister reg);
 
@@ -2110,8 +2307,6 @@
       LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
                            LUnallocated* result);
   template<int I, int T>
-      LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
-  template<int I, int T>
       LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
   template<int I, int T>
       LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
@@ -2146,12 +2341,12 @@
       LInstruction* instr, int ast_id);
   void ClearInstructionPendingDeoptimizationEnvironment();
 
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator);
 
   void VisitInstruction(HInstruction* current);
 
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
-  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
@@ -2161,6 +2356,7 @@
   LChunk* chunk_;
   CompilationInfo* info_;
   HGraph* const graph_;
+  Zone* zone_;
   Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 8fcad23..f7db250 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,6 +44,7 @@
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
       allow_stub_calls_(true),
+      has_frame_(false),
       root_array_available_(true) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -54,7 +55,7 @@
 
 static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
   Address roots_register_value = kRootRegisterBias +
-      reinterpret_cast<Address>(isolate->heap()->roots_address());
+      reinterpret_cast<Address>(isolate->heap()->roots_array_start());
   intptr_t delta = other.address() - roots_register_value;
   return delta;
 }
@@ -196,28 +197,47 @@
 }
 
 
-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register addr,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
+void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
+                                         Register addr,
+                                         Register scratch,
+                                         SaveFPRegsMode save_fp,
+                                         RememberedSetFinalAction and_then) {
+  if (FLAG_debug_code) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+    int3();
+    bind(&ok);
   }
-
-  // Compute the page start address from the heap object pointer, and reuse
-  // the 'object' register for it.
-  and_(object, Immediate(~Page::kPageAlignmentMask));
-
-  // Compute number of region covering addr. See Page::GetRegionNumberForAddress
-  // method for more details.
-  shrl(addr, Immediate(Page::kRegionSizeLog2));
-  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
-
-  // Set dirty mark for region.
-  bts(Operand(object, Page::kDirtyFlagOffset), addr);
+  // Load store buffer top.
+  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
+  // Store pointer to buffer.
+  movq(Operand(scratch, 0), addr);
+  // Increment buffer top.
+  addq(scratch, Immediate(kPointerSize));
+  // Write back new top of buffer.
+  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
+  // Call stub on end of buffer.
+  Label done;
+  // Check for end of buffer.
+  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kReturnAtEnd) {
+    Label buffer_overflowed;
+    j(not_equal, &buffer_overflowed, Label::kNear);
+    ret(0);
+    bind(&buffer_overflowed);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    j(equal, &done, Label::kNear);
+  }
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(save_fp);
+  CallStub(&store_buffer_overflow);
+  if (and_then == kReturnAtEnd) {
+    ret(0);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    bind(&done);
+  }
 }
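
RememberedSetHelper appends the slot address to the store buffer and only
calls out when the buffer fills up. The same logic as a C++ sketch
(simplified: the real top pointer lives in the roots array, and
StoreBufferOverflow() is a hypothetical stand-in for the stub call):

void RememberedSetInsert(Address** top_slot, Address slot_address) {
  Address* top = *top_slot;
  *top++ = slot_address;   // store pointer to buffer
  *top_slot = top;         // write back new top of buffer
  if (reinterpret_cast<uintptr_t>(top) &
      StoreBuffer::kStoreBufferOverflowBit) {
    StoreBufferOverflow();  // hypothetical stand-in for the overflow stub
  }
}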
 
 
@@ -225,7 +245,7 @@
                                 Register scratch,
                                 Condition cc,
                                 Label* branch,
-                                Label::Distance near_jump) {
+                                Label::Distance distance) {
   if (Serializer::enabled()) {
     // Can't do arithmetic on external references if it might get serialized.
     // The mask isn't really an address.  We load it as an external reference in
@@ -240,7 +260,7 @@
     }
     movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
     cmpq(scratch, kScratchRegister);
-    j(cc, branch, near_jump);
+    j(cc, branch, distance);
   } else {
     ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
     intptr_t new_space_start =
@@ -252,35 +272,88 @@
       lea(scratch, Operand(object, kScratchRegister, times_1, 0));
     }
     and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
-    j(cc, branch, near_jump);
+    j(cc, branch, distance);
   }
 }
 
 
-void MacroAssembler::RecordWrite(Register object,
-                                 int offset,
-                                 Register value,
-                                 Register index) {
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
+  ASSERT(!value.is(rsi) && !dst.is(rsi));
 
   // First, check if a write barrier is even needed. The tests below
-  // catch stores of smis and stores into the young generation.
+  // catch stores of Smis.
   Label done;
-  JumpIfSmi(value, &done);
 
-  RecordWriteNonSmi(object, offset, value, index);
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
+
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
+
+  lea(dst, FieldOperand(object, offset));
+  if (emit_debug_code()) {
+    Label ok;
+    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
-  // turned on to provoke errors. This clobbering repeats the
-  // clobbering done inside RecordWriteNonSmi but it's necessary to
-  // avoid having the fast case for smis leave the registers
-  // unchanged.
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+  }
+}
+
+
+void MacroAssembler::RecordWriteArray(Register object,
+                                      Register value,
+                                      Register index,
+                                      SaveFPRegsMode save_fp,
+                                      RememberedSetAction remembered_set_action,
+                                      SmiCheck smi_check) {
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
+  Label done;
+
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
+
+  // Array access: calculate the destination address. Index is not a smi.
+  Register dst = index;
+  lea(dst, Operand(object, index, times_pointer_size,
+                   FixedArray::kHeaderSize - kHeapObjectTag));
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
+  bind(&done);
+
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
@@ -289,90 +362,72 @@
 
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value) {
+                                 Register value,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
+  ASSERT(!value.is(rsi) && !address.is(rsi));
+
+  ASSERT(!object.is(value));
+  ASSERT(!object.is(address));
+  ASSERT(!value.is(address));
+  if (emit_debug_code()) {
+    AbortIfSmi(object);
+  }
+
+  if (remembered_set_action == OMIT_REMEMBERED_SET &&
+      !FLAG_incremental_marking) {
+    return;
+  }
+
+  if (FLAG_debug_code) {
+    Label ok;
+    cmpq(value, Operand(address, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
 
   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
-  JumpIfSmi(value, &done);
 
-  InNewSpace(object, value, equal, &done);
+  if (smi_check == INLINE_SMI_CHECK) {
+    // Skip barrier if writing a smi.
+    JumpIfSmi(value, &done);
+  }
 
-  RecordWriteHelper(object, address, value);
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
 
 
-void MacroAssembler::RecordWriteNonSmi(Register object,
-                                       int offset,
-                                       Register scratch,
-                                       Register index) {
-  Label done;
-
-  if (emit_debug_code()) {
-    Label okay;
-    JumpIfNotSmi(object, &okay, Label::kNear);
-    Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
-    bind(&okay);
-
-    if (offset == 0) {
-      // index must be int32.
-      Register tmp = index.is(rax) ? rbx : rax;
-      push(tmp);
-      movl(tmp, index);
-      cmpq(tmp, index);
-      Check(equal, "Index register for RecordWrite must be untagged int32.");
-      pop(tmp);
-    }
-  }
-
-  // Test that the object address is not in the new space. We cannot
-  // update page dirty marks for new space pages.
-  InNewSpace(object, scratch, equal, &done);
-
-  // The offset is relative to a tagged or untagged HeapObject pointer,
-  // so either offset or offset + kHeapObjectTag must be a
-  // multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize) ||
-         IsAligned(offset + kHeapObjectTag, kPointerSize));
-
-  Register dst = index;
-  if (offset != 0) {
-    lea(dst, Operand(object, offset));
-  } else {
-    // array access: calculate the destination address in the same manner as
-    // KeyedStoreIC::GenerateGeneric.
-    lea(dst, FieldOperand(object,
-                          index,
-                          times_pointer_size,
-                          FixedArray::kHeaderSize));
-  }
-  RecordWriteHelper(object, dst, scratch);
-
-  bind(&done);
-
-  // Clobber all input registers when running with the debug-code flag
-  // turned on to provoke errors.
-  if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-  }
-}
-
 void MacroAssembler::Assert(Condition cc, const char* msg) {
   if (emit_debug_code()) Check(cc, msg);
 }
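
The rewritten RecordWrite above replaces the region-marking scheme with a
pair of page-flag filters in front of RecordWriteStub. A sketch of the
same decision in C++ (the FromAddress/IsFlagSet spellings are assumptions;
the flag names come from the code above):

void RecordWriteFiltered(HeapObject* object, Object* value) {
  if (value->IsSmi()) return;  // smis never need a barrier
  MemoryChunk* value_page =
      MemoryChunk::FromAddress(reinterpret_cast<Address>(value));
  if (!value_page->IsFlagSet(
          MemoryChunk::kPointersToHereAreInterestingMask)) return;
  MemoryChunk* object_page = MemoryChunk::FromAddress(object->address());
  if (!object_page->IsFlagSet(
          MemoryChunk::kPointersFromHereAreInterestingMask)) return;
  // Slow path: RecordWriteStub updates incremental marking and the
  // remembered set.
}
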
@@ -400,7 +455,7 @@
   Label L;
   j(cc, &L, Label::kNear);
   Abort(msg);
-  // will not return here
+  // Control will not return here.
   bind(&L);
 }
 
@@ -440,7 +495,7 @@
   // from the real pointer as a smi.
   intptr_t p1 = reinterpret_cast<intptr_t>(msg);
   intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
-  // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
+  // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
   ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
 #ifdef DEBUG
   if (msg != NULL) {
@@ -448,9 +503,6 @@
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
-
   push(rax);
   movq(kScratchRegister, p0, RelocInfo::NONE);
   push(kScratchRegister);
@@ -458,52 +510,44 @@
        reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
        RelocInfo::NONE);
   push(kScratchRegister);
-  CallRuntime(Runtime::kAbort, 2);
-  // will not return here
+
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
+  // Control will not return here.
   int3();
 }
 
 
 void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
-  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
-  MaybeObject* result = stub->TryGetCode();
-  if (!result->IsFailure()) {
-    call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
-         RelocInfo::CODE_TARGET);
-  }
-  return result;
-}
-
-
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
-  MaybeObject* result = stub->TryGetCode();
-  if (!result->IsFailure()) {
-    jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
-        RelocInfo::CODE_TARGET);
-  }
-  return result;
-}
-
-
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
 }
 
 
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
     addq(rsp, Immediate(num_arguments * kPointerSize));
@@ -540,18 +584,11 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(rax, function->nargs);
   LoadAddress(rbx, ExternalReference(function, isolate()));
-  CEntryStub ces(1);
-  ces.SaveDoubles();
+  CEntryStub ces(1, kSaveFPRegs);
   CallStub(&ces);
 }
 
 
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
-                                            int num_arguments) {
-  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments) {
   // If the expected number of arguments of the runtime function is
@@ -573,26 +610,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
-                                            int num_arguments) {
-  if (f->nargs >= 0 && f->nargs != num_arguments) {
-    IllegalOperation(num_arguments);
-    // Since we did not call the stub, there was no allocation failure.
-    // Return some non-failure object.
-    return HEAP->undefined_value();
-  }
-
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Set(rax, num_arguments);
-  LoadAddress(rbx, ExternalReference(f, isolate()));
-  CEntryStub ces(f->result_size);
-  return TryCallStub(&ces);
-}
-
-
 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                            int num_arguments) {
   Set(rax, num_arguments);
@@ -622,24 +639,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
-    const ExternalReference& ext, int num_arguments, int result_size) {
-  // ----------- S t a t e -------------
-  //  -- rsp[0] : return address
-  //  -- rsp[8] : argument num_arguments - 1
-  //  ...
-  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
-  // -----------------------------------
-
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Set(rax, num_arguments);
-  return TryJumpToExternalReference(ext, result_size);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -649,15 +648,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
-                                                int num_arguments,
-                                                int result_size) {
-  return TryTailCallExternalReference(ExternalReference(fid, isolate()),
-                                      num_arguments,
-                                      result_size);
-}
-
-
 static int Offset(ExternalReference ref0, ExternalReference ref1) {
   int64_t offset = (ref0.address() - ref1.address());
   // Check that the difference fits into an int.
@@ -680,8 +670,8 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
-    ApiFunction* function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+                                              int stack_space) {
   Label empty_result;
   Label prologue;
   Label promote_scheduled_exception;
@@ -711,8 +701,7 @@
   movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
   addl(Operand(base_reg, kLevelOffset), Immediate(1));
   // Call the api function!
-  movq(rax,
-       reinterpret_cast<int64_t>(function->address()),
+  movq(rax, reinterpret_cast<int64_t>(function_address),
        RelocInfo::RUNTIME_ENTRY);
   call(rax);
 
@@ -744,11 +733,7 @@
   ret(stack_space * kPointerSize);
 
   bind(&promote_scheduled_exception);
-  MaybeObject* result = TryTailCallRuntime(Runtime::kPromoteScheduledException,
-                                           0, 1);
-  if (result->IsFailure()) {
-    return result;
-  }
+  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
 
   bind(&empty_result);
   // It was zero; the result is undefined.
@@ -769,8 +754,6 @@
   call(rax);
   movq(rax, prev_limit_reg);
   jmp(&leave_exit_frame);
-
-  return result;
 }
 
 
@@ -783,20 +766,11 @@
 }
 
 
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
-    const ExternalReference& ext, int result_size) {
-  // Set the entry point and jump to the C entry runtime stub.
-  LoadAddress(rbx, ext);
-  CEntryStub ces(result_size);
-  return TryTailCallStub(&ces);
-}
-
-
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
-  // Calls are not allowed in some stubs.
-  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Rely on the assertion to check that the number of provided
   // arguments match the expected number of arguments. Fake a
@@ -825,6 +799,64 @@
 }
 
 
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
+static const Register saved_regs[] = {
+  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
+  REG(r9), REG(r10), REG(r11)
+};
+
+#undef REG
+
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+                                     Register exclusion1,
+                                     Register exclusion2,
+                                     Register exclusion3) {
+  // We don't allow a GC during a store buffer overflow, so there is no need
+  // to store the registers in any particular way, but we do have to save and
+  // restore them.
+  for (int i = 0; i < kNumberOfSavedRegs; i++) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      push(reg);
+    }
+  }
+  // r12 to r15 are callee-saved on all platforms and need not be saved here.
+  if (fp_mode == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(Operand(rsp, i * kDoubleSize), reg);
+    }
+  }
+}
+
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
+                                    Register exclusion1,
+                                    Register exclusion2,
+                                    Register exclusion3) {
+  if (fp_mode == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(reg, Operand(rsp, i * kDoubleSize));
+    }
+    addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+  }
+  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      pop(reg);
+    }
+  }
+}
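
A typical pairing, as a usage sketch (the rax exclusion is illustrative,
not taken from this change):

masm->PushCallerSaved(kSaveFPRegs, rax);  // keep live rax out of the save set
// ... make a call that clobbers caller-saved registers ...
masm->PopCallerSaved(kSaveFPRegs, rax);   // pops in exact reverse order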
+
+
 void MacroAssembler::Set(Register dst, int64_t x) {
   if (x == 0) {
     xorl(dst, dst);
@@ -2089,7 +2121,7 @@
   movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
 
-  // Check that both are flat ascii strings.
+  // Check that both are flat ASCII strings.
   ASSERT(kNotStringTag != 0);
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
@@ -2135,7 +2167,7 @@
   movq(scratch1, first_object_instance_type);
   movq(scratch2, second_object_instance_type);
 
-  // Check that both are flat ascii strings.
+  // Check that both are flat ASCII strings.
   ASSERT(kNotStringTag != 0);
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
@@ -2213,6 +2245,43 @@
 }
 
 
+void MacroAssembler::LoadHeapObject(Register result,
+                                    Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+    movq(result, Operand(result, 0));
+  } else {
+    Move(result, object);
+  }
+}
+
+
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+    movq(kScratchRegister, Operand(kScratchRegister, 0));
+    push(kScratchRegister);
+  } else {
+    Push(object);
+  }
+}
+
+
+void MacroAssembler::LoadGlobalCell(Register dst,
+                                    Handle<JSGlobalPropertyCell> cell) {
+  if (dst.is(rax)) {
+    load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
+  } else {
+    movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+    movq(dst, Operand(dst, 0));
+  }
+}
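
LoadGlobalCell reads through a JSGlobalPropertyCell, which boxes the
current value of a global so compiled code can embed the stable cell
address rather than the value itself. The equivalent heap access, as a
sketch (accessor spelling assumed):

Object* LoadGlobalCellValue(JSGlobalPropertyCell* cell) {
  return cell->value();  // one indirection; cell.location() points here
}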
+
+
 void MacroAssembler::Push(Smi* source) {
   intptr_t smi = reinterpret_cast<intptr_t>(source);
   if (is_int32(smi)) {
@@ -2236,6 +2305,13 @@
 }
 
 
+void MacroAssembler::TestBit(const Operand& src, int bits) {
+  int byte_offset = bits / kBitsPerByte;
+  int bit_in_byte = bits & (kBitsPerByte - 1);
+  testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
+}
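
TestBit splits a flat bit index into a byte offset plus a bit position so
a single testb can probe fields wider than one byte. A worked example with
kBitsPerByte == 8:

int byte_offset = 12 / kBitsPerByte;        // == 1
int bit_in_byte = 12 & (kBitsPerByte - 1);  // == 4
// testb(Operand(src, 1), Immediate(1 << 4)) probes mask 0x10 in byte 1.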
+
+
 void MacroAssembler::Jump(ExternalReference ext) {
   LoadAddress(kScratchRegister, ext);
   jmp(kScratchRegister);
@@ -2349,7 +2425,8 @@
 
 // The order in which general registers are pushed by Pushad:
 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
-int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
+const int
+MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
     0,
     1,
     2,
@@ -2384,146 +2461,146 @@
 }
 
 
-void MacroAssembler::PushTryHandler(CodeLocation try_location,
-                                    HandlerType type) {
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+                                    int handler_index) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // The pc (return address) is already on TOS.  This code pushes state,
-  // frame pointer, context, and current handler.
-  if (try_location == IN_JAVASCRIPT) {
-    if (type == TRY_CATCH_HANDLER) {
-      push(Immediate(StackHandler::TRY_CATCH));
-    } else {
-      push(Immediate(StackHandler::TRY_FINALLY));
-    }
-    push(rbp);
-    push(rsi);
-  } else {
-    ASSERT(try_location == IN_JS_ENTRY);
-    // The frame pointer does not point to a JS frame so we save NULL
-    // for rbp. We expect the code throwing an exception to check rbp
-    // before dereferencing it to restore the context.
-    push(Immediate(StackHandler::ENTRY));
+  // We will build up the handler from the bottom by pushing on the stack.
+  // First push the frame pointer and context.
+  if (kind == StackHandler::JS_ENTRY) {
+    // The frame pointer does not point to a JS frame so we save NULL for
+    // rbp. We expect the code throwing an exception to check rbp before
+    // dereferencing it to restore the context.
     push(Immediate(0));  // NULL frame pointer.
     Push(Smi::FromInt(0));  // No context.
+  } else {
+    push(rbp);
+    push(rsi);
   }
-  // Save the current handler.
-  Operand handler_operand =
-      ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
-  push(handler_operand);
-  // Link this handler.
-  movq(handler_operand, rsp);
+
+  // Push the state and the code object.
+  unsigned state =
+      StackHandler::IndexField::encode(handler_index) |
+      StackHandler::KindField::encode(kind);
+  push(Immediate(state));
+  Push(CodeObject());
+
+  // Link the current handler as the next handler.
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+  push(ExternalOperand(handler_address));
+  // Set this new handler as the current one.
+  movq(ExternalOperand(handler_address), rsp);
 }
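
The STATIC_ASSERTs above pin down the new five-word handler layout; as a
plain struct, the frame PushTryHandler builds looks roughly like this
(field types are illustrative):

struct StackHandlerLayout {
  StackHandler* next;  // kNextOffset    == 0, pushed last (lowest address)
  Code* code;          // kCodeOffset    == 1 * kPointerSize
  Smi* state;          // kStateOffset   == 2 * kPointerSize, index | kind
  Object* context;     // kContextOffset == 3 * kPointerSize
  Address fp;          // kFPOffset      == 4 * kPointerSize, pushed first
};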
 
 
 void MacroAssembler::PopTryHandler() {
-  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
-  // Unlink this handler.
-  Operand handler_operand =
-      ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
-  pop(handler_operand);
-  // Remove the remaining fields.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+  pop(ExternalOperand(handler_address));
   addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
 }
 
 
+void MacroAssembler::JumpToHandlerEntry() {
+  // Compute the handler entry address and jump to it.  The handler table is
+  // a fixed array of (smi-tagged) code offsets.
+  // rax = exception, rdi = code object, rdx = state.
+  movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
+  shr(rdx, Immediate(StackHandler::kKindWidth));
+  movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
+  SmiToInteger64(rdx, rdx);
+  lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+  jmp(rdi);
+}
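
JumpToHandlerEntry decodes the handler index out of the state word and
looks the code offset up in the handler table. The same computation in
C++, as a sketch (accessor spellings are assumptions):

Address HandlerEntryAddress(Code* code, unsigned state) {
  int index = state >> StackHandler::kKindWidth;  // IndexField::decode
  FixedArray* table = FixedArray::cast(code->handler_table());
  int offset = Smi::cast(table->get(index))->value();
  return code->instruction_start() + offset;  // lea past Code::kHeaderSize
}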
+
+
 void MacroAssembler::Throw(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-  // Keep thrown value in rax.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // The exception is expected in rax.
   if (!value.is(rax)) {
     movq(rax, value);
   }
-
+  // Drop the stack pointer to the top of the top handler.
   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
-  Operand handler_operand = ExternalOperand(handler_address);
-  movq(rsp, handler_operand);
-  // get next in chain
-  pop(handler_operand);
+  movq(rsp, ExternalOperand(handler_address));
+  // Restore the next handler.
+  pop(ExternalOperand(handler_address));
+
+  // Remove the code object and state, compute the handler address in rdi.
+  pop(rdi);  // Code object.
+  pop(rdx);  // Offset and state.
+
+  // Restore the context and frame pointer.
   pop(rsi);  // Context.
   pop(rbp);  // Frame pointer.
-  pop(rdx);  // State.
 
   // If the handler is a JS frame, restore the context to the frame.
-  // (rdx == ENTRY) == (rbp == 0) == (rsi == 0), so we could test any
-  // of them.
+  // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
+  // rbp or rsi.
   Label skip;
-  cmpq(rdx, Immediate(StackHandler::ENTRY));
-  j(equal, &skip, Label::kNear);
+  testq(rsi, rsi);
+  j(zero, &skip, Label::kNear);
   movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
   bind(&skip);
 
-  ret(0);
+  JumpToHandlerEntry();
 }
 
 
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
-                                      Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-  // Keep thrown value in rax.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // The exception is expected in rax.
   if (!value.is(rax)) {
     movq(rax, value);
   }
-  // Fetch top stack handler.
+  // Drop the stack pointer to the top of the top stack handler.
   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   Load(rsp, handler_address);
 
-  // Unwind the handlers until the ENTRY handler is found.
-  Label loop, done;
-  bind(&loop);
-  // Load the type of the current stack handler.
-  const int kStateOffset = StackHandlerConstants::kStateOffset;
-  cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
-  j(equal, &done, Label::kNear);
-  // Fetch the next handler in the list.
-  const int kNextOffset = StackHandlerConstants::kNextOffset;
-  movq(rsp, Operand(rsp, kNextOffset));
-  jmp(&loop);
-  bind(&done);
+  // Unwind the handlers until the top ENTRY handler is found.
+  Label fetch_next, check_kind;
+  jmp(&check_kind, Label::kNear);
+  bind(&fetch_next);
+  movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
 
-  // Set the top handler address to next handler past the current ENTRY handler.
-  Operand handler_operand = ExternalOperand(handler_address);
-  pop(handler_operand);
+  bind(&check_kind);
+  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+  testl(Operand(rsp, StackHandlerConstants::kStateOffset),
+        Immediate(StackHandler::KindField::kMask));
+  j(not_zero, &fetch_next);
 
-  if (type == OUT_OF_MEMORY) {
-    // Set external caught exception to false.
-    ExternalReference external_caught(
-        Isolate::kExternalCaughtExceptionAddress, isolate());
-    Set(rax, static_cast<int64_t>(false));
-    Store(external_caught, rax);
+  // Set the top handler address to next handler past the top ENTRY handler.
+  pop(ExternalOperand(handler_address));
 
-    // Set pending exception and rax to out of memory exception.
-    ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
-                                        isolate());
-    movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
-    Store(pending_exception, rax);
-  }
+  // Remove the code object and state, compute the handler address in rdi.
+  pop(rdi);  // Code object.
+  pop(rdx);  // Offset and state.
 
-  // Discard the context saved in the handler and clear the context pointer.
-  pop(rdx);
-  Set(rsi, 0);
+  // Clear the context pointer and frame pointer (0 was saved in the handler).
+  pop(rsi);
+  pop(rbp);
 
-  pop(rbp);  // Restore frame pointer.
-  pop(rdx);  // Discard state.
-
-  ret(0);
+  JumpToHandlerEntry();
 }
 
 
@@ -2567,22 +2644,133 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Label* fail,
                                        Label::Distance distance) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
        Immediate(Map::kMaximumBitField2FastElementValue));
   j(above, fail, distance);
 }
 
 
+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Label* fail,
+                                             Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  j(below_equal, fail, distance);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastElementValue));
+  j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Label* fail,
+                                              Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  j(above, fail, distance);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+    Register maybe_number,
+    Register elements,
+    Register index,
+    XMMRegister xmm_scratch,
+    Label* fail) {
+  Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
+
+  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+  CheckMap(maybe_number,
+           isolate()->factory()->heap_number_map(),
+           fail,
+           DONT_DO_SMI_CHECK);
+
+  // Double value, canonicalize NaN.
+  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+  cmpl(FieldOperand(maybe_number, offset),
+       Immediate(kNaNOrInfinityLowerBoundUpper32));
+  j(greater_equal, &maybe_nan, Label::kNear);
+
+  bind(&not_nan);
+  movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+  bind(&have_double_value);
+  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+        xmm_scratch);
+  jmp(&done);
+
+  bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  j(greater, &is_nan, Label::kNear);
+  cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+  j(zero, &not_nan);
+  bind(&is_nan);
+  // Convert all NaNs to the same canonical NaN value when they are stored in
+  // the double array.
+  Set(kScratchRegister, BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+  movq(xmm_scratch, kScratchRegister);
+  jmp(&have_double_value, Label::kNear);
+
+  bind(&smi_value);
+  // Value is a smi. Convert it to a double and store.
+  // Preserve original value.
+  SmiToInteger32(kScratchRegister, maybe_number);
+  cvtlsi2sd(xmm_scratch, kScratchRegister);
+  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+        xmm_scratch);
+  bind(&done);
+}
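
StoreNumberToDoubleElements canonicalizes every NaN before it reaches the
backing store, so the hole sentinel can never be forged from an arbitrary
NaN bit pattern. The rule it implements, as a sketch:

double CanonicalizeForDoubleArray(double value) {
  if (value != value) {  // only NaN compares unequal to itself
    return FixedDoubleArray::canonical_not_the_hole_nan_as_double();
  }
  return value;  // infinities and ordinary doubles are stored as-is
}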
+
+
+void MacroAssembler::CompareMap(Register obj,
+                                Handle<Map> map,
+                                Label* early_success,
+                                CompareMapMode mode) {
+  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
+    Map* transitioned_fast_element_map(
+        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+    ASSERT(transitioned_fast_element_map == NULL ||
+           map->elements_kind() != FAST_ELEMENTS);
+    if (transitioned_fast_element_map != NULL) {
+      j(equal, early_success, Label::kNear);
+      Cmp(FieldOperand(obj, HeapObject::kMapOffset),
+          Handle<Map>(transitioned_fast_element_map));
+    }
+
+    Map* transitioned_double_map(
+        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+    ASSERT(transitioned_double_map == NULL ||
+           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+    if (transitioned_double_map != NULL) {
+      j(equal, early_success, Label::kNear);
+      Cmp(FieldOperand(obj, HeapObject::kMapOffset),
+          Handle<Map>(transitioned_double_map));
+    }
+  }
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
-                              SmiCheckType smi_check_type) {
+                              SmiCheckType smi_check_type,
+                              CompareMapMode mode) {
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
-  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+
+  Label success;
+  CompareMap(obj, map, &success, mode);
   j(not_equal, fail);
+  bind(&success);
 }
 
 
@@ -2672,6 +2860,14 @@
 }
 
 
+void MacroAssembler::AbortIfNotZeroExtended(Register int32_register) {
+  ASSERT(!int32_register.is(kScratchRegister));
+  movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
+  cmpq(kScratchRegister, int32_register);
+  Assert(above_equal, "32 bit value in register is not zero-extended");
+}
+
+
 void MacroAssembler::AbortIfNotString(Register object) {
   testb(object, Immediate(kSmiTagMask));
   Assert(not_equal, "Operand is not a string");
@@ -2707,7 +2903,8 @@
 
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
-                                             Label* miss) {
+                                             Label* miss,
+                                             bool miss_on_bound_function) {
   // Check that the receiver isn't a smi.
   testl(function, Immediate(kSmiTagMask));
   j(zero, miss);
@@ -2716,6 +2913,17 @@
   CmpObjectType(function, JS_FUNCTION_TYPE, result);
   j(not_equal, miss);
 
+  if (miss_on_bound_function) {
+    movq(kScratchRegister,
+         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
+    // field).
+    TestBit(FieldOperand(kScratchRegister,
+                         SharedFunctionInfo::kCompilerHintsOffset),
+            SharedFunctionInfo::kBoundFunction);
+    j(not_zero, miss);
+  }
+
   // Make sure that the function has an instance prototype.
   Label non_instance;
   testb(FieldOperand(result, Map::kBitFieldOffset),
@@ -2787,10 +2995,10 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
-  ASSERT(allow_stub_calls());
   Set(rax, 0);  // No arguments.
   LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
   CEntryStub ces(1);
+  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 #endif  // ENABLE_DEBUGGER_SUPPORT
@@ -2816,27 +3024,34 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
+  bool definitely_mismatches = false;
   InvokePrologue(expected,
                  actual,
                  Handle<Code>::null(),
                  code,
                  &done,
+                 &definitely_mismatches,
                  flag,
                  Label::kNear,
                  call_wrapper,
                  call_kind);
-  if (flag == CALL_FUNCTION) {
-    call_wrapper.BeforeCall(CallSize(code));
-    SetCallKind(rcx, call_kind);
-    call(code);
-    call_wrapper.AfterCall();
-  } else {
-    ASSERT(flag == JUMP_FUNCTION);
-    SetCallKind(rcx, call_kind);
-    jmp(code);
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      SetCallKind(rcx, call_kind);
+      call(code);
+      call_wrapper.AfterCall();
+    } else {
+      ASSERT(flag == JUMP_FUNCTION);
+      SetCallKind(rcx, call_kind);
+      jmp(code);
+    }
+    bind(&done);
   }
-  bind(&done);
 }
 
 
@@ -2847,28 +3062,35 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
+  bool definitely_mismatches = false;
   Register dummy = rax;
   InvokePrologue(expected,
                  actual,
                  code,
                  dummy,
                  &done,
+                 &definitely_mismatches,
                  flag,
                  Label::kNear,
                  call_wrapper,
                  call_kind);
-  if (flag == CALL_FUNCTION) {
-    call_wrapper.BeforeCall(CallSize(code));
-    SetCallKind(rcx, call_kind);
-    Call(code, rmode);
-    call_wrapper.AfterCall();
-  } else {
-    ASSERT(flag == JUMP_FUNCTION);
-    SetCallKind(rcx, call_kind);
-    Jump(code, rmode);
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      SetCallKind(rcx, call_kind);
+      Call(code, rmode);
+      call_wrapper.AfterCall();
+    } else {
+      ASSERT(flag == JUMP_FUNCTION);
+      SetCallKind(rcx, call_kind);
+      Jump(code, rmode);
+    }
+    bind(&done);
   }
-  bind(&done);
 }
 
 
@@ -2877,6 +3099,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   ASSERT(function.is(rdi));
   movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -2891,34 +3116,24 @@
 }
 
 
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  ASSERT(function->is_compiled());
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   // Get the function and setup the context.
-  Move(rdi, Handle<JSFunction>(function));
+  LoadHeapObject(rdi, function);
   movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-  if (V8::UseCrankshaft()) {
-    // Since Crankshaft can recompile a function, we need to load
-    // the Code object every time we call the function.
-    movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-    ParameterCount expected(function->shared()->formal_parameter_count());
-    InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
-  } else {
-    // Invoke the cached code.
-    Handle<Code> code(function->code());
-    ParameterCount expected(function->shared()->formal_parameter_count());
-    InvokeCode(code,
-               expected,
-               actual,
-               RelocInfo::CODE_TARGET,
-               flag,
-               call_wrapper,
-               call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
 }
 
 
@@ -2927,11 +3142,13 @@
                                     Handle<Code> code_constant,
                                     Register code_register,
                                     Label* done,
+                                    bool* definitely_mismatches,
                                     InvokeFlag flag,
                                     Label::Distance near_jump,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
   bool definitely_matches = false;
+  *definitely_mismatches = false;
   Label invoke;
   if (expected.is_immediate()) {
     ASSERT(actual.is_immediate());
@@ -2947,6 +3164,7 @@
         // arguments.
         definitely_matches = true;
       } else {
+        *definitely_mismatches = true;
         Set(rbx, expected.immediate());
       }
     }
@@ -2983,7 +3201,9 @@
       SetCallKind(rcx, call_kind);
       Call(adaptor, RelocInfo::CODE_TARGET);
       call_wrapper.AfterCall();
-      jmp(done, near_jump);
+      if (!*definitely_mismatches) {
+        jmp(done, near_jump);
+      }
     } else {
       SetCallKind(rcx, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -3022,7 +3242,7 @@
 
 
 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
-  // Setup the frame structure on the stack.
+  // Set up the frame structure on the stack.
   // All constants are relative to the frame pointer of the exit frame.
   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -3082,7 +3302,7 @@
 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
   EnterExitFramePrologue(true);
 
-  // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
+  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
   // so it must be retained across the C-call.
   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
   lea(r15, Operand(rbp, r14, times_pointer_size, offset));
@@ -3616,7 +3836,7 @@
     subq(scratch1, Immediate(kHeaderAlignment));
   }
 
-  // Allocate ascii string in new space.
+  // Allocate ASCII string in new space.
   AllocateInNewSpace(SeqAsciiString::kHeaderSize,
                      times_1,
                      scratch1,
@@ -3772,6 +3992,20 @@
 }
 
 
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
+  jmp(&entry);
+  bind(&loop);
+  movq(Operand(start_offset, 0), filler);
+  addq(start_offset, Immediate(kPointerSize));
+  bind(&entry);
+  cmpq(start_offset, end_offset);
+  j(less, &loop);
+}
+
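
What the jmp/bind/movq/addq/cmpq sequence above computes, written as plain C++; the test sits at the loop entry, so an empty range (start == end) stores nothing:

```cpp
#include <cstdint>

// C++ rendering of the emitted loop; start_offset, end_offset and
// filler correspond to the three register operands.
void InitializeFieldsWithFiller(uintptr_t start_offset,
                                uintptr_t end_offset,
                                uintptr_t filler) {
  while (start_offset < end_offset) {                    // cmpq + j(less)
    *reinterpret_cast<uintptr_t*>(start_offset) = filler;  // movq
    start_offset += sizeof(uintptr_t);                   // addq kPointerSize
  }
}
```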
+
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
@@ -3797,6 +4031,46 @@
   }
 }
 
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+    ElementsKind expected_kind,
+    ElementsKind transitioned_kind,
+    Register map_in_out,
+    Register scratch,
+    Label* no_map_match) {
+  // Load the global or builtins object from the current context.
+  movq(scratch, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+  // Check that the function's map is the same as the expected cached map.
+  int expected_index =
+      Context::GetContextMapIndexFromElementsKind(expected_kind);
+  cmpq(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
+  j(not_equal, no_map_match);
+
+  // Use the transitioned cached map.
+  int trans_index =
+      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+  movq(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
+}
+
+
+void MacroAssembler::LoadInitialArrayMap(
+    Register function_in, Register scratch, Register map_out) {
+  ASSERT(!function_in.is(map_out));
+  Label done;
+  movq(map_out, FieldOperand(function_in,
+                             JSFunction::kPrototypeOrInitialMapOffset));
+  if (!FLAG_smi_only_arrays) {
+    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                        FAST_ELEMENTS,
+                                        map_out,
+                                        scratch,
+                                        &done);
+  }
+  bind(&done);
+}
+
 #ifdef _WIN64
 static const int kRegisterPassedArguments = 4;
 #else
@@ -3871,6 +4145,7 @@
 
 
 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+  ASSERT(has_frame());
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
@@ -3885,6 +4160,17 @@
 }
 
 
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}
+
+
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
@@ -3905,6 +4191,241 @@
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
+
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == zero || cc == not_zero);
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    movq(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, object);
+  }
+  if (mask < (1 << kBitsPerByte)) {
+    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
+          Immediate(static_cast<uint8_t>(mask)));
+  } else {
+    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+  }
+  j(cc, condition_met, condition_met_distance);
+}
+
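
CheckPageFlag depends on pages being power-of-two aligned: clearing the low bits of any pointer into the page yields the MemoryChunk header, whose flags word can then be tested. A sketch of the address arithmetic, with hypothetical page size and header layout:

```cpp
#include <cstdint>

// Hypothetical constants: 1 MB pages, flags stored one word into the
// chunk header (the real offsets live in MemoryChunk).
constexpr uintptr_t kPageAlignmentMask = (uintptr_t(1) << 20) - 1;
constexpr uintptr_t kFlagsOffset = sizeof(uintptr_t);

bool PageFlagSet(uintptr_t object, uintptr_t mask) {
  uintptr_t chunk = object & ~kPageAlignmentMask;  // start of the page
  uintptr_t flags = *reinterpret_cast<uintptr_t*>(chunk + kFlagsOffset);
  return (flags & mask) != 0;  // the j(cc, ...) branch tests this
}
```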
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register bitmap_scratch,
+                                 Register mask_scratch,
+                                 Label* on_black,
+                                 Label::Distance on_black_distance) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  // The mask_scratch register contains a 1 at the position of the first bit
+  // and a 0 at all other positions, including the position of the second bit.
+  movq(rcx, mask_scratch);
+  // Make rcx into a mask that covers both marking bits using the operation
+  // rcx = mask | (mask << 1).
+  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+  // Note that we are using a 4-byte aligned 8-byte load.
+  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  cmpq(mask_scratch, rcx);
+  j(equal, on_black, on_black_distance);
+}
+
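
The lea above is a multiply-by-three: for a one-bit mask, mask + mask * 2 equals mask | (mask << 1), the two-bit field holding the object's colour. Black is the pattern 10 (first bit set, second clear), so the masked word must equal the original one-bit mask:

```cpp
#include <cstdint>

// Colour check for a one-bit mark mask; bitmap_word is the 8-byte
// load from the marking bitmap.
bool IsBlack(uint64_t bitmap_word, uint64_t mask) {
  uint64_t field = mask + mask * 2;     // lea(..., times_2): both bits
  uint64_t bits = bitmap_word & field;  // and_(rcx, ...)
  return bits == mask;                  // cmpq + j(equal, on_black)
}
```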
+
+// Detect some, but not all, common pointer-free objects.  This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(
+    Register value,
+    Register scratch,
+    Label* not_data_object,
+    Label::Distance not_data_object_distance) {
+  Label is_data_object;
+  movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+  j(equal, &is_data_object, Label::kNear);
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+        Immediate(kIsIndirectStringMask | kIsNotStringMask));
+  j(not_zero, not_data_object, not_data_object_distance);
+  bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+  movq(bitmap_reg, addr_reg);
+  // Sign extended 32 bit immediate.
+  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+  movq(rcx, addr_reg);
+  int shift =
+      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+  shrl(rcx, Immediate(shift));
+  and_(rcx,
+       Immediate((Page::kPageAlignmentMask >> shift) &
+                 ~(Bitmap::kBytesPerCell - 1)));
+
+  addq(bitmap_reg, rcx);
+  movq(rcx, addr_reg);
+  shrl(rcx, Immediate(kPointerSizeLog2));
+  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+  movl(mask_reg, Immediate(1));
+  shl_cl(mask_reg);
+}
+
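
GetMarkBits maps an address to a (bitmap cell, bit-in-cell) pair: one mark bit per pointer-sized word, 32 bits per 4-byte cell. A sketch of the arithmetic under those assumptions (the page size is hypothetical, and the actual cell load adds MemoryChunk::kHeaderSize to the returned base):

```cpp
#include <cstdint>

constexpr uintptr_t kPageAlignmentMask = (uintptr_t(1) << 20) - 1;  // hypothetical
constexpr int kPointerSizeLog2 = 3;   // 8-byte words
constexpr int kBitsPerCellLog2 = 5;   // 32 mark bits per cell
constexpr int kBytesPerCellLog2 = 2;  // 4-byte cells

struct MarkBits {
  uintptr_t bitmap;  // what ends up in bitmap_reg
  uint32_t mask;     // what ends up in mask_reg
};

MarkBits GetMarkBits(uintptr_t addr) {
  uintptr_t page = addr & ~kPageAlignmentMask;
  // Byte offset of the cell: one mark bit per word, rounded down to a
  // whole cell (the trailing and_ with ~(kBytesPerCell - 1)).
  int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
  uintptr_t cell = ((addr & kPageAlignmentMask) >> shift) &
                   ~uintptr_t((1 << kBytesPerCellLog2) - 1);
  // Which bit inside the cell this word's mark occupies.
  uint32_t bit = (addr >> kPointerSizeLog2) & ((1 << kBitsPerCellLog2) - 1);
  return { page + cell, uint32_t(1) << bit };
}
```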
+
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Label* value_is_white_and_not_data,
+    Label::Distance distance) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there, we only need to check one bit.
+  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+  j(not_zero, &done, Label::kNear);
+
+  if (FLAG_debug_code) {
+    // Check for impossible bit pattern.
+    Label ok;
+    push(mask_scratch);
+    // shl.  May overflow making the check conservative.
+    addq(mask_scratch, mask_scratch);
+    testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+    pop(mask_scratch);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = rcx;  // Holds map while checking type.
+  Register length = rcx;  // Holds length of object after checking type.
+  Label not_heap_number;
+  Label is_data_object;
+
+  // Check for heap-number
+  movq(map, FieldOperand(value, HeapObject::kMapOffset));
+  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+  j(not_equal, &not_heap_number, Label::kNear);
+  movq(length, Immediate(HeapNumber::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_heap_number);
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = rcx;
+  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
+  j(not_zero, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  Label not_external;
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  testb(instance_type, Immediate(kExternalStringTag));
+  j(zero, &not_external, Label::kNear);
+  movq(length, Immediate(ExternalString::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_external);
+  // Sequential string, either ASCII or UC16.
+  ASSERT(kAsciiStringTag == 0x04);
+  and_(length, Immediate(kStringEncodingMask));
+  xor_(length, Immediate(kStringEncodingMask));
+  addq(length, Immediate(0x04));
+  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
+  imul(length, FieldOperand(value, String::kLengthOffset));
+  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+  and_(length, Immediate(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
+
+  bind(&done);
+}
+
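
The encoding trick near the end of EnsureNotWhite is worth unpacking: kStringEncodingMask is the ASCII bit (0x04), so masking, flipping and adding 0x04 yields 4 for ASCII and 8 for two-byte strings, i.e. the character size shifted left by two. The multiply and shift then recover the byte length (the smi tag shift in the asm folds into the same shr). A sketch with stand-in header and alignment constants:

```cpp
#include <cstdint>

constexpr uint32_t kStringEncodingMask = 0x04;  // set for ASCII strings
constexpr uint32_t kHeaderSize = 24;            // SeqString header, stand-in
constexpr uint32_t kObjectAlignmentMask = 7;    // 8-byte alignment, stand-in

// Size in bytes of a sequential string, given its untagged length.
uint32_t SeqStringSize(uint32_t instance_type, uint32_t length) {
  // 4 if ASCII, 8 if two-byte: the character size shifted left by 2.
  uint32_t char_size_x4 =
      ((instance_type & kStringEncodingMask) ^ kStringEncodingMask) + 0x04;
  uint32_t body = (char_size_x4 * length) >> 2;  // undo the extra factor 4
  return (kHeaderSize + body + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}
```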
+
+void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+  Label next;
+  Register empty_fixed_array_value = r8;
+  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+  Register empty_descriptor_array_value = r9;
+  LoadRoot(empty_descriptor_array_value,
+              Heap::kEmptyDescriptorArrayRootIndex);
+  movq(rcx, rax);
+  bind(&next);
+
+  // Check that there are no elements.  Register rcx contains the
+  // current JS object we've reached through the prototype chain.
+  cmpq(empty_fixed_array_value,
+       FieldOperand(rcx, JSObject::kElementsOffset));
+  j(not_equal, call_runtime);
+
+  // Check that instance descriptors are not empty so that we can
+  // check for an enum cache.  Leave the map in rbx for the subsequent
+  // prototype load.
+  movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+  movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
+  JumpIfSmi(rdx, call_runtime);
+
+  // Check that there is an enum cache in the non-empty instance
+  // descriptors (rdx).  This is the case if the next enumeration
+  // index field does not contain a smi.
+  movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
+  JumpIfSmi(rdx, call_runtime);
+
+  // For all objects but the receiver, check that the cache is empty.
+  Label check_prototype;
+  cmpq(rcx, rax);
+  j(equal, &check_prototype, Label::kNear);
+  movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+  cmpq(rdx, empty_fixed_array_value);
+  j(not_equal, call_runtime);
+
+  // Load the prototype from the map and loop if non-null.
+  bind(&check_prototype);
+  movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+  cmpq(rcx, null_value);
+  j(not_equal, &next);
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index ff6edc5..6bb5cfe 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 #define V8_X64_MACRO_ASSEMBLER_X64_H_
 
 #include "assembler.h"
+#include "frames.h"
 #include "v8globals.h"
 
 namespace v8 {
@@ -49,18 +50,23 @@
 // Default scratch register used by MacroAssembler (and other code that needs
 // a spare register). The register isn't callee save, and is not used by the
 // function calling convention.
-static const Register kScratchRegister = { 10 };      // r10.
-static const Register kSmiConstantRegister = { 12 };  // r12 (callee save).
-static const Register kRootRegister = { 13 };         // r13 (callee save).
+const Register kScratchRegister = { 10 };      // r10.
+const Register kSmiConstantRegister = { 12 };  // r12 (callee save).
+const Register kRootRegister = { 13 };         // r13 (callee save).
 // Value of smi in kSmiConstantRegister.
-static const int kSmiConstantRegisterValue = 1;
+const int kSmiConstantRegisterValue = 1;
 // Actual value of root register is offset from the root array's start
 // to take advantage of negative 8-bit displacement values.
-static const int kRootRegisterBias = 128;
+const int kRootRegisterBias = 128;
 
 // Convenience for platform-independent signatures.
 typedef Operand MemOperand;
 
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
 // Forward declaration.
 class JumpTarget;
 
@@ -72,6 +78,7 @@
   ScaleFactor scale;
 };
 
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -134,56 +141,145 @@
   void CompareRoot(const Operand& with, Heap::RootListIndex index);
   void PushRoot(Heap::RootListIndex index);
 
-  // ---------------------------------------------------------------------------
-  // GC Support
+  // These functions do not arrange the registers in any particular order so
+  // they are not useful for calls that can cause a GC.  The caller can
+  // exclude up to 3 registers that do not need to be saved and restored.
+  void PushCallerSaved(SaveFPRegsMode fp_mode,
+                       Register exclusion1 = no_reg,
+                       Register exclusion2 = no_reg,
+                       Register exclusion3 = no_reg);
+  void PopCallerSaved(SaveFPRegsMode fp_mode,
+                      Register exclusion1 = no_reg,
+                      Register exclusion2 = no_reg,
+                      Register exclusion3 = no_reg);
 
-  // For page containing |object| mark region covering |addr| dirty.
-  // RecordWriteHelper only works if the object is not in new
-  // space.
-  void RecordWriteHelper(Register object,
-                         Register addr,
-                         Register scratch);
+// ---------------------------------------------------------------------------
+// GC Support
 
-  // Check if object is in new space. The condition cc can be equal or
-  // not_equal. If it is equal a jump will be done if the object is on new
-  // space. The register scratch can be object itself, but it will be clobbered.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cc,
-                  Label* branch,
-                  Label::Distance near_jump = Label::kFar);
 
-  // For page containing |object| mark region covering [object+offset]
+  enum RememberedSetFinalAction {
+    kReturnAtEnd,
+    kFallThroughAtEnd
+  };
+
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr,
+                           Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
+
+  void CheckPageFlag(Register object,
+                     Register scratch,
+                     int mask,
+                     Condition cc,
+                     Label* condition_met,
+                     Label::Distance condition_met_distance = Label::kFar);
+
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfNotInNewSpace(Register object,
+                           Register scratch,
+                           Label* branch,
+                           Label::Distance distance = Label::kFar) {
+    InNewSpace(object, scratch, not_equal, branch, distance);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfInNewSpace(Register object,
+                        Register scratch,
+                        Label* branch,
+                        Label::Distance distance = Label::kFar) {
+    InNewSpace(object, scratch, equal, branch, distance);
+  }
+
+  // Check if an object has the black incremental marking color.  Also uses rcx!
+  void JumpIfBlack(Register object,
+                   Register scratch0,
+                   Register scratch1,
+                   Label* on_black,
+                   Label::Distance on_black_distance = Label::kFar);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+  void JumpIfDataObject(Register value,
+                        Register scratch,
+                        Label* not_data_object,
+                        Label::Distance not_data_object_distance);
+
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object,
+                      Register scratch1,
+                      Register scratch2,
+                      Label* object_is_white_and_not_data,
+                      Label::Distance distance);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
+  void RecordWriteField(
+      Register object,
+      int offset,
+      Register value,
+      Register scratch,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // Operand(reg, off).
+  void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK) {
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check);
+  }
+
+  // Notify the garbage collector that we wrote a pointer into a fixed array.
+  // |array| is the array being stored into, |value| is the
+  // object being stored.  |index| is the array index represented as a non-smi.
+  // All registers are clobbered by the operation.  RecordWriteArray
+  // filters out smis so it does not update the write barrier if the
+  // value is a smi.
+  void RecordWriteArray(
+      Register array,
+      Register value,
+      Register index,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // For page containing |object| mark region covering |address|
   // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. If |offset| is zero, then the |scratch|
-  // register contains the array index into the elements array
-  // represented as an untagged 32-bit integer. All registers are
-  // clobbered by the operation. RecordWrite filters out smis so it
-  // does not update the write barrier if the value is a smi.
-  void RecordWrite(Register object,
-                   int offset,
-                   Register value,
-                   Register scratch);
-
-  // For page containing |object| mark region covering [address]
-  // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. All registers are clobbered by the
+  // object being stored. The address and value registers are clobbered by the
   // operation.  RecordWrite filters out smis so it does not update
   // the write barrier if the value is a smi.
-  void RecordWrite(Register object,
-                   Register address,
-                   Register value);
-
-  // For page containing |object| mark region covering [object+offset] dirty.
-  // The value is known to not be a smi.
-  // object is the object being stored into, value is the object being stored.
-  // If offset is zero, then the scratch register contains the array index into
-  // the elements array represented as an untagged 32-bit integer.
-  // All registers are clobbered by the operation.
-  void RecordWriteNonSmi(Register object,
-                         int offset,
-                         Register value,
-                         Register scratch);
+  void RecordWrite(
+      Register object,
+      Register address,
+      Register value,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
@@ -192,15 +288,6 @@
   void DebugBreak();
 #endif
 
-  // ---------------------------------------------------------------------------
-  // Activation frames
-
-  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
-  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
-  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
-  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
   // Enter specific kind of exit frame; either in normal or
   // debug mode. Expects the number of arguments in register rax and
   // sets up the number of arguments in register rdi and the pointer
@@ -232,16 +319,16 @@
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
   void InitializeRootRegister() {
-    ExternalReference roots_address =
-        ExternalReference::roots_address(isolate());
-    movq(kRootRegister, roots_address);
+    ExternalReference roots_array_start =
+        ExternalReference::roots_array_start(isolate());
+    movq(kRootRegister, roots_array_start);
     addq(kRootRegister, Immediate(kRootRegisterBias));
   }
 
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
-  // Setup call kind marking in rcx. The method takes rcx as an
+  // Set up call kind marking in rcx. The method takes rcx as an
   // explicit first parameter to make the code more readable at the
   // call sites.
   void SetCallKind(Register dst, CallKind kind);
@@ -270,7 +357,7 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(JSFunction* function,
+  void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
@@ -639,6 +726,7 @@
   void Push(Smi* smi);
   void Test(const Operand& dst, Smi* source);
 
+
   // ---------------------------------------------------------------------------
   // String macros.
 
@@ -657,7 +745,7 @@
       Label* on_not_both_flat_ascii,
       Label::Distance near_jump = Label::kFar);
 
-  // Check whether the instance type represents a flat ascii string. Jump to the
+  // Check whether the instance type represents a flat ASCII string. Jump to the
   // label if not. If the instance type can be scratched specify same register
   // for both instance type and scratch.
   void JumpIfInstanceTypeIsNotSequentialAscii(
@@ -684,6 +772,9 @@
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
+  // Bit-field support.
+  void TestBit(const Operand& dst, int bit_index);
+
   // Handle support
   void Move(Register dst, Handle<Object> source);
   void Move(const Operand& dst, Handle<Object> source);
@@ -693,6 +784,22 @@
   void Cmp(const Operand& dst, Smi* src);
   void Push(Handle<Object> source);
 
+  // Load a heap object and handle the case of new-space objects by
+  // indirecting via a global cell.
+  void LoadHeapObject(Register result, Handle<HeapObject> object);
+  void PushHeapObject(Handle<HeapObject> object);
+
+  void LoadObject(Register result, Handle<Object> object) {
+    if (object->IsHeapObject()) {
+      LoadHeapObject(result, Handle<HeapObject>::cast(object));
+    } else {
+      Move(result, object);
+    }
+  }
+
+  // Load a global cell into a register.
+  void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the rsp register.
   void Drop(int stack_elements);
@@ -760,13 +867,46 @@
                          Label* fail,
                          Label::Distance distance = Label::kFar);
 
-  // Check if the map of an object is equal to a specified map and
-  // branch to label if not. Skip the smi check if not required
-  // (object is known to be a heap object)
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map,
+                               Label* fail,
+                               Label::Distance distance = Label::kFar);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiOnlyElements(Register map,
+                                Label* fail,
+                                Label::Distance distance = Label::kFar);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by index in
+  // the FastDoubleElements array elements, otherwise jump to fail.  Note that
+  // index must not be smi-tagged.
+  void StoreNumberToDoubleElements(Register maybe_number,
+                                   Register elements,
+                                   Register index,
+                                   XMMRegister xmm_scratch,
+                                   Label* fail);
+
+  // Compare an object's map with the specified map and its transitioned
+  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
+  // result of map compare. If multiple map compares are required, the compare
+  // sequence branches to early_success.
+  void CompareMap(Register obj,
+                  Handle<Map> map,
+                  Label* early_success,
+                  CompareMapMode mode = REQUIRE_EXACT_MAP);
+
+  // Check if the map of an object is equal to a specified map and branch to
+  // label if not. Skip the smi check if not required (object is known to be a
+  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+  // against maps that are ElementsKind transition maps of the specified map.
   void CheckMap(Register obj,
                 Handle<Map> map,
                 Label* fail,
-                SmiCheckType smi_check_type);
+                SmiCheckType smi_check_type,
+                CompareMapMode mode = REQUIRE_EXACT_MAP);
 
   // Check if the map of an object is equal to a specified map and branch to a
   // specified target if equal. Skip the smi check if not required (object is
@@ -809,6 +949,10 @@
   void AbortIfNotSmi(Register object);
   void AbortIfNotSmi(const Operand& object);
 
+  // Abort execution if a 64 bit register containing a 32 bit payload does not
+  // have zeros in the top 32 bits.
+  void AbortIfNotZeroExtended(Register reg);
+
   // Abort execution if argument is a string. Used in debug code.
   void AbortIfNotString(Register object);
 
@@ -820,9 +964,8 @@
   // ---------------------------------------------------------------------------
   // Exception handling
 
-  // Push a new try handler and link into try handler chain.  The return
-  // address must be pushed before calling this helper.
-  void PushTryHandler(CodeLocation try_location, HandlerType type);
+  // Push a new try handler and link it into try handler chain.
+  void PushTryHandler(StackHandler::Kind kind, int handler_index);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   void PopTryHandler();
@@ -832,7 +975,7 @@
   void Throw(Register value);
 
   // Propagate an uncatchable exception out of the current JS stack.
-  void ThrowUncatchable(UncatchableExceptionType type, Register value);
+  void ThrowUncatchable(Register value);
 
   // ---------------------------------------------------------------------------
   // Inline caching support
@@ -966,7 +1109,8 @@
   // clobbered.
   void TryGetFunctionPrototype(Register function,
                                Register result,
-                               Label* miss);
+                               Label* miss,
+                               bool miss_on_bound_function = false);
 
   // Generates code for reporting that an illegal operation has
   // occurred.
@@ -981,6 +1125,22 @@
   // Find the function context up the context chain.
   void LoadContext(Register dst, int context_chain_length);
 
+  // Conditionally load the cached Array transitioned map of type
+  // transitioned_kind from the global context if the map in register
+  // map_in_out is the cached Array map in the global context of
+  // expected_kind.
+  void LoadTransitionedArrayMapConditional(
+      ElementsKind expected_kind,
+      ElementsKind transitioned_kind,
+      Register map_in_out,
+      Register scratch,
+      Label* no_map_match);
+
+  // Load the initial map for new Arrays from a JSFunction.
+  void LoadInitialArrayMap(Register function_in,
+                           Register scratch,
+                           Register map_out);
+
   // Load the global function with the given index.
   void LoadGlobalFunction(int index, Register function);
 
@@ -994,19 +1154,9 @@
   // Call a code stub.
   void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
 
-  // Call a code stub and return the code object called.  Try to generate
-  // the code if necessary.  Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
   // Tail call a code stub (jump).
   void TailCallStub(CodeStub* stub);
 
-  // Tail call a code stub (jump) and return the code object called.  Try to
-  // generate the code if necessary.  Do not perform a GC but instead return
-  // a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
 
@@ -1016,19 +1166,9 @@
   // Call a runtime function and save the value of XMM registers.
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
-  // Call a runtime function, returning the CodeStub object called.
-  // Try to generate the stub code if necessary.  Do not perform a GC
-  // but instead return a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
-                                              int num_arguments);
-
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId id, int num_arguments);
 
-  // Convenience function: Same as above, but takes the fid instead.
-  MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
-                                              int num_arguments);
-
   // Convenience function: call an external reference.
   void CallExternalReference(const ExternalReference& ext,
                              int num_arguments);
@@ -1040,38 +1180,26 @@
                                  int num_arguments,
                                  int result_size);
 
-  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
-      const ExternalReference& ext, int num_arguments, int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
-  MUST_USE_RESULT  MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
-                                                   int num_arguments,
-                                                   int result_size);
-
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& ext, int result_size);
 
-  // Jump to a runtime routine.
-  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext,
-                                          int result_size);
-
-  // Prepares stack to put arguments (aligns and so on).
-  // WIN64 calling convention requires to put the pointer to the return value
-  // slot into rcx (rcx must be preserverd until TryCallApiFunctionAndReturn).
-  // Saves context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
+  // Prepares stack to put arguments (aligns and so on).  WIN64 calling
+  // convention requires the pointer to the return value slot to be put into
+  // rcx (rcx must be preserved until CallApiFunctionAndReturn).  Saves
+  // context (rsi).  Clobbers rax.  Allocates arg_stack_space * kPointerSize
   // inside the exit frame (not GCed) accessible via StackSpaceOperand.
   void PrepareCallApiFunction(int arg_stack_space);
 
-  // Calls an API function. Allocates HandleScope, extracts
-  // returned value from handle and propagates exceptions.
-  // Clobbers r14, r15, rbx and caller-save registers. Restores context.
-  // On return removes stack_space * kPointerSize (GCed).
-  MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
-      ApiFunction* function, int stack_space);
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Clobbers r14, r15, rbx and
+  // caller-save registers.  Restores context.  On return removes
+  // stack_space * kPointerSize (GCed).
+  void CallApiFunctionAndReturn(Address function_address, int stack_space);
 
   // Before calling a C-function from generated code, align arguments on stack.
   // After aligning the frame, arguments must be stored in esp[0], esp[4],
@@ -1120,6 +1248,13 @@
                  int min_length = 0,
                  Register scratch = kScratchRegister);
 
+  // Initialize fields with filler values.  Fields starting at |start_offset|
+  // up to but not including |end_offset| are overwritten with the value in
+  // |filler|.  At the end of the loop, |start_offset| takes the value of
+  // |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset,
+                                  Register end_offset,
+                                  Register filler);
+
 
   // ---------------------------------------------------------------------------
   // StatsCounter support
@@ -1152,20 +1287,33 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
 
   static int SafepointRegisterStackIndex(Register reg) {
     return SafepointRegisterStackIndex(reg.code());
   }
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
+  // Expects object in rax and returns map with validated enum cache
+  // in rax.  Assumes that any other register can be used as a scratch.
+  void CheckEnumCache(Register null_value,
+                      Label* call_runtime);
+
  private:
   // Order general registers are pushed by Pushad.
   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
-  static int kSafepointPushRegisterIndices[Register::kNumRegisters];
+  static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
   static const int kNumSafepointSavedRegisters = 11;
   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
   bool generating_stub_;
   bool allow_stub_calls_;
+  bool has_frame_;
   bool root_array_available_;
 
   // Returns a register holding the smi value. The register MUST NOT be
@@ -1184,15 +1332,12 @@
                       Handle<Code> code_constant,
                       Register code_register,
                       Label* done,
+                      bool* definitely_mismatches,
                       InvokeFlag flag,
                       Label::Distance near_jump = Label::kFar,
                       const CallWrapper& call_wrapper = NullCallWrapper(),
                       CallKind call_kind = CALL_AS_METHOD);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
   void EnterExitFramePrologue(bool save_rax);
 
   // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
@@ -1219,6 +1364,24 @@
                                Register scratch,
                                bool gc_allowed);
 
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,
+                  Label* branch,
+                  Label::Distance distance = Label::kFar);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds the position of the first bit.  Uses rcx as scratch and
+  // leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg,
+                          Register bitmap_reg,
+                          Register mask_reg);
+
+  // Helper for throwing exceptions.  Compute a handler address and jump to
+  // it.  See the implementation for register usage.
+  void JumpToHandlerEntry();
 
   // Compute memory operands for safepoint stack slots.
   Operand SafepointRegisterSlot(Register reg);
@@ -1256,32 +1419,32 @@
 // Static helper functions.
 
 // Generate an Operand for loading a field from an object.
-static inline Operand FieldOperand(Register object, int offset) {
+inline Operand FieldOperand(Register object, int offset) {
   return Operand(object, offset - kHeapObjectTag);
 }
 
 
 // Generate an Operand for loading an indexed field from an object.
-static inline Operand FieldOperand(Register object,
-                                   Register index,
-                                   ScaleFactor scale,
-                                   int offset) {
+inline Operand FieldOperand(Register object,
+                            Register index,
+                            ScaleFactor scale,
+                            int offset) {
   return Operand(object, index, scale, offset - kHeapObjectTag);
 }
 
 
-static inline Operand ContextOperand(Register context, int index) {
+inline Operand ContextOperand(Register context, int index) {
   return Operand(context, Context::SlotOffset(index));
 }
 
 
-static inline Operand GlobalObjectOperand() {
+inline Operand GlobalObjectOperand() {
   return ContextOperand(rsi, Context::GLOBAL_INDEX);
 }
 
 
 // Provides access to exit frame stack space (not GCed).
-static inline Operand StackSpaceOperand(int index) {
+inline Operand StackSpaceOperand(int index) {
 #ifdef _WIN64
   const int kShaddowSpace = 4;
   return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
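
The de-static-ed helpers above all revolve around V8's pointer tagging: a heap object reference is the real address plus kHeapObjectTag, so every field operand subtracts the tag back out, and smis are recognisable by their clear low bit. A minimal sketch of the scheme:

```cpp
#include <cstdint>

constexpr intptr_t kHeapObjectTag = 1;  // low bit marks a heap pointer

// FieldOperand's 'offset - kHeapObjectTag' cancels the tag, so the CPU
// sees a plain base+displacement address.
uintptr_t FieldAddress(uintptr_t tagged_object, int offset) {
  return tagged_object + offset - kHeapObjectTag;
}

// Smis keep the low bit clear, which is why a single test of that bit
// (JumpIfSmi) distinguishes immediates from heap objects.
bool IsSmi(intptr_t value) { return (value & kHeapObjectTag) == 0; }
```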
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index a782bd7..837c254 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -193,7 +193,7 @@
 void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
   Label not_at_start;
   // Did we start the match at the start of the string at all?
-  __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+  __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
   BranchOrBacktrack(not_equal, &not_at_start);
   // If we did, are we still at the start of the input?
   __ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -205,7 +205,7 @@
 
 void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
   // Did we start the match at the start of the string at all?
-  __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+  __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
   BranchOrBacktrack(not_equal, on_not_at_start);
   // If we did, are we still at the start of the input?
   __ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -226,7 +226,7 @@
                                               bool check_end_of_string) {
 #ifdef DEBUG
   // If input is ASCII, don't even bother calling here if the string to
-  // match contains a non-ascii character.
+  // match contains a non-ASCII character.
   if (mode_ == ASCII) {
     ASSERT(String::IsAscii(str.start(), str.length()));
   }
@@ -431,9 +431,14 @@
     // Isolate.
     __ LoadAddress(rcx, ExternalReference::isolate_address());
 #endif
-    ExternalReference compare =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
-    __ CallCFunction(compare, num_arguments);
+
+    { // NOLINT: Can't find a way to open this scope without confusing the
+      // linter.
+      AllowExternalCallThatCantCauseGC scope(&masm_);
+      ExternalReference compare =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
+      __ CallCFunction(compare, num_arguments);
+    }
 
     // Restore original values before reacting on result value.
     __ Move(code_object_pointer(), masm_.CodeObject());
@@ -559,7 +564,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  ASSERT(minus < String::kMaxUC16CharCode);
+  ASSERT(minus < String::kMaxUtf16CodeUnit);
   __ lea(rax, Operand(current_character(), -minus));
   __ and_(rax, Immediate(mask));
   __ cmpl(rax, Immediate(c));
@@ -706,7 +711,12 @@
   // registers we need.
   // Entry code:
   __ bind(&entry_label_);
-  // Start new stack frame.
+
+  // Tell the system that we have a stack frame.  Because the type is MANUAL, no
+  // code is generated.
+  FrameScope scope(&masm_, StackFrame::MANUAL);
+
+  // Actually emit code to start a new stack frame.
   __ push(rbp);
   __ movq(rbp, rsp);
   // Save parameters and callee-save registers. Order here should correspond
@@ -1182,7 +1192,7 @@
   ASSERT(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
-  MaybeObject* result = Execution::HandleStackGuardInterrupt();
+  MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
 
   if (*code_handle != re_code) {  // Return address no longer valid
     intptr_t delta = code_handle->address() - re_code->address();
@@ -1238,6 +1248,11 @@
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+    // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC. That will not change start_address but
+    // will change the pointer inside the subject handle.
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
   }
 
   return 0;
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 76d2555..f07f6b6 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,32 +43,61 @@
                        MacroAssembler* masm,
                        Code::Flags flags,
                        StubCache::Table table,
+                       Register receiver,
                        Register name,
+                       // The offset is scaled by 4, based on
+                       // kHeapObjectTagSize, which is two bits
                        Register offset) {
-  ASSERT_EQ(8, kPointerSize);
-  ASSERT_EQ(16, sizeof(StubCache::Entry));
+  // We need to scale up the pointer by 2 because the offset is scaled by less
+  // than the pointer size.
+  ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
+  ScaleFactor scale_factor = times_2;
+
+  ASSERT_EQ(24, sizeof(StubCache::Entry));
   // The offset register holds the entry offset times four (due to masking
   // and shifting optimizations).
   ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
   Label miss;
 
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ lea(offset, Operand(offset, offset, times_2, 0));
+
   __ LoadAddress(kScratchRegister, key_offset);
+
   // Check that the key in the entry matches the name.
   // The offset register now holds the entry offset times twelve: times
   // four from the masking and shifting, times three from the lea above.
   // A further times_2 operand scale yields the 24-byte entry address.
-  __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
+  __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0));
   __ j(not_equal, &miss);
-  // Get the code entry from the cache.
-  // Use key_offset + kPointerSize, rather than loading value_offset.
+
+  // Get the map entry from the cache.
+  // Use key_offset + kPointerSize * 2, rather than loading map_offset.
   __ movq(kScratchRegister,
-          Operand(kScratchRegister, offset, times_4, kPointerSize));
+          Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
+  __ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ j(not_equal, &miss);
+
+  // Get the code entry from the cache.
+  __ LoadAddress(kScratchRegister, value_offset);
+  __ movq(kScratchRegister,
+          Operand(kScratchRegister, offset, scale_factor, 0));
+
   // Check that the flags match what we're looking for.
   __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
   __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
   __ cmpl(offset, Immediate(flags));
   __ j(not_equal, &miss);
 
+#ifdef DEBUG
+    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+      __ jmp(&miss);
+    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+      __ jmp(&miss);
+    }
+#endif
+
   // Jump to the first instruction in the code stub.
   __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ jmp(kScratchRegister);
@@ -82,13 +111,12 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss_label,
-    Register receiver,
-    String* name,
-    Register r0,
-    Register r1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                             Label* miss_label,
+                                             Register receiver,
+                                             Handle<String> name,
+                                             Register r0,
+                                             Register r1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1);
@@ -118,19 +146,14 @@
   __ j(not_equal, miss_label);
 
   Label done;
-  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
-      masm,
-      miss_label,
-      &done,
-      properties,
-      name,
-      r1);
-  if (result->IsFailure()) return result;
-
+  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     properties,
+                                                     name,
+                                                     r1);
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
-
-  return result;
 }
 
 
@@ -140,14 +163,16 @@
                               Register name,
                               Register scratch,
                               Register extra,
-                              Register extra2) {
+                              Register extra2,
+                              Register extra3) {
   Isolate* isolate = masm->isolate();
   Label miss;
   USE(extra);   // The register extra is not used on the X64 platform.
   USE(extra2);  // The register extra2 is not used on the X64 platform.
-  // Make sure that code is valid. The shifting code relies on the
-  // entry size being 16.
-  ASSERT(sizeof(Entry) == 16);
+  USE(extra3);  // The register extra3 is not used on the X64 platform.
+  // Make sure that code is valid. The multiplying code relies on the
+  // entry size being 24.
+  ASSERT(sizeof(Entry) == 24);
 
   // Make sure the flags do not name a specific type.
   ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -159,6 +184,10 @@
   // Check scratch register is valid, extra and extra2 are unused.
   ASSERT(!scratch.is(no_reg));
   ASSERT(extra2.is(no_reg));
+  ASSERT(extra3.is(no_reg));
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
 
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
@@ -168,10 +197,12 @@
   // Use only the low 32 bits of the map pointer.
   __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
   __ xor_(scratch, Immediate(flags));
+  // We mask out the last two bits because they are not part of the hash and
+  // they are always 01 for maps.  Also in the two 'and' instructions below.
   __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, flags, kPrimary, name, scratch);
+  ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
 
   // Primary miss: Compute hash for secondary probe.
   __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -183,11 +214,12 @@
   __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, kSecondary, name, scratch);
+  ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
   __ bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
 }
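
The probe arithmetic deserves spelling out: the hash masking leaves offset holding the entry index times four (kHeapObjectTagSize bits), the lea triples that to index * 12, and the times_2 operand scale doubles it again to index * 24, exactly sizeof(Entry) now that each entry carries a map field. A sketch of the address computation (64-bit layout assumed):

```cpp
#include <cstdint>

struct Entry { void* name; void* code; void* map; };  // 3 * 8 = 24 bytes
static_assert(sizeof(Entry) == 24, "x64 layout assumed");

// offset arrives as entry_index * 4, courtesy of the hash masking.
uintptr_t EntryAddress(uintptr_t table_base, uintptr_t offset) {
  offset += offset * 2;            // lea(offset, offset + offset*2): * 3
  return table_base + offset * 2;  // times_2 scale in the operand:   * 2
}
```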
 
 
@@ -211,7 +243,10 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
   Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ Move(prototype, isolate->global());
@@ -219,8 +254,8 @@
           prototype);
   __ j(not_equal, miss);
   // Get the global function with the given index.
-  JSFunction* function =
-      JSFunction::cast(isolate->global_context()->get(index));
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->global_context()->get(index)));
   // Load its initial map. The global functions all have initial maps.
   __ Move(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
@@ -312,8 +347,10 @@
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst, Register src,
-                                            JSObject* holder, int index) {
+                                            Register dst,
+                                            Register src,
+                                            Handle<JSObject> holder,
+                                            int index) {
   // Adjust for the number of properties stored in the holder.
   index -= holder->map()->inobject_properties();
   if (index < 0) {
@@ -333,11 +370,11 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     JSObject* holder_obj) {
+                                     Handle<JSObject> holder_obj) {
   __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
-  __ Move(kScratchRegister, Handle<Object>(interceptor));
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  __ Move(kScratchRegister, interceptor);
   __ push(kScratchRegister);
   __ push(receiver);
   __ push(holder);
@@ -345,11 +382,12 @@
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
-                                                   Register receiver,
-                                                   Register holder,
-                                                   Register name,
-                                                   JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
   ExternalReference ref =
@@ -403,9 +441,9 @@
 
 
 // Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
-                                        const CallOptimization& optimization,
-                                        int argc) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+                                const CallOptimization& optimization,
+                                int argc) {
   // ----------- S t a t e -------------
   //  -- rsp[0]              : return address
   //  -- rsp[8]              : object passing the type check
@@ -420,29 +458,25 @@
   //  -- rsp[(argc + 4) * 8] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  JSFunction* function = optimization.constant_function();
-  __ Move(rdi, Handle<JSFunction>(function));
+  Handle<JSFunction> function = optimization.constant_function();
+  __ LoadHeapObject(rdi, function);
   __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
   // Pass the additional arguments.
   __ movq(Operand(rsp, 2 * kPointerSize), rdi);
-  Object* call_data = optimization.api_call_info()->data();
-  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
-  if (masm->isolate()->heap()->InNewSpace(call_data)) {
-    __ Move(rcx, api_call_info_handle);
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data(api_call_info->data());
+  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+    __ Move(rcx, api_call_info);
     __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
     __ movq(Operand(rsp, 3 * kPointerSize), rbx);
   } else {
-    __ Move(Operand(rsp, 3 * kPointerSize), Handle<Object>(call_data));
+    __ Move(Operand(rsp, 3 * kPointerSize), call_data);
   }
 
   // Prepare arguments.
   __ lea(rbx, Operand(rsp, 3 * kPointerSize));
 
-  Object* callback = optimization.api_call_info()->callback();
-  Address api_function_address = v8::ToCData<Address>(callback);
-  ApiFunction fun(api_function_address);
-
 #ifdef _WIN64
   // Win64 uses the first register (rcx) for the returned value.
   Register arguments_arg = rdx;
@@ -465,12 +499,11 @@
 
   // v8::InvocationCallback's argument.
   __ lea(arguments_arg, StackSpaceOperand(0));
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
-  return masm->TryCallApiFunctionAndReturn(&fun,
-                                           argc + kFastApiCallArguments + 1);
+
+  // Function address is a foreign pointer outside V8's heap.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  __ CallApiFunctionAndReturn(function_address,
+                              argc + kFastApiCallArguments + 1);
 }
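
This hunk is typical of the whole change: raw Object*/MaybeObject* plumbing becomes Handle<> based, so emitters like GenerateFastApiCall may allocate (and trigger GC) freely and return void instead of threading allocation failures back to every caller. A toy sketch of the handle indirection, with stand-in types rather than V8's:

#include <cstddef>
#include <vector>

struct Object {};

// Slots owned by a scope: a moving GC rewrites the pointers stored here, so
// a Handle stays valid across allocation where a raw Object* would dangle.
static std::vector<Object*> handle_slots;

template <typename T>
class Handle {
 public:
  explicit Handle(T* value) : slot_(handle_slots.size()) {
    handle_slots.push_back(value);
  }
  T* operator*() const { return static_cast<T*>(handle_slots[slot_]); }
 private:
  std::size_t slot_;  // index, not a pointer, so GC moves are transparent
};
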
 
 
@@ -485,16 +518,16 @@
         name_(name),
         extra_ic_state_(extra_ic_state) {}
 
-  MaybeObject* Compile(MacroAssembler* masm,
-                       JSObject* object,
-                       JSObject* holder,
-                       String* name,
-                       LookupResult* lookup,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       Label* miss) {
+  void Compile(MacroAssembler* masm,
+               Handle<JSObject> object,
+               Handle<JSObject> holder,
+               Handle<String> name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
@@ -502,45 +535,27 @@
     __ JumpIfSmi(receiver, miss);
 
     CallOptimization optimization(lookup);
-
     if (optimization.is_constant_call()) {
-      return CompileCacheable(masm,
-                              object,
-                              receiver,
-                              scratch1,
-                              scratch2,
-                              scratch3,
-                              holder,
-                              lookup,
-                              name,
-                              optimization,
-                              miss);
+      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+                       holder, lookup, name, optimization, miss);
     } else {
-      CompileRegular(masm,
-                     object,
-                     receiver,
-                     scratch1,
-                     scratch2,
-                     scratch3,
-                     name,
-                     holder,
-                     miss);
-      return masm->isolate()->heap()->undefined_value();  // Success.
+      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+                     name, holder, miss);
     }
   }
 
  private:
-  MaybeObject* CompileCacheable(MacroAssembler* masm,
-                                JSObject* object,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                JSObject* interceptor_holder,
-                                LookupResult* lookup,
-                                String* name,
-                                const CallOptimization& optimization,
-                                Label* miss_label) {
+  void CompileCacheable(MacroAssembler* masm,
+                        Handle<JSObject> object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        Register scratch3,
+                        Handle<JSObject> interceptor_holder,
+                        LookupResult* lookup,
+                        Handle<String> name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
@@ -549,16 +564,14 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
-      depth1 =
-          optimization.GetPrototypeDepthOfExpectedType(object,
-                                                       interceptor_holder);
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(
+          object, interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 =
-            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
-                                                         lookup->holder());
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            interceptor_holder, Handle<JSObject>(lookup->holder()));
       }
-      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
-                             (depth2 != kInvalidProtoDepth);
+      can_do_fast_api_call =
+          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
     }
 
     Counters* counters = masm->isolate()->counters();
@@ -574,9 +587,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver,
-                                        interceptor_holder, scratch1,
-                                        scratch2, scratch3, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3,
+                                        name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -589,10 +602,11 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      lookup->holder(), scratch1,
-                                      scratch2, scratch3, name, depth2, miss);
+                                      Handle<JSObject>(lookup->holder()),
+                                      scratch1, scratch2, scratch3,
+                                      name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -603,10 +617,7 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      MaybeObject* result = GenerateFastApiCall(masm,
-                                                optimization,
-                                                arguments_.immediate());
-      if (result->IsFailure()) return result;
+      GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
@@ -627,33 +638,27 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm, scratch1);
     }
-
-    return masm->isolate()->heap()->undefined_value();  // Success.
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      JSObject* object,
+                      Handle<JSObject> object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      String* name,
-                      JSObject* interceptor_holder,
+                      Handle<String> name,
+                      Handle<JSObject> interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3, name,
-                                        miss_label);
+                                        scratch1, scratch2, scratch3,
+                                        name, miss_label);
 
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             interceptor_holder);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
 
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -662,27 +667,30 @@
 
     // Restore the name_ register.
     __ pop(name_);
-    __ LeaveInternalFrame();
+
+    // Leave the internal frame.
   }
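
CompileRegular and the helpers below trade explicit EnterInternalFrame/LeaveInternalFrame pairs for a FrameScope whose destructor emits the teardown; that is what the "// Leave the internal frame." comments mark. A hedged model of the RAII idiom, with the assembler reduced to prints:

#include <cstdio>

struct MacroAssembler { void Emit(const char* s) { std::puts(s); } };

// Emits frame setup on construction and teardown on destruction, so every
// exit path from the enclosing scope leaves the frame exactly once.
class FrameScope {
 public:
  explicit FrameScope(MacroAssembler* masm) : masm_(masm) {
    masm_->Emit("enter internal frame");
  }
  ~FrameScope() { masm_->Emit("leave internal frame"); }
 private:
  MacroAssembler* masm_;
};

void Generate(MacroAssembler* masm) {
  FrameScope scope(masm);        // enter
  masm->Emit("...body...");
}                                // leave, even on early exit
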
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           JSObject* holder_obj,
+                           Handle<JSObject> holder_obj,
                            Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
-    __ push(holder);  // Save the holder.
-    __ push(name_);  // Save the name.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(holder);  // Save the holder.
+      __ push(name_);  // Save the name.
 
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
+      CompileCallLoadPropertyWithInterceptor(masm,
+                                             receiver,
+                                             holder,
+                                             name_,
+                                             holder_obj);
 
-    __ pop(name_);  // Restore the name.
-    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
+      __ pop(name_);  // Restore the name.
+      __ pop(receiver);  // Restore the holder.
+      // Leave the internal frame.
+    }
 
     __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
     __ j(not_equal, interceptor_succeeded);
@@ -697,43 +705,33 @@
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Code* code = NULL;
-  if (kind == Code::LOAD_IC) {
-    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
-  } else {
-    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
-  }
-
-  Handle<Code> ic(code);
-  __ Jump(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code = (kind == Code::LOAD_IC)
+      ? masm->isolate()->builtins()->LoadIC_Miss()
+      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
 void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
-  Code* code = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  Handle<Code> ic(code);
-  __ Jump(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
 // Both name_reg and receiver_reg are preserved on jumps to miss_label,
 // but may be destroyed if store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      JSObject* object,
+                                      Handle<JSObject> object,
                                       int index,
-                                      Map* transition,
+                                      Handle<Map> transition,
                                       Register receiver_reg,
                                       Register name_reg,
                                       Register scratch,
                                       Label* miss_label) {
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver_reg, miss_label);
-
   // Check that the map of the object hasn't changed.
-  __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
-         Handle<Map>(object->map()));
-  __ j(not_equal, miss_label);
+  __ CheckMap(receiver_reg, Handle<Map>(object->map()),
+              miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -745,12 +743,12 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   // Perform map transition for the receiver if necessary.
-  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ pop(scratch);  // Return address.
     __ push(receiver_reg);
-    __ Push(Handle<Map>(transition));
+    __ Push(transition);
     __ push(rax);
     __ push(scratch);
     __ TailCallExternalReference(
@@ -761,11 +759,10 @@
     return;
   }
 
-  if (transition != NULL) {
+  if (!transition.is_null()) {
     // Update the map of the object; no write barrier updating is
     // needed because the map is never in new space.
-    __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
-            Handle<Map>(transition));
+    __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset), transition);
   }
 
   // Adjust for the number of properties stored in the object. Even in the
@@ -781,7 +778,8 @@
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
     __ movq(name_reg, rax);
-    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+    __ RecordWriteField(
+        receiver_reg, offset, name_reg, scratch, kDontSaveFPRegs);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -792,7 +790,8 @@
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
     __ movq(name_reg, rax);
-    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+    __ RecordWriteField(
+        scratch, offset, name_reg, receiver_reg, kDontSaveFPRegs);
   }
 
   // Return the value (register rax).
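
The RecordWrite → RecordWriteField change hooks these stores into the new write barrier: with a generational, incrementally marked heap, a pointer store into an object must be reported to the GC. A toy generational model of why the barrier follows the store:

#include <cstddef>
#include <set>

struct Obj {
  Obj* field;
  bool in_old_space;
};

// Old-space objects whose slots may point into new space.
static std::set<Obj*> remembered_set;

void WriteField(Obj* holder, Obj* value) {
  holder->field = value;  // the raw store (the movq above)
  if (holder->in_old_space && value != NULL && !value->in_old_space) {
    remembered_set.insert(holder);  // the write barrier records the holder
  }
}
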
@@ -803,37 +802,53 @@
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
-    MacroAssembler* masm,
-    GlobalObject* global,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  Object* probe;
-  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+                                      Handle<GlobalObject> global,
+                                      Handle<String> name,
+                                      Register scratch,
+                                      Label* miss) {
+  Handle<JSGlobalPropertyCell> cell =
+      GlobalObject::EnsurePropertyCell(global, name);
   ASSERT(cell->value()->IsTheHole());
-  __ Move(scratch, Handle<Object>(cell));
+  __ Move(scratch, cell);
   __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
          masm->isolate()->factory()->the_hole_value());
   __ j(not_equal, miss);
-  return cell;
 }
 
 
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+                                       Handle<JSObject> object,
+                                       Handle<JSObject> holder,
+                                       Handle<String> name,
+                                       Register scratch,
+                                       Label* miss) {
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
+    if (current->IsGlobalObject()) {
+      GenerateCheckPropertyCell(masm,
+                                Handle<GlobalObject>::cast(current),
+                                name,
+                                scratch,
+                                miss);
+    }
+    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
+  }
+}
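
A plain-C++ rendering of the loop in GenerateCheckPropertyCells, assuming each global object on the chain keeps one cell per name that stays "the hole" while the property is absent (structures illustrative):

#include <map>
#include <string>

struct ProtoObj {
  bool is_global;
  ProtoObj* prototype;
  std::map<std::string, bool> cells;  // name -> cell already holds a value
};

bool ChainStillMisses(ProtoObj* object, ProtoObj* holder,
                      const std::string& name) {
  for (ProtoObj* cur = object; cur != holder; cur = cur->prototype) {
    if (cur->is_global && cur->cells[name]) return false;  // cell filled in
  }
  return true;  // every intervening global cell is still the hole
}
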
+
 #undef __
 #define __ ACCESS_MASM((masm()))
 
 
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                        Register object_reg,
-                                       JSObject* holder,
+                                       Handle<JSObject> holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
-                                       String* name,
+                                       Handle<String> name,
                                        int save_at_depth,
                                        Label* miss) {
   // Make sure there's no overlap between holder and object registers.
@@ -853,80 +868,56 @@
 
   // Check the maps in the prototype chain.
   // Traverse the prototype chain from the object and do map checks.
-  JSObject* current = object;
-  while (current != holder) {
-    depth++;
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
+    ++depth;
 
     // Only global objects and objects that do not require access
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
-    JSObject* prototype = JSObject::cast(current->GetPrototype());
+    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        MaybeObject* lookup_result = heap()->LookupSymbol(name);
-        if (lookup_result->IsFailure()) {
-          set_failure(Failure::cast(lookup_result));
-          return reg;
-        } else {
-          name = String::cast(lookup_result->ToObjectUnchecked());
-        }
+        name = factory()->LookupSymbol(name);
       }
-      ASSERT(current->property_dictionary()->FindEntry(name) ==
+      ASSERT(current->property_dictionary()->FindEntry(*name) ==
              StringDictionary::kNotFound);
 
-      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
-                                                                      miss,
-                                                                      reg,
-                                                                      name,
-                                                                      scratch1,
-                                                                      scratch2);
-      if (negative_lookup->IsFailure()) {
-        set_failure(Failure::cast(negative_lookup));
-        return reg;
-      }
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+                                       scratch1, scratch2);
 
       __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // from now the object is in holder_reg
+      reg = holder_reg;  // From now on the object will be in holder_reg.
       __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-    } else if (heap()->InNewSpace(prototype)) {
-      // Get the map of the current object.
-      __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      __ Cmp(scratch1, Handle<Map>(current->map()));
-      // Branch on the result of the map check.
-      __ j(not_equal, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
-        // Restore scratch register to be the map of the object.
-        // We load the prototype from the map in the scratch register.
+    } else {
+      bool in_new_space = heap()->InNewSpace(*prototype);
+      Handle<Map> current_map(current->map());
+      if (in_new_space) {
+        // Save the map in scratch1 for later.
         __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
       }
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+      __ CheckMap(reg, Handle<Map>(current_map),
+                  miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
-    } else {
-      // Check the map of the current object.
-      __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
-          Handle<Map>(current->map()));
-      // Branch on the result of the map check.
-      __ j(not_equal, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
       if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
       }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ Move(reg, Handle<JSObject>(prototype));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (in_new_space) {
+        // The prototype is in new space; we cannot store a reference to it
+        // in the code.  Load it from the map.
+        __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+      } else {
+        // The prototype is in old space; load it directly.
+        __ Move(reg, prototype);
+      }
     }
 
     if (save_at_depth == depth) {
@@ -936,62 +927,46 @@
     // Go to the next object in the prototype chain.
     current = prototype;
   }
-
-  // Check the holder map.
-  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
-  __ j(not_equal, miss);
+  ASSERT(current.is_identical_to(holder));
 
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
-  // Perform security check for access to the global object and return
-  // the holder register.
-  ASSERT(current == holder);
+  // Check the holder map.
+  __ CheckMap(reg, Handle<Map>(holder->map()),
+              miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+
+  // Perform security check for access to the global object.
   ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
   if (current->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
   }
 
-  // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.  We also need to check that the
-  // property cell for the property is still empty.
-  current = object;
-  while (current != holder) {
-    if (current->IsGlobalObject()) {
-      MaybeObject* cell = GenerateCheckPropertyCell(masm(),
-                                                    GlobalObject::cast(current),
-                                                    name,
-                                                    scratch1,
-                                                    miss);
-      if (cell->IsFailure()) {
-        set_failure(Failure::cast(cell));
-        return reg;
-      }
-    }
-    current = JSObject::cast(current->GetPrototype());
-  }
+  // If we've skipped any global objects, it's not enough to verify that
+  // their maps haven't changed.  We also need to check that the property
+  // cell for the property is still empty.
+  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
 
   // Return the register containing the holder.
   return reg;
 }
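
Stripped of register shuffling, CheckPrototypes bakes the expected map of every object from receiver to holder into the stub and re-verifies each at run time; any mismatch jumps to miss. A compact model under those assumptions:

struct Map;                                  // identity stands for layout
struct JSObj { Map* map; JSObj* prototype; };

// expected_maps[] holds the maps recorded when the stub was compiled.
bool CheckChain(JSObj* object, JSObj* holder, Map* const* expected_maps) {
  int depth = 0;
  for (JSObj* cur = object; cur != holder; cur = cur->prototype, ++depth) {
    if (cur->map != expected_maps[depth]) return false;  // map changed: miss
  }
  return holder->map == expected_maps[depth];            // holder map check
}
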
 
 
-void StubCompiler::GenerateLoadField(JSObject* object,
-                                     JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+                                     Handle<JSObject> holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      int index,
-                                     String* name,
+                                     Handle<String> name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check the prototype chain.
-  Register reg =
-      CheckPrototypes(object, receiver, holder,
-                      scratch1, scratch2, scratch3, name, miss);
+  Register reg = CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Get the value from the properties.
   GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
@@ -999,25 +974,22 @@
 }
 
 
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
-                                                JSObject* holder,
-                                                Register receiver,
-                                                Register name_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                AccessorInfo* callback,
-                                                String* name,
-                                                Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Handle<AccessorInfo> callback,
+                                        Handle<String> name,
+                                        Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1,
-                      scratch2, scratch3, name, miss);
-
-  Handle<AccessorInfo> callback_handle(callback);
+  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+                                 scratch2, scratch3, name, miss);
 
   // Insert additional parameters into the stack frame above return address.
   ASSERT(!scratch2.is(reg));
@@ -1025,11 +997,11 @@
 
   __ push(receiver);  // receiver
   __ push(reg);  // holder
-  if (heap()->InNewSpace(callback_handle->data())) {
-    __ Move(scratch1, callback_handle);
+  if (heap()->InNewSpace(callback->data())) {
+    __ Move(scratch1, callback);
     __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));  // data
   } else {
-    __ Push(Handle<Object>(callback_handle->data()));
+    __ Push(Handle<Object>(callback->data()));
   }
   __ push(name_reg);  // name
   // Save a pointer to where we pushed the arguments pointer.
@@ -1048,11 +1020,7 @@
   __ movq(name_arg, rsp);
   __ push(scratch2);  // Restore return address.
 
-  // Do call through the api.
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-
-  // 3 elements array for v8::Agruments::values_ and handler for name.
+  // 3-element array for v8::Arguments::values_ and a handler for the name.
   const int kStackSpace = 4;
 
   // Allocate v8::AccessorInfo in non-GCed stack space.
@@ -1068,45 +1036,42 @@
   // could be used to pass arguments.
   __ lea(accessor_info_arg, StackSpaceOperand(0));
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
-  return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  __ CallApiFunctionAndReturn(getter_address, kStackSpace);
 }
 
 
-void StubCompiler::GenerateLoadConstant(JSObject* object,
-                                        JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Object* value,
-                                        String* name,
+                                        Handle<JSFunction> value,
+                                        Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(object, receiver, holder,
-                  scratch1, scratch2, scratch3, name, miss);
+  CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ Move(rax, Handle<Object>(value));
+  __ LoadHeapObject(rax, value);
   __ ret(0);
 }
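
LoadHeapObject replaces the raw Move because a constant the GC may relocate (one in new space, say) cannot be embedded in generated code; an immovable cell is embedded instead and the value loaded through it. A one-function sketch of that choice:

struct Cell { void* value; };  // immovable slot the GC keeps up to date

// movable: load indirectly through the cell; otherwise embed the pointer.
void* LoadConstant(bool movable, void* object, Cell* cell) {
  return movable ? cell->value : object;
}
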
 
 
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+                                           Handle<JSObject> interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           String* name,
+                                           Handle<String> name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1118,13 +1083,13 @@
   // and CALLBACKS, so inline only them, other cases may be added
   // later.
   bool compile_followup_inline = false;
-  if (lookup->IsProperty() && lookup->IsCacheable()) {
+  if (lookup->IsFound() && lookup->IsCacheable()) {
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo() &&
-        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
-      compile_followup_inline = true;
+               lookup->GetCallbackObject()->IsAccessorInfo()) {
+      compile_followup_inline =
+          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
     }
   }
 
@@ -1139,47 +1104,49 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    __ EnterInternalFrame();
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
 
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      // CALLBACKS case needs a receiver to be passed into C++ callback.
-      __ push(receiver);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        // CALLBACKS case needs a receiver to be passed into C++ callback.
+        __ push(receiver);
+      }
+      __ push(holder_reg);
+      __ push(name_reg);
+
+      // Invoke an interceptor.  Note: map checks from the receiver to the
+      // interceptor's holder have been compiled before (see a caller of
+      // this method).
+      CompileCallLoadPropertyWithInterceptor(masm(),
+                                             receiver,
+                                             holder_reg,
+                                             name_reg,
+                                             interceptor_holder);
+
+      // Check if interceptor provided a value for property.  If it's
+      // the case, return immediately.
+      Label interceptor_failed;
+      __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+      __ j(equal, &interceptor_failed);
+      frame_scope.GenerateLeaveFrame();
+      __ ret(0);
+
+      __ bind(&interceptor_failed);
+      __ pop(name_reg);
+      __ pop(holder_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        __ pop(receiver);
+      }
+
+      // Leave the internal frame.
     }
-    __ push(holder_reg);
-    __ push(name_reg);
-
-    // Invoke an interceptor.  Note: map checks from receiver to
-    // interceptor's holder has been compiled before (see a caller
-    // of this method.)
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           interceptor_holder);
-
-    // Check if interceptor provided a value for property.  If it's
-    // the case, return immediately.
-    Label interceptor_failed;
-    __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
-    __ j(equal, &interceptor_failed);
-    __ LeaveInternalFrame();
-    __ ret(0);
-
-    __ bind(&interceptor_failed);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      __ pop(receiver);
-    }
-
-    __ LeaveInternalFrame();
 
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   lookup->holder(),
+                                   Handle<JSObject>(lookup->holder()),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1191,15 +1158,15 @@
       // We found FIELD property in prototype chain of interceptor's holder.
       // Retrieve a field from field's holder.
       GenerateFastPropertyLoad(masm(), rax, holder_reg,
-                               lookup->holder(), lookup->GetFieldIndex());
+                               Handle<JSObject>(lookup->holder()),
+                               lookup->GetFieldIndex());
       __ ret(0);
     } else {
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
-      ASSERT(callback != NULL);
+      Handle<AccessorInfo> callback(
+          AccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
@@ -1208,7 +1175,7 @@
       __ pop(scratch2);  // return address
       __ push(receiver);
       __ push(holder_reg);
-      __ Move(holder_reg, Handle<AccessorInfo>(callback));
+      __ Move(holder_reg, callback);
       __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
       __ push(holder_reg);
       __ push(name_reg);
@@ -1237,17 +1204,17 @@
 }
 
 
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ Cmp(rcx, Handle<String>(name));
+    __ Cmp(rcx, name);
     __ j(not_equal, miss);
   }
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
-                                                   JSObject* holder,
-                                                   String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<String> name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1257,27 +1224,23 @@
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  // If the object is the holder then we know that it's a global
-  // object which can only happen for contextual calls. In this case,
-  // the receiver cannot be a smi.
-  if (object != holder) {
-    __ JumpIfSmi(rdx, miss);
-  }
 
   // Check that the maps haven't changed.
+  __ JumpIfSmi(rdx, miss);
   CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, miss);
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Label* miss) {
   // Get the value from the cell.
-  __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
+  __ Move(rdi, cell);
   __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
 
   // Check that the cell contains the same function.
-  if (heap()->InNewSpace(function)) {
+  if (heap()->InNewSpace(*function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1290,30 +1253,26 @@
     // Check the shared function info. Make sure it hasn't changed.
     __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
     __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
-    __ j(not_equal, miss);
   } else {
-    __ Cmp(rdi, Handle<JSFunction>(function));
-    __ j(not_equal, miss);
+    __ Cmp(rdi, function);
   }
+  __ j(not_equal, miss);
 }
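
The cell check above in plain terms: a function in new space cannot be compared by an embedded pointer, so the stub compares the old-space SharedFunctionInfo instead, which as the comment notes also lets closures of the same source share one stub. Types here are illustrative:

struct SharedInfo {};                       // stands in for SharedFunctionInfo
struct Function { SharedInfo* shared; };

bool CellMatches(Function* in_cell, Function* expected, bool in_new_space) {
  if (in_new_space) {
    return in_cell->shared == expected->shared;  // compare shared info
  }
  return in_cell == expected;  // old space: the pointer itself is embedded
}
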
 
 
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+  Handle<Code> code =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                kind_,
-                                               extra_ic_state_);
-  Object* obj;
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
-  return obj;
+                                               extra_state_);
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -1353,7 +1312,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
@@ -1361,19 +1320,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(FIELD, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : name
   //  -- rsp[0]              : return address
@@ -1383,10 +1342,9 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -1396,14 +1354,8 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
 
-  CheckPrototypes(JSObject::cast(object),
-                  rdx,
-                  holder,
-                  rbx,
-                  rax,
-                  rdi,
-                  name,
-                  &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                  name, &miss);
 
   if (argc == 0) {
     // Noop, return the length.
@@ -1412,53 +1364,85 @@
   } else {
     Label call_builtin;
 
-    // Get the elements array of the object.
-    __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
-
-    // Check that the elements are in fast mode and writable.
-    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
-           factory()->fixed_array_map());
-    __ j(not_equal, &call_builtin);
-
     if (argc == 1) {  // Otherwise fall through to call builtin.
-      Label exit, with_write_barrier, attempt_to_grow_elements;
+      Label attempt_to_grow_elements, with_write_barrier;
+
+      // Get the elements array of the object.
+      __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
+
+      // Check that the elements are in fast mode and writable.
+      __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
+             factory()->fixed_array_map());
+      __ j(not_equal, &call_builtin);
 
       // Get the array's length into rax and calculate new length.
       __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
       STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
       __ addl(rax, Immediate(argc));
 
-      // Get the element's length into rcx.
-      __ SmiToInteger32(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+      // Get the elements' length into rcx.
+      __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
       __ cmpl(rax, rcx);
       __ j(greater, &attempt_to_grow_elements);
 
+      // Check if value is a smi.
+      __ movq(rcx, Operand(rsp, argc * kPointerSize));
+      __ JumpIfNotSmi(rcx, &with_write_barrier);
+
       // Save new length.
       __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
-      // Push the element.
-      __ movq(rcx, Operand(rsp, argc * kPointerSize));
-      __ lea(rdx, FieldOperand(rbx,
-                               rax, times_pointer_size,
-                               FixedArray::kHeaderSize - argc * kPointerSize));
-      __ movq(Operand(rdx, 0), rcx);
+      // Store the value.
+      __ movq(FieldOperand(rdi,
+                           rax,
+                           times_pointer_size,
+                           FixedArray::kHeaderSize - argc * kPointerSize),
+              rcx);
 
-      // Check if value is a smi.
       __ Integer32ToSmi(rax, rax);  // Return new length as smi.
-
-      __ JumpIfNotSmi(rcx, &with_write_barrier);
-
-      __ bind(&exit);
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&with_write_barrier);
 
-      __ InNewSpace(rbx, rcx, equal, &exit);
+      __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
 
-      __ RecordWriteHelper(rbx, rdx, rcx);
+      if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+        Label fast_object, not_fast_object;
+        __ CheckFastObjectElements(rbx, &not_fast_object, Label::kNear);
+        __ jmp(&fast_object);
+        // In case of fast smi-only, convert to fast object, otherwise bail out.
+        __ bind(&not_fast_object);
+        __ CheckFastSmiOnlyElements(rbx, &call_builtin);
+        // rdx: receiver
+        // rbx: map
+        __ movq(r9, rdi);  // Backup rdi as it is going to be trashed.
+        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                               FAST_ELEMENTS,
+                                               rbx,
+                                               rdi,
+                                               &call_builtin);
+        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+        __ movq(rdi, r9);
+        __ bind(&fast_object);
+      } else {
+        __ CheckFastObjectElements(rbx, &call_builtin);
+      }
 
+      // Save new length.
+      __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
+      // Store the value.
+      __ lea(rdx, FieldOperand(rdi,
+                               rax, times_pointer_size,
+                               FixedArray::kHeaderSize - argc * kPointerSize));
+      __ movq(Operand(rdx, 0), rcx);
+
+      __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                     OMIT_SMI_CHECK);
+
+      __ Integer32ToSmi(rax, rax);  // Return new length as smi.
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&attempt_to_grow_elements);
@@ -1466,6 +1450,15 @@
         __ jmp(&call_builtin);
       }
 
+      __ movq(rbx, Operand(rsp, argc * kPointerSize));
+      // Growing elements that are SMI-only requires special handling in case
+      // the new element is non-Smi. For now, delegate to the builtin.
+      Label no_fast_elements_check;
+      __ JumpIfSmi(rbx, &no_fast_elements_check);
+      __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
+      __ bind(&no_fast_elements_check);
+
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate());
       ExternalReference new_space_allocation_limit =
@@ -1476,7 +1469,7 @@
       __ Load(rcx, new_space_allocation_top);
 
       // Check if it's the end of elements.
-      __ lea(rdx, FieldOperand(rbx,
+      __ lea(rdx, FieldOperand(rdi,
                                rax, times_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ cmpq(rdx, rcx);
@@ -1489,28 +1482,33 @@
 
       // We fit and could grow elements.
       __ Store(new_space_allocation_top, rcx);
-      __ movq(rcx, Operand(rsp, argc * kPointerSize));
 
       // Push the argument...
-      __ movq(Operand(rdx, 0), rcx);
+      __ movq(Operand(rdx, 0), rbx);
       // ... and fill the rest with holes.
       __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
         __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
       }
 
+      // We know the elements array is in new space so we don't need the
+      // remembered set, but we just pushed a value onto it so we may have to
+      // tell the incremental marker to rescan the object that we just grew.  We
+      // don't need to worry about the holes because they are in old space and
+      // already marked black.
+      __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+
       // Restore receiver to rdx as finish sequence assumes it's here.
       __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
       // Increment element's and array's sizes.
-      __ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
+      __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
                         Smi::FromInt(kAllocationDelta));
 
       // Make new length a smi before returning it.
       __ Integer32ToSmi(rax, rax);
       __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
-      // Elements are in new space, so write barrier is not required.
       __ ret((argc + 1) * kPointerSize);
     }
 
@@ -1522,19 +1520,19 @@
   }
 
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
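
The new fast path for Array.prototype.push has to respect elements kinds: a smi-only backing store must transition to generic object elements before a heap object is stored, otherwise the stub bails out to the builtin. A minimal model of that rule (enum names illustrative):

#include <vector>

enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

struct Array {
  ElementsKind kind;
  std::vector<long> elements;
};

void Push(Array* a, long value, bool value_is_smi) {
  if (!value_is_smi && a->kind == FAST_SMI_ONLY_ELEMENTS) {
    a->kind = FAST_ELEMENTS;  // transition before storing a heap object
    // A real store of a heap object would also need the write barrier.
  }
  a->elements.push_back(value);
}
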
 
 
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                                   JSObject* holder,
-                                                   JSGlobalPropertyCell* cell,
-                                                   JSFunction* function,
-                                                   String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : name
   //  -- rsp[0]              : return address
@@ -1544,10 +1542,9 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss, return_undefined, call_builtin;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -1557,9 +1554,8 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
 
-  CheckPrototypes(JSObject::cast(object), rdx,
-                  holder, rbx,
-                  rax, rdi, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                  name, &miss);
 
   // Get the elements array of the object.
   __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1605,20 +1601,19 @@
       1);
 
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -1628,7 +1623,7 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
 
@@ -1636,13 +1631,11 @@
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
-
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+      (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
   GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
@@ -1650,12 +1643,90 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             rax,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                  rbx, rdx, rdi, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  rax, holder, rbx, rdx, rdi, name, &miss);
 
   Register receiver = rbx;
   Register index = rdi;
+  Register result = rax;
+  __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+  if (argc > 0) {
+    __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+  } else {
+    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+  }
+
+  StringCharCodeAtGenerator generator(receiver,
+                                      index,
+                                      result,
+                                      &miss,  // When not a string.
+                                      &miss,  // When not a number.
+                                      index_out_of_range_label,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
+  __ ret((argc + 1) * kPointerSize);
+
+  StubRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(rax, Heap::kNanValueRootIndex);
+    __ ret((argc + 1) * kPointerSize);
+  }
+
+  __ bind(&miss);
+  // Restore function name in rcx.
+  __ Move(rcx, name);
+  __ bind(&name_miss);
+  GenerateMissBranch();
+
+  // Return the generated code.
+  return GetCode(function);
+}
+
+
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
+  // ----------- S t a t e -------------
+  //  -- rcx                 : function name
+  //  -- rsp[0]              : return address
+  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- ...
+  //  -- rsp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
+
+  const int argc = arguments().immediate();
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+  Label* index_out_of_range_label = &index_out_of_range;
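+  // With the default string stub, an out-of-range index is treated as a
+  // miss so the IC can transition to a stub that handles it explicitly.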
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            rax,
+                                            &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  rax, holder, rbx, rdx, rdi, name, &miss);
+
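+  // Note: the receiver and result registers alias (both rax); the generator
+  // reads the receiver before it writes the result, so this is safe.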
+  Register receiver = rax;
+  Register index = rdi;
   Register scratch = rdx;
   Register result = rax;
   __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1665,130 +1736,42 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharCodeAtGenerator char_code_at_generator(receiver,
-                                                   index,
-                                                   scratch,
-                                                   result,
-                                                   &miss,  // When not a string.
-                                                   &miss,  // When not a number.
-                                                   index_out_of_range_label,
-                                                   STRING_INDEX_IS_NUMBER);
-  char_code_at_generator.GenerateFast(masm());
+  StringCharAtGenerator generator(receiver,
+                                  index,
+                                  scratch,
+                                  result,
+                                  &miss,  // When not a string.
+                                  &miss,  // When not a number.
+                                  index_out_of_range_label,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_code_at_generator.GenerateSlow(masm(), call_helper);
-
-  if (index_out_of_range.is_linked()) {
-    __ bind(&index_out_of_range);
-    __ LoadRoot(rax, Heap::kNanValueRootIndex);
-    __ ret((argc + 1) * kPointerSize);
-  }
-
-  __ bind(&miss);
-  // Restore function name in rcx.
-  __ Move(rcx, Handle<String>(name));
-  __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
-
-  // Return the generated code.
-  return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
-  // ----------- S t a t e -------------
-  //  -- rcx                 : function name
-  //  -- rsp[0]              : return address
-  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
-  //  -- ...
-  //  -- rsp[(argc + 1) * 8] : receiver
-  // -----------------------------------
-
-  // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
-
-  const int argc = arguments().immediate();
-
-  Label miss;
-  Label name_miss;
-  Label index_out_of_range;
-  Label* index_out_of_range_label = &index_out_of_range;
-
-  if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
-       DEFAULT_STRING_STUB)) {
-    index_out_of_range_label = &miss;
-  }
-
-  GenerateNameCheck(name, &name_miss);
-
-  // Check that the maps starting from the prototype haven't changed.
-  GenerateDirectLoadGlobalFunctionPrototype(masm(),
-                                            Context::STRING_FUNCTION_INDEX,
-                                            rax,
-                                            &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                  rbx, rdx, rdi, name, &miss);
-
-  Register receiver = rax;
-  Register index = rdi;
-  Register scratch1 = rbx;
-  Register scratch2 = rdx;
-  Register result = rax;
-  __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
-  if (argc > 0) {
-    __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
-  } else {
-    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
-  }
-
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch1,
-                                          scratch2,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          index_out_of_range_label,
-                                          STRING_INDEX_IS_NUMBER);
-  char_at_generator.GenerateFast(masm());
-  __ ret((argc + 1) * kPointerSize);
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
     __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
     __ ret((argc + 1) * kPointerSize);
   }
-
   __ bind(&miss);
   // Restore function name in rcx.
-  __ Move(rcx, Handle<String>(name));
+  __ Move(rcx, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -1797,25 +1780,23 @@
   //  -- rsp[(argc + 1) * 8] : receiver
   // -----------------------------------
 
-  const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  const int argc = arguments().immediate();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
     __ JumpIfSmi(rdx, &miss);
-
-    CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -1830,17 +1811,17 @@
   // Convert the smi code to uint16.
   __ SmiAndConstant(code, code, Smi::FromInt(0xffff));
 
-  StringCharFromCodeGenerator char_from_code_generator(code, rax);
-  char_from_code_generator.GenerateFast(masm());
+  StringCharFromCodeGenerator generator(code, rax);
+  generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_from_code_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1848,29 +1829,30 @@
 
   __ bind(&miss);
   // rcx: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // TODO(872): implement this.
-  return heap()->undefined_value();
+  return Handle<Code>::null();
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
-                                                  JSObject* holder,
-                                                  JSGlobalPropertyCell* cell,
-                                                  JSFunction* function,
-                                                  String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -1879,28 +1861,25 @@
   //  -- rsp[(argc + 1) * 8] : receiver
   // -----------------------------------
 
-  const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  const int argc = arguments().immediate();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
     __ JumpIfSmi(rdx, &miss);
-
-    CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
-
   // Load the (only) argument into rax.
   __ movq(rax, Operand(rsp, 1 * kPointerSize));
 
@@ -1957,7 +1936,7 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1965,33 +1944,31 @@
 
   __ bind(&miss);
   // rcx: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   ASSERT(optimization.is_simple_api_call());
  // Bail out if object is a global object as we don't want to
  // repatch it to the global receiver.
-  if (object->IsGlobalObject()) return heap()->undefined_value();
-  if (cell != NULL) return heap()->undefined_value();
-  if (!object->IsJSObject()) return heap()->undefined_value();
+  if (object->IsGlobalObject()) return Handle<Code>::null();
+  if (!cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSObject()) return Handle<Code>::null();
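+  // A null handle signals the caller to fall back to the regular call stub.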
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-            JSObject::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+      Handle<JSObject>::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
 
   Label miss, miss_before_stack_reserved;
-
   GenerateNameCheck(name, &miss_before_stack_reserved);
 
   // Get the receiver from the stack.
@@ -2010,32 +1987,30 @@
   __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(JSObject::cast(object), rdx, holder,
-                  rbx, rax, rdi, name, depth, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                  name, depth, &miss);
 
   // Move the return address on top of the stack.
   __ movq(rax, Operand(rsp, 3 * kPointerSize));
   __ movq(Operand(rsp, 0 * kPointerSize), rax);
 
-  MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
-  if (result->IsFailure()) return result;
+  GenerateFastApiCall(masm(), optimization, argc);
 
   __ bind(&miss);
   __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
 
   __ bind(&miss_before_stack_reserved);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> function,
+                                                   Handle<String> name,
                                                    CheckType check) {
   // ----------- S t a t e -------------
   // rcx                 : function name
@@ -2048,16 +2023,14 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, NULL, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -2074,14 +2047,13 @@
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
 
   Counters* counters = isolate()->counters();
-  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(counters->call_const(), 1);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), rdx, holder,
-                      rbx, rax, rdi, name, &miss);
+      CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax,
+                      rdi, name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2092,28 +2064,25 @@
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      } else {
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         // Check that the object is a two-byte string or a symbol.
         __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
         __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                        rbx, rdx, rdi, name, &miss);
-      }
-      break;
-
-    case NUMBER_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            rax, holder, rbx, rdx, rdi, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case NUMBER_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(rdx, &fast);
@@ -2123,18 +2092,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                        rbx, rdx, rdi, name, &miss);
-      }
-      break;
-    }
-
-    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            rax, holder, rbx, rdx, rdi, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case BOOLEAN_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a boolean.
         __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
@@ -2145,17 +2114,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                        rbx, rdx, rdi, name, &miss);
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            rax, holder, rbx, rdx, rdi, name, &miss);
+      } else {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
       }
       break;
-    }
-
-    default:
-      UNREACHABLE();
   }
 
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -2163,17 +2133,16 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -2184,30 +2153,20 @@
   // rsp[(argc + 1) * 8] : argument 0 = receiver
   // -----------------------------------
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), rcx, extra_ic_state_);
-  MaybeObject* result = compiler.Compile(masm(),
-                                         object,
-                                         holder,
-                                         name,
-                                         &lookup,
-                                         rdx,
-                                         rbx,
-                                         rdi,
-                                         rax,
-                                         &miss);
-  if (result->IsFailure()) return result;
+  CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
+  compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
+                   &miss);
 
   // Restore receiver.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -2226,7 +2185,7 @@
 
   // Invoke the function.
   __ movq(rdi, rax);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
@@ -2234,19 +2193,19 @@
 
   // Handle load cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -2258,23 +2217,17 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, cell, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
   // Patch the receiver on the stack with the global proxy.
@@ -2283,45 +2236,37 @@
     __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
   }
 
-  // Setup the context (function already in rdi).
+  // Set up the context (function already in rdi).
   __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1);
-  ASSERT(function->is_compiled());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  if (V8::UseCrankshaft()) {
-    // TODO(kasperl): For now, we always call indirectly through the
-    // code field in the function to allow recompilation to take effect
-    // without changing any of the call sites.
-    __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-    __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
-                  NullCallWrapper(), call_kind);
-  } else {
-    Handle<Code> code(function->code());
-    __ InvokeCode(code, expected, arguments(),
-                  RelocInfo::CODE_TARGET, JUMP_FUNCTION,
-                  NullCallWrapper(), call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+  __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
+                NullCallWrapper(), call_kind);
+
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(NORMAL, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                   int index,
-                                                  Map* transition,
-                                                  String* name) {
+                                                  Handle<Map> transition,
+                                                  Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -2331,12 +2276,7 @@
   Label miss;
 
   // Generate store field code.  Preserves receiver and name on jump to miss.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     rdx, rcx, rbx,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -2344,13 +2284,14 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
-                                                     AccessorInfo* callback,
-                                                     String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+    Handle<JSObject> object,
+    Handle<AccessorInfo> callback,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -2359,13 +2300,9 @@
   // -----------------------------------
   Label miss;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(rdx, &miss);
-
   // Check that the map of the object hasn't changed.
-  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
-         Handle<Map>(object->map()));
-  __ j(not_equal, &miss);
+  __ CheckMap(rdx, Handle<Map>(object->map()), &miss,
+              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
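+  // CheckMap also performs the smi check on the receiver (DO_SMI_CHECK).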
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -2378,7 +2315,7 @@
 
   __ pop(rbx);  // remove the return address
   __ push(rdx);  // receiver
-  __ Push(Handle<AccessorInfo>(callback));  // callback info
+  __ Push(callback);  // callback info
   __ push(rcx);  // name
   __ push(rax);  // value
   __ push(rbx);  // restore return address
@@ -2398,8 +2335,9 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
-                                                        String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+    Handle<JSObject> receiver,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -2408,13 +2346,9 @@
   // -----------------------------------
   Label miss;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(rdx, &miss);
-
   // Check that the map of the object hasn't changed.
-  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
-         Handle<Map>(receiver->map()));
-  __ j(not_equal, &miss);
+  __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss,
+              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
 
   // Perform global security token check if needed.
   if (receiver->IsJSGlobalProxy()) {
@@ -2447,9 +2381,10 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
-                                                   JSGlobalPropertyCell* cell,
-                                                   String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+    Handle<GlobalObject> object,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -2463,17 +2398,20 @@
          Handle<Map>(object->map()));
   __ j(not_equal, &miss);
 
+  // Compute the cell operand to use.
+  __ Move(rbx, cell);
+  Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
+
  // Check that the value in the cell is not the hole. If it is, this
  // cell could have been deleted, and reintroducing the global requires
  // updating the property details in the property dictionary of the
  // global object. We bail out to the runtime system to do that.
-  __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
-  __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
-                 Heap::kTheHoleValueRootIndex);
+  __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
   __ j(equal, &miss);
 
   // Store the value in the cell.
-  __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
+  __ movq(cell_operand, rax);
+  // Cells are always rescanned, so no write barrier here.
 
   // Return the value (register rax).
   Counters* counters = isolate()->counters();
@@ -2491,10 +2429,10 @@
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                        int index,
-                                                       Map* transition,
-                                                       String* name) {
+                                                       Handle<Map> transition,
+                                                       Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax     : value
   //  -- rcx     : key
@@ -2507,16 +2445,11 @@
   __ IncrementCounter(counters->keyed_store_field(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rcx, Handle<String>(name));
+  __ Cmp(rcx, name);
   __ j(not_equal, &miss);
 
   // Generate store field code.  Preserves receiver and name on jump to miss.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     rdx, rcx, rbx,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -2525,39 +2458,38 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Code* stub;
+
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  MaybeObject* maybe_stub =
-      KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(rdx,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub =
+      KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
+
+  __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
@@ -2565,18 +2497,22 @@
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-  __ JumpIfSmi(rdx, &miss);
+  __ JumpIfSmi(rdx, &miss, Label::kNear);
 
-  Register map_reg = rbx;
-  __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
   int receiver_count = receiver_maps->length();
-  for (int current = 0; current < receiver_count; ++current) {
+  for (int i = 0; i < receiver_count; ++i) {
    // Check map and tail call if there's a match.
-    Handle<Map> map(receiver_maps->at(current));
-    __ Cmp(map_reg, map);
-    __ j(equal,
-         Handle<Code>(handler_ics->at(current)),
-         RelocInfo::CODE_TARGET);
+    __ Cmp(rdi, receiver_maps->at(i));
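+    // If this map requires an elements-kind transition, pass the target
+    // map to the handler stub in rbx before tail-calling it.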
+    if (transitioned_maps->at(i).is_null()) {
+      __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
+    } else {
+      Label next_map;
+      __ j(not_equal, &next_map, Label::kNear);
+      __ movq(rbx, transitioned_maps->at(i), RelocInfo::EMBEDDED_OBJECT);
+      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+      __ bind(&next_map);
+    }
   }
 
   __ bind(&miss);
@@ -2584,13 +2520,13 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
-                                                      JSObject* object,
-                                                      JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+                                                      Handle<JSObject> object,
+                                                      Handle<JSObject> last) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2609,15 +2545,8 @@
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
-    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
-                                                  GlobalObject::cast(last),
-                                                  name,
-                                                  rdx,
-                                                  &miss);
-    if (cell->IsFailure()) {
-      miss.Unuse();
-      return cell;
-    }
+    GenerateCheckPropertyCell(
+        masm(), Handle<GlobalObject>::cast(last), name, rdx, &miss);
   }
 
  // Return undefined if maps of the full prototype chain are still the
  // same and no global property with this name contains a value.
  __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
  __ ret(0);
 
  __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NONEXISTENT, heap()->empty_string());
+  return GetCode(NONEXISTENT, factory()->empty_string());
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2653,24 +2582,19 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-
-  MaybeObject* result = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx,
-                                             rdi, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, callback,
+                       name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2679,10 +2603,10 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* value,
-                                                   String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> value,
+                                                   Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2699,32 +2623,22 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // TODO(368): Compile in the whole chain: all the interceptors in
   // prototypes and ultimate answer.
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          rax,
-                          rcx,
-                          rdx,
-                          rbx,
-                          rdi,
-                          name,
-                          &miss);
-
+  GenerateLoadInterceptor(receiver, holder, &lookup, rax, rcx, rdx, rbx, rdi,
+                          name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2733,11 +2647,12 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 String* name,
-                                                 bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name,
+    bool is_dont_delete) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2745,18 +2660,12 @@
   // -----------------------------------
   Label miss;
 
-  // If the object is the holder then we know that it's a global
-  // object which can only happen for contextual loads. In this case,
-  // the receiver cannot be a smi.
-  if (object != holder) {
-    __ JumpIfSmi(rax, &miss);
-  }
-
   // Check that the maps haven't changed.
+  __ JumpIfSmi(rax, &miss);
   CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss);
 
   // Get the value from the cell.
-  __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+  __ Move(rbx, cell);
   __ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -2782,9 +2691,9 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
-                                                     JSObject* receiver,
-                                                     JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+                                                     Handle<JSObject> receiver,
+                                                     Handle<JSObject> holder,
                                                      int index) {
   // ----------- S t a t e -------------
   //  -- rax     : key
@@ -2797,7 +2706,7 @@
   __ IncrementCounter(counters->keyed_load_field(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
   GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
@@ -2811,34 +2720,27 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
-    String* name,
-    JSObject* receiver,
-    JSObject* holder,
-    AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- rax     : key
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
   Label miss;
-
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->keyed_load_callback(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
-  MaybeObject* result = GenerateLoadCallback(receiver, holder, rdx, rax, rbx,
-                                             rcx, rdi, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, callback,
+                       name, &miss);
   __ bind(&miss);
-
   __ DecrementCounter(counters->keyed_load_callback(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -2847,10 +2749,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
-                                                        JSObject* receiver,
-                                                        JSObject* holder,
-                                                        Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<JSFunction> value) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2862,7 +2765,7 @@
   __ IncrementCounter(counters->keyed_load_constant_function(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
   GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
@@ -2876,35 +2779,27 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                           JSObject* holder,
-                                                           String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
   Label miss;
-
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->keyed_load_interceptor(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          rdx,
-                          rax,
-                          rcx,
-                          rbx,
-                          rdi,
-                          name,
-                          &miss);
+  GenerateLoadInterceptor(receiver, holder, &lookup, rdx, rax, rcx, rbx, rdi,
+                          name, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_interceptor(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2914,7 +2809,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2926,7 +2822,7 @@
   __ IncrementCounter(counters->keyed_load_array_length(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
   GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
@@ -2939,7 +2835,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2951,7 +2848,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
   GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
@@ -2964,7 +2861,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2976,7 +2874,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
   GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
@@ -2989,32 +2887,29 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
-  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(rdx,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+  __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -3028,24 +2923,22 @@
   int receiver_count = receiver_maps->length();
   for (int current = 0; current < receiver_count; ++current) {
    // Check map and tail call if there's a match.
-    Handle<Map> map(receiver_maps->at(current));
-    __ Cmp(map_reg, map);
-    __ j(equal,
-         Handle<Code>(handler_ics->at(current)),
-         RelocInfo::CODE_TARGET);
+    __ Cmp(map_reg, receiver_maps->at(current));
+    __ j(equal, handler_ics->at(current), RelocInfo::CODE_TARGET);
   }
 
   __  bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
 
 // Specialized stub for constructing objects from functions which only have
 // simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+    Handle<JSFunction> function) {
   // ----------- S t a t e -------------
   //  -- rax : argc
   //  -- rdi : constructor
@@ -3088,12 +2981,8 @@
   // rbx: initial map
   __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
   __ shl(rcx, Immediate(kPointerSizeLog2));
-  __ AllocateInNewSpace(rcx,
-                        rdx,
-                        rcx,
-                        no_reg,
-                        &generic_stub_call,
-                        NO_ALLOCATION_FLAGS);
+  __ AllocateInNewSpace(rcx, rdx, rcx, no_reg,
+                        &generic_stub_call, NO_ALLOCATION_FLAGS);
 
  // Allocated the JSObject; now initialize the fields and add the heap tag.
   // rbx: initial map
@@ -3118,7 +3007,7 @@
   // r9: first in-object property of the JSObject
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  SharedFunctionInfo* shared = function->shared();
+  Handle<SharedFunctionInfo> shared(function->shared());
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       // Check if the argument assigned to the property is actually passed.
@@ -3166,10 +3055,8 @@
   // Jump to the generic stub in case the specialized code cannot handle the
   // construction.
   __ bind(&generic_stub_call);
-  Code* code =
-      isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
-  Handle<Code> generic_construct_stub(code);
-  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+  Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
+  __ Jump(code, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
   return GetCode();
@@ -3436,6 +3323,7 @@
       __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3503,6 +3391,7 @@
         case EXTERNAL_FLOAT_ELEMENTS:
         case EXTERNAL_DOUBLE_ELEMENTS:
         case FAST_ELEMENTS:
+        case FAST_SMI_ONLY_ELEMENTS:
         case FAST_DOUBLE_ELEMENTS:
         case DICTIONARY_ELEMENTS:
         case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3634,15 +3523,19 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
-                                                      bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+    MacroAssembler* masm,
+    bool is_js_array,
+    ElementsKind elements_kind,
+    KeyedAccessGrowMode grow_mode) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label miss_force_generic;
+  Label miss_force_generic, transition_elements_kind, finish_store, grow;
+  Label check_capacity, slow;
 
  // This stub is meant to be tail-jumped to; the receiver must already
  // have been verified by the caller not to be a smi.
@@ -3650,28 +3543,45 @@
   // Check that the key is a smi.
   __ JumpIfNotSmi(rcx, &miss_force_generic);
 
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
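+    // A non-smi value cannot be stored in a smi-only array without an
+    // elements-kind transition, which the KeyedStoreIC miss stub performs.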
+    __ JumpIfNotSmi(rax, &transition_elements_kind);
+  }
+
   // Get the elements array and make sure it is a fast element array, not 'cow'.
   __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &miss_force_generic);
-
   // Check that the key is within bounds.
   if (is_js_array) {
     __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
-    __ j(above_equal, &miss_force_generic);
+    if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+      __ j(above_equal, &grow);
+    } else {
+      __ j(above_equal, &miss_force_generic);
+    }
   } else {
     __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
     __ j(above_equal, &miss_force_generic);
   }
 
-  // Do the store and update the write barrier. Make sure to preserve
-  // the value in register eax.
-  __ movq(rdx, rax);
-  __ SmiToInteger32(rcx, rcx);
-  __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
-          rax);
-  __ RecordWrite(rdi, 0, rdx, rcx);
+  __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &miss_force_generic);
+
+  __ bind(&finish_store);
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ SmiToInteger32(rcx, rcx);
+    __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+            rax);
+  } else {
+    // Do the store and update the write barrier.
+    ASSERT(elements_kind == FAST_ELEMENTS);
+    __ SmiToInteger32(rcx, rcx);
+    __ lea(rcx,
+           FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
+    __ movq(Operand(rcx, 0), rax);
+    // Make sure to preserve the value in register rax.
+    __ movq(rbx, rax);
+    __ RecordWrite(rdi, rcx, rbx, kDontSaveFPRegs);
+  }
 
   // Done.
   __ ret(0);
@@ -3681,20 +3591,93 @@
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    // Grow the array by a single element if possible.
+    __ bind(&grow);
+
+    // Make sure the array is only growing by a single element; anything else
+    // must be handled by the runtime. Flags are already set by the previous
+    // compare.
+    __ j(not_equal, &miss_force_generic);
+
+    // Check for the empty array, and preallocate a small backing store if
+    // possible.
+    __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+    __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
+    __ j(not_equal, &check_capacity);
+
+    int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+    __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
+
+    // rax: value
+    // rcx: key
+    // rdx: receiver
+    // rdi: elements
+    // Initialize the new FixedArray backing store.
+    __ Move(FieldOperand(rdi, JSObject::kMapOffset),
+            masm->isolate()->factory()->fixed_array_map());
+    __ Move(FieldOperand(rdi, FixedArray::kLengthOffset),
+            Smi::FromInt(JSArray::kPreallocatedArrayElements));
+    __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
+    for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+      __ movq(FieldOperand(rdi, FixedArray::SizeFor(i)), rbx);
+    }
+
+    // Store the element at index zero.
+    __ movq(FieldOperand(rdi, FixedArray::SizeFor(0)), rax);
+
+    // Install the new backing store in the JSArray.
+    __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
+    __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
+                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+    // Increment the length of the array.
+    __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
+    __ ret(0);
+
+    __ bind(&check_capacity);
+    // Check for COW elements; in general they are not handled by this stub.
+    __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
+                   Heap::kFixedCOWArrayMapRootIndex);
+    __ j(equal, &miss_force_generic);
+
+    // rax: value
+    // rcx: key
+    // rdx: receiver
+    // rdi: elements
+    // Make sure that the backing store can hold additional elements.
+    __ cmpq(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+    __ j(above_equal, &slow);
+
+    // Grow the array and finish the store.
+    __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
+                      Smi::FromInt(1));
+    __ jmp(&finish_store);
+
+    __ bind(&slow);
+    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+    __ jmp(ic_slow, RelocInfo::CODE_TARGET);
+  }
 }
 
 
 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     MacroAssembler* masm,
-    bool is_js_array) {
+    bool is_js_array,
+    KeyedAccessGrowMode grow_mode) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label miss_force_generic, smi_value, is_nan, maybe_nan;
-  Label have_double_value, not_nan;
+  Label miss_force_generic, transition_elements_kind, finish_store;
+  Label grow, slow, check_capacity;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller to not be a smi.
@@ -3708,57 +3691,22 @@
 
   // Check that the key is within bounds.
   if (is_js_array) {
-    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+    if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+      __ j(above_equal, &grow);
+    } else {
+      __ j(above_equal, &miss_force_generic);
+    }
   } else {
     __ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
+    __ j(above_equal, &miss_force_generic);
   }
-  __ j(above_equal, &miss_force_generic);
 
   // Handle smi values specially
-  __ JumpIfSmi(rax, &smi_value, Label::kNear);
-
-  __ CheckMap(rax,
-              masm->isolate()->factory()->heap_number_map(),
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
-
-  // Double value, canonicalize NaN.
-  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
-  __ cmpl(FieldOperand(rax, offset),
-          Immediate(kNaNOrInfinityLowerBoundUpper32));
-  __ j(greater_equal, &maybe_nan, Label::kNear);
-
-  __ bind(&not_nan);
-  __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
-  __ bind(&have_double_value);
+  __ bind(&finish_store);
   __ SmiToInteger32(rcx, rcx);
-  __ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize),
-           xmm0);
-  __ ret(0);
-
-  __ bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  __ j(greater, &is_nan, Label::kNear);
-  __ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0));
-  __ j(zero, &not_nan);
-  __ bind(&is_nan);
-  // Convert all NaNs to the same canonical NaN value when they are stored in
-  // the double array.
-  __ Set(kScratchRegister, BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
-  __ movq(xmm0, kScratchRegister);
-  __ jmp(&have_double_value, Label::kNear);
-
-  __ bind(&smi_value);
-  // Value is a smi. convert to a double and store.
-  // Preserve original value.
-  __ SmiToInteger32(rdx, rax);
-  __ push(rdx);
-  __ fild_s(Operand(rsp, 0));
-  __ pop(rdx);
-  __ SmiToInteger32(rcx, rcx);
-  __ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize));
+  __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
+                                 &transition_elements_kind);
   __ ret(0);
 
   // Handle store cache miss, replacing the ic with the generic stub.
@@ -3766,6 +3714,77 @@
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  // Restore smi-tagging of rcx.
+  __ Integer32ToSmi(rcx, rcx);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    // Grow the array by a single element if possible.
+    __ bind(&grow);
+
+    // Make sure the array is only growing by a single element; anything else
+    // must be handled by the runtime. Flags are already set by the previous
+    // compare.
+    __ j(not_equal, &miss_force_generic);
+
+    // Transition on values that can't be stored in a FixedDoubleArray.
+    Label value_is_smi;
+    __ JumpIfSmi(rax, &value_is_smi);
+    __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(not_equal, &transition_elements_kind);
+    __ bind(&value_is_smi);
+
+    // Check for the empty array, and preallocate a small backing store if
+    // possible.
+    __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+    __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
+    __ j(not_equal, &check_capacity);
+
+    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+    __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
+
+    // rax: value
+    // rcx: key
+    // rdx: receiver
+    // rdi: elements
+    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+    // efficiency; they are guaranteed to be initialized before use.
+    __ Move(FieldOperand(rdi, JSObject::kMapOffset),
+            masm->isolate()->factory()->fixed_double_array_map());
+    __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
+            Smi::FromInt(JSArray::kPreallocatedArrayElements));
+
+    // Install the new backing store in the JSArray.
+    __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
+    __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
+                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+    // Increment the length of the array.
+    __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
+    __ jmp(&finish_store);
+
+    __ bind(&check_capacity);
+    // rax: value
+    // rcx: key
+    // rdx: receiver
+    // rdi: elements
+    // Make sure that the backing store can hold additional elements.
+    __ cmpq(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
+    __ j(above_equal, &slow);
+
+    // Grow the array and finish the store.
+    __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
+                      Smi::FromInt(1));
+    __ jmp(&finish_store);
+
+    __ bind(&slow);
+    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+    __ jmp(ic_slow, RelocInfo::CODE_TARGET);
+  }
 }
 
 
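The grow paths added to both keyed-store stubs above implement a deliberately
narrow policy: only a store at index == length may grow the array, and only by
one element; an empty backing store is first preallocated to
JSArray::kPreallocatedArrayElements slots; everything else (a gap past the
end, a COW backing store, exhausted capacity) is handed off to the runtime
through the miss/slow stubs. A minimal standalone C++ sketch of that policy,
not V8 code (kPreallocated stands in for kPreallocatedArrayElements, and the
boolean result models the jump to the runtime):

    #include <cstddef>
    #include <vector>

    static const size_t kPreallocated = 4;  // stand-in for the V8 constant

    // Returns false where the stub would jump to the runtime (miss/slow).
    bool StoreWithGrowth(std::vector<int>* elements, size_t* length,
                         size_t key, int value) {
      if (key > *length) return false;               // &miss_force_generic
      if (key == *length) {                          // &grow
        if (elements->empty()) elements->resize(kPreallocated);
        if (key >= elements->size()) return false;   // &slow: runtime reallocates
        ++*length;                                    // SmiAddConstant on the length
      }
      (*elements)[key] = value;                       // &finish_store
      return true;
    }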
diff --git a/src/zone-inl.h b/src/zone-inl.h
index 4870105..ee96ec0 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,31 +28,30 @@
 #ifndef V8_ZONE_INL_H_
 #define V8_ZONE_INL_H_
 
-#include "isolate.h"
 #include "zone.h"
+
+#include "counters.h"
+#include "isolate.h"
+#include "utils.h"
 #include "v8-counters.h"
 
 namespace v8 {
 namespace internal {
 
 
-AssertNoZoneAllocation::AssertNoZoneAllocation()
-    : prev_(Isolate::Current()->zone_allow_allocation()) {
-  Isolate::Current()->set_zone_allow_allocation(false);
-}
-
-
-AssertNoZoneAllocation::~AssertNoZoneAllocation() {
-  Isolate::Current()->set_zone_allow_allocation(prev_);
-}
-
-
 inline void* Zone::New(int size) {
-  ASSERT(Isolate::Current()->zone_allow_allocation());
   ASSERT(ZoneScope::nesting() > 0);
   // Round up the requested size to fit the alignment.
   size = RoundUp(size, kAlignment);
 
+  // If the allocation size is divisible by 8 then we return an 8-byte aligned
+  // address.
+  if (kPointerSize == 4 && kAlignment == 4) {
+    position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
+  } else {
+    ASSERT(kAlignment >= kPointerSize);
+  }
+
   // Check if the requested size is available without expanding.
   Address result = position_;
 
diff --git a/src/zone.cc b/src/zone.cc
index 2d14d13..d5d05ab 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
+#include <string.h>
 
+#include "v8.h"
 #include "zone-inl.h"
-#include "splay-tree-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/zone.h b/src/zone.h
index f60ac0d..8648465 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,11 @@
 #define V8_ZONE_H_
 
 #include "allocation.h"
+#include "checks.h"
+#include "hashmap.h"
+#include "globals.h"
+#include "list.h"
+#include "splay-tree.h"
 
 namespace v8 {
 namespace internal {
@@ -42,6 +47,7 @@
 };
 
 class Segment;
+class Isolate;
 
 // The Zone supports very fast allocation of small chunks of
 // memory. The chunks cannot be deallocated individually, but instead
@@ -86,7 +92,9 @@
   friend class Isolate;
   friend class ZoneScope;
 
-  // All pointers returned from New() have this alignment.
+  // All pointers returned from New() have this alignment.  In addition, if the
+  // object being allocated has a size that is divisible by 8 then its alignment
+  // will be 8.
   static const int kAlignment = kPointerSize;
 
   // Never allocate segments smaller than this size in bytes.
@@ -156,15 +164,6 @@
 };
 
 
-class AssertNoZoneAllocation {
- public:
-  inline AssertNoZoneAllocation();
-  inline ~AssertNoZoneAllocation();
- private:
-  bool prev_;
-};
-
-
 // The ZoneListAllocationPolicy is used to specialize the GenericList
 // implementation to allocate ZoneLists and their elements in the
 // Zone.
@@ -241,6 +240,8 @@
 };
 
 
+typedef TemplateHashMapImpl<ZoneListAllocationPolicy> ZoneHashMap;
+
 } }  // namespace v8::internal
 
 #endif  // V8_ZONE_H_
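The ZoneHashMap typedef added above specializes TemplateHashMapImpl with
ZoneListAllocationPolicy, so the table's backing storage is carved out of the
current Zone and reclaimed wholesale when the zone dies; nothing is ever freed
per entry. A usage sketch, assuming the Entry/Lookup(key, hash, insert) shape
exposed by src/hashmap.h (an assumption here; check the header for the exact
signatures):

    // All table storage is zone-allocated; there is no explicit free.
    void RecordName(ZoneHashMap* map, void* name, uint32_t hash) {
      ZoneHashMap::Entry* entry = map->Lookup(name, hash, true /* insert */);
      if (entry->value == NULL) {
        entry->value = name;  // first occurrence of this key
      }
    }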
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index 5c92671..bcd1e98 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -1,4 +1,4 @@
-# Copyright 2008 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -62,6 +62,7 @@
     'test-conversions.cc',
     'test-cpu-profiler.cc',
     'test-dataflow.cc',
+    'test-date.cc',
     'test-debug.cc',
     'test-decls.cc',
     'test-deoptimization.cc',
@@ -86,6 +87,7 @@
     'test-parsing.cc',
     'test-platform-tls.cc',
     'test-profile-generator.cc',
+    'test-random.cc',
     'test-regexp.cc',
     'test-reloc-info.cc',
     'test-serialize.cc',
@@ -111,7 +113,8 @@
   ],
   'arch:x64': ['test-assembler-x64.cc',
                'test-macro-assembler-x64.cc',
-               'test-log-stack-tracer.cc'],
+               'test-log-stack-tracer.cc',
+               'test-disasm-x64.cc'],
   'arch:mips': ['test-assembler-mips.cc',
                 'test-disasm-mips.cc'],
   'os:linux':  ['test-platform-linux.cc'],
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index 5d0cab3..a242fe3 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -57,6 +57,7 @@
         'test-conversions.cc',
         'test-cpu-profiler.cc',
         'test-dataflow.cc',
+        'test-date.cc',
         'test-debug.cc',
         'test-decls.cc',
         'test-deoptimization.cc',
@@ -68,6 +69,7 @@
         'test-fixed-dtoa.cc',
         'test-flags.cc',
         'test-func-name-inference.cc',
+        'test-hashing.cc',
         'test-hashmap.cc',
         'test-heap.cc',
         'test-heap-profiler.cc',
@@ -80,6 +82,7 @@
         'test-parsing.cc',
         'test-platform-tls.cc',
         'test-profile-generator.cc',
+        'test-random.cc',
         'test-regexp.cc',
         'test-reloc-info.cc',
         'test-serialize.cc',
@@ -91,7 +94,8 @@
         'test-threads.cc',
         'test-unbound-queue.cc',
         'test-utils.cc',
-        'test-version.cc'
+        'test-version.cc',
+        'test-weakmaps.cc'
       ],
       'conditions': [
         ['v8_target_arch=="ia32"', {
@@ -134,6 +138,12 @@
           'sources': [
             'test-platform-win32.cc',
           ],
+          'msvs_settings': {
+            'VCCLCompilerTool': {
+              # MSVS wants this for gay-{precision,shortest}.cc.
+              'AdditionalOptions': ['/bigobj'],
+            },
+          },
         }],
         ['component=="shared_library"', {
           # cctest can't be built against a shared library, so we need to
diff --git a/test/cctest/cctest.h b/test/cctest/cctest.h
index c04d893..0b93562 100644
--- a/test/cctest/cctest.h
+++ b/test/cctest/cctest.h
@@ -104,7 +104,7 @@
                     FOURTH_PART,
                     LAST_PART = FOURTH_PART };
 
-  static void Setup(PartOfTest part);
+  static void SetUp(PartOfTest part);
   static void RunAllTests();
   static void TearDown();
   // This method switches threads if we are running the Threading test.
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 5122da5..af28be1 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -33,11 +33,22 @@
 # BUG(382): Weird test. Can't guarantee that it never times out.
 test-api/ApplyInterruption: PASS || TIMEOUT
 
+# BUG(484): This test, which we thought was originally corrected in r5236,
+# is re-appearing. Disabled until the bug in the test is fixed. It only
+# fails when the snapshot is on, so it is marked PASS || FAIL.
+test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
+
 # These tests always fail.  They are here to test test.py.  If
 # they don't fail then test.py has failed.
 test-serialize/TestThatAlwaysFails: FAIL
 test-serialize/DependentTestThatAlwaysFails: FAIL
 
+# TODO(gc): Temporarily disabled in the GC branch.
+test-log/EquivalenceOfLoggingAndTraversal: PASS || FAIL
+
+# BUG(1261): Flaky test.
+test-profile-generator/RecordStackTraceAtStartProfiling: PASS || FAIL
+
 # We do not yet shrink weak maps after they have been emptied by the GC
 test-weakmaps/Shrinking: FAIL
 
@@ -73,10 +84,8 @@
 
 
 ##############################################################################
-[ $arch == mips ]
-test-deoptimization: SKIP
-test-serialize: SKIP
+[ $arch == mips && $crankshaft ]
 
-# Tests that may time out.
-test-api/ExternalArrays: PASS || TIMEOUT
-test-api/Threading: PASS || TIMEOUT
+# Tests that time out with crankshaft.
+test-debug/ThreadedDebugging: SKIP
+test-debug/DebugBreakLoop: SKIP
diff --git a/test/cctest/test-accessors.cc b/test/cctest/test-accessors.cc
index d95536d..b1900f9 100644
--- a/test/cctest/test-accessors.cc
+++ b/test/cctest/test-accessors.cc
@@ -241,7 +241,7 @@
   ApiTestFuzzer::Fuzz();
   CHECK(info.This() == info.Holder());
   CHECK(info.Data()->Equals(v8::String::New("data")));
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK(info.This() == info.Holder());
   CHECK(info.Data()->Equals(v8::String::New("data")));
   return v8::Integer::New(17);
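The CollectAllGarbage rewrites that recur through these tests track an API
change: the old boolean force-compaction argument becomes a GC-flags bitmask,
which matters now that incremental marking may be in progress when a test
requests a collection. The variants used in this diff, side by side (a summary
of the call sites here, not a complete API description):

    HEAP->CollectAllGarbage(false);                // old: bool force_compaction
    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);  // new: plain full GC
    // Stop incremental marking before collecting, for deterministic counts:
    HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
    // Strongest form, used where a test must observe every dead object
    // (e.g. the dispose_count checks):
    HEAP->CollectAllAvailableGarbage();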
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index 9767192..769fe7b 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -72,11 +72,29 @@
   }
   CHECK(!heap->AllocateRawAsciiString(100, TENURED)->IsFailure());
 
-  // Large object space.
-  while (!heap->OldGenerationAllocationLimitReached()) {
-    CHECK(!heap->AllocateFixedArray(10000, TENURED)->IsFailure());
+  // Old pointer space.
+  OldSpace* old_pointer_space = heap->old_pointer_space();
+  static const int kOldPointerSpaceFillerLength = 10000;
+  static const int kOldPointerSpaceFillerSize = FixedArray::SizeFor(
+      kOldPointerSpaceFillerLength);
+  while (old_pointer_space->Available() > kOldPointerSpaceFillerSize) {
+    CHECK(!heap->AllocateFixedArray(kOldPointerSpaceFillerLength, TENURED)->
+          IsFailure());
   }
-  CHECK(!heap->AllocateFixedArray(10000, TENURED)->IsFailure());
+  CHECK(!heap->AllocateFixedArray(kOldPointerSpaceFillerLength, TENURED)->
+        IsFailure());
+
+  // Large object space.
+  static const int kLargeObjectSpaceFillerLength = 300000;
+  static const int kLargeObjectSpaceFillerSize = FixedArray::SizeFor(
+      kLargeObjectSpaceFillerLength);
+  ASSERT(kLargeObjectSpaceFillerSize > heap->old_pointer_space()->AreaSize());
+  while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
+    CHECK(!heap->AllocateFixedArray(kLargeObjectSpaceFillerLength, TENURED)->
+          IsFailure());
+  }
+  CHECK(!heap->AllocateFixedArray(kLargeObjectSpaceFillerLength, TENURED)->
+        IsFailure());
 
   // Map space.
   MapSpace* map_space = heap->map_space();
@@ -175,20 +193,20 @@
 // Plain old data class.  Represents a block of allocated memory.
 class Block {
  public:
-  Block(void* base_arg, int size_arg)
+  Block(Address base_arg, int size_arg)
       : base(base_arg), size(size_arg) {}
 
-  void *base;
+  Address base;
   int size;
 };
 
 
 TEST(CodeRange) {
-  const int code_range_size = 16*MB;
-  OS::Setup();
+  const int code_range_size = 32*MB;
+  OS::SetUp();
   Isolate::Current()->InitializeLoggingAndCounters();
   CodeRange* code_range = new CodeRange(Isolate::Current());
-  code_range->Setup(code_range_size);
+  code_range->SetUp(code_range_size);
   int current_allocated = 0;
   int total_allocated = 0;
   List<Block> blocks(1000);
@@ -196,11 +214,15 @@
   while (total_allocated < 5 * code_range_size) {
     if (current_allocated < code_range_size / 10) {
       // Allocate a block.
-      // Geometrically distributed sizes, greater than Page::kPageSize.
-      size_t requested = (Page::kPageSize << (Pseudorandom() % 6)) +
-           Pseudorandom() % 5000 + 1;
+      // Geometrically distributed sizes, greater than
+      // Page::kMaxNonCodeHeapObjectSize (which is greater than the code page
+      // area).
+      // TODO(gc): Instead of using 3, use some constant based on
+      // code_range_size and kMaxHeapObjectSize.
+      size_t requested =
+          (Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
+          Pseudorandom() % 5000 + 1;
       size_t allocated = 0;
-      void* base = code_range->AllocateRawMemory(requested, &allocated);
+      Address base = code_range->AllocateRawMemory(requested, &allocated);
       CHECK(base != NULL);
       blocks.Add(Block(base, static_cast<int>(allocated)));
       current_allocated += static_cast<int>(allocated);
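The reworked CodeRange test doubles the range to 32MB and draws request sizes
from a geometric distribution anchored at Page::kMaxNonCodeHeapObjectSize,
keeping roughly a tenth of the range allocated at any time. The size
distribution in isolation (Pseudorandom() is the test's own helper; any
uniform generator shows the same shape):

    #include <cstdlib>

    // Each doubling of the request size is half as likely, mirroring
    // (base << (rand % 3)) + rand % 5000 + 1 in the loop above.
    size_t GeometricRequest(size_t base_size) {
      return (base_size << (std::rand() % 3)) + std::rand() % 5000 + 1;
    }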
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index c1c8aae..b1a23c1 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -80,6 +80,11 @@
   CHECK_EQ(expected, *ascii);
 }
 
+static void ExpectInt32(const char* code, int expected) {
+  Local<Value> result = CompileRun(code);
+  CHECK(result->IsInt32());
+  CHECK_EQ(expected, result->Int32Value());
+}
 
 static void ExpectBoolean(const char* code, bool expected) {
   Local<Value> result = CompileRun(code);
@@ -393,11 +398,11 @@
     CHECK(source->IsExternal());
     CHECK_EQ(resource,
              static_cast<TestResource*>(source->GetExternalStringResource()));
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     CHECK_EQ(0, dispose_count);
   }
   v8::internal::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(1, dispose_count);
 }
 
@@ -415,11 +420,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     CHECK_EQ(0, dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(1, dispose_count);
 }
 
@@ -441,11 +446,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     CHECK_EQ(0, dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
   CHECK_EQ(1, dispose_count);
 }
 
@@ -467,11 +472,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     CHECK_EQ(0, dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
   CHECK_EQ(1, dispose_count);
 }
 
@@ -484,7 +489,7 @@
   HEAP->CollectGarbage(i::NEW_SPACE);
   HEAP->CollectGarbage(i::NEW_SPACE);
 
-  uint16_t* two_byte_string = AsciiToTwoByteString("small");
+  uint16_t* two_byte_string = AsciiToTwoByteString("s1");
   Local<String> small_string = String::New(two_byte_string);
   i::DeleteArray(two_byte_string);
 
@@ -496,7 +501,7 @@
   // Old space strings should be accepted.
   CHECK(small_string->CanMakeExternal());
 
-  two_byte_string = AsciiToTwoByteString("small 2");
+  two_byte_string = AsciiToTwoByteString("small string 2");
   small_string = String::New(two_byte_string);
   i::DeleteArray(two_byte_string);
 
@@ -530,7 +535,7 @@
   HEAP->CollectGarbage(i::NEW_SPACE);
   HEAP->CollectGarbage(i::NEW_SPACE);
 
-  Local<String> small_string = String::New("small");
+  Local<String> small_string = String::New("s1");
   // We should refuse to externalize newly created small string.
   CHECK(!small_string->CanMakeExternal());
   // Trigger GCs so that the newly allocated string moves to old gen.
@@ -539,7 +544,7 @@
   // Old space strings should be accepted.
   CHECK(small_string->CanMakeExternal());
 
-  small_string = String::New("small 2");
+  small_string = String::New("small string 2");
   // We should refuse externalizing newly created small string.
   CHECK(!small_string->CanMakeExternal());
   for (int i = 0; i < 100; i++) {
@@ -572,8 +577,8 @@
     i::Handle<i::String> isymbol = FACTORY->SymbolFromString(istring);
     CHECK(isymbol->IsSymbol());
   }
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -590,8 +595,8 @@
     i::Handle<i::String> isymbol = FACTORY->SymbolFromString(istring);
     CHECK(isymbol->IsSymbol());
   }
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -672,11 +677,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllAvailableGarbage();
     CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
   CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
 
@@ -693,11 +698,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllAvailableGarbage();
     CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
   CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_count);
 }
@@ -744,8 +749,8 @@
     CHECK_EQ(68, value->Int32Value());
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -1182,9 +1187,8 @@
   templ->Set("x", v8_num(200));
   templ->SetAccessor(v8_str("m"), GetM);
   LocalContext env(0, templ);
-  v8::Handle<v8::Object> obj = env->Global();
-  v8::Handle<Script> script = v8_compile("dummy()");
-  v8::Handle<Value> result = script->Run();
+  v8::Handle<Script> script(v8_compile("dummy()"));
+  v8::Handle<Value> result(script->Run());
   CHECK_EQ(13.4, result->NumberValue());
   CHECK_EQ(200, v8_compile("x")->Run()->Int32Value());
   CHECK_EQ(876, v8_compile("m")->Run()->Int32Value());
@@ -1294,6 +1298,279 @@
   return name;
 }
 
+// Helper functions for Interceptor/Accessor interaction tests
+
+Handle<Value> SimpleAccessorGetter(Local<String> name,
+                                   const AccessorInfo& info) {
+  Handle<Object> self = info.This();
+  return self->Get(String::Concat(v8_str("accessor_"), name));
+}
+
+void SimpleAccessorSetter(Local<String> name, Local<Value> value,
+                          const AccessorInfo& info) {
+  Handle<Object> self = info.This();
+  self->Set(String::Concat(v8_str("accessor_"), name), value);
+}
+
+Handle<Value> EmptyInterceptorGetter(Local<String> name,
+                                     const AccessorInfo& info) {
+  return Handle<Value>();
+}
+
+Handle<Value> EmptyInterceptorSetter(Local<String> name,
+                                     Local<Value> value,
+                                     const AccessorInfo& info) {
+  return Handle<Value>();
+}
+
+Handle<Value> InterceptorGetter(Local<String> name,
+                                const AccessorInfo& info) {
+  // Intercept names that start with 'interceptor_'.
+  String::AsciiValue ascii(name);
+  char* name_str = *ascii;
+  char prefix[] = "interceptor_";
+  int i;
+  for (i = 0; name_str[i] && prefix[i]; ++i) {
+    if (name_str[i] != prefix[i]) return Handle<Value>();
+  }
+  Handle<Object> self = info.This();
+  return self->GetHiddenValue(v8_str(name_str + i));
+}
+
+Handle<Value> InterceptorSetter(Local<String> name,
+                                Local<Value> value,
+                                const AccessorInfo& info) {
+  // Intercept accesses that set certain integer values.
+  if (value->IsInt32() && value->Int32Value() < 10000) {
+    Handle<Object> self = info.This();
+    self->SetHiddenValue(name, value);
+    return value;
+  }
+  return Handle<Value>();
+}
+
+void AddAccessor(Handle<FunctionTemplate> templ,
+                 Handle<String> name,
+                 v8::AccessorGetter getter,
+                 v8::AccessorSetter setter) {
+  templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
+}
+
+void AddInterceptor(Handle<FunctionTemplate> templ,
+                    v8::NamedPropertyGetter getter,
+                    v8::NamedPropertySetter setter) {
+  templ->InstanceTemplate()->SetNamedPropertyHandler(getter, setter);
+}
+
+THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddAccessor(parent, v8_str("age"),
+              SimpleAccessorGetter, SimpleAccessorSetter);
+  AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "child.age = 10;");
+  ExpectBoolean("child.hasOwnProperty('age')", false);
+  ExpectInt32("child.age", 10);
+  ExpectInt32("child.accessor_age", 10);
+}
+
+THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "var parent = child.__proto__;"
+             "Object.defineProperty(parent, 'age', "
+             "  {get: function(){ return this.accessor_age; }, "
+             "   set: function(v){ this.accessor_age = v; }, "
+             "   enumerable: true, configurable: true});"
+             "child.age = 10;");
+  ExpectBoolean("child.hasOwnProperty('age')", false);
+  ExpectInt32("child.age", 10);
+  ExpectInt32("child.accessor_age", 10);
+}
+
+THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "var parent = child.__proto__;"
+             "parent.name = 'Alice';");
+  ExpectBoolean("child.hasOwnProperty('name')", false);
+  ExpectString("child.name", "Alice");
+  CompileRun("child.name = 'Bob';");
+  ExpectString("child.name", "Bob");
+  ExpectBoolean("child.hasOwnProperty('name')", true);
+  ExpectString("parent.name", "Alice");
+}
+
+THREADED_TEST(SwitchFromInterceptorToAccessor) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> templ = FunctionTemplate::New();
+  AddAccessor(templ, v8_str("age"),
+              SimpleAccessorGetter, SimpleAccessorSetter);
+  AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+  CompileRun("var obj = new Obj;"
+             "function setAge(i){ obj.age = i; };"
+             "for(var i = 0; i <= 10000; i++) setAge(i);");
+  // All i < 10000 go to the interceptor.
+  ExpectInt32("obj.interceptor_age", 9999);
+  // The last i goes to the accessor.
+  ExpectInt32("obj.accessor_age", 10000);
+}
+
+THREADED_TEST(SwitchFromAccessorToInterceptor) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> templ = FunctionTemplate::New();
+  AddAccessor(templ, v8_str("age"),
+              SimpleAccessorGetter, SimpleAccessorSetter);
+  AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+  CompileRun("var obj = new Obj;"
+             "function setAge(i){ obj.age = i; };"
+             "for(var i = 20000; i >= 9999; i--) setAge(i);");
+  // All i >= 10000 go to the accessor.
+  ExpectInt32("obj.accessor_age", 10000);
+  // The last i goes to the interceptor.
+  ExpectInt32("obj.interceptor_age", 9999);
+}
+
+THREADED_TEST(SwitchFromInterceptorToAccessorWithInheritance) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddAccessor(parent, v8_str("age"),
+              SimpleAccessorGetter, SimpleAccessorSetter);
+  AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "function setAge(i){ child.age = i; };"
+             "for(var i = 0; i <= 10000; i++) setAge(i);");
+  // All i < 10000 go to the interceptor.
+  ExpectInt32("child.interceptor_age", 9999);
+  // The last i goes to the accessor.
+  ExpectInt32("child.accessor_age", 10000);
+}
+
+THREADED_TEST(SwitchFromAccessorToInterceptorWithInheritance) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddAccessor(parent, v8_str("age"),
+              SimpleAccessorGetter, SimpleAccessorSetter);
+  AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "function setAge(i){ child.age = i; };"
+             "for(var i = 20000; i >= 9999; i--) setAge(i);");
+  // All i >= 10000 go to the accessor.
+  ExpectInt32("child.accessor_age", 10000);
+  // The last i goes to the interceptor.
+  ExpectInt32("child.interceptor_age", 9999);
+}
+
+THREADED_TEST(SwitchFromInterceptorToJSAccessor) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> templ = FunctionTemplate::New();
+  AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+  CompileRun("var obj = new Obj;"
+             "function setter(i) { this.accessor_age = i; };"
+             "function getter() { return this.accessor_age; };"
+             "function setAge(i) { obj.age = i; };"
+             "Object.defineProperty(obj, 'age', { get:getter, set:setter });"
+             "for(var i = 0; i <= 10000; i++) setAge(i);");
+  // All i < 10000 go to the interceptor.
+  ExpectInt32("obj.interceptor_age", 9999);
+  // The last i goes to the JavaScript accessor.
+  ExpectInt32("obj.accessor_age", 10000);
+  // The installed JavaScript getter is still intact.
+  // This last part is a regression test for issue 1651 and relies on the fact
+  // that both interceptor and accessor are being installed on the same object.
+  ExpectInt32("obj.age", 10000);
+  ExpectBoolean("obj.hasOwnProperty('age')", true);
+  ExpectUndefined("Object.getOwnPropertyDescriptor(obj, 'age').value");
+}
+
+THREADED_TEST(SwitchFromJSAccessorToInterceptor) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> templ = FunctionTemplate::New();
+  AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+  CompileRun("var obj = new Obj;"
+             "function setter(i) { this.accessor_age = i; };"
+             "function getter() { return this.accessor_age; };"
+             "function setAge(i) { obj.age = i; };"
+             "Object.defineProperty(obj, 'age', { get:getter, set:setter });"
+             "for(var i = 20000; i >= 9999; i--) setAge(i);");
+  // All i >= 10000 go to the accessor.
+  ExpectInt32("obj.accessor_age", 10000);
+  // The last i goes to the interceptor.
+  ExpectInt32("obj.interceptor_age", 9999);
+  // The installed JavaScript getter is still intact.
+  // This last part is a regression test for issue 1651 and relies on the fact
+  // that both interceptor and accessor are being installed on the same object.
+  ExpectInt32("obj.age", 10000);
+  ExpectBoolean("obj.hasOwnProperty('age')", true);
+  ExpectUndefined("Object.getOwnPropertyDescriptor(obj, 'age').value");
+}
+
+THREADED_TEST(SwitchFromInterceptorToProperty) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "function setAge(i){ child.age = i; };"
+             "for(var i = 0; i <= 10000; i++) setAge(i);");
+  // All i < 10000 go to the interceptor.
+  ExpectInt32("child.interceptor_age", 9999);
+  // The last i goes to child's own property.
+  ExpectInt32("child.age", 10000);
+}
+
+THREADED_TEST(SwitchFromPropertyToInterceptor) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "function setAge(i){ child.age = i; };"
+             "for(var i = 20000; i >= 9999; i--) setAge(i);");
+  // All i >= 10000 go to child's own property.
+  ExpectInt32("child.age", 10000);
+  // The last i goes to the interceptor.
+  ExpectInt32("child.interceptor_age", 9999);
+}
 
 THREADED_TEST(NamedPropertyHandlerGetter) {
   echo_named_call_count = 0;
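One detail makes the Interceptor* helpers above hang together:
InterceptorSetter parks intercepted stores as hidden values, which ordinary
JavaScript property lookups never see, and InterceptorGetter serves them back
after stripping the "interceptor_" prefix, so obj.interceptor_age reads a
value that was never stored as a real property. The round trip in isolation
(the same v8 API calls the helpers use):

    // In the setter: a store of obj.age with a small integer value.
    v8::Handle<v8::Object> self = info.This();
    self->SetHiddenValue(v8_str("age"), value);

    // In the getter: a read of obj.interceptor_age; the prefix is stripped,
    // so the hidden value is looked up under the plain name "age".
    v8::Handle<v8::Value> result = self->GetHiddenValue(v8_str("age"));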
@@ -1567,7 +1844,7 @@
 
   env->Global()->Set(v8_str("depth"), v8::Integer::New(0));
   call_recursively_script = v8_compile("callScriptRecursively()");
-  v8::Handle<Value> result = call_recursively_script->Run();
+  call_recursively_script->Run();
   call_recursively_script = v8::Handle<Script>();
 
   env->Global()->Set(v8_str("depth"), v8::Integer::New(0));
@@ -1666,12 +1943,12 @@
 
   // Check reading and writing aligned pointers.
   obj->SetPointerInInternalField(0, aligned);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
 
   // Check reading and writing unaligned pointers.
   obj->SetPointerInInternalField(0, unaligned);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
 
   delete[] data;
@@ -1697,19 +1974,19 @@
   CHECK_EQ(1, static_cast<int>(reinterpret_cast<uintptr_t>(unaligned) & 0x1));
 
   obj->SetPointerInInternalField(0, aligned);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(aligned, v8::External::Unwrap(obj->GetInternalField(0)));
 
   obj->SetPointerInInternalField(0, unaligned);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(unaligned, v8::External::Unwrap(obj->GetInternalField(0)));
 
   obj->SetInternalField(0, v8::External::Wrap(aligned));
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
 
   obj->SetInternalField(0, v8::External::Wrap(unaligned));
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
 
   delete[] data;
@@ -1722,7 +1999,7 @@
 
   // Ensure that the test starts with a fresh heap to test whether the hash
   // code is based on the address.
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   Local<v8::Object> obj = v8::Object::New();
   int hash = obj->GetIdentityHash();
   int hash1 = obj->GetIdentityHash();
@@ -1732,7 +2009,7 @@
   // objects should not be assigned the same hash code. If the test below fails
   // the random number generator should be evaluated.
   CHECK_NE(hash, hash2);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   int hash3 = v8::Object::New()->GetIdentityHash();
   // Make sure that the identity hash is not based on the initial address of
   // the object alone. If the test below fails the random number generator
@@ -1769,7 +2046,7 @@
   v8::Local<v8::String> empty = v8_str("");
   v8::Local<v8::String> prop_name = v8_str("prop_name");
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   // Make sure delete of a non-existent hidden value works
   CHECK(obj->DeleteHiddenValue(key));
@@ -1779,7 +2056,7 @@
   CHECK(obj->SetHiddenValue(key, v8::Integer::New(2002)));
   CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   // Make sure we do not find the hidden property.
   CHECK(!obj->Has(empty));
@@ -1790,7 +2067,7 @@
   CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
   CHECK_EQ(2003, obj->Get(empty)->Int32Value());
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   // Add another property and delete it afterwards to force the object in
   // slow case.
@@ -1801,7 +2078,7 @@
   CHECK(obj->Delete(prop_name));
   CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   CHECK(obj->DeleteHiddenValue(key));
   CHECK(obj->GetHiddenValue(key).IsEmpty());
@@ -1908,19 +2185,30 @@
 }
 
 
-static int NumberOfWeakCalls = 0;
+class WeakCallCounter {
+ public:
+  explicit WeakCallCounter(int id) : id_(id), number_of_weak_calls_(0) { }
+  int id() { return id_; }
+  void increment() { number_of_weak_calls_++; }
+  int NumberOfWeakCalls() { return number_of_weak_calls_; }
+ private:
+  int id_;
+  int number_of_weak_calls_;
+};
+
+
 static void WeakPointerCallback(Persistent<Value> handle, void* id) {
-  CHECK_EQ(reinterpret_cast<void*>(1234), id);
-  NumberOfWeakCalls++;
+  WeakCallCounter* counter = reinterpret_cast<WeakCallCounter*>(id);
+  CHECK_EQ(1234, counter->id());
+  counter->increment();
   handle.Dispose();
 }
 
+
 THREADED_TEST(ApiObjectGroups) {
   HandleScope scope;
   LocalContext env;
 
-  NumberOfWeakCalls = 0;
-
   Persistent<Object> g1s1;
   Persistent<Object> g1s2;
   Persistent<Object> g1c1;
@@ -1928,21 +2216,23 @@
   Persistent<Object> g2s2;
   Persistent<Object> g2c1;
 
+  WeakCallCounter counter(1234);
+
   {
     HandleScope scope;
     g1s1 = Persistent<Object>::New(Object::New());
     g1s2 = Persistent<Object>::New(Object::New());
     g1c1 = Persistent<Object>::New(Object::New());
-    g1s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g1s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g1c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g1c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
     g2s1 = Persistent<Object>::New(Object::New());
     g2s2 = Persistent<Object>::New(Object::New());
     g2c1 = Persistent<Object>::New(Object::New());
-    g2s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g2s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g2c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g2c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
   }
 
   Persistent<Object> root = Persistent<Object>::New(g1s1);  // make a root.
@@ -1961,14 +2251,14 @@
     V8::AddObjectGroup(g2_objects, 2);
     V8::AddImplicitReferences(g2s2, g2_children, 1);
   }
-  // Do a full GC
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+  // Do a single full GC, ensure incremental marking is stopped.
+  HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
 
   // All object should be alive.
-  CHECK_EQ(0, NumberOfWeakCalls);
+  CHECK_EQ(0, counter.NumberOfWeakCalls());
 
   // Weaken the root.
-  root.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+  root.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
   // But make children strong roots---all the objects (except for children)
   // should be collectable now.
   g1c1.ClearWeak();
@@ -1986,17 +2276,17 @@
     V8::AddImplicitReferences(g2s2, g2_children, 1);
   }
 
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+  HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
 
   // All objects should be gone. 5 global handles in total.
-  CHECK_EQ(5, NumberOfWeakCalls);
+  CHECK_EQ(5, counter.NumberOfWeakCalls());
 
   // And now make children weak again and collect them.
-  g1c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-  g2c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+  g1c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+  g2c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
-  CHECK_EQ(7, NumberOfWeakCalls);
+  HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+  CHECK_EQ(7, counter.NumberOfWeakCalls());
 }
 
 
@@ -2004,7 +2294,7 @@
   HandleScope scope;
   LocalContext env;
 
-  NumberOfWeakCalls = 0;
+  WeakCallCounter counter(1234);
 
   Persistent<Object> g1s1;
   Persistent<Object> g1s2;
@@ -2017,18 +2307,18 @@
     HandleScope scope;
     g1s1 = Persistent<Object>::New(Object::New());
     g1s2 = Persistent<Object>::New(Object::New());
-    g1s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g1s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
     g2s1 = Persistent<Object>::New(Object::New());
     g2s2 = Persistent<Object>::New(Object::New());
-    g2s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g2s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
     g3s1 = Persistent<Object>::New(Object::New());
     g3s2 = Persistent<Object>::New(Object::New());
-    g3s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g3s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g3s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g3s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
   }
 
   Persistent<Object> root = Persistent<Object>::New(g1s1);  // make a root.
@@ -2050,14 +2340,14 @@
     V8::AddObjectGroup(g3_objects, 2);
     V8::AddImplicitReferences(g3s1, g3_children, 1);
   }
-  // Do a full GC
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+  // Do a single full GC
+  HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
 
   // All object should be alive.
-  CHECK_EQ(0, NumberOfWeakCalls);
+  CHECK_EQ(0, counter.NumberOfWeakCalls());
 
   // Weaken the root.
-  root.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+  root.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
   // Groups are deleted, rebuild groups.
   {
@@ -2075,10 +2365,10 @@
     V8::AddImplicitReferences(g3s1, g3_children, 1);
   }
 
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+  HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
 
   // All objects should be gone. 7 global handles in total.
-  CHECK_EQ(7, NumberOfWeakCalls);
+  CHECK_EQ(7, counter.NumberOfWeakCalls());
 }
 
 
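The rewritten object-group tests replace a mutable global weak-call counter
with a WeakCallCounter threaded through MakeWeak's void* parameter, the usual
way to give a weak callback per-test state. The pattern in isolation (the same
Persistent-handle API the tests use):

    static void WeakPointerCallback(v8::Persistent<v8::Value> handle,
                                    void* data) {
      WeakCallCounter* counter = reinterpret_cast<WeakCallCounter*>(data);
      counter->increment();  // per-test state instead of a global
      handle.Dispose();      // the callback is responsible for disposal
    }

    // Registration:
    //   handle.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);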
@@ -2573,6 +2863,16 @@
   obj = env->Global()->Get(v8_str("obj"));
   CHECK(!obj->IsInt32());
   CHECK(!obj->IsUint32());
+  // Positive zero
+  CompileRun("var obj = 0.0;");
+  obj = env->Global()->Get(v8_str("obj"));
+  CHECK(obj->IsInt32());
+  CHECK(obj->IsUint32());
+  // Negative zero
+  CompileRun("var obj = -0.0;");
+  obj = env->Global()->Get(v8_str("obj"));
+  CHECK(!obj->IsInt32());
+  CHECK(!obj->IsUint32());
 }
 
 
@@ -4172,7 +4472,7 @@
 
   source = v8_str("undetectable.y = 2000;");
   script = Script::Compile(source);
-  Local<Value> result = script->Run();
+  script->Run();
   ExpectBoolean("undetectable.y == undefined", true);
 }
 
@@ -4305,6 +4605,47 @@
 }
 
 
+static const char* kEmbeddedExtensionSource =
+    "function Ret54321(){return 54321;}~~@@$"
+    "$%% THIS IS A SERIES OF NON-NULL-TERMINATED STRINGS.";
+static const int kEmbeddedExtensionSourceValidLen = 34;
+
+
+THREADED_TEST(ExtensionMissingSourceLength) {
+  v8::HandleScope handle_scope;
+  v8::RegisterExtension(new Extension("srclentest_fail",
+                                      kEmbeddedExtensionSource));
+  const char* extension_names[] = { "srclentest_fail" };
+  v8::ExtensionConfiguration extensions(1, extension_names);
+  v8::Handle<Context> context = Context::New(&extensions);
+  CHECK_EQ(0, *context);
+}
+
+
+THREADED_TEST(ExtensionWithSourceLength) {
+  for (int source_len = kEmbeddedExtensionSourceValidLen - 1;
+       source_len <= kEmbeddedExtensionSourceValidLen + 1; ++source_len) {
+    v8::HandleScope handle_scope;
+    i::ScopedVector<char> extension_name(32);
+    i::OS::SNPrintF(extension_name, "ext #%d", source_len);
+    v8::RegisterExtension(new Extension(extension_name.start(),
+                                        kEmbeddedExtensionSource, 0, 0,
+                                        source_len));
+    const char* extension_names[1] = { extension_name.start() };
+    v8::ExtensionConfiguration extensions(1, extension_names);
+    v8::Handle<Context> context = Context::New(&extensions);
+    if (source_len == kEmbeddedExtensionSourceValidLen) {
+      Context::Scope lock(context);
+      v8::Handle<Value> result = Script::Compile(v8_str("Ret54321()"))->Run();
+      CHECK_EQ(v8::Integer::New(54321), result);
+    } else {
+      // Anything but exactly the right length should fail to compile.
+      CHECK_EQ(0, *context);
+    }
+  }
+}
+
+
 static const char* kEvalExtensionSource1 =
   "function UseEval1() {"
   "  var x = 42;"
@@ -4483,10 +4824,11 @@
                                                     "native\nfunction foo();"));
   const char* extension_names[] = { name };
   v8::ExtensionConfiguration extensions(1, extension_names);
-  v8::Handle<Context> context = Context::New(&extensions);
-  ASSERT(context.IsEmpty());
+  v8::Handle<Context> context(Context::New(&extensions));
+  CHECK(context.IsEmpty());
 }
 
+
 THREADED_TEST(NativeFunctionDeclarationErrorEscape) {
   v8::HandleScope handle_scope;
   const char* name = "nativedeclerresc";
@@ -4497,8 +4839,8 @@
       "nativ\\u0065 function foo();"));
   const char* extension_names[] = { name };
   v8::ExtensionConfiguration extensions(1, extension_names);
-  v8::Handle<Context> context = Context::New(&extensions);
-  ASSERT(context.IsEmpty());
+  v8::Handle<Context> context(Context::New(&extensions));
+  CHECK(context.IsEmpty());
 }
 
 
@@ -4664,7 +5006,7 @@
   Local<Script> script =
       Script::Compile(String::New(js_code_causing_huge_string_flattening));
   last_location = NULL;
-  Local<Value> result = script->Run();
+  script->Run();
 
   CHECK(false);  // Should not return.
 }
@@ -4805,7 +5147,7 @@
 
 
 static void InvokeMarkSweep() {
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -4898,7 +5240,7 @@
   CHECK_EQ(v8::Integer::New(3), args[2]);
   CHECK_EQ(v8::Undefined(), args[3]);
   v8::HandleScope scope;
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   return v8::Undefined();
 }
 
@@ -5184,68 +5526,121 @@
 }
 
 
+int GetUtf8Length(Handle<String> str) {
+  int len = str->Utf8Length();
+  if (len < 0) {
+    i::Handle<i::String> istr(v8::Utils::OpenHandle(*str));
+    i::FlattenString(istr);
+    len = str->Utf8Length();
+  }
+  return len;
+}
+
+
 THREADED_TEST(StringWrite) {
+  LocalContext context;
   v8::HandleScope scope;
   v8::Handle<String> str = v8_str("abcde");
   // abc<Icelandic eth><Unicode snowman>.
   v8::Handle<String> str2 = v8_str("abc\303\260\342\230\203");
+  const int kStride = 4;  // Must match stride in for loops in JS below.
+  CompileRun(
+      "var left = '';"
+      "for (var i = 0; i < 0xd800; i += 4) {"
+      "  left = left + String.fromCharCode(i);"
+      "}");
+  CompileRun(
+      "var right = '';"
+      "for (var i = 0; i < 0xd800; i += 4) {"
+      "  right = String.fromCharCode(i) + right;"
+      "}");
+  v8::Handle<v8::Object> global = Context::GetCurrent()->Global();
+  Handle<String> left_tree = global->Get(v8_str("left")).As<String>();
+  Handle<String> right_tree = global->Get(v8_str("right")).As<String>();
 
   CHECK_EQ(5, str2->Length());
+  CHECK_EQ(0xd800 / kStride, left_tree->Length());
+  CHECK_EQ(0xd800 / kStride, right_tree->Length());
 
   char buf[100];
-  char utf8buf[100];
+  char utf8buf[0xd800 * 3];
   uint16_t wbuf[100];
   int len;
   int charlen;
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, sizeof(utf8buf), &charlen);
   CHECK_EQ(9, len);
   CHECK_EQ(5, charlen);
   CHECK_EQ(0, strcmp(utf8buf, "abc\303\260\342\230\203"));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 8, &charlen);
   CHECK_EQ(8, len);
   CHECK_EQ(5, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\342\230\203\1", 9));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 7, &charlen);
   CHECK_EQ(5, len);
   CHECK_EQ(4, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\1", 5));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 6, &charlen);
   CHECK_EQ(5, len);
   CHECK_EQ(4, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\1", 5));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 5, &charlen);
   CHECK_EQ(5, len);
   CHECK_EQ(4, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\1", 5));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 4, &charlen);
   CHECK_EQ(3, len);
   CHECK_EQ(3, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "abc\1", 4));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 3, &charlen);
   CHECK_EQ(3, len);
   CHECK_EQ(3, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "abc\1", 4));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 2, &charlen);
   CHECK_EQ(2, len);
   CHECK_EQ(2, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "ab\1", 3));
 
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  len = GetUtf8Length(left_tree);
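+  // Worked expectation (a sketch of the arithmetic): over all code points
+  // in [0, 0xd800), those below 0x80 take one UTF-8 byte, [0x80, 0x800)
+  // take two, and [0x800, 0xd800) take three. Only every kStride-th code
+  // point is present, and kStride divides each range boundary, so the
+  // byte total is simply divided by kStride.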
+  int utf8_expected =
+      (0x80 + (0x800 - 0x80) * 2 + (0xd800 - 0x800) * 3) / kStride;
+  CHECK_EQ(utf8_expected, len);
+  len = left_tree->WriteUtf8(utf8buf, utf8_expected, &charlen);
+  CHECK_EQ(utf8_expected, len);
+  CHECK_EQ(0xd800 / kStride, charlen);
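+  // The last code point written is 0xd800 - kStride = 0xd7fc, which
+  // UTF-8-encodes as ed 9f bc; its final byte is spelled 0xc0 - kStride
+  // in the check below.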
+  CHECK_EQ(0xed, static_cast<unsigned char>(utf8buf[utf8_expected - 3]));
+  CHECK_EQ(0x9f, static_cast<unsigned char>(utf8buf[utf8_expected - 2]));
+  CHECK_EQ(0xc0 - kStride,
+           static_cast<unsigned char>(utf8buf[utf8_expected - 1]));
+  CHECK_EQ(1, utf8buf[utf8_expected]);
+
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  len = GetUtf8Length(right_tree);
+  CHECK_EQ(utf8_expected, len);
+  len = right_tree->WriteUtf8(utf8buf, utf8_expected, &charlen);
+  CHECK_EQ(utf8_expected, len);
+  CHECK_EQ(0xd800 / kStride, charlen);
+  CHECK_EQ(0xed, static_cast<unsigned char>(utf8buf[0]));
+  CHECK_EQ(0x9f, static_cast<unsigned char>(utf8buf[1]));
+  CHECK_EQ(0xc0 - kStride, static_cast<unsigned char>(utf8buf[2]));
+  CHECK_EQ(1, utf8buf[utf8_expected]);
+
   memset(buf, 0x1, sizeof(buf));
   memset(wbuf, 0x1, sizeof(wbuf));
   len = str->WriteAscii(buf);
@@ -5361,6 +5756,225 @@
 }
 
 
+static void Utf16Helper(
+    LocalContext& context,
+    const char* name,
+    const char* lengths_name,
+    int len) {
+  Local<v8::Array> a =
+      Local<v8::Array>::Cast(context->Global()->Get(v8_str(name)));
+  Local<v8::Array> alens =
+      Local<v8::Array>::Cast(context->Global()->Get(v8_str(lengths_name)));
+  for (int i = 0; i < len; i++) {
+    Local<v8::String> string =
+      Local<v8::String>::Cast(a->Get(i));
+    Local<v8::Number> expected_len =
+      Local<v8::Number>::Cast(alens->Get(i));
+    CHECK_EQ(expected_len->Value() != string->Length(),
+             string->MayContainNonAscii());
+    int length = GetUtf8Length(string);
+    CHECK_EQ(static_cast<int>(expected_len->Value()), length);
+  }
+}
+
+
+static uint16_t StringGet(Handle<String> str, int index) {
+  i::Handle<i::String> istring =
+      v8::Utils::OpenHandle(String::Cast(*str));
+  return istring->Get(index);
+}
+
+
+static void WriteUtf8Helper(
+    LocalContext& context,
+    const char* name,
+    const char* lengths_name,
+    int len) {
+  Local<v8::Array> b =
+      Local<v8::Array>::Cast(context->Global()->Get(v8_str(name)));
+  Local<v8::Array> alens =
+      Local<v8::Array>::Cast(context->Global()->Get(v8_str(lengths_name)));
+  char buffer[1000];
+  char buffer2[1000];
+  for (int i = 0; i < len; i++) {
+    Local<v8::String> string =
+      Local<v8::String>::Cast(b->Get(i));
+    Local<v8::Number> expected_len =
+      Local<v8::Number>::Cast(alens->Get(i));
+    int utf8_length = static_cast<int>(expected_len->Value());
+    for (int j = utf8_length + 1; j >= 0; j--) {
+      memset(reinterpret_cast<void*>(&buffer), 42, sizeof(buffer));
+      memset(reinterpret_cast<void*>(&buffer2), 42, sizeof(buffer2));
+      int nchars;
+      int utf8_written =
+          string->WriteUtf8(buffer, j, &nchars, String::NO_OPTIONS);
+      int utf8_written2 =
+          string->WriteUtf8(buffer2, j, &nchars, String::NO_NULL_TERMINATION);
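+      // With NO_OPTIONS the writer also appends a NUL when it fits, so
+      // its byte count may exceed the NO_NULL_TERMINATION count by one;
+      // the j == utf8_length + 1 case below pins this down exactly.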
+      CHECK_GE(utf8_length + 1, utf8_written);
+      CHECK_GE(utf8_length, utf8_written2);
+      for (int k = 0; k < utf8_written2; k++) {
+        CHECK_EQ(buffer[k], buffer2[k]);
+      }
+      CHECK(nchars * 3 >= utf8_written - 1);
+      CHECK(nchars <= utf8_written);
+      if (j == utf8_length + 1) {
+        CHECK_EQ(utf8_written2, utf8_length);
+        CHECK_EQ(utf8_written2 + 1, utf8_written);
+      }
+      CHECK_EQ(buffer[utf8_written], 42);
+      if (j > utf8_length) {
+        if (utf8_written != 0) CHECK_EQ(buffer[utf8_written - 1], 0);
+        if (utf8_written > 1) CHECK_NE(buffer[utf8_written - 2], 42);
+        Handle<String> roundtrip = v8_str(buffer);
+        CHECK(roundtrip->Equals(string));
+      } else {
+        if (utf8_written != 0) CHECK_NE(buffer[utf8_written - 1], 42);
+      }
+      if (utf8_written2 != 0) CHECK_NE(buffer[utf8_written - 1], 42);
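+      // If the last two code units written form a surrogate pair, they
+      // encode one supplementary code point. Worked example (our reading,
+      // not part of the original test): lead 0xd801 and trail 0xdc05 give
+      // c = 0x10000 + (0x001 << 10) + 0x005 = 0x10405, whose UTF-8 bytes
+      // f0 90 90 85 match the bit checks below.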
+      if (nchars >= 2) {
+        uint16_t trail = StringGet(string, nchars - 1);
+        uint16_t lead = StringGet(string, nchars - 2);
+        if (((lead & 0xfc00) == 0xd800) &&
+            ((trail & 0xfc00) == 0xdc00)) {
+          unsigned char u1 = buffer2[utf8_written2 - 4];
+          unsigned char u2 = buffer2[utf8_written2 - 3];
+          unsigned char u3 = buffer2[utf8_written2 - 2];
+          unsigned char u4 = buffer2[utf8_written2 - 1];
+          CHECK_EQ((u1 & 0xf8), 0xf0);
+          CHECK_EQ((u2 & 0xc0), 0x80);
+          CHECK_EQ((u3 & 0xc0), 0x80);
+          CHECK_EQ((u4 & 0xc0), 0x80);
+          uint32_t c = 0x10000 + ((lead & 0x3ff) << 10) + (trail & 0x3ff);
+          CHECK_EQ((u4 & 0x3f), (c & 0x3f));
+          CHECK_EQ((u3 & 0x3f), ((c >> 6) & 0x3f));
+          CHECK_EQ((u2 & 0x3f), ((c >> 12) & 0x3f));
+          CHECK_EQ((u1 & 0x3), c >> 18);
+        }
+      }
+    }
+  }
+}
+
+
+THREADED_TEST(Utf16) {
+  LocalContext context;
+  v8::HandleScope scope;
+  CompileRun(
+      "var pad = '01234567890123456789';"
+      "var p = [];"
+      "var plens = [20, 3, 3];"
+      "p.push('01234567890123456789');"
+      "var lead = 0xd800;"
+      "var trail = 0xdc00;"
+      "p.push(String.fromCharCode(0xd800));"
+      "p.push(String.fromCharCode(0xdc00));"
+      "var a = [];"
+      "var b = [];"
+      "var c = [];"
+      "var alens = [];"
+      "for (var i = 0; i < 3; i++) {"
+      "  p[1] = String.fromCharCode(lead++);"
+      "  for (var j = 0; j < 3; j++) {"
+      "    p[2] = String.fromCharCode(trail++);"
+      "    a.push(p[i] + p[j]);"
+      "    b.push(p[i] + p[j]);"
+      "    c.push(p[i] + p[j]);"
+      "    alens.push(plens[i] + plens[j]);"
+      "  }"
+      "}"
+      "alens[5] -= 2;"  // Here the surrogate pairs match up.
+      "var a2 = [];"
+      "var b2 = [];"
+      "var c2 = [];"
+      "var a2lens = [];"
+      "for (var m = 0; m < 9; m++) {"
+      "  for (var n = 0; n < 9; n++) {"
+      "    a2.push(a[m] + a[n]);"
+      "    b2.push(b[m] + b[n]);"
+      "    var newc = 'x' + c[m] + c[n] + 'y';"
+      "    c2.push(newc.substring(1, newc.length - 1));"
+      "    var utf = alens[m] + alens[n];"  // And here.
+           // The 'n's whose strings start with a trail surrogate
+           // (0xdc..) are 6-8; the 'm's whose strings end with a lead
+           // surrogate (0xd8..) are 1, 4 and 7.
+      "    if ((m % 3) == 1 && n >= 6) utf -= 2;"
+      "    a2lens.push(utf);"
+      "  }"
+      "}");
+  Utf16Helper(context, "a", "alens", 9);
+  Utf16Helper(context, "a2", "a2lens", 81);
+  WriteUtf8Helper(context, "b", "alens", 9);
+  WriteUtf8Helper(context, "b2", "a2lens", 81);
+  WriteUtf8Helper(context, "c2", "a2lens", 81);
+}
+
+
+static bool SameSymbol(Handle<String> s1, Handle<String> s2) {
+  i::Handle<i::String> is1(v8::Utils::OpenHandle(*s1));
+  i::Handle<i::String> is2(v8::Utils::OpenHandle(*s2));
+  return *is1 == *is2;
+}
+
+
+static void SameSymbolHelper(const char* a, const char* b) {
+  Handle<String> symbol1 = v8::String::NewSymbol(a);
+  Handle<String> symbol2 = v8::String::NewSymbol(b);
+  CHECK(SameSymbol(symbol1, symbol2));
+}
+
+
+THREADED_TEST(Utf16Symbol) {
+  LocalContext context;
+  v8::HandleScope scope;
+
+  Handle<String> symbol1 = v8::String::NewSymbol("abc");
+  Handle<String> symbol2 = v8::String::NewSymbol("abc");
+  CHECK(SameSymbol(symbol1, symbol2));
+
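+  // The escapes below spell U+10405/U+10406 two ways (our decoding of the
+  // octal literals): "\360\220\220\205" is the 4-byte UTF-8 sequence
+  // f0 90 90 85, while "\355\240\201\355\260\205" encodes the surrogate
+  // halves U+D801 (ed a0 81) and U+DC05 (ed b0 85) separately; both must
+  // intern to the same symbol.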
+  SameSymbolHelper("\360\220\220\205",  // 4 byte encoding.
+                   "\355\240\201\355\260\205");  // 2 3-byte surrogates.
+  SameSymbolHelper("\355\240\201\355\260\206",  // 2 3-byte surrogates.
+                   "\360\220\220\206");  // 4 byte encoding.
+  SameSymbolHelper("x\360\220\220\205",  // 4 byte encoding.
+                   "x\355\240\201\355\260\205");  // 2 3-byte surrogates.
+  SameSymbolHelper("x\355\240\201\355\260\206",  // 2 3-byte surrogates.
+                   "x\360\220\220\206");  // 4 byte encoding.
+  CompileRun(
+      "var sym0 = 'benedictus';"
+      "var sym0b = 'S\303\270ren';"
+      "var sym1 = '\355\240\201\355\260\207';"
+      "var sym2 = '\360\220\220\210';"
+      "var sym3 = 'x\355\240\201\355\260\207';"
+      "var sym4 = 'x\360\220\220\210';"
+      "if (sym1.length != 2) throw sym1;"
+      "if (sym1.charCodeAt(1) != 0xdc07) throw sym1.charCodeAt(1);"
+      "if (sym2.length != 2) throw sym2;"
+      "if (sym2.charCodeAt(1) != 0xdc08) throw sym2.charCodeAt(2);"
+      "if (sym3.length != 3) throw sym3;"
+      "if (sym3.charCodeAt(2) != 0xdc07) throw sym1.charCodeAt(2);"
+      "if (sym4.length != 3) throw sym4;"
+      "if (sym4.charCodeAt(2) != 0xdc08) throw sym2.charCodeAt(2);");
+  Handle<String> sym0 = v8::String::NewSymbol("benedictus");
+  Handle<String> sym0b = v8::String::NewSymbol("S\303\270ren");
+  Handle<String> sym1 = v8::String::NewSymbol("\355\240\201\355\260\207");
+  Handle<String> sym2 = v8::String::NewSymbol("\360\220\220\210");
+  Handle<String> sym3 = v8::String::NewSymbol("x\355\240\201\355\260\207");
+  Handle<String> sym4 = v8::String::NewSymbol("x\360\220\220\210");
+  v8::Local<v8::Object> global = context->Global();
+  Local<Value> s0 = global->Get(v8_str("sym0"));
+  Local<Value> s0b = global->Get(v8_str("sym0b"));
+  Local<Value> s1 = global->Get(v8_str("sym1"));
+  Local<Value> s2 = global->Get(v8_str("sym2"));
+  Local<Value> s3 = global->Get(v8_str("sym3"));
+  Local<Value> s4 = global->Get(v8_str("sym4"));
+  CHECK(SameSymbol(sym0, Handle<String>(String::Cast(*s0))));
+  CHECK(SameSymbol(sym0b, Handle<String>(String::Cast(*s0b))));
+  CHECK(SameSymbol(sym1, Handle<String>(String::Cast(*s1))));
+  CHECK(SameSymbol(sym2, Handle<String>(String::Cast(*s2))));
+  CHECK(SameSymbol(sym3, Handle<String>(String::Cast(*s3))));
+  CHECK(SameSymbol(sym4, Handle<String>(String::Cast(*s4))));
+}
+
+
 THREADED_TEST(ToArrayIndex) {
   v8::HandleScope scope;
   LocalContext context;
@@ -5400,7 +6014,6 @@
   v8::Handle<String> message = v8_str("message");
   v8::Handle<Value> range_error = v8::Exception::RangeError(foo);
   CHECK(range_error->IsObject());
-  v8::Handle<v8::Object> range_obj = range_error.As<v8::Object>();
   CHECK(range_error.As<v8::Object>()->Get(message)->Equals(foo));
   v8::Handle<Value> reference_error = v8::Exception::ReferenceError(foo);
   CHECK(reference_error->IsObject());
@@ -6970,7 +7583,7 @@
     // Create new environment reusing the global object.
     LocalContext env(NULL, instance_template, global_object);
     env->Global()->Set(v8_str("foo"), foo);
-    Local<Value> value = Script::Compile(v8_str("foo()"))->Run();
+    Script::Compile(v8_str("foo()"))->Run();
   }
 }
 
@@ -7158,6 +7771,60 @@
 }
 
 
+// Getting property names of an object with a prototype chain that
+// triggers dictionary elements in GetLocalPropertyNames() shouldn't
+// crash the runtime.
+THREADED_TEST(Regress91517) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope handle_scope;
+  LocalContext context;
+
+  Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
+  t1->SetHiddenPrototype(true);
+  t1->InstanceTemplate()->Set(v8_str("foo"), v8_num(1));
+  Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New();
+  t2->SetHiddenPrototype(true);
+  t2->InstanceTemplate()->Set(v8_str("fuz1"), v8_num(2));
+  t2->InstanceTemplate()->Set(v8_str("objects"), v8::Object::New());
+  t2->InstanceTemplate()->Set(v8_str("fuz2"), v8_num(2));
+  Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New();
+  t3->SetHiddenPrototype(true);
+  t3->InstanceTemplate()->Set(v8_str("boo"), v8_num(3));
+  Local<v8::FunctionTemplate> t4 = v8::FunctionTemplate::New();
+  t4->InstanceTemplate()->Set(v8_str("baz"), v8_num(4));
+
+  // Force dictionary-based properties.
+  i::ScopedVector<char> name_buf(1024);
+  for (int i = 1; i <= 1000; i++) {
+    i::OS::SNPrintF(name_buf, "sdf%d", i);
+    t2->InstanceTemplate()->Set(v8_str(name_buf.start()), v8_num(2));
+  }
+
+  Local<v8::Object> o1 = t1->GetFunction()->NewInstance();
+  Local<v8::Object> o2 = t2->GetFunction()->NewInstance();
+  Local<v8::Object> o3 = t3->GetFunction()->NewInstance();
+  Local<v8::Object> o4 = t4->GetFunction()->NewInstance();
+
+  // Create prototype chain of hidden prototypes.
+  CHECK(o4->SetPrototype(o3));
+  CHECK(o3->SetPrototype(o2));
+  CHECK(o2->SetPrototype(o1));
+
+  // Call the runtime version of GetLocalPropertyNames() on the natively
+  // created object through JavaScript.
+  context->Global()->Set(v8_str("obj"), o4);
+  CompileRun("var names = %GetLocalPropertyNames(obj);");
+
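+  // Expected count (our tally): the 1000 dictionary-forcing "sdf"
+  // properties plus fuz1, objects and fuz2 on t2, and foo, boo and baz
+  // from the other templates, giving 1006 in total.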
+  ExpectInt32("names.length", 1006);
+  ExpectTrue("names.indexOf(\"baz\") >= 0");
+  ExpectTrue("names.indexOf(\"boo\") >= 0");
+  ExpectTrue("names.indexOf(\"foo\") >= 0");
+  ExpectTrue("names.indexOf(\"fuz1\") >= 0");
+  ExpectTrue("names.indexOf(\"fuz2\") >= 0");
+  ExpectFalse("names[1005] == undefined");
+}
+
+
 THREADED_TEST(FunctionReadOnlyPrototype) {
   v8::HandleScope handle_scope;
   LocalContext context;
@@ -7241,7 +7908,8 @@
   Local<Function> cons = templ->GetFunction();
   context->Global()->Set(v8_str("Fun"), cons);
   Local<v8::Object> inst = cons->NewInstance();
-  i::Handle<i::JSObject> obj = v8::Utils::OpenHandle(*inst);
+  i::Handle<i::JSObject> obj(v8::Utils::OpenHandle(*inst));
+  CHECK(obj->IsJSObject());
   Local<Value> value = CompileRun("(new Fun()).constructor === Fun");
   CHECK(value->BooleanValue());
 }
@@ -7492,9 +8160,11 @@
                            "  var bar = 2;"
                            "  with (x) { return eval('bar'); }"
                            "}"
-                           "f(this)"));
+                           "result4 = f(this)"));
   script->Run();
-  CHECK(try_catch.HasCaught());
+  CHECK(!try_catch.HasCaught());
+  CHECK_EQ(2, current->Global()->Get(v8_str("result4"))->Int32Value());
+
   try_catch.Reset();
 }
 
@@ -7508,7 +8178,7 @@
   other->SetSecurityToken(token);
   current->SetSecurityToken(token);
 
-  // Setup reference from current to other.
+  // Set up reference from current to other.
   current->Global()->Set(v8_str("other"), other->Global());
 
   // Check that new variables are introduced in other context.
@@ -7588,7 +8258,7 @@
   v8::Persistent<Context> context0 = Context::New();
   v8::Persistent<Context> context1 = Context::New();
 
-  // Setup function in context0 that uses eval from context0.
+  // Set up function in context0 that uses eval from context0.
   context0->Enter();
   v8::Handle<v8::Value> fun =
       CompileRun("var x = 42;"
@@ -7626,7 +8296,7 @@
   other->SetSecurityToken(token);
   current->SetSecurityToken(token);
 
-  // Setup reference from current to other.
+  // Set up reference from current to other.
   current->Global()->Set(v8_str("other"), other->Global());
 
   // Trigger lazy loading in other context.
@@ -7710,7 +8380,8 @@
   }
 
   { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
-    Local<ObjectTemplate> instance_template = t->InstanceTemplate();
+    Local<ObjectTemplate> instance_template(t->InstanceTemplate());
+    USE(instance_template);
     Local<v8::Object> instance = t->GetFunction()->NewInstance();
     context->Global()->Set(v8_str("obj2"), instance);
     v8::TryCatch try_catch;
@@ -7815,7 +8486,7 @@
   v8::HandleScope scope;
   if (depth == 0) return CountHandles();
   for (int i = 0; i < iterations; i++) {
-    Local<v8::Number> n = v8::Integer::New(42);
+    Local<v8::Number> n(v8::Integer::New(42));
   }
   return Recurse(depth - 1, iterations);
 }
@@ -7829,7 +8500,7 @@
     v8::HandleScope scope1;
     CHECK_EQ(0, CountHandles());
     for (int i = 0; i < kIterations; i++) {
-      Local<v8::Number> n = v8::Integer::New(42);
+      Local<v8::Number> n(v8::Integer::New(42));
       CHECK_EQ(i + 1, CountHandles());
     }
 
@@ -7837,7 +8508,7 @@
     {
       v8::HandleScope scope2;
       for (int j = 0; j < kIterations; j++) {
-        Local<v8::Number> n = v8::Integer::New(42);
+        Local<v8::Number> n(v8::Integer::New(42));
         CHECK_EQ(j + 1 + kIterations, CountHandles());
       }
     }
@@ -7883,7 +8554,7 @@
     Local<String> name,
     const AccessorInfo& info) {
   ApiTestFuzzer::Fuzz();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   return v8::Handle<Value>();
 }
 
@@ -8340,10 +9011,10 @@
                                  0, 0, 0, v8_str("data"));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
-  v8::Handle<Value> value = CompileRun(
-    "for (var i = 0; i < 1000; i++) {"
-    "  o.x = 42;"
-    "}");
+  CompileRun(
+      "for (var i = 0; i < 1000; i++) {"
+      "  o.x = 42;"
+      "}");
 }
 
 
@@ -8469,17 +9140,6 @@
 }
 
 
-static v8::Handle<Value> call_ic_function5;
-static v8::Handle<Value> InterceptorCallICGetter5(Local<String> name,
-                                                  const AccessorInfo& info) {
-  ApiTestFuzzer::Fuzz();
-  if (v8_str("x")->Equals(name))
-    return call_ic_function5;
-  else
-    return Local<Value>();
-}
-
-
 // This test checks that if the interceptor doesn't provide a function,
 // the cached constant function is used.
 THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
@@ -8500,6 +9160,17 @@
 }
 
 
+static v8::Handle<Value> call_ic_function5;
+static v8::Handle<Value> InterceptorCallICGetter5(Local<String> name,
+                                                  const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  if (v8_str("x")->Equals(name))
+    return call_ic_function5;
+  else
+    return Local<Value>();
+}
+
+
 // This test checks that if the interceptor provides a function, then
 // even if we have cached a constant function, the interceptor's
 // function is invoked.
@@ -8523,6 +9194,48 @@
 }
 
 
+static v8::Handle<Value> call_ic_function6;
+static v8::Handle<Value> InterceptorCallICGetter6(Local<String> name,
+                                                  const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  if (v8_str("x")->Equals(name))
+    return call_ic_function6;
+  else
+    return Local<Value>();
+}
+
+
+// Same test as above, except the code is wrapped in a function
+// to test the optimized compiler.
+THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(InterceptorCallICGetter6);
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+  call_ic_function6 =
+      v8_compile("function f(x) { return x - 1; }; f")->Run();
+  v8::Handle<Value> value = CompileRun(
+    "function inc(x) { return x + 1; };"
+    "inc(1);"
+    "o.x = inc;"
+    "function test() {"
+    "  var result = 0;"
+    "  for (var i = 0; i < 1000; i++) {"
+    "    result = o.x(42);"
+    "  }"
+    "  return result;"
+    "};"
+    "test();"
+    "test();"
+    "test();"
+    "%OptimizeFunctionOnNextCall(test);"
+    "test()");
+  CHECK_EQ(41, value->Int32Value());
+}
+
+
 // Test the case where we stored a constant function into
 // a stub, but it got invalidated later on.
 THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
@@ -8613,7 +9326,7 @@
   int* call_count = reinterpret_cast<int*>(v8::External::Unwrap(info.Data()));
   ++(*call_count);
   if ((*call_count) % 20 == 0) {
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   }
   return v8::Handle<Value>();
 }
@@ -8769,7 +9482,7 @@
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
       "var result = 0;"
       "for (var i = 0; i < 100; i++) {"
       "  result = o.method(41);"
@@ -8796,7 +9509,7 @@
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -8826,7 +9539,7 @@
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -8862,7 +9575,7 @@
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -8899,7 +9612,7 @@
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
   v8::TryCatch try_catch;
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -8938,7 +9651,7 @@
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
   v8::TryCatch try_catch;
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -8967,12 +9680,13 @@
                                 v8::Handle<v8::Signature>());
   v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
   proto_templ->Set(v8_str("method"), method_templ);
-  v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+  v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
+  USE(templ);
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
       "var result = 0;"
       "for (var i = 0; i < 100; i++) {"
       "  result = o.method(41);"
@@ -8990,12 +9704,13 @@
                                 v8::Signature::New(fun_templ));
   v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
   proto_templ->Set(v8_str("method"), method_templ);
-  v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+  v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
+  CHECK(!templ.IsEmpty());
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -9016,12 +9731,13 @@
                                 v8::Signature::New(fun_templ));
   v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
   proto_templ->Set(v8_str("method"), method_templ);
-  v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+  v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
+  CHECK(!templ.IsEmpty());
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -9047,13 +9763,14 @@
                                 v8::Signature::New(fun_templ));
   v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
   proto_templ->Set(v8_str("method"), method_templ);
-  v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+  v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
+  CHECK(!templ.IsEmpty());
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
   v8::TryCatch try_catch;
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -9093,7 +9810,7 @@
   templ->SetNamedPropertyHandler(NoBlockGetterX);
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
     "proto = new Object();"
     "proto.y = function(x) { return x + 1; };"
     "proto.z = function(x) { return x - 1; };"
@@ -9119,7 +9836,7 @@
   context->Global()->Set(v8_str("proto1"), templ->NewInstance());
   keyed_call_ic_function =
       v8_compile("function f(x) { return x - 1; }; f")->Run();
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
     "o = new Object();"
     "proto2 = new Object();"
     "o.y = function(x) { return x + 1; };"
@@ -9144,7 +9861,7 @@
   templ->SetNamedPropertyHandler(NoBlockGetterX);
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
     "function inc(x) { return x + 1; };"
     "inc(1);"
     "function dec(x) { return x - 1; };"
@@ -9170,7 +9887,7 @@
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ_o->NewInstance());
 
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
     "function len(x) { return x.length; };"
     "o.__proto__ = this;"
     "var m = 'parseFloat';"
@@ -9194,7 +9911,7 @@
   LocalContext context;
   context->Global()->Set(v8_str("proto"), templ_o->NewInstance());
 
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
     "var o = new Object();"
     "o.__proto__ = proto;"
     "o.method = function(x) { return x + 1; };"
@@ -9216,7 +9933,7 @@
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ_o->NewInstance());
 
-  v8::Handle<Value> value = CompileRun(
+  CompileRun(
     "var proto = new Object();"
     "o.__proto__ = proto;"
     "proto.method = function(x) { return x + 1; };"
@@ -9737,7 +10454,7 @@
 static unsigned linear_congruential_generator;
 
 
-void ApiTestFuzzer::Setup(PartOfTest part) {
+void ApiTestFuzzer::SetUp(PartOfTest part) {
   linear_congruential_generator = i::FLAG_testing_prng_seed;
   fuzzing_ = true;
   int count = RegisterThreadedTest::count();
@@ -9801,25 +10518,25 @@
 
 // Let's not be needlessly self-referential.
 TEST(Threading) {
-  ApiTestFuzzer::Setup(ApiTestFuzzer::FIRST_PART);
+  ApiTestFuzzer::SetUp(ApiTestFuzzer::FIRST_PART);
   ApiTestFuzzer::RunAllTests();
   ApiTestFuzzer::TearDown();
 }
 
 TEST(Threading2) {
-  ApiTestFuzzer::Setup(ApiTestFuzzer::SECOND_PART);
+  ApiTestFuzzer::SetUp(ApiTestFuzzer::SECOND_PART);
   ApiTestFuzzer::RunAllTests();
   ApiTestFuzzer::TearDown();
 }
 
 TEST(Threading3) {
-  ApiTestFuzzer::Setup(ApiTestFuzzer::THIRD_PART);
+  ApiTestFuzzer::SetUp(ApiTestFuzzer::THIRD_PART);
   ApiTestFuzzer::RunAllTests();
   ApiTestFuzzer::TearDown();
 }
 
 TEST(Threading4) {
-  ApiTestFuzzer::Setup(ApiTestFuzzer::FOURTH_PART);
+  ApiTestFuzzer::SetUp(ApiTestFuzzer::FOURTH_PART);
   ApiTestFuzzer::RunAllTests();
   ApiTestFuzzer::TearDown();
 }
@@ -9964,6 +10681,7 @@
 
 
 static int GetGlobalObjectsCount() {
+  i::Isolate::Current()->heap()->EnsureHeapIsIterable();
   int count = 0;
   i::HeapIterator it;
   for (i::HeapObject* object = it.next(); object != NULL; object = it.next())
@@ -9978,9 +10696,8 @@
   // the first garbage collection but some of the maps have already
   // been marked at that point.  Therefore some of the maps are not
   // collected until the second garbage collection.
-  HEAP->global_context_map();
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
   int count = GetGlobalObjectsCount();
 #ifdef DEBUG
   if (count != expected) HEAP->TracePathToGlobal();
@@ -10049,7 +10766,7 @@
   // weak callback of the first handle would be able to 'reallocate' it.
   handle1.MakeWeak(NULL, NewPersistentHandleCallback);
   handle2.Dispose();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -10057,7 +10774,7 @@
 
 void DisposeAndForceGcCallback(v8::Persistent<v8::Value> handle, void*) {
   to_be_disposed.Dispose();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   handle.Dispose();
 }
 
@@ -10073,7 +10790,7 @@
   }
   handle1.MakeWeak(NULL, DisposeAndForceGcCallback);
   to_be_disposed = handle2;
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 void DisposingCallback(v8::Persistent<v8::Value> handle, void*) {
@@ -10099,7 +10816,7 @@
   }
   handle2.MakeWeak(NULL, DisposingCallback);
   handle3.MakeWeak(NULL, HandleCreatingCallback);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -10141,7 +10858,8 @@
   v8::Persistent<Context> env = Context::New();
   env->Enter();
   v8::Handle<Value> value = NestedScope(env);
-  v8::Handle<String> str = value->ToString();
+  v8::Handle<String> str(value->ToString());
+  CHECK(!str.IsEmpty());
   env->Exit();
   env.Dispose();
 }
@@ -10149,7 +10867,8 @@
 
 THREADED_TEST(ExternalAllocatedMemory) {
   v8::HandleScope outer;
-  v8::Persistent<Context> env = Context::New();
+  v8::Persistent<Context> env(Context::New());
+  CHECK(!env.IsEmpty());
   const int kSize = 1024*1024;
   CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(kSize), kSize);
   CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(-kSize), 0);
@@ -10487,7 +11206,8 @@
   i::Handle<i::FunctionTemplateInfo> constructor(
       i::FunctionTemplateInfo::cast(internal_template->constructor()));
   CHECK(!constructor->access_check_info()->IsUndefined());
-  v8::Persistent<Context> context0 = Context::New(NULL, global_template);
+  v8::Persistent<Context> context0(Context::New(NULL, global_template));
+  CHECK(!context0.IsEmpty());
   CHECK(!constructor->access_check_info()->IsUndefined());
 }
 
@@ -10557,14 +11277,18 @@
 }
 
 
-v8::Handle<v8::String> a;
-v8::Handle<v8::String> h;
+static const char* kPropertyA = "a";
+static const char* kPropertyH = "h";
 
 static bool NamedGetAccessBlockAandH(Local<v8::Object> obj,
                                        Local<Value> name,
                                        v8::AccessType type,
                                        Local<Value> data) {
-  return !(name->Equals(a) || name->Equals(h));
+  if (!name->IsString()) return false;
+  i::Handle<i::String> name_handle =
+      v8::Utils::OpenHandle(String::Cast(*name));
+  return !name_handle->IsEqualTo(i::CStrVector(kPropertyA))
+      && !name_handle->IsEqualTo(i::CStrVector(kPropertyH));
 }
 
 
@@ -10573,9 +11297,7 @@
 
   // Create an environment with access check to the global object disabled by
   // default. The registered access checker will block access to properties
-  // a and h
-  a = v8_str("a");
-  h = v8_str("h");
+  // a and h.
   v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
   global_template->SetAccessCheckCallbacks(NamedGetAccessBlockAandH,
                                            IndexedGetAccessBlocker,
@@ -10915,7 +11637,7 @@
       {
         v8::Locker lock;
         // TODO(lrn): Perhaps create some garbage before collecting.
-        HEAP->CollectAllGarbage(false);
+        HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
         gc_count_++;
       }
       i::OS::Sleep(1);
@@ -11037,7 +11759,7 @@
     while (gc_during_apply_ < kRequiredGCs) {
       {
         v8::Locker lock;
-        HEAP->CollectAllGarbage(false);
+        HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
         gc_count_++;
       }
       i::OS::Sleep(1);
@@ -11187,6 +11909,7 @@
 // Test that we can still flatten a string if the components it is built up
 // from have been turned into 16-bit strings in the meantime.
 THREADED_TEST(MorphCompositeStringTest) {
+  char utf_buffer[129];
   const char* c_string = "Now is the time for all good men"
                          " to come to the aid of the party";
   uint16_t* two_byte_string = AsciiToTwoByteString(c_string);
@@ -11212,9 +11935,23 @@
         "var slice = lhs.substring(1, lhs.length - 1);"
         "var slice_on_cons = (lhs + rhs).substring(1, lhs.length *2 - 1);");
 
+    CHECK(!lhs->MayContainNonAscii());
+    CHECK(!rhs->MayContainNonAscii());
+
     MorphAString(*v8::Utils::OpenHandle(*lhs), &ascii_resource, &uc16_resource);
     MorphAString(*v8::Utils::OpenHandle(*rhs), &ascii_resource, &uc16_resource);
 
+    // This should convert to UTF-8 without flattening, since everything
+    // is ASCII.
+    Handle<String> cons = v8_compile("cons")->Run().As<String>();
+    CHECK_EQ(128, cons->Utf8Length());
+    int nchars = -1;
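+    // 129 is the 128 ASCII characters plus the NUL terminator, which a
+    // capacity of -1 (no limit) lets WriteUtf8 include in its return
+    // value; nchars reports the 128 characters without it.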
+    CHECK_EQ(129, cons->WriteUtf8(utf_buffer, -1, &nchars));
+    CHECK_EQ(128, nchars);
+    CHECK_EQ(0, strcmp(
+        utf_buffer,
+        "Now is the time for all good men to come to the aid of the party"
+        "Now is the time for all good men to come to the aid of the party"));
+
     // Now do some stuff to make sure the strings are flattened, etc.
     CompileRun(
         "/[^a-z]/.test(cons);"
@@ -11666,7 +12403,7 @@
                                   callback_templ->GetFunction());
   calling_context0->Exit();
 
-  // Expose context0 in context1 and setup a function that calls the
+  // Expose context0 in context1 and set up a function that calls the
   // callback function.
   calling_context1->Enter();
   calling_context1->Global()->Set(v8_str("context0"),
@@ -11753,13 +12490,15 @@
   i::Handle<i::ExternalPixelArray> pixels =
       i::Handle<i::ExternalPixelArray>::cast(
           FACTORY->NewExternalArray(kElementCount,
-                                       v8::kExternalPixelArray,
-                                       pixel_data));
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+                                    v8::kExternalPixelArray,
+                                    pixel_data));
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   for (int i = 0; i < kElementCount; i++) {
     pixels->set(i, i % 256);
   }
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   for (int i = 0; i < kElementCount; i++) {
     CHECK_EQ(i % 256, pixels->get_scalar(i));
     CHECK_EQ(i % 256, pixel_data[i]);
@@ -11822,18 +12561,21 @@
 
   i::Handle<i::Smi> value(i::Smi::FromInt(2));
   i::Handle<i::Object> no_failure;
-  no_failure = i::SetElement(jsobj, 1, value, i::kNonStrictMode);
+  no_failure =
+      i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
   ASSERT(!no_failure.is_null());
   i::USE(no_failure);
   CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(1)->ToObjectChecked())->value());
   *value.location() = i::Smi::FromInt(256);
-  no_failure = i::SetElement(jsobj, 1, value, i::kNonStrictMode);
+  no_failure =
+      i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
   ASSERT(!no_failure.is_null());
   i::USE(no_failure);
   CHECK_EQ(255,
            i::Smi::cast(jsobj->GetElement(1)->ToObjectChecked())->value());
   *value.location() = i::Smi::FromInt(-1);
-  no_failure = i::SetElement(jsobj, 1, value, i::kNonStrictMode);
+  no_failure =
+      i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
   ASSERT(!no_failure.is_null());
   i::USE(no_failure);
   CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(1)->ToObjectChecked())->value());
@@ -12235,11 +12977,13 @@
   i::Handle<ExternalArrayClass> array =
       i::Handle<ExternalArrayClass>::cast(
           FACTORY->NewExternalArray(kElementCount, array_type, array_data));
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   for (int i = 0; i < kElementCount; i++) {
     array->set(i, static_cast<ElementType>(i));
   }
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   for (int i = 0; i < kElementCount; i++) {
     CHECK_EQ(static_cast<int64_t>(i),
              static_cast<int64_t>(array->get_scalar(i)));
@@ -12357,7 +13101,8 @@
                       "  }"
                       "}"
                       "sum;");
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(28, result->Int32Value());
 
   // Make sure out-of-range loads do not throw.
@@ -12392,8 +13137,15 @@
                       "}"
                       "ext_array[7];");
   CHECK_EQ(0, result->Int32Value());
-  CHECK_EQ(
-      0, static_cast<int>(jsobj->GetElement(7)->ToObjectChecked()->Number()));
+  if (array_type == v8::kExternalDoubleArray ||
+      array_type == v8::kExternalFloatArray) {
+    CHECK_EQ(
+        static_cast<int>(i::OS::nan_value()),
+        static_cast<int>(jsobj->GetElement(7)->ToObjectChecked()->Number()));
+  } else {
+    CHECK_EQ(0, static_cast<int>(
+        jsobj->GetElement(7)->ToObjectChecked()->Number()));
+  }
 
   result = CompileRun("for (var i = 0; i < 8; i++) {"
                       "  ext_array[6] = '2.3';"
@@ -12546,11 +13298,6 @@
     const int kLargeElementCount = kXSize * kYSize * 4;
     ElementType* large_array_data =
         static_cast<ElementType*>(malloc(kLargeElementCount * element_size));
-    i::Handle<ExternalArrayClass> large_array =
-        i::Handle<ExternalArrayClass>::cast(
-            FACTORY->NewExternalArray(kLargeElementCount,
-                                         array_type,
-                                         array_data));
     v8::Handle<v8::Object> large_obj = v8::Object::New();
     // Set the elements to be the external array.
     large_obj->SetIndexedPropertiesToExternalArrayData(large_array_data,
@@ -12949,10 +13696,10 @@
     "}\n"
     "var x;eval('new foo();');";
   v8::Handle<v8::String> overview_src = v8::String::New(overview_source);
-  v8::Handle<Value> overview_result =
-      v8::Script::New(overview_src, origin)->Run();
-  ASSERT(!overview_result.IsEmpty());
-  ASSERT(overview_result->IsObject());
+  v8::Handle<Value> overview_result(
+      v8::Script::New(overview_src, origin)->Run());
+  CHECK(!overview_result.IsEmpty());
+  CHECK(overview_result->IsObject());
 
   // Test getting DETAILED information.
   const char *detailed_source =
@@ -12970,9 +13717,9 @@
   v8::ScriptOrigin detailed_origin(origin, line_offset, column_offset);
   v8::Handle<v8::Script> detailed_script(
       v8::Script::New(detailed_src, &detailed_origin));
-  v8::Handle<Value> detailed_result = detailed_script->Run();
-  ASSERT(!detailed_result.IsEmpty());
-  ASSERT(detailed_result->IsObject());
+  v8::Handle<Value> detailed_result(detailed_script->Run());
+  CHECK(!detailed_result.IsEmpty());
+  CHECK(detailed_result->IsObject());
 }
 
 
@@ -13030,6 +13777,137 @@
 }
 
 
+static void RethrowStackTraceHandler(v8::Handle<v8::Message> message,
+                                     v8::Handle<v8::Value> data) {
+  // Use the frame where JavaScript is called from.
+  v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+  CHECK(!stack_trace.IsEmpty());
+  int frame_count = stack_trace->GetFrameCount();
+  CHECK_EQ(3, frame_count);
+  int line_number[] = {1, 2, 5};
+  for (int i = 0; i < frame_count; i++) {
+    CHECK_EQ(line_number[i], stack_trace->GetFrame(i)->GetLineNumber());
+  }
+}
+
+
+// Test that we only return the stack trace at the site where the exception
+// is first thrown (not where it is rethrown).
+TEST(RethrowStackTrace) {
+  v8::HandleScope scope;
+  LocalContext env;
+  // We make sure that
+  // - the stack trace of the ReferenceError in g() is reported.
+  // - the stack trace is not overwritten when e1 is rethrown by t().
+  // - the stack trace of e2 does not overwrite that of e1.
+  const char* source =
+      "function g() { error; }          \n"
+      "function f() { g(); }            \n"
+      "function t(e) { throw e; }       \n"
+      "try {                            \n"
+      "  f();                           \n"
+      "} catch (e1) {                   \n"
+      "  try {                          \n"
+      "    error;                       \n"
+      "  } catch (e2) {                 \n"
+      "    t(e1);                       \n"
+      "  }                              \n"
+      "}                                \n";
+  v8::V8::AddMessageListener(RethrowStackTraceHandler);
+  v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+  CompileRun(source);
+  v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
+  v8::V8::RemoveMessageListeners(RethrowStackTraceHandler);
+}
+
+
+static void RethrowPrimitiveStackTraceHandler(v8::Handle<v8::Message> message,
+                                              v8::Handle<v8::Value> data) {
+  v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+  CHECK(!stack_trace.IsEmpty());
+  int frame_count = stack_trace->GetFrameCount();
+  CHECK_EQ(2, frame_count);
+  int line_number[] = {3, 7};
+  for (int i = 0; i < frame_count; i++) {
+    CHECK_EQ(line_number[i], stack_trace->GetFrame(i)->GetLineNumber());
+  }
+}
+
+
+// Test that we do not recognize identity for primitive exceptions, so
+// rethrowing one recaptures the stack trace at the rethrow site.
+TEST(RethrowPrimitiveStackTrace) {
+  v8::HandleScope scope;
+  LocalContext env;
+  // We do not capture the stack trace for non-Error objects at creation
+  // time. Instead, we capture the stack trace at the last throw.
+  const char* source =
+      "function g() { throw 404; }      \n"
+      "function f() { g(); }            \n"
+      "function t(e) { throw e; }       \n"
+      "try {                            \n"
+      "  f();                           \n"
+      "} catch (e1) {                   \n"
+      "  t(e1)                          \n"
+      "}                                \n";
+  v8::V8::AddMessageListener(RethrowPrimitiveStackTraceHandler);
+  v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+  CompileRun(source);
+  v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
+  v8::V8::RemoveMessageListeners(RethrowPrimitiveStackTraceHandler);
+}
+
+
+static void RethrowExistingStackTraceHandler(v8::Handle<v8::Message> message,
+                                              v8::Handle<v8::Value> data) {
+  // Use the frame where JavaScript is called from.
+  v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+  CHECK(!stack_trace.IsEmpty());
+  CHECK_EQ(1, stack_trace->GetFrameCount());
+  CHECK_EQ(1, stack_trace->GetFrame(0)->GetLineNumber());
+}
+
+
+// Test that the stack trace is captured where the error object is created,
+// not where it is thrown.
+TEST(RethrowExistingStackTrace) {
+  v8::HandleScope scope;
+  LocalContext env;
+  const char* source =
+      "var e = new Error();           \n"
+      "throw e;                       \n";
+  v8::V8::AddMessageListener(RethrowExistingStackTraceHandler);
+  v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+  CompileRun(source);
+  v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
+  v8::V8::RemoveMessageListeners(RethrowExistingStackTraceHandler);
+}
+
+
+static void RethrowBogusErrorStackTraceHandler(v8::Handle<v8::Message> message,
+                                               v8::Handle<v8::Value> data) {
+  // Use the frame where JavaScript is called from.
+  v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+  CHECK(!stack_trace.IsEmpty());
+  CHECK_EQ(1, stack_trace->GetFrameCount());
+  CHECK_EQ(2, stack_trace->GetFrame(0)->GetLineNumber());
+}
+
+
+// Test that the stack trace is captured where the bogus Error object is thrown.
+TEST(RethrowBogusErrorStackTrace) {
+  v8::HandleScope scope;
+  LocalContext env;
+  const char* source =
+      "var e = {__proto__: new Error()} \n"
+      "throw e;                         \n";
+  v8::V8::AddMessageListener(RethrowBogusErrorStackTraceHandler);
+  v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+  CompileRun(source);
+  v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
+  v8::V8::RemoveMessageListeners(RethrowBogusErrorStackTraceHandler);
+}
+
+
 v8::Handle<Value> AnalyzeStackOfEvalWithSourceURL(const v8::Arguments& args) {
   v8::HandleScope scope;
   v8::Handle<v8::StackTrace> stackTrace =
@@ -13070,14 +13948,71 @@
 
 
 // Test that idle notification can be handled and eventually returns true.
+// This just checks the contract of the IdleNotification() function,
+// and does not verify that it does reasonable work.
 THREADED_TEST(IdleNotification) {
-  bool rv = false;
-  for (int i = 0; i < 100; i++) {
-    rv = v8::V8::IdleNotification();
-    if (rv)
-      break;
+  v8::HandleScope scope;
+  LocalContext env;
+  {
+    // Create garbage in old-space to generate work for idle notification.
+    i::AlwaysAllocateScope always_allocate;
+    for (int i = 0; i < 100; i++) {
+      FACTORY->NewFixedArray(1000, i::TENURED);
+    }
   }
-  CHECK(rv == true);
+  bool finished_idle_work = false;
+  for (int i = 0; i < 100 && !finished_idle_work; i++) {
+    finished_idle_work = v8::V8::IdleNotification();
+  }
+  CHECK(finished_idle_work);
+}
+
+// Test that idle notification can be handled and eventually returns true.
+// This just checks the contract of the IdleNotification() function,
+// and does not verify that it does reasonable work.
+TEST(IdleNotificationWithSmallHint) {
+  v8::HandleScope scope;
+  LocalContext env;
+  {
+    // Create garbage in old-space to generate work for idle notification.
+    i::AlwaysAllocateScope always_allocate;
+    for (int i = 0; i < 100; i++) {
+      FACTORY->NewFixedArray(1000, i::TENURED);
+    }
+  }
+  intptr_t old_size = HEAP->SizeOfObjects();
+  bool finished_idle_work = false;
+  bool no_idle_work = v8::V8::IdleNotification(10);
+  for (int i = 0; i < 200 && !finished_idle_work; i++) {
+    finished_idle_work = v8::V8::IdleNotification(10);
+  }
+  intptr_t new_size = HEAP->SizeOfObjects();
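+  // Either the very first notification reported that no idle work was
+  // pending, or the idle rounds must have collected the tenured garbage
+  // created above and shrunk the heap.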
+  CHECK(finished_idle_work);
+  CHECK(no_idle_work || new_size < old_size);
+}
+
+
+// This just checks the contract of the IdleNotification() function,
+// and does not verify that it does reasonable work.
+TEST(IdleNotificationWithLargeHint) {
+  v8::HandleScope scope;
+  LocalContext env;
+  {
+    // Create garbage in old-space to generate work for idle notification.
+    i::AlwaysAllocateScope always_allocate;
+    for (int i = 0; i < 100; i++) {
+      FACTORY->NewFixedArray(1000, i::TENURED);
+    }
+  }
+  intptr_t old_size = HEAP->SizeOfObjects();
+  bool finished_idle_work = false;
+  bool no_idle_work = v8::V8::IdleNotification(900);
+  for (int i = 0; i < 200 && !finished_idle_work; i++) {
+    finished_idle_work = v8::V8::IdleNotification(900);
+  }
+  intptr_t new_size = HEAP->SizeOfObjects();
+  CHECK(finished_idle_work);
+  CHECK(no_idle_work || new_size < old_size);
 }
 
 
@@ -13168,6 +14103,63 @@
 }
 
 
+class VisitorImpl : public v8::ExternalResourceVisitor {
+ public:
+  VisitorImpl(TestResource* r1, TestResource* r2)
+      : resource1_(r1),
+        resource2_(r2),
+        found_resource1_(false),
+        found_resource2_(false) {}
+  virtual ~VisitorImpl() {}
+  virtual void VisitExternalString(v8::Handle<v8::String> string) {
+    if (!string->IsExternal()) {
+      CHECK(string->IsExternalAscii());
+      return;
+    }
+    v8::String::ExternalStringResource* resource =
+        string->GetExternalStringResource();
+    CHECK(resource);
+    if (resource1_ == resource) {
+      CHECK(!found_resource1_);
+      found_resource1_ = true;
+    }
+    if (resource2_ == resource) {
+      CHECK(!found_resource2_);
+      found_resource2_ = true;
+    }
+  }
+  void CheckVisitedResources() {
+    CHECK(found_resource1_);
+    CHECK(found_resource2_);
+  }
+
+ private:
+  v8::String::ExternalStringResource* resource1_;
+  v8::String::ExternalStringResource* resource2_;
+  bool found_resource1_;
+  bool found_resource2_;
+};
+
+TEST(VisitExternalStrings) {
+  v8::HandleScope scope;
+  LocalContext env;
+  const char* string = "Some string";
+  uint16_t* two_byte_string = AsciiToTwoByteString(string);
+  TestResource* resource1 = new TestResource(two_byte_string);
+  v8::Local<v8::String> string1 = v8::String::NewExternal(resource1);
+  TestResource* resource2 = new TestResource(two_byte_string);
+  v8::Local<v8::String> string2 = v8::String::NewExternal(resource2);
+
+  // We need to add usages for string1 and string2 to avoid warnings in
+  // GCC 4.7.
+  CHECK(string1->IsExternal());
+  CHECK(string2->IsExternal());
+
+  VisitorImpl visitor(resource1, resource2);
+  v8::V8::VisitExternalResources(&visitor);
+  visitor.CheckVisitedResources();
+}
+
+
 static double DoubleFromBits(uint64_t value) {
   double target;
   memcpy(&target, &value, sizeof(target));
@@ -13250,7 +14242,13 @@
     } else {
       uint64_t stored_bits = DoubleToBits(stored_number);
       // Check if quiet nan (bits 51..62 all set).
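+      // Layout sketch: a double has sign bit 63, exponent bits 62..52 and
+      // fraction bits 51..0, so (stored_bits >> 51) & 0xfff extracts the
+      // exponent plus the top fraction (quiet) bit: 0xfff is a quiet NaN
+      // with the quiet bit set, 0xffe one with it clear.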
+#if defined(V8_TARGET_ARCH_MIPS) && !defined(USE_SIMULATOR)
+      // The most significant fraction bit of a quiet NaN is 0 on the
+      // MIPS architecture; IEEE-754 allows this.
+      CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
+#else
       CHECK_EQ(0xfff, static_cast<int>((stored_bits >> 51) & 0xfff));
+#endif
     }
 
     // Check that Date::New preserves non-NaNs in the date range and
@@ -13263,7 +14261,13 @@
     } else {
       uint64_t stored_bits = DoubleToBits(stored_date);
       // Check if quiet nan (bits 51..62 all set).
+#if defined(V8_TARGET_ARCH_MIPS) && !defined(USE_SIMULATOR)
+      // The most significant fraction bit of a quiet NaN is 0 on the
+      // MIPS architecture; IEEE-754 allows this.
+      CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
+#else
       CHECK_EQ(0xfff, static_cast<int>((stored_bits >> 51) & 0xfff));
+#endif
     }
   }
 }
@@ -13272,7 +14276,8 @@
 static v8::Handle<Value> SpaghettiIncident(const v8::Arguments& args) {
   v8::HandleScope scope;
   v8::TryCatch tc;
-  v8::Handle<v8::String> str = args[0]->ToString();
+  v8::Handle<v8::String> str(args[0]->ToString());
+  USE(str);
   if (tc.HasCaught())
     return tc.ReThrow();
   return v8::Undefined();
@@ -13337,7 +14342,7 @@
     other_context->Enter();
     CompileRun(source_simple);
     other_context->Exit();
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     if (GetGlobalObjectsCount() == 1) break;
   }
   CHECK_GE(2, gc_count);
@@ -13359,7 +14364,7 @@
     other_context->Enter();
     CompileRun(source_eval);
     other_context->Exit();
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     if (GetGlobalObjectsCount() == 1) break;
   }
   CHECK_GE(2, gc_count);
@@ -13386,7 +14391,7 @@
     other_context->Enter();
     CompileRun(source_exception);
     other_context->Exit();
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     if (GetGlobalObjectsCount() == 1) break;
   }
   CHECK_GE(2, gc_count);
@@ -13417,6 +14422,17 @@
   CHECK_EQ(0, script_origin_g.ResourceLineOffset()->Int32Value());
 }
 
+THREADED_TEST(FunctionGetInferredName) {
+  v8::HandleScope scope;
+  LocalContext env;
+  v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
+  v8::Handle<v8::String> script = v8::String::New(
+      "var foo = { bar : { baz : function() {}}}; var f = foo.bar.baz;");
+  v8::Script::Compile(script, &origin)->Run();
+  v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("f")));
+  CHECK_EQ("foo.bar.baz", *v8::String::AsciiValue(f->GetInferredName()));
+}
 
 THREADED_TEST(ScriptLineNumber) {
   v8::HandleScope scope;
@@ -13434,6 +14450,41 @@
 }
 
 
+THREADED_TEST(ScriptColumnNumber) {
+  v8::HandleScope scope;
+  LocalContext env;
+  v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"),
+      v8::Integer::New(3), v8::Integer::New(2));
+  v8::Handle<v8::String> script = v8::String::New(
+      "function foo() {}\n\n     function bar() {}");
+  v8::Script::Compile(script, &origin)->Run();
+  v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("foo")));
+  v8::Local<v8::Function> bar = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("bar")));
+  CHECK_EQ(14, foo->GetScriptColumnNumber());
+  CHECK_EQ(17, bar->GetScriptColumnNumber());
+}
+
+
+THREADED_TEST(FunctionGetScriptId) {
+  v8::HandleScope scope;
+  LocalContext env;
+  v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"),
+      v8::Integer::New(3), v8::Integer::New(2));
+  v8::Handle<v8::String> scriptSource = v8::String::New(
+      "function foo() {}\n\n     function bar() {}");
+  v8::Local<v8::Script> script(v8::Script::Compile(scriptSource, &origin));
+  script->Run();
+  v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("foo")));
+  v8::Local<v8::Function> bar = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("bar")));
+  CHECK_EQ(script->Id(), foo->GetScriptId());
+  CHECK_EQ(script->Id(), bar->GetScriptId());
+}
+
+
 static v8::Handle<Value> GetterWhichReturns42(Local<String> name,
                                               const AccessorInfo& info) {
   return v8_num(42);
@@ -13604,26 +14655,26 @@
   v8::V8::AddGCEpilogueCallback(EpilogueCallback);
   CHECK_EQ(0, prologue_call_count);
   CHECK_EQ(0, epilogue_call_count);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(1, prologue_call_count);
   CHECK_EQ(1, epilogue_call_count);
   v8::V8::AddGCPrologueCallback(PrologueCallbackSecond);
   v8::V8::AddGCEpilogueCallback(EpilogueCallbackSecond);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(2, prologue_call_count);
   CHECK_EQ(2, epilogue_call_count);
   CHECK_EQ(1, prologue_call_count_second);
   CHECK_EQ(1, epilogue_call_count_second);
   v8::V8::RemoveGCPrologueCallback(PrologueCallback);
   v8::V8::RemoveGCEpilogueCallback(EpilogueCallback);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(2, prologue_call_count);
   CHECK_EQ(2, epilogue_call_count);
   CHECK_EQ(2, prologue_call_count_second);
   CHECK_EQ(2, epilogue_call_count_second);
   v8::V8::RemoveGCPrologueCallback(PrologueCallbackSecond);
   v8::V8::RemoveGCEpilogueCallback(EpilogueCallbackSecond);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(2, prologue_call_count);
   CHECK_EQ(2, epilogue_call_count);
   CHECK_EQ(2, prologue_call_count_second);
@@ -13694,7 +14745,7 @@
       "  for (var i = 0; i < 16; i++) values[i] = %_GetFromCache(0, keys[i]);"
       "  for (var i = 0; i < 16; i++) {"
       "    var v = %_GetFromCache(0, keys[i]);"
-      "    if (v !== values[i])"
+      "    if (v.toString() !== values[i].toString())"
       "      return 'Wrong value for ' + "
       "          keys[i] + ': ' + v + ' vs. ' + values[i];"
       "  };"
@@ -13840,7 +14891,7 @@
 void FailedAccessCheckCallbackGC(Local<v8::Object> target,
                                  v8::AccessType type,
                                  Local<v8::Value> data) {
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -14414,7 +15465,7 @@
                  "})()",
                  "ReferenceError: cell is not defined");
     CompileRun("cell = \"new_second\";");
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     ExpectString("readCell()", "new_second");
     ExpectString("readCell()", "new_second");
   }
@@ -14535,8 +15586,8 @@
 
   // RegExps are objects on which you can set properties.
   re->Set(v8_str("property"), v8::Integer::New(32));
-  v8::Handle<v8::Value> value = CompileRun("re.property");
-  ASSERT_EQ(32, value->Int32Value());
+  v8::Handle<v8::Value> value(CompileRun("re.property"));
+  CHECK_EQ(32, value->Int32Value());
 
   v8::TryCatch try_catch;
   re = v8::RegExp::New(v8_str("foo["), v8::RegExp::kNone);
@@ -14887,10 +15938,12 @@
   LocalContext context;
 
   // eval and the Function constructor allowed by default.
+  CHECK(context->IsCodeGenerationFromStringsAllowed());
   CheckCodeGenerationAllowed();
 
   // Disallow eval and the Function constructor.
   context->AllowCodeGenerationFromStrings(false);
+  CHECK(!context->IsCodeGenerationFromStringsAllowed());
   CheckCodeGenerationDisallowed();
 
   // Allow again.
@@ -14900,10 +15953,12 @@
   // Disallow but setting a global callback that will allow the calls.
   context->AllowCodeGenerationFromStrings(false);
   V8::SetAllowCodeGenerationFromStringsCallback(&CodeGenerationAllowed);
+  CHECK(!context->IsCodeGenerationFromStringsAllowed());
   CheckCodeGenerationAllowed();
 
   // Set a callback that disallows the code generation.
   V8::SetAllowCodeGenerationFromStringsCallback(&CodeGenerationDisallowed);
+  CHECK(!context->IsCodeGenerationFromStringsAllowed());
   CheckCodeGenerationDisallowed();
 }
 
@@ -15182,3 +16237,167 @@
 
   foreign_context.Dispose();
 }
+
+
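+// Bit field recording which call-completed callbacks have fired: callback 1
+// toggles bit 0 and callback 2 toggles bit 1, so the expected values below
+// read as bit masks.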
+uint8_t callback_fired = 0;
+
+
+void CallCompletedCallback1() {
+  i::OS::Print("Firing callback 1.\n");
+  callback_fired ^= 1;  // Toggle first bit.
+}
+
+
+void CallCompletedCallback2() {
+  i::OS::Print("Firing callback 2.\n");
+  callback_fired ^= 2;  // Toggle second bit.
+}
+
+
+Handle<Value> RecursiveCall(const Arguments& args) {
+  int32_t level = args[0]->Int32Value();
+  if (level < 3) {
+    level++;
+    i::OS::Print("Entering recursion level %d.\n", level);
+    char script[64];
+    i::Vector<char> script_vector(script, sizeof(script));
+    i::OS::SNPrintF(script_vector, "recursion(%d)", level);
+    CompileRun(script_vector.start());
+    i::OS::Print("Leaving recursion level %d.\n", level);
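+    // Call-completed callbacks must not fire while we are still inside the
+    // outermost invocation, regardless of recursion depth.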
+    CHECK_EQ(0, callback_fired);
+  } else {
+    i::OS::Print("Recursion ends.\n");
+    CHECK_EQ(0, callback_fired);
+  }
+  return Undefined();
+}
+
+
+TEST(CallCompletedCallback) {
+  v8::HandleScope scope;
+  LocalContext env;
+  v8::Handle<v8::FunctionTemplate> recursive_runtime =
+      v8::FunctionTemplate::New(RecursiveCall);
+  env->Global()->Set(v8_str("recursion"),
+                     recursive_runtime->GetFunction());
+  // Adding the same callback a second time has no effect.
+  v8::V8::AddCallCompletedCallback(CallCompletedCallback1);
+  v8::V8::AddCallCompletedCallback(CallCompletedCallback1);
+  v8::V8::AddCallCompletedCallback(CallCompletedCallback2);
+  i::OS::Print("--- Script (1) ---\n");
+  Local<Script> script =
+      v8::Script::Compile(v8::String::New("recursion(0)"));
+  script->Run();
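+  // Both callbacks fired exactly once when the outer call completed:
+  // bit 0 | bit 1 == 3.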
+  CHECK_EQ(3, callback_fired);
+
+  i::OS::Print("\n--- Script (2) ---\n");
+  callback_fired = 0;
+  v8::V8::RemoveCallCompletedCallback(CallCompletedCallback1);
+  script->Run();
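+  // Only callback 2 is still registered, so just bit 1 is set.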
+  CHECK_EQ(2, callback_fired);
+
+  i::OS::Print("\n--- Function ---\n");
+  callback_fired = 0;
+  Local<Function> recursive_function =
+      Local<Function>::Cast(env->Global()->Get(v8_str("recursion")));
+  v8::Handle<Value> args[] = { v8_num(0) };
+  recursive_function->Call(env->Global(), 1, args);
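+  // Calling the function directly through the API triggers the callbacks
+  // just like running a script does.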
+  CHECK_EQ(2, callback_fired);
+}
+
+
+void CallCompletedCallbackNoException() {
+  v8::HandleScope scope;
+  CompileRun("1+1;");
+}
+
+
+void CallCompletedCallbackException() {
+  v8::HandleScope scope;
+  CompileRun("throw 'second exception';");
+}
+
+
+TEST(CallCompletedCallbackOneException) {
+  v8::HandleScope scope;
+  LocalContext env;
+  v8::V8::AddCallCompletedCallback(CallCompletedCallbackNoException);
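+  // The test passes if the callback can still run JavaScript without
+  // crashing although the script below terminates with a pending exception.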
+  CompileRun("throw 'exception';");
+}
+
+
+TEST(CallCompletedCallbackTwoExceptions) {
+  v8::HandleScope scope;
+  LocalContext env;
+  v8::V8::AddCallCompletedCallback(CallCompletedCallbackException);
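+  // Here the callback itself throws while an exception is already pending;
+  // the test passes if this does not crash.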
+  CompileRun("throw 'first exception';");
+}
+
+
+static int probes_counter = 0;
+static int misses_counter = 0;
+static int updates_counter = 0;
+
+
+static int* LookupCounter(const char* name) {
+  if (strcmp(name, "c:V8.MegamorphicStubCacheProbes") == 0) {
+    return &probes_counter;
+  } else if (strcmp(name, "c:V8.MegamorphicStubCacheMisses") == 0) {
+    return &misses_counter;
+  } else if (strcmp(name, "c:V8.MegamorphicStubCacheUpdates") == 0) {
+    return &updates_counter;
+  }
+  return NULL;
+}
+
+
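+// fooify's call site sees two different receiver maps, so its call IC goes
+// megamorphic and every loop iteration exercises the megamorphic stub cache.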
+static const char* kMegamorphicTestProgram =
+    "function ClassA() { };"
+    "function ClassB() { };"
+    "ClassA.prototype.foo = function() { };"
+    "ClassB.prototype.foo = function() { };"
+    "function fooify(obj) { obj.foo(); };"
+    "var a = new ClassA();"
+    "var b = new ClassB();"
+    "for (var i = 0; i < 10000; i++) {"
+    "  fooify(a);"
+    "  fooify(b);"
+    "}";
+
+
+static void StubCacheHelper(bool primary) {
+  V8::SetCounterFunction(LookupCounter);
+  USE(kMegamorphicTestProgram);
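+  // The stub cache test flags only exist in debug builds, so in release
+  // builds this helper is effectively a no-op.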
+#ifdef DEBUG
+  i::FLAG_native_code_counters = true;
+  if (primary) {
+    i::FLAG_test_primary_stub_cache = true;
+  } else {
+    i::FLAG_test_secondary_stub_cache = true;
+  }
+  i::FLAG_crankshaft = false;
+  v8::HandleScope scope;
+  LocalContext env;
+  int initial_probes = probes_counter;
+  int initial_misses = misses_counter;
+  int initial_updates = updates_counter;
+  CompileRun(kMegamorphicTestProgram);
+  int probes = probes_counter - initial_probes;
+  int misses = misses_counter - initial_misses;
+  int updates = updates_counter - initial_updates;
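+  // A handful of misses and updates are expected while the ICs warm up, but
+  // the 2 * 10000 calls in the loop should be served almost entirely by
+  // stub cache probes.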
+  CHECK_LT(updates, 10);
+  CHECK_LT(misses, 10);
+  CHECK_GE(probes, 10000);
+#endif
+}
+
+
+TEST(SecondaryStubCache) {
+  StubCacheHelper(true);
+}
+
+
+TEST(PrimaryStubCache) {
+  StubCacheHelper(false);
+}
+
diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc
index 839b7f5..815e618 100644
--- a/test/cctest/test-assembler-ia32.cc
+++ b/test/cctest/test-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -93,15 +93,15 @@
   Label L, C;
 
   __ mov(edx, Operand(esp, 4));
-  __ xor_(eax, Operand(eax));  // clear eax
+  __ xor_(eax, eax);  // clear eax
   __ jmp(&C);
 
   __ bind(&L);
-  __ add(eax, Operand(edx));
-  __ sub(Operand(edx), Immediate(1));
+  __ add(eax, edx);
+  __ sub(edx, Immediate(1));
 
   __ bind(&C);
-  __ test(edx, Operand(edx));
+  __ test(edx, edx);
   __ j(not_zero, &L);
   __ ret(0);
 
@@ -135,11 +135,11 @@
   __ jmp(&C);
 
   __ bind(&L);
-  __ imul(eax, Operand(edx));
-  __ sub(Operand(edx), Immediate(1));
+  __ imul(eax, edx);
+  __ sub(edx, Immediate(1));
 
   __ bind(&C);
-  __ test(edx, Operand(edx));
+  __ test(edx, edx);
   __ j(not_zero, &L);
   __ ret(0);
 
@@ -275,10 +275,10 @@
   __ subsd(xmm0, xmm1);
   __ divsd(xmm0, xmm1);
   // Copy xmm0 to st(0) using eight bytes of stack.
-  __ sub(Operand(esp), Immediate(8));
+  __ sub(esp, Immediate(8));
   __ movdbl(Operand(esp, 0), xmm0);
   __ fld_d(Operand(esp, 0));
-  __ add(Operand(esp), Immediate(8));
+  __ add(esp, Immediate(8));
   __ ret(0);
 
   CodeDesc desc;
@@ -314,12 +314,12 @@
   v8::internal::byte buffer[256];
   Assembler assm(Isolate::Current(), buffer, sizeof buffer);
   __ mov(eax, Operand(esp, 4));
-  __ cvtsi2sd(xmm0, Operand(eax));
+  __ cvtsi2sd(xmm0, eax);
   // Copy xmm0 to st(0) using eight bytes of stack.
-  __ sub(Operand(esp), Immediate(8));
+  __ sub(esp, Immediate(8));
   __ movdbl(Operand(esp, 0), xmm0);
   __ fld_d(Operand(esp, 0));
-  __ add(Operand(esp), Immediate(8));
+  __ add(esp, Immediate(8));
   __ ret(0);
   CodeDesc desc;
   assm.GetCode(&desc);
@@ -408,4 +408,72 @@
   __ nop();
 }
 
+
+TEST(AssemblerMultiByteNop) {
+  InitializeVM();
+  v8::HandleScope scope;
+  v8::internal::byte buffer[1024];
+  Assembler assm(Isolate::Current(), buffer, sizeof(buffer));
+  __ push(ebx);
+  __ push(ecx);
+  __ push(edx);
+  __ push(edi);
+  __ push(esi);
+  __ mov(eax, 1);
+  __ mov(ebx, 2);
+  __ mov(ecx, 3);
+  __ mov(edx, 4);
+  __ mov(edi, 5);
+  __ mov(esi, 6);
+  for (int i = 0; i < 16; i++) {
+    int before = assm.pc_offset();
+    __ Nop(i);
+    CHECK_EQ(assm.pc_offset() - before, i);
+  }
+
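+  // Verify that emitting the multi-byte nops above did not clobber any of
+  // the registers that were initialized before the loop.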
+  Label fail;
+  __ cmp(eax, 1);
+  __ j(not_equal, &fail);
+  __ cmp(ebx, 2);
+  __ j(not_equal, &fail);
+  __ cmp(ecx, 3);
+  __ j(not_equal, &fail);
+  __ cmp(edx, 4);
+  __ j(not_equal, &fail);
+  __ cmp(edi, 5);
+  __ j(not_equal, &fail);
+  __ cmp(esi, 6);
+  __ j(not_equal, &fail);
+  __ mov(eax, 42);
+  __ pop(esi);
+  __ pop(edi);
+  __ pop(edx);
+  __ pop(ecx);
+  __ pop(ebx);
+  __ ret(0);
+  __ bind(&fail);
+  __ mov(eax, 13);
+  __ pop(esi);
+  __ pop(edi);
+  __ pop(edx);
+  __ pop(ecx);
+  __ pop(ebx);
+  __ ret(0);
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Code* code = Code::cast(HEAP->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+  CHECK(code->IsCode());
+
+  F0 f = FUNCTION_CAST<F0>(code->entry());
+  int res = f();
+  CHECK_EQ(42, res);
+}
+
+
 #undef __
diff --git a/test/cctest/test-assembler-mips.cc b/test/cctest/test-assembler-mips.cc
index a6c76f0..6985433 100644
--- a/test/cctest/test-assembler-mips.cc
+++ b/test/cctest/test-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -219,21 +219,21 @@
 
   // Bit twiddling instructions & conditional moves.
   // Uses t0-t7 as set above.
-  __ clz(v0, t0);       // 29
-  __ clz(v1, t1);       // 19
+  __ Clz(v0, t0);       // 29
+  __ Clz(v1, t1);       // 19
   __ addu(v0, v0, v1);  // 48
-  __ clz(v1, t2);       // 3
+  __ Clz(v1, t2);       // 3
   __ addu(v0, v0, v1);  // 51
-  __ clz(v1, t7);       // 0
+  __ Clz(v1, t7);       // 0
   __ addu(v0, v0, v1);  // 51
   __ Branch(&error, ne, v0, Operand(51));
-  __ movn(a0, t3, t0);  // Move a0<-t3 (t0 is NOT 0).
+  __ Movn(a0, t3, t0);  // Move a0<-t3 (t0 is NOT 0).
   __ Ins(a0, t1, 12, 8);  // 0x7ff34fff
   __ Branch(&error, ne, a0, Operand(0x7ff34fff));
-  __ movz(a0, t6, t7);    // a0 not updated (t7 is NOT 0).
+  __ Movz(a0, t6, t7);    // a0 not updated (t7 is NOT 0).
   __ Ext(a1, a0, 8, 12);  // 0x34f
   __ Branch(&error, ne, a1, Operand(0x34f));
-  __ movz(a0, t6, v1);    // a0<-t6, v0 is 0, from 8 instr back.
+  __ Movz(a0, t6, v1);    // a0<-t6, v0 is 0, from 8 instr back.
   __ Branch(&error, ne, a0, Operand(t6));
 
   // Everything was correctly executed. Load the expected result.
@@ -579,8 +579,13 @@
 
     __ bind(&neither_is_nan);
 
-    __ c(OLT, D, f6, f4, 2);
-    __ bc1t(&less_than, 2);
+    if (kArchVariant == kLoongson) {
+      __ c(OLT, D, f6, f4);
+      __ bc1t(&less_than);
+    } else {
+      __ c(OLT, D, f6, f4, 2);
+      __ bc1t(&less_than, 2);
+    }
     __ nop();
     __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
     __ Branch(&outa_here);
@@ -774,7 +779,7 @@
   Assembler assm(Isolate::Current(), NULL, 0);
   Label L, C;
 
-  if (CpuFeatures::IsSupported(FPU) && mips32r2) {
+  if (CpuFeatures::IsSupported(FPU) && kArchVariant == kMips32r2) {
     CpuFeatures::Scope scope(FPU);
 
     // Load all structure elements to registers.
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index 28f7c9b..d81923f 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -36,6 +36,7 @@
 #include "cctest.h"
 
 using v8::internal::Assembler;
+using v8::internal::Code;
 using v8::internal::CodeDesc;
 using v8::internal::FUNCTION_CAST;
 using v8::internal::Immediate;
@@ -53,6 +54,7 @@
 using v8::internal::r8;
 using v8::internal::r9;
 using v8::internal::rax;
+using v8::internal::rbx;
 using v8::internal::rbp;
 using v8::internal::rcx;
 using v8::internal::rdi;
@@ -86,8 +88,18 @@
 #define __ assm.
 
 
+static v8::Persistent<v8::Context> env;
+
+
+static void InitializeVM() {
+  if (env.IsEmpty()) {
+    env = v8::Context::New();
+  }
+}
+
+
 TEST(AssemblerX64ReturnOperation) {
-  OS::Setup();
+  OS::SetUp();
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -109,7 +121,7 @@
 }
 
 TEST(AssemblerX64StackOperations) {
-  OS::Setup();
+  OS::SetUp();
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -141,7 +153,7 @@
 }
 
 TEST(AssemblerX64ArithmeticOperations) {
-  OS::Setup();
+  OS::SetUp();
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -163,7 +175,7 @@
 }
 
 TEST(AssemblerX64ImulOperation) {
-  OS::Setup();
+  OS::SetUp();
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -191,7 +203,7 @@
 }
 
 TEST(AssemblerX64MemoryOperands) {
-  OS::Setup();
+  OS::SetUp();
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -225,7 +237,7 @@
 }
 
 TEST(AssemblerX64ControlFlow) {
-  OS::Setup();
+  OS::SetUp();
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -254,7 +266,7 @@
 }
 
 TEST(AssemblerX64LoopImmediates) {
-  OS::Setup();
+  OS::SetUp();
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -359,4 +371,73 @@
   __ nop();
 }
 
+
+TEST(AssemblerMultiByteNop) {
+  InitializeVM();
+  v8::HandleScope scope;
+  v8::internal::byte buffer[1024];
+  Assembler assm(Isolate::Current(), buffer, sizeof(buffer));
+  __ push(rbx);
+  __ push(rcx);
+  __ push(rdx);
+  __ push(rdi);
+  __ push(rsi);
+  __ movq(rax, Immediate(1));
+  __ movq(rbx, Immediate(2));
+  __ movq(rcx, Immediate(3));
+  __ movq(rdx, Immediate(4));
+  __ movq(rdi, Immediate(5));
+  __ movq(rsi, Immediate(6));
+  for (int i = 0; i < 16; i++) {
+    int before = assm.pc_offset();
+    __ Nop(i);
+    CHECK_EQ(assm.pc_offset() - before, i);
+  }
+
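+  // As in the ia32 version, verify that the nops left all registers intact.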
+  Label fail;
+  __ cmpq(rax, Immediate(1));
+  __ j(not_equal, &fail);
+  __ cmpq(rbx, Immediate(2));
+  __ j(not_equal, &fail);
+  __ cmpq(rcx, Immediate(3));
+  __ j(not_equal, &fail);
+  __ cmpq(rdx, Immediate(4));
+  __ j(not_equal, &fail);
+  __ cmpq(rdi, Immediate(5));
+  __ j(not_equal, &fail);
+  __ cmpq(rsi, Immediate(6));
+  __ j(not_equal, &fail);
+  __ movq(rax, Immediate(42));
+  __ pop(rsi);
+  __ pop(rdi);
+  __ pop(rdx);
+  __ pop(rcx);
+  __ pop(rbx);
+  __ ret(0);
+  __ bind(&fail);
+  __ movq(rax, Immediate(13));
+  __ pop(rsi);
+  __ pop(rdi);
+  __ pop(rdx);
+  __ pop(rcx);
+  __ pop(rbx);
+  __ ret(0);
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Code* code = Code::cast(HEAP->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      v8::internal::Handle<v8::internal::Object>(
+          HEAP->undefined_value()))->ToObjectChecked());
+  CHECK(code->IsCode());
+
+  F0 f = FUNCTION_CAST<F0>(code->entry());
+  int res = f();
+  CHECK_EQ(42, res);
+}
+
+
 #undef __
diff --git a/test/cctest/test-ast.cc b/test/cctest/test-ast.cc
index 2aa7207..80c7fdf 100644
--- a/test/cctest/test-ast.cc
+++ b/test/cctest/test-ast.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -40,7 +40,8 @@
   CHECK_EQ(0, list->length());
 
   ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
-  AstNode* node = new(ZONE) EmptyStatement();
+  AstNodeFactory<AstNullVisitor> factory(Isolate::Current());
+  AstNode* node = factory.NewEmptyStatement();
   list->Add(node);
   CHECK_EQ(1, list->length());
   CHECK_EQ(node, list->at(0));
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index 2d9b012..9ca0b0a 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -270,8 +270,7 @@
   CHECK(!fun.is_null());
   bool has_pending_exception;
   Handle<JSObject> global(Isolate::Current()->context()->global());
-  Handle<Object> result =
-      Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  Execution::Call(fun, global, 0, NULL, &has_pending_exception);
   CHECK(has_pending_exception);
   CHECK_EQ(42.0, Isolate::Current()->pending_exception()->
            ToObjectChecked()->Number());
@@ -305,10 +304,11 @@
   Handle<Object> fun1(fun1_object->ToObjectChecked());
   CHECK(fun1->IsJSFunction());
 
-  Object** argv[1] = {
-    Handle<Object>::cast(FACTORY->LookupAsciiSymbol("hello")).location()
-  };
-  Execution::Call(Handle<JSFunction>::cast(fun1), global, 1, argv,
+  Handle<Object> argv[] = { FACTORY->LookupAsciiSymbol("hello") };
+  Execution::Call(Handle<JSFunction>::cast(fun1),
+                  global,
+                  ARRAY_SIZE(argv),
+                  argv,
                   &has_pending_exception);
   CHECK(!has_pending_exception);
 }
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index f567a0f..b10e688 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -216,7 +216,7 @@
 TEST(CrashIfStoppingLastNonExistentProfile) {
   InitializeVM();
   TestSetup test_setup;
-  CpuProfiler::Setup();
+  CpuProfiler::SetUp();
   CpuProfiler::StartProfiling("1");
   CpuProfiler::StopProfiling("2");
   CpuProfiler::StartProfiling("1");
@@ -268,7 +268,7 @@
 TEST(DeleteAllCpuProfiles) {
   InitializeVM();
   TestSetup test_setup;
-  CpuProfiler::Setup();
+  CpuProfiler::SetUp();
   CHECK_EQ(0, CpuProfiler::GetProfilesCount());
   CpuProfiler::DeleteAllProfiles();
   CHECK_EQ(0, CpuProfiler::GetProfilesCount());
diff --git a/test/cctest/test-dataflow.cc b/test/cctest/test-dataflow.cc
index ad48f55..a63008d 100644
--- a/test/cctest/test-dataflow.cc
+++ b/test/cctest/test-dataflow.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,16 +36,17 @@
 
 TEST(BitVector) {
   v8::internal::V8::Initialize(NULL);
-  ZoneScope zone(Isolate::Current(), DELETE_ON_EXIT);
+  ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+  Zone* zone = ZONE;
   {
-    BitVector v(15);
+    BitVector v(15, zone);
     v.Add(1);
     CHECK(v.Contains(1));
     v.Remove(0);
     CHECK(!v.Contains(0));
     v.Add(0);
     v.Add(1);
-    BitVector w(15);
+    BitVector w(15, zone);
     w.Add(1);
     v.Intersect(w);
     CHECK(!v.Contains(0));
@@ -53,7 +54,7 @@
   }
 
   {
-    BitVector v(64);
+    BitVector v(64, zone);
     v.Add(27);
     v.Add(30);
     v.Add(31);
@@ -71,9 +72,9 @@
   }
 
   {
-    BitVector v(15);
+    BitVector v(15, zone);
     v.Add(0);
-    BitVector w(15);
+    BitVector w(15, zone);
     w.Add(1);
     v.Union(w);
     CHECK(v.Contains(0));
@@ -81,13 +82,13 @@
   }
 
   {
-    BitVector v(15);
+    BitVector v(15, zone);
     v.Add(0);
-    BitVector w(15);
+    BitVector w(15, zone);
     w = v;
     CHECK(w.Contains(0));
     w.Add(1);
-    BitVector u(w);
+    BitVector u(w, zone);
     CHECK(u.Contains(0));
     CHECK(u.Contains(1));
     v.Union(w);
@@ -96,9 +97,9 @@
   }
 
   {
-    BitVector v(35);
+    BitVector v(35, zone);
     v.Add(0);
-    BitVector w(35);
+    BitVector w(35, zone);
     w.Add(33);
     v.Union(w);
     CHECK(v.Contains(0));
@@ -106,15 +107,15 @@
   }
 
   {
-    BitVector v(35);
+    BitVector v(35, zone);
     v.Add(32);
     v.Add(33);
-    BitVector w(35);
+    BitVector w(35, zone);
     w.Add(33);
     v.Intersect(w);
     CHECK(!v.Contains(32));
     CHECK(v.Contains(33));
-    BitVector r(35);
+    BitVector r(35, zone);
     r.CopyFrom(v);
     CHECK(!r.Contains(32));
     CHECK(r.Contains(33));
diff --git a/test/cctest/test-date.cc b/test/cctest/test-date.cc
new file mode 100644
index 0000000..903a63a
--- /dev/null
+++ b/test/cctest/test-date.cc
@@ -0,0 +1,168 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "global-handles.h"
+#include "snapshot.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+class DateCacheMock: public DateCache {
+ public:
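+  // A DST rule: year == 0 matches any year, start_day/end_day == 0 stand for
+  // the first Sunday of the month (see ComputeRuleDay), and transitions are
+  // compared against 2am local time in Match.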
+  struct Rule {
+    int year, start_month, start_day, end_month, end_day, offset_sec;
+  };
+
+  DateCacheMock(int local_offset, Rule* rules, int rules_count)
+      : local_offset_(local_offset), rules_(rules), rules_count_(rules_count) {}
+
+ protected:
+  virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
+    int days = DaysFromTime(time_sec * 1000);
+    int time_in_day_sec = TimeInDay(time_sec * 1000, days) / 1000;
+    int year, month, day;
+    YearMonthDayFromDays(days, &year, &month, &day);
+    Rule* rule = FindRuleFor(year, month, day, time_in_day_sec);
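+    // The date cache expects the offset in milliseconds.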
+    return rule == NULL ? 0 : rule->offset_sec * 1000;
+  }
+
+
+  virtual int GetLocalOffsetFromOS() {
+    return local_offset_;
+  }
+
+ private:
+  Rule* FindRuleFor(int year, int month, int day, int time_in_day_sec) {
+    Rule* result = NULL;
+    for (int i = 0; i < rules_count_; i++)
+      if (Match(&rules_[i], year, month, day, time_in_day_sec)) {
+        result = &rules_[i];
+      }
+    return result;
+  }
+
+
+  bool Match(Rule* rule, int year, int month, int day, int time_in_day_sec) {
+    if (rule->year != 0 && rule->year != year) return false;
+    if (rule->start_month > month) return false;
+    if (rule->end_month < month) return false;
+    int start_day = ComputeRuleDay(year, rule->start_month, rule->start_day);
+    if (rule->start_month == month && start_day > day) return false;
+    if (rule->start_month == month && start_day == day &&
+        2 * 3600 > time_in_day_sec)
+      return false;
+    int end_day = ComputeRuleDay(year, rule->end_month, rule->end_day);
+    if (rule->end_month == month && end_day < day) return false;
+    if (rule->end_month == month && end_day == day &&
+        2 * 3600 <= time_in_day_sec)
+      return false;
+    return true;
+  }
+
+
+  int ComputeRuleDay(int year, int month, int day) {
+    if (day != 0) return day;
+    int days = DaysFromYearMonth(year, month);
+    // Find the first Sunday of the month.
+    while (Weekday(days + day) != 6) day++;
+    return day + 1;
+  }
+
+  int local_offset_;
+  Rule* rules_;
+  int rules_count_;
+};
+
+static int64_t TimeFromYearMonthDay(DateCache* date_cache,
+                                    int year,
+                                    int month,
+                                    int day) {
+  int64_t result = date_cache->DaysFromYearMonth(year, month);
+  return (result + day - 1) * DateCache::kMsPerDay;
+}
+
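+// Verify that ToLocal agrees with the sum of the mock's local offset and
+// the DST offset for the given UTC time.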
+static void CheckDST(int64_t time) {
+  Isolate* isolate = Isolate::Current();
+  DateCache* date_cache = isolate->date_cache();
+  int64_t actual = date_cache->ToLocal(time);
+  int64_t expected = time + date_cache->GetLocalOffsetFromOS() +
+                     date_cache->GetDaylightSavingsOffsetFromOS(time / 1000);
+  CHECK_EQ(actual, expected);
+}
+
+
+TEST(DaylightSavingsTime) {
+  LocalContext context;
+  v8::HandleScope scope;
+  Isolate* isolate = Isolate::Current();
+  DateCacheMock::Rule rules[] = {
+    {0, 2, 0, 10, 0, 3600},  // DST from March to November in any year.
+    {2010, 2, 0, 7, 20, 3600},  // DST from March to August 20 in 2010.
+    {2010, 7, 20, 8, 10, 0},  // No DST from August 20 to September 10 in 2010.
+    {2010, 8, 10, 10, 0, 3600},  // DST from September 10 to November in 2010.
+  };
+
+  int local_offset_ms = -36000000;  // -10 hours.
+
+  DateCacheMock* date_cache =
+    new DateCacheMock(local_offset_ms, rules, ARRAY_SIZE(rules));
+
+  isolate->set_date_cache(date_cache);
+
+  int64_t start_of_2010 = TimeFromYearMonthDay(date_cache, 2010, 0, 1);
+  int64_t start_of_2011 = TimeFromYearMonthDay(date_cache, 2011, 0, 1);
+  int64_t august_20 = TimeFromYearMonthDay(date_cache, 2010, 7, 20);
+  int64_t september_10 = TimeFromYearMonthDay(date_cache, 2010, 8, 10);
+  CheckDST((august_20 + september_10) / 2);
+  CheckDST(september_10);
+  CheckDST(september_10 + 2 * 3600);
+  CheckDST(september_10 + 2 * 3600 - 1000);
+  CheckDST(august_20 + 2 * 3600);
+  CheckDST(august_20 + 2 * 3600 - 1000);
+  CheckDST(august_20);
+  // Check each day of 2010.
+  for (int64_t time = start_of_2011 + 2 * 3600;
+       time >= start_of_2010;
+       time -= DateCache::kMsPerDay) {
+    CheckDST(time);
+    CheckDST(time - 1000);
+    CheckDST(time + 1000);
+  }
+  // Check one day from 2010 to 2100.
+  for (int year = 2100; year >= 2010; year--) {
+    CheckDST(TimeFromYearMonthDay(date_cache, year, 5, 5));
+  }
+  CheckDST((august_20 + september_10) / 2);
+  CheckDST(september_10);
+  CheckDST(september_10 + 2 * 3600);
+  CheckDST(september_10 + 2 * 3600 - 1000);
+  CheckDST(august_20 + 2 * 3600);
+  CheckDST(august_20 + 2 * 3600 - 1000);
+  CheckDST(august_20);
+}
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 45da6dc..ffa8458 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -409,11 +409,8 @@
 
 
 static Handle<Code> ComputeCallDebugBreak(int argc) {
-  CALL_HEAP_FUNCTION(
-      v8::internal::Isolate::Current(),
-      v8::internal::Isolate::Current()->stub_cache()->ComputeCallDebugBreak(
-          argc, Code::CALL_IC),
-      Code);
+  return Isolate::Current()->stub_cache()->ComputeCallDebugBreak(argc,
+                                                                 Code::CALL_IC);
 }
 
 
@@ -425,8 +422,8 @@
   CHECK_EQ(NULL, Isolate::Current()->debug()->debug_info_list_);
 
   // Collect garbage to ensure weak handles are cleared.
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
 
  // Iterate the heap and check that there are no debugger-related objects left.
   HeapIterator iterator;
@@ -859,7 +856,7 @@
 
   if (event == v8::Break) {
     break_point_hit_count++;
-    v8::Handle<v8::Function> fun = v8::Handle<v8::Function>::Cast(data);
+    CHECK(data->IsFunction());
     ClearBreakPoint(debug_event_remove_break_point);
   }
 }
@@ -944,7 +941,7 @@
       HEAP->CollectGarbage(v8::internal::NEW_SPACE);
     } else {
       // Mark sweep compact.
-      HEAP->CollectAllGarbage(true);
+      HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     }
   }
 }
@@ -1417,8 +1414,7 @@
 // Call the function three times with different garbage collections in between
 // and make sure that the break point survives.
 static void CallAndGC(v8::Local<v8::Object> recv,
-                      v8::Local<v8::Function> f,
-                      bool force_compaction) {
+                      v8::Local<v8::Function> f) {
   break_point_hit_count = 0;
 
   for (int i = 0; i < 3; i++) {
@@ -1432,14 +1428,15 @@
     CHECK_EQ(2 + i * 3, break_point_hit_count);
 
     // Mark sweep (and perhaps compact) and call function.
-    HEAP->CollectAllGarbage(force_compaction);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     f->Call(recv, 0, NULL);
     CHECK_EQ(3 + i * 3, break_point_hit_count);
   }
 }
 
 
-static void TestBreakPointSurviveGC(bool force_compaction) {
+// Test that a break point can be set at a return store location.
+TEST(BreakPointSurviveGC) {
   break_point_hit_count = 0;
   v8::HandleScope scope;
   DebugLocalContext env;
@@ -1450,50 +1447,45 @@
 
   // Test IC store break point with garbage collection.
   {
-    v8::Local<v8::Function> bar =
-        CompileFunction(&env, "function foo(){}", "foo");
+    CompileFunction(&env, "function foo(){}", "foo");
     foo = CompileFunction(&env, "function foo(){bar=0;}", "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
   // Test IC load break point with garbage collection.
   {
-    v8::Local<v8::Function> bar =
-        CompileFunction(&env, "function foo(){}", "foo");
+    CompileFunction(&env, "function foo(){}", "foo");
     foo = CompileFunction(&env, "bar=1;function foo(){var x=bar;}", "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
   // Test IC call break point with garbage collection.
   {
-    v8::Local<v8::Function> bar =
-        CompileFunction(&env, "function foo(){}", "foo");
+    CompileFunction(&env, "function foo(){}", "foo");
     foo = CompileFunction(&env,
                           "function bar(){};function foo(){bar();}",
                           "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
   // Test return break point with garbage collection.
   {
-    v8::Local<v8::Function> bar =
-        CompileFunction(&env, "function foo(){}", "foo");
+    CompileFunction(&env, "function foo(){}", "foo");
     foo = CompileFunction(&env, "function foo(){}", "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
   // Test non IC break point with garbage collection.
   {
-    v8::Local<v8::Function> bar =
-        CompileFunction(&env, "function foo(){}", "foo");
+    CompileFunction(&env, "function foo(){}", "foo");
     foo = CompileFunction(&env, "function foo(){var bar=0;}", "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
 
   v8::Debug::SetDebugEventListener(NULL);
@@ -1501,13 +1493,6 @@
 }
 
 
-// Test that a break point can be set at a return store location.
-TEST(BreakPointSurviveGC) {
-  TestBreakPointSurviveGC(false);
-  TestBreakPointSurviveGC(true);
-}
-
-
 // Test that break points can be set using the global Debug object.
 TEST(BreakPointThroughJavaScript) {
   break_point_hit_count = 0;
@@ -2259,7 +2244,7 @@
   }
   f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   SetScriptBreakPointByNameFromJS("test.html", 3, -1);
 
@@ -2753,14 +2738,14 @@
   v8::Handle<v8::Value> args[kArgc] = { a };
   foo->Call(env->Global(), kArgc, args);
 
-  // Setup break point and step through the function.
+  // Set up break point and step through the function.
   SetBreakPoint(foo, 3);
   step_action = StepNext;
   break_point_hit_count = 0;
   foo->Call(env->Global(), kArgc, args);
 
   // With stepping all break locations are hit.
-  CHECK_EQ(33, break_point_hit_count);
+  CHECK_EQ(34, break_point_hit_count);
 
   v8::Debug::SetDebugEventListener(NULL);
   CheckDebuggerUnloaded();
@@ -2800,14 +2785,14 @@
   v8::Handle<v8::Value> args[kArgc] = { a };
   foo->Call(env->Global(), kArgc, args);
 
-  // Setup break point and step through the function.
+  // Set up break point and step through the function.
   SetBreakPoint(foo, 3);
   step_action = StepNext;
   break_point_hit_count = 0;
   foo->Call(env->Global(), kArgc, args);
 
   // With stepping all break locations are hit.
-  CHECK_EQ(32, break_point_hit_count);
+  CHECK_EQ(33, break_point_hit_count);
 
   v8::Debug::SetDebugEventListener(NULL);
   CheckDebuggerUnloaded();
@@ -2844,14 +2829,14 @@
   // Call function without any break points to ensure inlining is in place.
   foo->Call(env->Global(), 0, NULL);
 
-  // Setup break point and step through the function.
+  // Set up break point and step through the function.
   SetBreakPoint(foo, 4);
   step_action = StepNext;
   break_point_hit_count = 0;
   foo->Call(env->Global(), 0, NULL);
 
   // With stepping all break locations are hit.
-  CHECK_EQ(53, break_point_hit_count);
+  CHECK_EQ(54, break_point_hit_count);
 
   v8::Debug::SetDebugEventListener(NULL);
   CheckDebuggerUnloaded();
@@ -2879,7 +2864,7 @@
   // Call function without any break points to ensure inlining is in place.
   foo->Call(env->Global(), 0, NULL);
 
-  // Setup break point and step through the function.
+  // Set up break point and step through the function.
   SetBreakPoint(foo, 3);
   step_action = StepNext;
   break_point_hit_count = 0;
@@ -2895,7 +2880,7 @@
 
 // Test of the stepping mechanism for named load in a loop.
 TEST(DebugStepNamedStoreLoop) {
-  DoDebugStepNamedStoreLoop(22);
+  DoDebugStepNamedStoreLoop(23);
 }
 
 
@@ -3267,7 +3252,7 @@
   v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
   result = foo->Call(env->Global(), argc, argv_10);
   CHECK_EQ(5, result->Int32Value());
-  CHECK_EQ(50, break_point_hit_count);
+  CHECK_EQ(51, break_point_hit_count);
 
   // Looping 100 times.
   step_action = StepIn;
@@ -3275,7 +3260,7 @@
   v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
   result = foo->Call(env->Global(), argc, argv_100);
   CHECK_EQ(50, result->Int32Value());
-  CHECK_EQ(455, break_point_hit_count);
+  CHECK_EQ(456, break_point_hit_count);
 
   // Get rid of the debug event listener.
   v8::Debug::SetDebugEventListener(NULL);
@@ -3319,7 +3304,7 @@
   v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
   result = foo->Call(env->Global(), argc, argv_10);
   CHECK_EQ(9, result->Int32Value());
-  CHECK_EQ(53, break_point_hit_count);
+  CHECK_EQ(54, break_point_hit_count);
 
   // Looping 100 times.
   step_action = StepIn;
@@ -3327,7 +3312,7 @@
   v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
   result = foo->Call(env->Global(), argc, argv_100);
   CHECK_EQ(99, result->Int32Value());
-  CHECK_EQ(503, break_point_hit_count);
+  CHECK_EQ(504, break_point_hit_count);
 
   // Get rid of the debug event listener.
   v8::Debug::SetDebugEventListener(NULL);
@@ -3761,8 +3746,7 @@
   v8::internal::Isolate::Current()->TraceException(false);
 
   // Create functions for testing break on exception.
-  v8::Local<v8::Function> throws =
-      CompileFunction(&env, "function throws(){throw 1;}", "throws");
+  CompileFunction(&env, "function throws(){throw 1;}", "throws");
   v8::Local<v8::Function> caught =
       CompileFunction(&env,
                       "function caught(){try {throws();} catch(e) {};}",
@@ -4238,9 +4222,9 @@
 
   // Get mirrors for the three objects with interceptor.
   CompileRun(
-      "named_mirror = debug.MakeMirror(intercepted_named);"
-      "indexed_mirror = debug.MakeMirror(intercepted_indexed);"
-      "both_mirror = debug.MakeMirror(intercepted_both)");
+      "var named_mirror = debug.MakeMirror(intercepted_named);"
+      "var indexed_mirror = debug.MakeMirror(intercepted_indexed);"
+      "var both_mirror = debug.MakeMirror(intercepted_both)");
   CHECK(CompileRun(
        "named_mirror instanceof debug.ObjectMirror")->BooleanValue());
   CHECK(CompileRun(
@@ -4281,7 +4265,7 @@
   CHECK_EQ(5, CompileRun(source)->Int32Value());
 
   // Get the interceptor properties for the object with only named interceptor.
-  CompileRun("named_values = named_mirror.properties()");
+  CompileRun("var named_values = named_mirror.properties()");
 
   // Check that the properties are interceptor properties.
   for (int i = 0; i < 3; i++) {
@@ -4300,7 +4284,7 @@
 
   // Get the interceptor properties for the object with only indexed
   // interceptor.
-  CompileRun("indexed_values = indexed_mirror.properties()");
+  CompileRun("var indexed_values = indexed_mirror.properties()");
 
   // Check that the properties are interceptor properties.
   for (int i = 0; i < 2; i++) {
@@ -4312,7 +4296,7 @@
 
   // Get the interceptor properties for the object with both types of
   // interceptors.
-  CompileRun("both_values = both_mirror.properties()");
+  CompileRun("var both_values = both_mirror.properties()");
 
   // Check that the properties are interceptor properties.
   for (int i = 0; i < 5; i++) {
@@ -4368,10 +4352,10 @@
 
   // Get mirrors for the four objects.
   CompileRun(
-      "o0_mirror = debug.MakeMirror(o0);"
-      "o1_mirror = debug.MakeMirror(o1);"
-      "o2_mirror = debug.MakeMirror(o2);"
-      "o3_mirror = debug.MakeMirror(o3)");
+      "var o0_mirror = debug.MakeMirror(o0);"
+      "var o1_mirror = debug.MakeMirror(o1);"
+      "var o2_mirror = debug.MakeMirror(o2);"
+      "var o3_mirror = debug.MakeMirror(o3)");
   CHECK(CompileRun("o0_mirror instanceof debug.ObjectMirror")->BooleanValue());
   CHECK(CompileRun("o1_mirror instanceof debug.ObjectMirror")->BooleanValue());
   CHECK(CompileRun("o2_mirror instanceof debug.ObjectMirror")->BooleanValue());
@@ -4457,11 +4441,11 @@
   CHECK_EQ(10, CompileRun("instance.x")->Int32Value());
 
   // Get mirror for the object with property getter.
-  CompileRun("instance_mirror = debug.MakeMirror(instance);");
+  CompileRun("var instance_mirror = debug.MakeMirror(instance);");
   CHECK(CompileRun(
       "instance_mirror instanceof debug.ObjectMirror")->BooleanValue());
 
-  CompileRun("named_names = instance_mirror.propertyNames();");
+  CompileRun("var named_names = instance_mirror.propertyNames();");
   CHECK_EQ(1, CompileRun("named_names.length")->Int32Value());
   CHECK(CompileRun("named_names[0] == 'x'")->BooleanValue());
   CHECK(CompileRun(
@@ -4493,7 +4477,7 @@
   env->Global()->Set(v8::String::New("instance"), named->NewInstance());
 
   // Get mirror for the object with property getter.
-  CompileRun("instance_mirror = debug.MakeMirror(instance);");
+  CompileRun("var instance_mirror = debug.MakeMirror(instance);");
   CHECK(CompileRun(
       "instance_mirror instanceof debug.ObjectMirror")->BooleanValue());
   CompileRun("named_names = instance_mirror.propertyNames();");
@@ -5557,10 +5541,8 @@
     v8::HandleScope scope;
 
     // Get the test functions again.
-    v8::Local<v8::Function> foo =
-      v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
-    v8::Local<v8::Function> bar =
-      v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+    v8::Local<v8::Function> foo(v8::Local<v8::Function>::Cast(
+        env->Global()->Get(v8::String::New("foo"))));
 
     foo->Call(env->Global(), 0, NULL);
     CHECK_EQ(0, break_point_hit_count);
@@ -5719,7 +5701,7 @@
   v8::HandleScope scope;
   DebugLocalContext env;
 
-  // Setup message and host dispatch handlers.
+  // Set up message and host dispatch handlers.
   v8::Debug::SetMessageHandler2(HostDispatchMessageHandler);
   v8::Debug::SetHostDispatchHandler(HostDispatchDispatchHandler, 10 /* ms */);
 
@@ -5807,7 +5789,7 @@
   v8::HandleScope scope;
   DebugLocalContext env;
 
-  // Setup debug message dispatch handler.
+  // Set up debug message dispatch handler.
   v8::Debug::SetDebugMessageDispatchHandler(DebugMessageHandler);
 
   CompileRun("var y = 1 + 2;\n");
@@ -5861,7 +5843,7 @@
   bool ok;
 
   // Initialize the socket library.
-  i::Socket::Setup();
+  i::Socket::SetUp();
 
   // Test starting and stopping the agent without any client connection.
   debugger->StartAgent("test", kPort1);
@@ -5959,7 +5941,7 @@
   OS::SNPrintF(i::Vector<char>(port_str, kPortBufferLen), "%d", kPort);
 
   // Initialize the socket library.
-  i::Socket::Setup();
+  i::Socket::SetUp();
 
   // Create a socket server to receive a debugger agent message.
   DebuggerAgentProtocolServerThread* server =
@@ -6037,7 +6019,9 @@
 
   EmptyExternalStringResource source_ext_str;
   v8::Local<v8::String> source = v8::String::NewExternal(&source_ext_str);
-  v8::Handle<v8::Script> evil_script = v8::Script::Compile(source);
+  v8::Handle<v8::Script> evil_script(v8::Script::Compile(source));
+  // "use" evil_script to make the compiler happy.
+  (void) evil_script;
   Handle<i::ExternalTwoByteString> i_source(
       i::ExternalTwoByteString::cast(*v8::Utils::OpenHandle(*source)));
   // This situation can happen if source was an external string disposed
@@ -6472,7 +6456,7 @@
 
   // Do garbage collection to ensure that only the script in this test will be
   // collected afterwards.
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   script_collected_count = 0;
   v8::Debug::SetDebugEventListener(DebugEventScriptCollectedEvent,
@@ -6484,7 +6468,7 @@
 
   // Do garbage collection to collect the script above which is no longer
   // referenced.
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   CHECK_EQ(2, script_collected_count);
 
@@ -6520,7 +6504,7 @@
 
     // Do garbage collection to ensure that only the script in this test will be
     // collected afterwards.
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
     v8::Debug::SetMessageHandler2(ScriptCollectedMessageHandler);
     {
@@ -6531,7 +6515,7 @@
 
   // Do garbage collection to collect the script above which is no longer
   // referenced.
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   CHECK_EQ(2, script_collected_message_count);
 
@@ -6685,7 +6669,7 @@
     break_point_hit_count++;
 
     v8::HandleScope scope;
-    v8::Handle<v8::String> json = message.GetJSON();
+    message.GetJSON();
 
     SendContinueCommand();
   } else if (message.IsEvent() && message.GetEvent() == v8::AfterCompile) {
@@ -6696,7 +6680,7 @@
     isolate->stack_guard()->DebugBreak();
 
     // Force serialization to trigger some internal JS execution.
-    v8::Handle<v8::String> json = message.GetJSON();
+    message.GetJSON();
 
     // Restore previous state.
     if (is_debug_break) {
@@ -6879,7 +6863,7 @@
   foo->Call(env->Global(), 0, NULL);
 
   // When keeping the debug break several break will happen.
-  CHECK_EQ(3, break_point_hit_count);
+  CHECK_GT(break_point_hit_count, 1);
 
   v8::Debug::SetDebugEventListener(NULL);
   CheckDebuggerUnloaded();
@@ -7224,10 +7208,10 @@
   // Receive 100 breaks for each test and then terminate JavaScript execution.
   static const int kBreaksPerTest = 100;
 
-  for (int i = 0; i < 1 && loop_bodies[i] != NULL; i++) {
+  for (int i = 0; loop_bodies[i] != NULL; i++) {
     // Perform a lazy deoptimization after various numbers of breaks
     // have been hit.
-    for (int j = 0; j < 10; j++) {
+    for (int j = 0; j < 11; j++) {
       break_point_hit_count_deoptimize = j;
       if (j == 10) {
         break_point_hit_count_deoptimize = kBreaksPerTest;
@@ -7302,4 +7286,65 @@
 }
 
 
+v8::Local<v8::Script> inline_script;
+
+static void DebugBreakInlineListener(v8::DebugEvent event,
+                                     v8::Handle<v8::Object> exec_state,
+                                     v8::Handle<v8::Object> event_data,
+                                     v8::Handle<v8::Value> data) {
+  if (event != v8::Break) return;
+
+  int expected_frame_count = 4;
+  int expected_line_number[] = {1, 4, 7, 12};
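+  // Innermost frame first: the debugger statement in debug(), the call in
+  // f(), the inlined call in g(), and the top-level call g(true).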
+
+  i::Handle<i::Object> compiled_script = v8::Utils::OpenHandle(*inline_script);
+  i::Handle<i::Script> source_script = i::Handle<i::Script>(i::Script::cast(
+      i::JSFunction::cast(*compiled_script)->shared()->script()));
+
+  int break_id = v8::internal::Isolate::Current()->debug()->break_id();
+  char script[128];
+  i::Vector<char> script_vector(script, sizeof(script));
+  OS::SNPrintF(script_vector, "%%GetFrameCount(%d)", break_id);
+  v8::Local<v8::Value> result = CompileRun(script);
+
+  int frame_count = result->Int32Value();
+  CHECK_EQ(expected_frame_count, frame_count);
+
+  for (int i = 0; i < frame_count; i++) {
+    // The fifth element in the returned array of GetFrameDetails contains
+    // the source position of that frame.
+    OS::SNPrintF(script_vector, "%%GetFrameDetails(%d, %d)[5]", break_id, i);
+    v8::Local<v8::Value> result = CompileRun(script);
+    CHECK_EQ(expected_line_number[i],
+             i::GetScriptLineNumber(source_script, result->Int32Value()));
+  }
+  v8::Debug::SetDebugEventListener(NULL);
+  v8::V8::TerminateExecution();
+}
+
+
+TEST(DebugBreakInline) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope scope;
+  DebugLocalContext env;
+  const char* source =
+      "function debug(b) {             \n"
+      "  if (b) debugger;              \n"
+      "}                               \n"
+      "function f(b) {                 \n"
+      "  debug(b)                      \n"
+      "};                              \n"
+      "function g(b) {                 \n"
+      "  f(b);                         \n"
+      "};                              \n"
+      "g(false);                       \n"
+      "g(false);                       \n"
+      "%OptimizeFunctionOnNextCall(g); \n"
+      "g(true);";
+  v8::Debug::SetDebugEventListener(DebugBreakInlineListener);
+  inline_script = v8::Script::Compile(v8::String::New(source));
+  inline_script->Run();
+}
+
+
 #endif  // ENABLE_DEBUGGER_SUPPORT
diff --git a/test/cctest/test-decls.cc b/test/cctest/test-decls.cc
index 6198391..aa733c7 100644
--- a/test/cctest/test-decls.cc
+++ b/test/cctest/test-decls.cc
@@ -232,7 +232,7 @@
     context.Check("const x; x",
                   1,  // access
                   2,  // declaration + initialization
-                  2,  // declaration + initialization
+                  1,  // declaration
                   EXPECT_RESULT, Undefined());
   }
 
@@ -240,7 +240,7 @@
     context.Check("const x = 0; x",
                   1,  // access
                   2,  // declaration + initialization
-                  2,  // declaration + initialization
+                  1,  // declaration
                   EXPECT_RESULT, Undefined());  // SB 0 - BUG 1213579
   }
 }
@@ -285,18 +285,18 @@
 
   { PresentPropertyContext context;
     context.Check("const x; x",
-                  0,
-                  0,
+                  1,  // access
+                  1,  // initialization
                   1,  // (re-)declaration
-                  EXPECT_EXCEPTION);  // x has already been declared!
+                  EXPECT_RESULT, Undefined());
   }
 
   { PresentPropertyContext context;
     context.Check("const x = 0; x",
-                  0,
-                  0,
+                  1,  // access
+                  1,  // initialization
                   1,  // (re-)declaration
-                  EXPECT_EXCEPTION);  // x has already been declared!
+                  EXPECT_RESULT, Number::New(0));
   }
 }
 
@@ -341,7 +341,7 @@
     context.Check("const x; x",
                   1,  // access
                   2,  // declaration + initialization
-                  2,  // declaration + initializetion
+                  1,  // declaration
                   EXPECT_RESULT, Undefined());
   }
 
@@ -349,7 +349,7 @@
     context.Check("const x = 0; x",
                   1,  // access
                   2,  // declaration + initialization
-                  2,  // declaration + initialization
+                  1,  // declaration
                   EXPECT_RESULT, Undefined());  // SB 0 - BUG 1213579
   }
 
@@ -429,18 +429,20 @@
 
   { AppearingPropertyContext context;
     context.Check("const x; x",
-                  0,
-                  1,  // declaration
+                  1,  // access
                   2,  // declaration + initialization
-                  EXPECT_EXCEPTION);  // x has already been declared!
+                  1,  // declaration
+                  EXPECT_RESULT, Undefined());
   }
 
   { AppearingPropertyContext context;
     context.Check("const x = 0; x",
-                  0,
-                  1,  // declaration
+                  1,  // access
                   2,  // declaration + initialization
-                  EXPECT_EXCEPTION);  //  x has already been declared!
+                  1,  // declaration
+                  EXPECT_RESULT, Undefined());
+                  // Result is undefined because the declaration succeeded
+                  // but the initialization to 0 failed in the
+                  // AppearingPropertyContext.
   }
 }
 
@@ -496,9 +498,9 @@
   { ReappearingPropertyContext context;
     context.Check("const x; var x = 0",
                   0,
-                  2,  // var declaration + const initialization
-                  4,  // 2 x declaration + 2 x initialization
-                  EXPECT_EXCEPTION);  // x has already been declared!
+                  3,  // const declaration+initialization, var initialization
+                  3,  // 2 x declaration + var initialization
+                  EXPECT_RESULT, Undefined());
   }
 }
 
diff --git a/test/cctest/test-deoptimization.cc b/test/cctest/test-deoptimization.cc
index 056c981..c52c578 100644
--- a/test/cctest/test-deoptimization.cc
+++ b/test/cctest/test-deoptimization.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -97,6 +97,13 @@
 };
 
 
+// Abort any ongoing incremental marking to make sure that all weak global
+// handle callbacks are processed.
+static void NonIncrementalGC() {
+  HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+}
+
+
 static Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
                                         const char* property_name) {
   v8::Local<v8::Function> fun =
@@ -107,9 +114,7 @@
 
 TEST(DeoptimizeSimple) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   // Test lazy deoptimization of a simple function.
   {
@@ -119,9 +124,9 @@
         "function h() { %DeoptimizeFunction(f); }"
         "function g() { count++; h(); }"
         "function f() { g(); };"
-        "f();"
-        "gc(); gc()");
+        "f();");
   }
+  NonIncrementalGC();
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -135,9 +140,9 @@
         "var count = 0;"
         "function g() { count++; %DeoptimizeFunction(f); f(false); }"
         "function f(x) { if (x) { g(); } else { return } };"
-        "f(true);"
-        "gc(); gc()");
+        "f(true);");
   }
+  NonIncrementalGC();
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -147,9 +152,7 @@
 
 TEST(DeoptimizeSimpleWithArguments) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   // Test lazy deoptimization of a simple function with some arguments.
   {
@@ -159,9 +162,9 @@
         "function h(x) { %DeoptimizeFunction(f); }"
         "function g(x, y) { count++; h(x); }"
         "function f(x, y, z) { g(1,x); y+z; };"
-        "f(1, \"2\", false);"
-        "gc(); gc()");
+        "f(1, \"2\", false);");
   }
+  NonIncrementalGC();
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -176,9 +179,9 @@
         "var count = 0;"
         "function g(x, y) { count++; %DeoptimizeFunction(f); f(false, 1, y); }"
         "function f(x, y, z) { if (x) { g(x, y); } else { return y + z; } };"
-        "f(true, 1, \"2\");"
-        "gc(); gc()");
+        "f(true, 1, \"2\");");
   }
+  NonIncrementalGC();
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -188,9 +191,7 @@
 
 TEST(DeoptimizeSimpleNested) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   // Test lazy deoptimization of a simple function. Have a nested function call
   // do the deoptimization.
@@ -202,8 +203,8 @@
         "function h(x, y, z) { return x + y + z; }"
         "function g(z) { count++; %DeoptimizeFunction(f); return z;}"
         "function f(x,y,z) { return h(x, y, g(z)); };"
-        "result = f(1, 2, 3);"
-        "gc(); gc()");
+        "result = f(1, 2, 3);");
+    NonIncrementalGC();
 
     CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
     CHECK_EQ(6, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -215,9 +216,7 @@
 
 TEST(DeoptimizeRecursive) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   {
     // Test lazy deoptimization of a simple function called recursively. Call
@@ -228,8 +227,9 @@
         "var calls = 0;"
         "function g() { count++; %DeoptimizeFunction(f); }"
         "function f(x) { calls++; if (x > 0) { f(x - 1); } else { g(); } };"
-        "f(10); gc(); gc()");
+        "f(10);");
   }
+  NonIncrementalGC();
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK_EQ(11, env->Global()->Get(v8_str("calls"))->Int32Value());
@@ -237,15 +237,13 @@
 
   v8::Local<v8::Function> fun =
       v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
-  Handle<v8::internal::JSFunction> f = v8::Utils::OpenHandle(*fun);
+  CHECK(!fun.IsEmpty());
 }
 
 
 TEST(DeoptimizeMultiple) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   {
     AlwaysOptimizeAllowNativesSyntaxNoInlining options;
@@ -261,9 +259,9 @@
         "function f3(x, y, z) { f4(); return x + y + z; };"
         "function f2(x, y) { return x + f3(y + 1, y + 1, y + 1) + y; };"
         "function f1(x) { return f2(x + 1, x + 1) + x; };"
-        "result = f1(1);"
-        "gc(); gc()");
+        "result = f1(1);");
   }
+  NonIncrementalGC();
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -273,9 +271,7 @@
 
 TEST(DeoptimizeConstructor) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   {
     AlwaysOptimizeAllowNativesSyntaxNoInlining options;
@@ -284,9 +280,9 @@
         "function g() { count++;"
         "               %DeoptimizeFunction(f); }"
         "function f() {  g(); };"
-        "result = new f() instanceof f;"
-        "gc(); gc()");
+        "result = new f() instanceof f;");
   }
+  NonIncrementalGC();
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK(env->Global()->Get(v8_str("result"))->IsTrue());
@@ -301,9 +297,9 @@
         "               %DeoptimizeFunction(f); }"
         "function f(x, y) { this.x = x; g(); this.y = y; };"
         "result = new f(1, 2);"
-        "result = result.x + result.y;"
-        "gc(); gc()");
+        "result = result.x + result.y;");
   }
+  NonIncrementalGC();
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK_EQ(3, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -313,9 +309,7 @@
 
 TEST(DeoptimizeConstructorMultiple) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   {
     AlwaysOptimizeAllowNativesSyntaxNoInlining options;
@@ -332,9 +326,9 @@
         "function f2(x, y) {"
         "    this.result = x + new f3(y + 1, y + 1, y + 1).result + y; };"
         "function f1(x) { this.result = new f2(x + 1, x + 1).result + x; };"
-        "result = new f1(1).result;"
-        "gc(); gc()");
+        "result = new f1(1).result;");
   }
+  NonIncrementalGC();
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -344,9 +338,7 @@
 
 TEST(DeoptimizeBinaryOperationADDString) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   const char* f_source = "function f(x, y) { return x + y; };";
 
@@ -376,9 +368,9 @@
 
     // Call f and force deoptimization while processing the binary operation.
     CompileRun("deopt = true;"
-               "var result = f('a+', new X());"
-               "gc(); gc();");
+               "var result = f('a+', new X());");
   }
+  NonIncrementalGC();
 
   CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
@@ -428,18 +420,15 @@
 
   // Call f and force deoptimization while processing the binary operation.
   CompileRun("deopt = true;"
-             "var result = f(7, new X());"
-             "gc(); gc();");
-
+             "var result = f(7, new X());");
+  NonIncrementalGC();
   CHECK(!GetJSFunction((*env)->Global(), "f")->IsOptimized());
 }
 
 
 TEST(DeoptimizeBinaryOperationADD) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   TestDeoptimizeBinaryOpHelper(&env, "+");
 
@@ -451,9 +440,7 @@
 
 TEST(DeoptimizeBinaryOperationSUB) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   TestDeoptimizeBinaryOpHelper(&env, "-");
 
@@ -465,9 +452,7 @@
 
 TEST(DeoptimizeBinaryOperationMUL) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   TestDeoptimizeBinaryOpHelper(&env, "*");
 
@@ -479,9 +464,7 @@
 
 TEST(DeoptimizeBinaryOperationDIV) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   TestDeoptimizeBinaryOpHelper(&env, "/");
 
@@ -493,9 +476,7 @@
 
 TEST(DeoptimizeBinaryOperationMOD) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   TestDeoptimizeBinaryOpHelper(&env, "%");
 
@@ -507,9 +488,7 @@
 
 TEST(DeoptimizeCompare) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   const char* f_source = "function f(x, y) { return x < y; };";
 
@@ -539,9 +518,9 @@
 
     // Call f and force deoptimization while processing the comparison.
     CompileRun("deopt = true;"
-               "var result = f('a', new X());"
-               "gc(); gc();");
+               "var result = f('a', new X());");
   }
+  NonIncrementalGC();
 
   CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
@@ -552,9 +531,7 @@
 
 TEST(DeoptimizeLoadICStoreIC) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   // Functions to generate load/store/keyed load/keyed store IC calls.
   const char* f1_source = "function f1(x) { return x.y; };";
@@ -618,9 +595,9 @@
                "var result = f1(new X());"
                "g1(new X());"
                "f2(new X(), 'z');"
-               "g2(new X(), 'z');"
-               "gc(); gc();");
+               "g2(new X(), 'z');");
   }
+  NonIncrementalGC();
 
   CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
   CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
@@ -634,9 +611,7 @@
 
 TEST(DeoptimizeLoadICStoreICNested) {
   v8::HandleScope scope;
-  const char* extension_list[] = { "v8/gc" };
-  v8::ExtensionConfiguration extensions(1, extension_list);
-  LocalContext env(&extensions);
+  LocalContext env;
 
   // Functions to generate load/store/keyed load/keyed store IC calls.
   const char* f1_source = "function f1(x) { return x.y; };";
@@ -701,9 +676,9 @@
 
     // Call functions and force deoptimization while processing the ICs.
     CompileRun("deopt = true;"
-               "var result = f1(new X());"
-               "gc(); gc();");
+               "var result = f1(new X());");
   }
+  NonIncrementalGC();
 
   CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
   CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
diff --git a/test/cctest/test-dictionary.cc b/test/cctest/test-dictionary.cc
index 15a854b..793e228 100644
--- a/test/cctest/test-dictionary.cc
+++ b/test/cctest/test-dictionary.cc
@@ -38,6 +38,7 @@
 
 using namespace v8::internal;
 
+
 TEST(ObjectHashTable) {
   v8::HandleScope scope;
   LocalContext context;
@@ -66,7 +67,8 @@
   CHECK_EQ(table->NumberOfDeletedElements(), 1);
   CHECK_EQ(table->Lookup(*a), HEAP->undefined_value());
 
-  // Keys should map back to their respective values.
+  // Keys should map back to their respective values, and each key
+  // should also get an identity hash code generated.
   for (int i = 0; i < 100; i++) {
     Handle<JSObject> key = FACTORY->NewJSArray(7);
     Handle<JSObject> value = FACTORY->NewJSArray(11);
@@ -74,12 +76,67 @@
     CHECK_EQ(table->NumberOfElements(), i + 1);
     CHECK_NE(table->FindEntry(*key), ObjectHashTable::kNotFound);
     CHECK_EQ(table->Lookup(*key), *value);
+    CHECK(key->GetIdentityHash(OMIT_CREATION)->ToObjectChecked()->IsSmi());
   }
 
-  // Keys never added to the map should not be found.
-  for (int i = 0; i < 1000; i++) {
-    Handle<JSObject> o = FACTORY->NewJSArray(100);
-    CHECK_EQ(table->FindEntry(*o), ObjectHashTable::kNotFound);
-    CHECK_EQ(table->Lookup(*o), HEAP->undefined_value());
+  // Keys never added to the map which already have an identity hash
+  // code should not be found.
+  for (int i = 0; i < 100; i++) {
+    Handle<JSObject> key = FACTORY->NewJSArray(7);
+    CHECK(key->GetIdentityHash(ALLOW_CREATION)->ToObjectChecked()->IsSmi());
+    CHECK_EQ(table->FindEntry(*key), ObjectHashTable::kNotFound);
+    CHECK_EQ(table->Lookup(*key), HEAP->undefined_value());
+    CHECK(key->GetIdentityHash(OMIT_CREATION)->ToObjectChecked()->IsSmi());
+  }
+
+  // Keys that don't have an identity hash should not be found, and no
+  // identity hash code should be generated for them as a side effect.
+  for (int i = 0; i < 100; i++) {
+    Handle<JSObject> key = FACTORY->NewJSArray(7);
+    CHECK_EQ(table->Lookup(*key), HEAP->undefined_value());
+    CHECK_EQ(key->GetIdentityHash(OMIT_CREATION), HEAP->undefined_value());
   }
 }
+
+
+#ifdef DEBUG
+TEST(ObjectHashSetCausesGC) {
+  v8::HandleScope scope;
+  LocalContext context;
+  Handle<ObjectHashSet> table = FACTORY->NewObjectHashSet(1);
+  Handle<JSObject> key = FACTORY->NewJSArray(0);
+
+  // Simulate a full heap so that generating an identity hash code
+  // in subsequent calls will request GC.
+  FLAG_gc_interval = 0;
+
+  // Calling Contains() should never cause a GC.
+  CHECK(!table->Contains(*key));
+
+  // Calling Remove() should never cause a GC.
+  CHECK(!table->Remove(*key)->IsFailure());
+
+  // Calling Add() should request a GC by returning a failure.
+  CHECK(table->Add(*key)->IsRetryAfterGC());
+}
+#endif
+
+
+#ifdef DEBUG
+TEST(ObjectHashTableCausesGC) {
+  v8::HandleScope scope;
+  LocalContext context;
+  Handle<ObjectHashTable> table = FACTORY->NewObjectHashTable(1);
+  Handle<JSObject> key = FACTORY->NewJSArray(0);
+
+  // Simulate a full heap so that generating an identity hash code
+  // in subsequent calls will request GC.
+  FLAG_gc_interval = 0;
+
+  // Calling Lookup() should never cause a GC.
+  CHECK(table->Lookup(*key)->IsUndefined());
+
+  // Calling Put() should request a GC by returning a failure.
+  CHECK(table->Put(*key, *key)->IsRetryAfterGC());
+}
+#endif
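
The test-dictionary.cc changes above pin down the identity-hash contract:
read-only operations (Lookup, Contains, Remove, FindEntry) must never
materialize an identity hash, while mutating operations may allocate one and
can therefore request a GC. Condensed from the assertions above, using the
same internal V8 API (not a public embedder API):

    Handle<JSObject> key = FACTORY->NewJSArray(7);

    // Probing must not create a hash code as a side effect.
    CHECK_EQ(key->GetIdentityHash(OMIT_CREATION), HEAP->undefined_value());

    // An explicit request may allocate one; on a simulated full heap
    // (FLAG_gc_interval = 0) the allocation surfaces as IsRetryAfterGC().
    CHECK(key->GetIdentityHash(ALLOW_CREATION)->ToObjectChecked()->IsSmi());
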
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 032e6bc..0e9432d 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -69,10 +69,10 @@
 }
 
 
-// Setup V8 to a state where we can at least run the assembler and
+// Set up V8 to a state where we can at least run the assembler and
 // disassembler. Declare the variables and allocate the data structures used
 // in the rest of the macros.
-#define SETUP()                                           \
+#define SET_UP()                                          \
   InitializeVM();                                         \
   v8::HandleScope scope;                                  \
   byte *buffer = reinterpret_cast<byte*>(malloc(4*1024)); \
@@ -102,7 +102,7 @@
 
 
 TEST(Type0) {
-  SETUP();
+  SET_UP();
 
   COMPARE(and_(r0, r1, Operand(r2)),
           "e0010002       and r0, r1, r2");
@@ -329,7 +329,7 @@
 
 
 TEST(Type1) {
-  SETUP();
+  SET_UP();
 
   COMPARE(and_(r0, r1, Operand(0x00000000)),
           "e2010000       and r0, r1, #0");
@@ -358,7 +358,7 @@
 
 
 TEST(Type3) {
-  SETUP();
+  SET_UP();
 
   if (CpuFeatures::IsSupported(ARMv7)) {
     COMPARE(ubfx(r0, r1, 5, 10),
@@ -413,7 +413,7 @@
 
 
 TEST(Vfp) {
-  SETUP();
+  SET_UP();
 
   if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
@@ -546,7 +546,7 @@
 
 
 TEST(LoadStore) {
-  SETUP();
+  SET_UP();
 
   COMPARE(ldrb(r0, MemOperand(r1)),
           "e5d10000       ldrb r0, [r1, #+0]");
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index 9f7d0bb..da09505 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -63,9 +63,9 @@
 
   // Short immediate instructions
   __ adc(eax, 12345678);
-  __ add(Operand(eax), Immediate(12345678));
+  __ add(eax, Immediate(12345678));
   __ or_(eax, 12345678);
-  __ sub(Operand(eax), Immediate(12345678));
+  __ sub(eax, Immediate(12345678));
   __ xor_(eax, 12345678);
   __ and_(eax, 12345678);
   Handle<FixedArray> foo = FACTORY->NewFixedArray(10, TENURED);
@@ -75,7 +75,7 @@
   __ mov(ebx,  Operand(esp, ecx, times_2, 0));  // [esp+ecx*2]
 
   // ---- All instructions that I can think of
-  __ add(edx, Operand(ebx));
+  __ add(edx, ebx);
   __ add(edx, Operand(12, RelocInfo::NONE));
   __ add(edx, Operand(ebx, 0));
   __ add(edx, Operand(ebx, 16));
@@ -89,7 +89,7 @@
   __ add(Operand(ebp, ecx, times_4, 12), Immediate(12));
 
   __ nop();
-  __ add(Operand(ebx), Immediate(12));
+  __ add(ebx, Immediate(12));
   __ nop();
   __ adc(ecx, 12);
   __ adc(ecx, 1000);
@@ -116,16 +116,16 @@
     CpuFeatures::Scope fscope(RDTSC);
     __ rdtsc();
   }
-  __ movsx_b(edx, Operand(ecx));
-  __ movsx_w(edx, Operand(ecx));
-  __ movzx_b(edx, Operand(ecx));
-  __ movzx_w(edx, Operand(ecx));
+  __ movsx_b(edx, ecx);
+  __ movsx_w(edx, ecx);
+  __ movzx_b(edx, ecx);
+  __ movzx_w(edx, ecx);
 
   __ nop();
-  __ imul(edx, Operand(ecx));
-  __ shld(edx, Operand(ecx));
-  __ shrd(edx, Operand(ecx));
-  __ bts(Operand(edx), ecx);
+  __ imul(edx, ecx);
+  __ shld(edx, ecx);
+  __ shrd(edx, ecx);
+  __ bts(edx, ecx);
   __ bts(Operand(ebx, ecx, times_4, 0), ecx);
   __ nop();
   __ pushad();
@@ -146,9 +146,9 @@
   __ nop();
 
   __ add(edx, Operand(esp, 16));
-  __ add(edx, Operand(ecx));
-  __ mov_b(edx, Operand(ecx));
-  __ mov_b(Operand(ecx), 6);
+  __ add(edx, ecx);
+  __ mov_b(edx, ecx);
+  __ mov_b(ecx, 6);
   __ mov_b(Operand(ebx, ecx, times_4, 10000), 6);
   __ mov_b(Operand(esp, 16), edx);
   __ mov_w(edx, Operand(esp, 16));
@@ -216,22 +216,20 @@
 
   __ adc(edx, 12345);
 
-  __ add(Operand(ebx), Immediate(12));
+  __ add(ebx, Immediate(12));
   __ add(Operand(edx, ecx, times_4, 10000), Immediate(12));
 
   __ and_(ebx, 12345);
 
   __ cmp(ebx, 12345);
-  __ cmp(Operand(ebx), Immediate(12));
+  __ cmp(ebx, Immediate(12));
   __ cmp(Operand(edx, ecx, times_4, 10000), Immediate(12));
+  __ cmpb(eax, 100);
 
   __ or_(ebx, 12345);
 
-  __ sub(Operand(ebx), Immediate(12));
+  __ sub(ebx, Immediate(12));
   __ sub(Operand(edx, ecx, times_4, 10000), Immediate(12));
-  __ subb(Operand(edx, ecx, times_4, 10000), 100);
-  __ subb(Operand(eax), 100);
-  __ subb(eax, Operand(edx, ecx, times_4, 10000));
 
   __ xor_(ebx, 12345);
 
@@ -244,7 +242,7 @@
   __ stos();
 
   __ sub(edx, Operand(ebx, ecx, times_4, 10000));
-  __ sub(edx, Operand(ebx));
+  __ sub(edx, ebx);
 
   __ test(edx, Immediate(12345));
   __ test(edx, Operand(ebx, ecx, times_8, 10000));
@@ -446,11 +444,16 @@
   {
     if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope scope(SSE4_1);
-      __ pextrd(Operand(eax), xmm0, 1);
-      __ pinsrd(xmm1, Operand(eax), 0);
+      __ pextrd(eax, xmm0, 1);
+      __ pinsrd(xmm1, eax, 0);
     }
   }
 
+  // Nop instructions
+  for (int i = 0; i < 16; i++) {
+    __ Nop(i);
+  }
+
   __ ret(0);
 
   CodeDesc desc;
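
The mechanical rewrites in test-disasm-ia32.cc above track an assembler API
cleanup: where an operand really is a register, the instruction now takes a
plain Register, and Operand(...) is reserved for memory references. The three
forms side by side, as exercised above:

    __ add(edx, ebx);                                       // reg <- reg
    __ add(edx, Operand(ebx, ecx, times_4, 0));             // reg <- mem
    __ add(Operand(ebp, ecx, times_4, 12), Immediate(12));  // mem <- imm
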
diff --git a/test/cctest/test-disasm-mips.cc b/test/cctest/test-disasm-mips.cc
index 5ad99d7..1f87424 100644
--- a/test/cctest/test-disasm-mips.cc
+++ b/test/cctest/test-disasm-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -71,10 +71,10 @@
 }
 
 
-// Setup V8 to a state where we can at least run the assembler and
+// Set up V8 to a state where we can at least run the assembler and
 // disassembler. Declare the variables and allocate the data structures used
 // in the rest of the macros.
-#define SETUP()                                           \
+#define SET_UP()                                          \
   InitializeVM();                                         \
   v8::HandleScope scope;                                  \
   byte *buffer = reinterpret_cast<byte*>(malloc(4*1024)); \
@@ -104,7 +104,7 @@
 
 
 TEST(Type0) {
-  SETUP();
+  SET_UP();
 
   COMPARE(addu(a0, a1, a2),
           "00a62021       addu    a0, a1, a2");
@@ -148,12 +148,14 @@
   COMPARE(divu(v0, v1),
           "0043001b       divu    v0, v1");
 
-  COMPARE(mul(a0, a1, a2),
-          "70a62002       mul     a0, a1, a2");
-  COMPARE(mul(t2, t3, t4),
-          "716c5002       mul     t2, t3, t4");
-  COMPARE(mul(v0, v1, s0),
-          "70701002       mul     v0, v1, s0");
+  if (kArchVariant != kLoongson) {
+    COMPARE(mul(a0, a1, a2),
+            "70a62002       mul     a0, a1, a2");
+    COMPARE(mul(t2, t3, t4),
+            "716c5002       mul     t2, t3, t4");
+    COMPARE(mul(v0, v1, s0),
+            "70701002       mul     v0, v1, s0");
+  }
 
   COMPARE(addiu(a0, a1, 0x0),
           "24a40000       addiu   a0, a1, 0");
@@ -274,7 +276,7 @@
   COMPARE(srav(v0, v1, fp),
           "03c31007       srav    v0, v1, fp");
 
-  if (mips32r2) {
+  if (kArchVariant == kMips32r2) {
     COMPARE(rotr(a0, a1, 0),
             "00252002       rotr    a0, a1, 0");
     COMPARE(rotr(s0, s1, 8),
@@ -377,48 +379,50 @@
   COMPARE(sltiu(v0, v1, -1),
           "2c62ffff       sltiu   v0, v1, -1");
 
-  COMPARE(movz(a0, a1, a2),
-          "00a6200a       movz    a0, a1, a2");
-  COMPARE(movz(s0, s1, s2),
-          "0232800a       movz    s0, s1, s2");
-  COMPARE(movz(t2, t3, t4),
-          "016c500a       movz    t2, t3, t4");
-  COMPARE(movz(v0, v1, a2),
-          "0066100a       movz    v0, v1, a2");
-  COMPARE(movn(a0, a1, a2),
-          "00a6200b       movn    a0, a1, a2");
-  COMPARE(movn(s0, s1, s2),
-          "0232800b       movn    s0, s1, s2");
-  COMPARE(movn(t2, t3, t4),
-          "016c500b       movn    t2, t3, t4");
-  COMPARE(movn(v0, v1, a2),
-          "0066100b       movn    v0, v1, a2");
+  if (kArchVariant != kLoongson) {
+    COMPARE(movz(a0, a1, a2),
+            "00a6200a       movz    a0, a1, a2");
+    COMPARE(movz(s0, s1, s2),
+            "0232800a       movz    s0, s1, s2");
+    COMPARE(movz(t2, t3, t4),
+            "016c500a       movz    t2, t3, t4");
+    COMPARE(movz(v0, v1, a2),
+            "0066100a       movz    v0, v1, a2");
+    COMPARE(movn(a0, a1, a2),
+            "00a6200b       movn    a0, a1, a2");
+    COMPARE(movn(s0, s1, s2),
+            "0232800b       movn    s0, s1, s2");
+    COMPARE(movn(t2, t3, t4),
+            "016c500b       movn    t2, t3, t4");
+    COMPARE(movn(v0, v1, a2),
+            "0066100b       movn    v0, v1, a2");
 
-  COMPARE(movt(a0, a1, 1),
-          "00a52001       movt    a0, a1, 1");
-  COMPARE(movt(s0, s1, 2),
-          "02298001       movt    s0, s1, 2");
-  COMPARE(movt(t2, t3, 3),
-          "016d5001       movt    t2, t3, 3");
-  COMPARE(movt(v0, v1, 7),
-          "007d1001       movt    v0, v1, 7");
-  COMPARE(movf(a0, a1, 0),
-          "00a02001       movf    a0, a1, 0");
-  COMPARE(movf(s0, s1, 4),
-          "02308001       movf    s0, s1, 4");
-  COMPARE(movf(t2, t3, 5),
-          "01745001       movf    t2, t3, 5");
-  COMPARE(movf(v0, v1, 6),
-          "00781001       movf    v0, v1, 6");
+    COMPARE(movt(a0, a1, 1),
+            "00a52001       movt    a0, a1, 1");
+    COMPARE(movt(s0, s1, 2),
+            "02298001       movt    s0, s1, 2");
+    COMPARE(movt(t2, t3, 3),
+            "016d5001       movt    t2, t3, 3");
+    COMPARE(movt(v0, v1, 7),
+            "007d1001       movt    v0, v1, 7");
+    COMPARE(movf(a0, a1, 0),
+            "00a02001       movf    a0, a1, 0");
+    COMPARE(movf(s0, s1, 4),
+            "02308001       movf    s0, s1, 4");
+    COMPARE(movf(t2, t3, 5),
+            "01745001       movf    t2, t3, 5");
+    COMPARE(movf(v0, v1, 6),
+            "00781001       movf    v0, v1, 6");
 
-  COMPARE(clz(a0, a1),
-          "70a42020       clz     a0, a1");
-  COMPARE(clz(s6, s7),
-          "72f6b020       clz     s6, s7");
-  COMPARE(clz(v0, v1),
-          "70621020       clz     v0, v1");
+    COMPARE(clz(a0, a1),
+            "70a42020       clz     a0, a1");
+    COMPARE(clz(s6, s7),
+            "72f6b020       clz     s6, s7");
+    COMPARE(clz(v0, v1),
+            "70621020       clz     v0, v1");
+  }
 
-  if (mips32r2) {
+  if (kArchVariant == kMips32r2) {
     COMPARE(ins_(a0, a1, 31, 1),
             "7ca4ffc4       ins     a0, a1, 31, 1");
     COMPARE(ins_(s6, s7, 30, 2),
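
In test-disasm-mips.cc above, the mips32r2 boolean gives way to the
kArchVariant constant, and expectations for instructions that the Loongson
cores lack (mul, movz/movn, movt/movf, clz) are fenced off. The guard
pattern, in sketch form (kMips32r1 is assumed to be the third enum value):

    // kArchVariant is one of kMips32r1, kMips32r2, kLoongson.
    if (kArchVariant == kMips32r2) {
      COMPARE(rotr(a0, a1, 0), "00252002       rotr    a0, a1, 0");
    }
    if (kArchVariant != kLoongson) {
      COMPARE(clz(v0, v1), "70621020       clz     v0, v1");
    }
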
diff --git a/test/cctest/test-disasm-x64.cc b/test/cctest/test-disasm-x64.cc
new file mode 100644
index 0000000..da85eb9
--- /dev/null
+++ b/test/cctest/test-disasm-x64.cc
@@ -0,0 +1,429 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "debug.h"
+#include "disasm.h"
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+static v8::Persistent<v8::Context> env;
+
+static void InitializeVM() {
+  if (env.IsEmpty()) {
+    env = v8::Context::New();
+  }
+}
+
+
+#define __ assm.
+
+
+static void DummyStaticFunction(Object* result) {
+}
+
+
+TEST(DisasmX64) {
+  InitializeVM();
+  v8::HandleScope scope;
+  v8::internal::byte buffer[2048];
+  Assembler assm(Isolate::Current(), buffer, sizeof buffer);
+  DummyStaticFunction(NULL);  // Keep the helper referenced (debugging aid).
+
+  // Short immediate instructions
+  __ addq(rax, Immediate(12345678));
+  __ or_(rax, Immediate(12345678));
+  __ subq(rax, Immediate(12345678));
+  __ xor_(rax, Immediate(12345678));
+  __ and_(rax, Immediate(12345678));
+
+  // ---- This one caused a crash.
+  __ movq(rbx,  Operand(rsp, rcx, times_2, 0));  // [rsp+rcx*2]
+
+  // ---- All instructions that I can think of
+  __ addq(rdx, rbx);
+  __ addq(rdx, Operand(rbx, 0));
+  __ addq(rdx, Operand(rbx, 16));
+  __ addq(rdx, Operand(rbx, 1999));
+  __ addq(rdx, Operand(rsp, 0));
+  __ addq(rdx, Operand(rsp, 16));
+  __ addq(rdx, Operand(rsp, 1999));
+  __ nop();
+  __ addq(rdi, Operand(rbp, rcx, times_4, 0));
+  __ addq(rdi, Operand(rbp, rcx, times_4, 12));
+  __ addq(Operand(rbp, rcx, times_4, 12), Immediate(12));
+
+  __ nop();
+  __ addq(rbx, Immediate(12));
+  __ nop();
+  __ nop();
+  __ and_(rdx, Immediate(3));
+  __ and_(rdx, Operand(rsp, 4));
+  __ cmpq(rdx, Immediate(3));
+  __ cmpq(rdx, Operand(rsp, 4));
+  __ cmpq(Operand(rbp, rcx, times_4, 0), Immediate(1000));
+  __ cmpb(rbx, Operand(rbp, rcx, times_2, 0));
+  __ cmpb(Operand(rbp, rcx, times_2, 0), rbx);
+  __ or_(rdx, Immediate(3));
+  __ xor_(rdx, Immediate(3));
+  __ nop();
+  {
+    CHECK(CpuFeatures::IsSupported(CPUID));
+    CpuFeatures::Scope fscope(CPUID);
+    __ cpuid();
+  }
+  {
+    CHECK(CpuFeatures::IsSupported(RDTSC));
+    CpuFeatures::Scope fscope(RDTSC);
+    __ rdtsc();
+  }
+  __ movsxbq(rdx, Operand(rcx, 0));
+  __ movsxwq(rdx, Operand(rcx, 0));
+  __ movzxbl(rdx, Operand(rcx, 0));
+  __ movzxwl(rdx, Operand(rcx, 0));
+  __ movzxbq(rdx, Operand(rcx, 0));
+  __ movzxwq(rdx, Operand(rcx, 0));
+
+  __ nop();
+  __ imul(rdx, rcx);
+  __ shld(rdx, rcx);
+  __ shrd(rdx, rcx);
+  __ bts(Operand(rdx, 0), rcx);
+  __ bts(Operand(rbx, rcx, times_4, 0), rcx);
+  __ nop();
+  __ push(Immediate(12));
+  __ push(Immediate(23456));
+  __ push(rcx);
+  __ push(rsi);
+  __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(Operand(rbx, rcx, times_4, 0));
+  __ push(Operand(rbx, rcx, times_4, 0));
+  __ push(Operand(rbx, rcx, times_4, 10000));
+  __ pop(rdx);
+  __ pop(rax);
+  __ pop(Operand(rbx, rcx, times_4, 0));
+  __ nop();
+
+  __ addq(rdx, Operand(rsp, 16));
+  __ addq(rdx, rcx);
+  __ movb(rdx, Operand(rcx, 0));
+  __ movb(rcx, Immediate(6));
+  __ movb(Operand(rsp, 16), rdx);
+  __ movw(Operand(rsp, 16), rdx);
+  __ nop();
+  __ movsxwq(rdx, Operand(rsp, 12));
+  __ movsxbq(rdx, Operand(rsp, 12));
+  __ movsxlq(rdx, Operand(rsp, 12));
+  __ movzxwq(rdx, Operand(rsp, 12));
+  __ movzxbq(rdx, Operand(rsp, 12));
+  __ nop();
+  __ movq(rdx, Immediate(1234567));
+  __ movq(rdx, Operand(rsp, 12));
+  __ movq(Operand(rbx, rcx, times_4, 10000), Immediate(12345));
+  __ movq(Operand(rbx, rcx, times_4, 10000), rdx);
+  __ nop();
+  __ decb(rdx);
+  __ decb(Operand(rax, 10));
+  __ decb(Operand(rbx, rcx, times_4, 10000));
+  __ decq(rdx);
+  __ cdq();
+
+  __ nop();
+  __ idivq(rdx);
+  __ mul(rdx);
+  __ neg(rdx);
+  __ not_(rdx);
+  __ testq(Operand(rbx, rcx, times_4, 10000), rdx);
+
+  __ imul(rdx, Operand(rbx, rcx, times_4, 10000));
+  __ imul(rdx, rcx, Immediate(12));
+  __ imul(rdx, rcx, Immediate(1000));
+
+  __ incq(rdx);
+  __ incq(Operand(rbx, rcx, times_4, 10000));
+  __ push(Operand(rbx, rcx, times_4, 10000));
+  __ pop(Operand(rbx, rcx, times_4, 10000));
+  __ jmp(Operand(rbx, rcx, times_4, 10000));
+
+  __ lea(rdx, Operand(rbx, rcx, times_4, 10000));
+  __ or_(rdx, Immediate(12345));
+  __ or_(rdx, Operand(rbx, rcx, times_4, 10000));
+
+  __ nop();
+
+  __ rcl(rdx, Immediate(1));
+  __ rcl(rdx, Immediate(7));
+  __ rcr(rdx, Immediate(1));
+  __ rcr(rdx, Immediate(7));
+  __ sar(rdx, Immediate(1));
+  __ sar(rdx, Immediate(6));
+  __ sar_cl(rdx);
+  __ sbbq(rdx, rbx);
+  __ shld(rdx, rbx);
+  __ shl(rdx, Immediate(1));
+  __ shl(rdx, Immediate(6));
+  __ shl_cl(rdx);
+  __ shrd(rdx, rbx);
+  __ shr(rdx, Immediate(1));
+  __ shr(rdx, Immediate(7));
+  __ shr_cl(rdx);
+
+
+  // Immediates
+
+  __ addq(rbx, Immediate(12));
+  __ addq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
+
+  __ and_(rbx, Immediate(12345));
+
+  __ cmpq(rbx, Immediate(12345));
+  __ cmpq(rbx, Immediate(12));
+  __ cmpq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
+  __ cmpb(rax, Immediate(100));
+
+  __ or_(rbx, Immediate(12345));
+
+  __ subq(rbx, Immediate(12));
+  __ subq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
+
+  __ xor_(rbx, Immediate(12345));
+
+  __ imul(rdx, rcx, Immediate(12));
+  __ imul(rdx, rcx, Immediate(1000));
+
+  __ cld();
+
+  __ subq(rdx, Operand(rbx, rcx, times_4, 10000));
+  __ subq(rdx, rbx);
+
+  __ testq(rdx, Immediate(12345));
+  __ testq(Operand(rbx, rcx, times_8, 10000), rdx);
+  __ testb(Operand(rcx, rbx, times_2, 1000), rdx);
+  __ testb(Operand(rax, -20), Immediate(0x9A));
+  __ nop();
+
+  __ xor_(rdx, Immediate(12345));
+  __ xor_(rdx, Operand(rbx, rcx, times_8, 10000));
+  __ bts(Operand(rbx, rcx, times_8, 10000), rdx);
+  __ hlt();
+  __ int3();
+  __ ret(0);
+  __ ret(8);
+
+  // Calls
+
+  Label L1, L2;
+  __ bind(&L1);
+  __ nop();
+  __ call(&L1);
+  __ call(&L2);
+  __ nop();
+  __ bind(&L2);
+  __ call(Operand(rbx, rcx, times_4, 10000));
+  __ nop();
+  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+      Builtins::kLoadIC_Initialize));
+  __ call(ic, RelocInfo::CODE_TARGET);
+  __ nop();
+  __ nop();
+
+  __ jmp(&L1);
+  __ jmp(Operand(rbx, rcx, times_4, 10000));
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  ExternalReference after_break_target =
+      ExternalReference(Debug_Address::AfterBreakTarget(),
+                        assm.isolate());
+#endif  // ENABLE_DEBUGGER_SUPPORT
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+  __ nop();
+
+
+  Label Ljcc;
+  __ nop();
+  // Long jumps
+  __ j(overflow, &Ljcc);
+  __ j(no_overflow, &Ljcc);
+  __ j(below, &Ljcc);
+  __ j(above_equal, &Ljcc);
+  __ j(equal, &Ljcc);
+  __ j(not_equal, &Ljcc);
+  __ j(below_equal, &Ljcc);
+  __ j(above, &Ljcc);
+  __ j(sign, &Ljcc);
+  __ j(not_sign, &Ljcc);
+  __ j(parity_even, &Ljcc);
+  __ j(parity_odd, &Ljcc);
+  __ j(less, &Ljcc);
+  __ j(greater_equal, &Ljcc);
+  __ j(less_equal, &Ljcc);
+  __ j(greater, &Ljcc);
+  __ nop();
+  __ bind(&Ljcc);
+  // Short jumps
+  __ j(overflow, &Ljcc);
+  __ j(no_overflow, &Ljcc);
+  __ j(below, &Ljcc);
+  __ j(above_equal, &Ljcc);
+  __ j(equal, &Ljcc);
+  __ j(not_equal, &Ljcc);
+  __ j(below_equal, &Ljcc);
+  __ j(above, &Ljcc);
+  __ j(sign, &Ljcc);
+  __ j(not_sign, &Ljcc);
+  __ j(parity_even, &Ljcc);
+  __ j(parity_odd, &Ljcc);
+  __ j(less, &Ljcc);
+  __ j(greater_equal, &Ljcc);
+  __ j(less_equal, &Ljcc);
+  __ j(greater, &Ljcc);
+
+  // 0xD9 instructions
+  __ nop();
+
+  __ fld(1);
+  __ fld1();
+  __ fldz();
+  __ fldpi();
+  __ fabs();
+  __ fchs();
+  __ fprem();
+  __ fprem1();
+  __ fincstp();
+  __ ftst();
+  __ fxch(3);
+  __ fld_s(Operand(rbx, rcx, times_4, 10000));
+  __ fstp_s(Operand(rbx, rcx, times_4, 10000));
+  __ ffree(3);
+  __ fld_d(Operand(rbx, rcx, times_4, 10000));
+  __ fstp_d(Operand(rbx, rcx, times_4, 10000));
+  __ nop();
+
+  __ fild_s(Operand(rbx, rcx, times_4, 10000));
+  __ fistp_s(Operand(rbx, rcx, times_4, 10000));
+  __ fild_d(Operand(rbx, rcx, times_4, 10000));
+  __ fistp_d(Operand(rbx, rcx, times_4, 10000));
+  __ fnstsw_ax();
+  __ nop();
+  __ fadd(3);
+  __ fsub(3);
+  __ fmul(3);
+  __ fdiv(3);
+
+  __ faddp(3);
+  __ fsubp(3);
+  __ fmulp(3);
+  __ fdivp(3);
+  __ fcompp();
+  __ fwait();
+  __ nop();
+  {
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope fscope(SSE2);
+      __ cvttss2si(rdx, Operand(rbx, rcx, times_4, 10000));
+      __ cvttss2si(rdx, xmm1);
+      __ cvttsd2si(rdx, Operand(rbx, rcx, times_4, 10000));
+      __ cvttsd2si(rdx, xmm1);
+      __ cvttsd2siq(rdx, xmm1);
+      __ addsd(xmm1, xmm0);
+      __ mulsd(xmm1, xmm0);
+      __ subsd(xmm1, xmm0);
+      __ divsd(xmm1, xmm0);
+      __ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
+      __ ucomisd(xmm0, xmm1);
+
+      // 128 bit move instructions.
+      __ movdqa(xmm0, Operand(rbx, rcx, times_4, 10000));
+      __ movdqa(Operand(rbx, rcx, times_4, 10000), xmm0);
+    }
+  }
+
+  // cmov.
+  {
+    if (CpuFeatures::IsSupported(CMOV)) {
+      CpuFeatures::Scope use_cmov(CMOV);
+      __ cmovq(overflow, rax, Operand(rax, 0));
+      __ cmovq(no_overflow, rax, Operand(rax, 1));
+      __ cmovq(below, rax, Operand(rax, 2));
+      __ cmovq(above_equal, rax, Operand(rax, 3));
+      __ cmovq(equal, rax, Operand(rbx, 0));
+      __ cmovq(not_equal, rax, Operand(rbx, 1));
+      __ cmovq(below_equal, rax, Operand(rbx, 2));
+      __ cmovq(above, rax, Operand(rbx, 3));
+      __ cmovq(sign, rax, Operand(rcx, 0));
+      __ cmovq(not_sign, rax, Operand(rcx, 1));
+      __ cmovq(parity_even, rax, Operand(rcx, 2));
+      __ cmovq(parity_odd, rax, Operand(rcx, 3));
+      __ cmovq(less, rax, Operand(rdx, 0));
+      __ cmovq(greater_equal, rax, Operand(rdx, 1));
+      __ cmovq(less_equal, rax, Operand(rdx, 2));
+      __ cmovq(greater, rax, Operand(rdx, 3));
+    }
+  }
+
+  // andpd, etc.
+  {
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope fscope(SSE2);
+      __ andpd(xmm0, xmm1);
+      __ andpd(xmm1, xmm2);
+
+      __ movaps(xmm0, xmm1);
+      __ movaps(xmm1, xmm2);
+    }
+  }
+
+  // Nop instructions
+  for (int i = 0; i < 16; i++) {
+    __ Nop(i);
+  }
+
+  __ ret(0);
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Object* code = HEAP->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+  CHECK(code->IsCode());
+#ifdef OBJECT_PRINT
+  Code::cast(code)->Print();
+  byte* begin = Code::cast(code)->instruction_start();
+  byte* end = begin + Code::cast(code)->instruction_size();
+  disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+}
+
+#undef __
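
The new test-disasm-x64.cc above follows the same shape as its ia32 and ARM
siblings: emit a broad sample of encodings, materialize the buffer as a Code
object, and, in OBJECT_PRINT builds, run the disassembler over the emitted
range. The only pass criterion is that nothing crashes or asserts;
condensed, the closing phase is:

    CodeDesc desc;
    assm.GetCode(&desc);
    Object* code = HEAP->CreateCode(
        desc, Code::ComputeFlags(Code::STUB),
        Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
    CHECK(code->IsCode());
    #ifdef OBJECT_PRINT
    byte* begin = Code::cast(code)->instruction_start();
    disasm::Disassembler::Disassemble(
        stdout, begin, begin + Code::cast(code)->instruction_size());
    #endif
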
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 6c2afd4..a56f250 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -18,14 +18,30 @@
       : has_A2(false), has_B2(false), has_C2(false) {
   }
 
-  void Apply(i::HeapEntry** entry_ptr) {
-    if (IsReachableNodeWithName(*entry_ptr, "A2")) has_A2 = true;
-    if (IsReachableNodeWithName(*entry_ptr, "B2")) has_B2 = true;
-    if (IsReachableNodeWithName(*entry_ptr, "C2")) has_C2 = true;
+  void CheckEntry(i::HeapEntry* entry) {
+    if (strcmp(entry->name(), "A2") == 0) has_A2 = true;
+    if (strcmp(entry->name(), "B2") == 0) has_B2 = true;
+    if (strcmp(entry->name(), "C2") == 0) has_C2 = true;
   }
 
-  static bool IsReachableNodeWithName(i::HeapEntry* entry, const char* name) {
-    return strcmp(name, entry->name()) == 0 && entry->painted_reachable();
+  void CheckAllReachables(i::HeapEntry* root) {
+    i::List<i::HeapEntry*> list(10);
+    list.Add(root);
+    root->paint();
+    CheckEntry(root);
+    while (!list.is_empty()) {
+      i::HeapEntry* entry = list.RemoveLast();
+      i::Vector<i::HeapGraphEdge> children = entry->children();
+      for (int i = 0; i < children.length(); ++i) {
+        if (children[i].type() == i::HeapGraphEdge::kShortcut) continue;
+        i::HeapEntry* child = children[i].to();
+        if (!child->painted()) {
+          list.Add(child);
+          child->paint();
+          CheckEntry(child);
+        }
+      }
+    }
   }
 
   bool has_A2;
@@ -90,10 +106,6 @@
       const_cast<i::HeapSnapshot*>(
           reinterpret_cast<const i::HeapSnapshot*>(snapshot_env2));
   const v8::HeapGraphNode* global_env2 = GetGlobalObject(snapshot_env2);
-  // Paint all nodes reachable from global object.
-  i_snapshot_env2->ClearPaint();
-  const_cast<i::HeapEntry*>(
-      reinterpret_cast<const i::HeapEntry*>(global_env2))->PaintAllReachable();
 
   // Verify that the JS global object of env2 has the '..2' properties.
   const v8::HeapGraphNode* a2_node =
@@ -105,8 +117,11 @@
       NULL, GetProperty(global_env2, v8::HeapGraphEdge::kShortcut, "b2_2"));
   CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::kShortcut, "c2"));
 
+  // Paint all nodes reachable from global object.
   NamedEntriesDetector det;
-  i_snapshot_env2->IterateEntries(&det);
+  i_snapshot_env2->ClearPaint();
+  det.CheckAllReachables(const_cast<i::HeapEntry*>(
+      reinterpret_cast<const i::HeapEntry*>(global_env2)));
   CHECK(det.has_A2);
   CHECK(det.has_B2);
   CHECK(det.has_C2);
@@ -136,14 +151,47 @@
       GetProperty(x, v8::HeapGraphEdge::kProperty, "b");
   CHECK_NE(NULL, x2);
 
-  // Test approximate sizes.
-  CHECK_EQ(x->GetSelfSize() * 3, x->GetRetainedSize(false));
-  CHECK_EQ(x1->GetSelfSize(), x1->GetRetainedSize(false));
-  CHECK_EQ(x2->GetSelfSize(), x2->GetRetainedSize(false));
-  // Test exact sizes.
-  CHECK_EQ(x->GetSelfSize() * 3, x->GetRetainedSize(true));
-  CHECK_EQ(x1->GetSelfSize(), x1->GetRetainedSize(true));
-  CHECK_EQ(x2->GetSelfSize(), x2->GetRetainedSize(true));
+  // Test sizes.
+  CHECK_EQ(x->GetSelfSize() * 3, x->GetRetainedSize());
+  CHECK_EQ(x1->GetSelfSize(), x1->GetRetainedSize());
+  CHECK_EQ(x2->GetSelfSize(), x2->GetRetainedSize());
+}
+
+
+TEST(BoundFunctionInSnapshot) {
+  v8::HandleScope scope;
+  LocalContext env;
+  CompileRun(
+      "function myFunction(a, b) { this.a = a; this.b = b; }\n"
+      "function AAAAA() {}\n"
+      "boundFunction = myFunction.bind(new AAAAA(), 20, new Number(12)); \n");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("sizes"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  const v8::HeapGraphNode* f =
+      GetProperty(global, v8::HeapGraphEdge::kShortcut, "boundFunction");
+  CHECK(f);
+  CHECK_EQ(v8::String::New("native_bind"), f->GetName());
+  const v8::HeapGraphNode* bindings =
+      GetProperty(f, v8::HeapGraphEdge::kInternal, "bindings");
+  CHECK_NE(NULL, bindings);
+  CHECK_EQ(v8::HeapGraphNode::kArray, bindings->GetType());
+  CHECK_EQ(4, bindings->GetChildrenCount());
+
+  const v8::HeapGraphNode* bound_this = GetProperty(
+      f, v8::HeapGraphEdge::kShortcut, "bound_this");
+  CHECK(bound_this);
+  CHECK_EQ(v8::HeapGraphNode::kObject, bound_this->GetType());
+
+  const v8::HeapGraphNode* bound_function = GetProperty(
+      f, v8::HeapGraphEdge::kShortcut, "bound_function");
+  CHECK(bound_function);
+  CHECK_EQ(v8::HeapGraphNode::kClosure, bound_function->GetType());
+
+  const v8::HeapGraphNode* bound_argument = GetProperty(
+      f, v8::HeapGraphEdge::kShortcut, "bound_argument_1");
+  CHECK(bound_argument);
+  CHECK_EQ(v8::HeapGraphNode::kObject, bound_argument->GetType());
 }
 
 
@@ -252,6 +300,28 @@
   CHECK_EQ(v8::HeapGraphNode::kHeapNumber, b->GetType());
 }
 
+TEST(HeapSnapshotSlicedString) {
+  v8::HandleScope scope;
+  LocalContext env;
+  CompileRun(
+      "parent_string = \"123456789.123456789.123456789.123456789.123456789."
+      "123456789.123456789.123456789.123456789.123456789."
+      "123456789.123456789.123456789.123456789.123456789."
+      "123456789.123456789.123456789.123456789.123456789.\";"
+      "child_string = parent_string.slice(100);");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("strings"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  const v8::HeapGraphNode* parent_string =
+      GetProperty(global, v8::HeapGraphEdge::kShortcut, "parent_string");
+  CHECK_NE(NULL, parent_string);
+  const v8::HeapGraphNode* child_string =
+      GetProperty(global, v8::HeapGraphEdge::kShortcut, "child_string");
+  CHECK_NE(NULL, child_string);
+  const v8::HeapGraphNode* parent =
+      GetProperty(child_string, v8::HeapGraphEdge::kInternal, "parent");
+  CHECK_EQ(parent_string, parent);
+}
 
 TEST(HeapSnapshotInternalReferences) {
   v8::HandleScope scope;
@@ -282,6 +352,59 @@
 #define CHECK_NE_UINT64_T(a, b) \
   CHECK((a) != (b))  // NOLINT
 
+TEST(HeapEntryIdsAndArrayShift) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun(
+      "function AnObject() {\n"
+      "    this.first = 'first';\n"
+      "    this.second = 'second';\n"
+      "}\n"
+      "var a = new Array();\n"
+      "for (var i = 0; i < 10; ++i)\n"
+      "  a.push(new AnObject());\n");
+  const v8::HeapSnapshot* snapshot1 =
+      v8::HeapProfiler::TakeSnapshot(v8_str("s1"));
+
+  CompileRun(
+      "for (var i = 0; i < 1; ++i)\n"
+      "  a.shift();\n");
+
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+
+  const v8::HeapSnapshot* snapshot2 =
+      v8::HeapProfiler::TakeSnapshot(v8_str("s2"));
+
+  const v8::HeapGraphNode* global1 = GetGlobalObject(snapshot1);
+  const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
+  CHECK_NE_UINT64_T(0, global1->GetId());
+  CHECK_EQ_UINT64_T(global1->GetId(), global2->GetId());
+
+  const v8::HeapGraphNode* a1 =
+      GetProperty(global1, v8::HeapGraphEdge::kProperty, "a");
+  CHECK_NE(NULL, a1);
+  const v8::HeapGraphNode* e1 =
+      GetProperty(a1, v8::HeapGraphEdge::kHidden, "1");
+  CHECK_NE(NULL, e1);
+  const v8::HeapGraphNode* k1 =
+      GetProperty(e1, v8::HeapGraphEdge::kInternal, "elements");
+  CHECK_NE(NULL, k1);
+  const v8::HeapGraphNode* a2 =
+      GetProperty(global2, v8::HeapGraphEdge::kProperty, "a");
+  CHECK_NE(NULL, a2);
+  const v8::HeapGraphNode* e2 =
+      GetProperty(a2, v8::HeapGraphEdge::kHidden, "1");
+  CHECK_NE(NULL, e2);
+  const v8::HeapGraphNode* k2 =
+      GetProperty(e2, v8::HeapGraphEdge::kInternal, "elements");
+  CHECK_NE(NULL, k2);
+
+  CHECK_EQ_UINT64_T(a1->GetId(), a2->GetId());
+  CHECK_EQ_UINT64_T(e1->GetId(), e2->GetId());
+  CHECK_EQ_UINT64_T(k1->GetId(), k2->GetId());
+}
+
 TEST(HeapEntryIdsAndGC) {
   v8::HandleScope scope;
   LocalContext env;
@@ -294,7 +417,7 @@
   const v8::HeapSnapshot* snapshot1 =
       v8::HeapProfiler::TakeSnapshot(v8_str("s1"));
 
-  HEAP->CollectAllGarbage(true);  // Enforce compaction.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   const v8::HeapSnapshot* snapshot2 =
       v8::HeapProfiler::TakeSnapshot(v8_str("s2"));
@@ -563,6 +686,22 @@
 }
 
 
+static void CheckChildrenIds(const v8::HeapSnapshot* snapshot,
+                             const v8::HeapGraphNode* node,
+                             int level, int max_level) {
+  if (level > max_level) return;
+  CHECK_EQ(node, snapshot->GetNodeById(node->GetId()));
+  for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
+    const v8::HeapGraphEdge* prop = node->GetChild(i);
+    const v8::HeapGraphNode* child =
+        snapshot->GetNodeById(prop->GetToNode()->GetId());
+    CHECK_EQ_UINT64_T(prop->GetToNode()->GetId(), child->GetId());
+    CHECK_EQ(prop->GetToNode(), child);
+    CheckChildrenIds(snapshot, child, level + 1, max_level);
+  }
+}
+
+
 TEST(HeapSnapshotGetNodeById) {
   v8::HandleScope scope;
   LocalContext env;
@@ -570,12 +709,7 @@
   const v8::HeapSnapshot* snapshot =
       v8::HeapProfiler::TakeSnapshot(v8_str("id"));
   const v8::HeapGraphNode* root = snapshot->GetRoot();
-  CHECK_EQ(root, snapshot->GetNodeById(root->GetId()));
-  for (int i = 0, count = root->GetChildrenCount(); i < count; ++i) {
-    const v8::HeapGraphEdge* prop = root->GetChild(i);
-    CHECK_EQ(
-        prop->GetToNode(), snapshot->GetNodeById(prop->GetToNode()->GetId()));
-  }
+  CheckChildrenIds(snapshot, root, 0, 3);
   // Check a big id, which should not exist yet.
   CHECK_EQ(NULL, snapshot->GetNodeById(0x1000000UL));
 }
@@ -607,7 +741,7 @@
   LocalContext env;
 
   const int snapshots_count = v8::HeapProfiler::GetSnapshotsCount();
-  TestActivityControl aborting_control(3);
+  TestActivityControl aborting_control(1);
   const v8::HeapSnapshot* no_snapshot =
       v8::HeapProfiler::TakeSnapshot(v8_str("abort"),
                                      v8::HeapSnapshot::kFull,
@@ -633,11 +767,13 @@
 class TestRetainedObjectInfo : public v8::RetainedObjectInfo {
  public:
   TestRetainedObjectInfo(int hash,
+                         const char* group_label,
                          const char* label,
                          intptr_t element_count = -1,
                          intptr_t size = -1)
       : disposed_(false),
         hash_(hash),
+        group_label_(group_label),
         label_(label),
         element_count_(element_count),
         size_(size) {
@@ -652,6 +788,7 @@
     return GetHash() == other->GetHash();
   }
   virtual intptr_t GetHash() { return hash_; }
+  virtual const char* GetGroupLabel() { return group_label_; }
   virtual const char* GetLabel() { return label_; }
   virtual intptr_t GetElementCount() { return element_count_; }
   virtual intptr_t GetSizeInBytes() { return size_; }
@@ -663,15 +800,15 @@
       if (wrapper->IsString()) {
         v8::String::AsciiValue ascii(wrapper);
         if (strcmp(*ascii, "AAA") == 0)
-          return new TestRetainedObjectInfo(1, "aaa", 100);
+          return new TestRetainedObjectInfo(1, "aaa-group", "aaa", 100);
         else if (strcmp(*ascii, "BBB") == 0)
-          return new TestRetainedObjectInfo(1, "aaa", 100);
+          return new TestRetainedObjectInfo(1, "aaa-group", "aaa", 100);
       }
     } else if (class_id == 2) {
       if (wrapper->IsString()) {
         v8::String::AsciiValue ascii(wrapper);
         if (strcmp(*ascii, "CCC") == 0)
-          return new TestRetainedObjectInfo(2, "ccc");
+          return new TestRetainedObjectInfo(2, "ccc-group", "ccc");
       }
     }
     CHECK(false);
@@ -684,6 +821,7 @@
   bool disposed_;
   int category_;
   int hash_;
+  const char* group_label_;
   const char* label_;
   intptr_t element_count_;
   intptr_t size_;
@@ -736,18 +874,21 @@
     delete TestRetainedObjectInfo::instances[i];
   }
 
-  const v8::HeapGraphNode* natives = GetNode(
-      snapshot->GetRoot(), v8::HeapGraphNode::kObject, "(Native objects)");
-  CHECK_NE(NULL, natives);
-  CHECK_EQ(2, natives->GetChildrenCount());
+  const v8::HeapGraphNode* native_group_aaa = GetNode(
+      snapshot->GetRoot(), v8::HeapGraphNode::kSynthetic, "aaa-group");
+  CHECK_NE(NULL, native_group_aaa);
+  CHECK_EQ(1, native_group_aaa->GetChildrenCount());
   const v8::HeapGraphNode* aaa = GetNode(
-      natives, v8::HeapGraphNode::kNative, "aaa / 100 entries");
+      native_group_aaa, v8::HeapGraphNode::kNative, "aaa / 100 entries");
   CHECK_NE(NULL, aaa);
+  CHECK_EQ(2, aaa->GetChildrenCount());
+
+  const v8::HeapGraphNode* native_group_ccc = GetNode(
+      snapshot->GetRoot(), v8::HeapGraphNode::kSynthetic, "ccc-group");
   const v8::HeapGraphNode* ccc = GetNode(
-      natives, v8::HeapGraphNode::kNative, "ccc");
+      native_group_ccc, v8::HeapGraphNode::kNative, "ccc");
   CHECK_NE(NULL, ccc);
 
-  CHECK_EQ(2, aaa->GetChildrenCount());
   const v8::HeapGraphNode* n_AAA = GetNode(
       aaa, v8::HeapGraphNode::kString, "AAA");
   CHECK_NE(NULL, n_AAA);
@@ -765,6 +906,75 @@
 }
 
 
+class GraphWithImplicitRefs {
+ public:
+  static const int kObjectsCount = 4;
+  explicit GraphWithImplicitRefs(LocalContext* env) {
+    CHECK_EQ(NULL, instance_);
+    instance_ = this;
+    for (int i = 0; i < kObjectsCount; i++) {
+      objects_[i] = v8::Persistent<v8::Object>::New(v8::Object::New());
+    }
+    (*env)->Global()->Set(v8_str("root_object"), objects_[0]);
+  }
+  ~GraphWithImplicitRefs() {
+    instance_ = NULL;
+  }
+
+  static void gcPrologue() {
+    instance_->AddImplicitReferences();
+  }
+
+ private:
+  void AddImplicitReferences() {
+    // 0 -> 1
+    v8::V8::AddImplicitReferences(
+        v8::Persistent<v8::Object>::Cast(objects_[0]), &objects_[1], 1);
+    // Adding two more references (note length=2 in params): 1 -> 2, 1 -> 3
+    v8::V8::AddImplicitReferences(
+        v8::Persistent<v8::Object>::Cast(objects_[1]), &objects_[2], 2);
+  }
+
+  v8::Persistent<v8::Value> objects_[kObjectsCount];
+  static GraphWithImplicitRefs* instance_;
+};
+
+GraphWithImplicitRefs* GraphWithImplicitRefs::instance_ = NULL;
+
+
+TEST(HeapSnapshotImplicitReferences) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  GraphWithImplicitRefs graph(&env);
+  v8::V8::SetGlobalGCPrologueCallback(&GraphWithImplicitRefs::gcPrologue);
+
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("implicit_refs"));
+
+  const v8::HeapGraphNode* global_object = GetGlobalObject(snapshot);
+  // Use kShortcut type to skip intermediate JSGlobalPropertyCell
+  const v8::HeapGraphNode* obj0 = GetProperty(
+      global_object, v8::HeapGraphEdge::kShortcut, "root_object");
+  CHECK(obj0);
+  CHECK_EQ(v8::HeapGraphNode::kObject, obj0->GetType());
+  const v8::HeapGraphNode* obj1 = GetProperty(
+      obj0, v8::HeapGraphEdge::kInternal, "native");
+  CHECK(obj1);
+  int implicit_targets_count = 0;
+  for (int i = 0, count = obj1->GetChildrenCount(); i < count; ++i) {
+    const v8::HeapGraphEdge* prop = obj1->GetChild(i);
+    v8::String::AsciiValue prop_name(prop->GetName());
+    if (prop->GetType() == v8::HeapGraphEdge::kInternal &&
+        strcmp("native", *prop_name) == 0) {
+      ++implicit_targets_count;
+    }
+  }
+  CHECK_EQ(2, implicit_targets_count);
+  v8::V8::SetGlobalGCPrologueCallback(NULL);
+}
+
+
 TEST(DeleteAllHeapSnapshots) {
   v8::HandleScope scope;
   LocalContext env;
@@ -873,6 +1083,20 @@
 }
 
 
+TEST(NoHandleLeaks) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun("document = { URL:\"abcdefgh\" };");
+
+  v8::Handle<v8::String> name(v8_str("leakz"));
+  int count_before = i::HandleScope::NumberOfHandles();
+  v8::HeapProfiler::TakeSnapshot(name);
+  int count_after = i::HandleScope::NumberOfHandles();
+  CHECK_EQ(count_before, count_after);
+}
+
+
 TEST(NodesIteration) {
   v8::HandleScope scope;
   LocalContext env;
@@ -1001,3 +1225,144 @@
   CHECK_EQ(0, StringCmp(
       "Object", i::V8HeapExplorer::GetConstructorName(*js_obj6)));
 }
+
+
+TEST(FastCaseGetter) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun("var obj1 = {};\n"
+             "obj1.__defineGetter__('propWithGetter', function Y() {\n"
+             "  return 42;\n"
+             "});\n"
+             "obj1.__defineSetter__('propWithSetter', function Z(value) {\n"
+             "  return this.value_ = value;\n"
+             "});\n");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("fastCaseGetter"));
+
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  CHECK_NE(NULL, global);
+  const v8::HeapGraphNode* obj1 =
+      GetProperty(global, v8::HeapGraphEdge::kShortcut, "obj1");
+  CHECK_NE(NULL, obj1);
+  const v8::HeapGraphNode* getterFunction =
+      GetProperty(obj1, v8::HeapGraphEdge::kProperty, "get-propWithGetter");
+  CHECK_NE(NULL, getterFunction);
+  const v8::HeapGraphNode* setterFunction =
+      GetProperty(obj1, v8::HeapGraphEdge::kProperty, "set-propWithSetter");
+  CHECK_NE(NULL, setterFunction);
+}
+
+
+bool HasWeakEdge(const v8::HeapGraphNode* node) {
+  for (int i = 0; i < node->GetChildrenCount(); ++i) {
+    const v8::HeapGraphEdge* handle_edge = node->GetChild(i);
+    if (handle_edge->GetType() == v8::HeapGraphEdge::kWeak) return true;
+  }
+  return false;
+}
+
+
+bool HasWeakGlobalHandle() {
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("weaks"));
+  const v8::HeapGraphNode* gc_roots = GetNode(
+      snapshot->GetRoot(), v8::HeapGraphNode::kObject, "(GC roots)");
+  CHECK_NE(NULL, gc_roots);
+  const v8::HeapGraphNode* global_handles = GetNode(
+      gc_roots, v8::HeapGraphNode::kObject, "(Global handles)");
+  CHECK_NE(NULL, global_handles);
+  return HasWeakEdge(global_handles);
+}
+
+
+static void PersistentHandleCallback(v8::Persistent<v8::Value> handle, void*) {
+  handle.Dispose();
+}
+
+
+TEST(WeakGlobalHandle) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CHECK(!HasWeakGlobalHandle());
+
+  v8::Persistent<v8::Object> handle =
+      v8::Persistent<v8::Object>::New(v8::Object::New());
+  handle.MakeWeak(NULL, PersistentHandleCallback);
+
+  CHECK(HasWeakGlobalHandle());
+}
+
+
+TEST(WeakGlobalContextRefs) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("weaks"));
+  const v8::HeapGraphNode* gc_roots = GetNode(
+      snapshot->GetRoot(), v8::HeapGraphNode::kObject, "(GC roots)");
+  CHECK_NE(NULL, gc_roots);
+  const v8::HeapGraphNode* global_handles = GetNode(
+      gc_roots, v8::HeapGraphNode::kObject, "(Global handles)");
+  CHECK_NE(NULL, global_handles);
+  const v8::HeapGraphNode* global_context = GetNode(
+      global_handles, v8::HeapGraphNode::kHidden, "system / GlobalContext");
+  CHECK_NE(NULL, global_context);
+  CHECK(HasWeakEdge(global_context));
+}
+
+
+TEST(SfiAndJsFunctionWeakRefs) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun(
+      "fun = (function (x) { return function () { return x + 1; } })(1);");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("fun"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  CHECK_NE(NULL, global);
+  const v8::HeapGraphNode* fun =
+      GetProperty(global, v8::HeapGraphEdge::kShortcut, "fun");
+  CHECK(HasWeakEdge(fun));
+  const v8::HeapGraphNode* shared =
+      GetProperty(fun, v8::HeapGraphEdge::kInternal, "shared");
+  CHECK(HasWeakEdge(shared));
+}
+
+
+TEST(PersistentHandleCount) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  // V8 also uses global handles internally, so we can't test for an absolute
+  // number.
+  int global_handle_count = v8::HeapProfiler::GetPersistentHandleCount();
+
+  // Create some persistent handles.
+  v8::Persistent<v8::String> p_AAA =
+      v8::Persistent<v8::String>::New(v8_str("AAA"));
+  CHECK_EQ(global_handle_count + 1,
+           v8::HeapProfiler::GetPersistentHandleCount());
+  v8::Persistent<v8::String> p_BBB =
+      v8::Persistent<v8::String>::New(v8_str("BBB"));
+  CHECK_EQ(global_handle_count + 2,
+           v8::HeapProfiler::GetPersistentHandleCount());
+  v8::Persistent<v8::String> p_CCC =
+      v8::Persistent<v8::String>::New(v8_str("CCC"));
+  CHECK_EQ(global_handle_count + 3,
+           v8::HeapProfiler::GetPersistentHandleCount());
+
+  // Dispose the persistent handles in a different order.
+  p_AAA.Dispose();
+  CHECK_EQ(global_handle_count + 2,
+           v8::HeapProfiler::GetPersistentHandleCount());
+  p_CCC.Dispose();
+  CHECK_EQ(global_handle_count + 1,
+           v8::HeapProfiler::GetPersistentHandleCount());
+  p_BBB.Dispose();
+  CHECK_EQ(global_handle_count, v8::HeapProfiler::GetPersistentHandleCount());
+}
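
Two heap-profiler changes above are worth calling out. First, reachability
checking is now an explicit paint-and-worklist walk (CheckAllReachables,
which skips kShortcut edges) instead of the removed
PaintAllReachable/IterateEntries pair. Second, native objects are grouped
under synthetic per-group snapshot nodes keyed by the new
RetainedObjectInfo::GetGroupLabel() hook. A minimal embedder-side sketch of
that hook (class name and labels are illustrative):

    class MyNativeInfo : public v8::RetainedObjectInfo {
     public:
      explicit MyNativeInfo(intptr_t hash) : hash_(hash) {}
      virtual void Dispose() { delete this; }
      virtual bool IsEquivalent(RetainedObjectInfo* other) {
        return GetHash() == other->GetHash();
      }
      virtual intptr_t GetHash() { return hash_; }
      // Objects reporting the same group label are clustered under one
      // kSynthetic node named after the label in the snapshot.
      virtual const char* GetGroupLabel() { return "my-group"; }
      virtual const char* GetLabel() { return "my-object"; }
     private:
      intptr_t hash_;
    };
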
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 11b8813..999e2c6 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 #include <stdlib.h>
 
@@ -612,7 +612,7 @@
   CHECK(!obj->HasLocalProperty(*second));
 
   // check string and symbol match
-  static const char* string1 = "fisk";
+  const char* string1 = "fisk";
   Handle<String> s1 = FACTORY->NewStringFromAscii(CStrVector(string1));
   obj->SetProperty(
       *s1, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
@@ -620,7 +620,7 @@
   CHECK(obj->HasLocalProperty(*s1_symbol));
 
   // check symbol and string match
-  static const char* string2 = "fugl";
+  const char* string2 = "fugl";
   Handle<String> s2_symbol = FACTORY->LookupAsciiSymbol(string2);
   obj->SetProperty(
       *s2_symbol, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
@@ -667,22 +667,23 @@
   Handle<JSObject> object = FACTORY->NewJSObject(function);
   Handle<JSArray> array = Handle<JSArray>::cast(object);
   // We just initialized the VM, no heap allocation failure yet.
-  Object* ok = array->Initialize(0)->ToObjectChecked();
+  array->Initialize(0)->ToObjectChecked();
 
   // Set array length to 0.
-  ok = array->SetElementsLength(Smi::FromInt(0))->ToObjectChecked();
+  array->SetElementsLength(Smi::FromInt(0))->ToObjectChecked();
   CHECK_EQ(Smi::FromInt(0), array->length());
-  CHECK(array->HasFastElements());  // Must be in fast mode.
+  // Must be in fast mode.
+  CHECK(array->HasFastTypeElements());
 
   // array[length] = name.
-  ok = array->SetElement(0, *name, kNonStrictMode, true)->ToObjectChecked();
+  array->SetElement(0, *name, NONE, kNonStrictMode)->ToObjectChecked();
   CHECK_EQ(Smi::FromInt(1), array->length());
   CHECK_EQ(array->GetElement(0), *name);
 
   // Set array length with larger than smi value.
   Handle<Object> length =
       FACTORY->NewNumberFromUint(static_cast<uint32_t>(Smi::kMaxValue) + 1);
-  ok = array->SetElementsLength(*length)->ToObjectChecked();
+  array->SetElementsLength(*length)->ToObjectChecked();
 
   uint32_t int_length = 0;
   CHECK(length->ToArrayIndex(&int_length));
@@ -690,8 +691,7 @@
   CHECK(array->HasDictionaryElements());  // Must be in slow mode.
 
   // array[length] = name.
-  ok = array->SetElement(
-      int_length, *name, kNonStrictMode, true)->ToObjectChecked();
+  array->SetElement(int_length, *name, NONE, kNonStrictMode)->ToObjectChecked();
   uint32_t new_int_length = 0;
   CHECK(array->length()->ToArrayIndex(&new_int_length));
   CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
@@ -718,10 +718,8 @@
   obj->SetProperty(
       *second, Smi::FromInt(2), NONE, kNonStrictMode)->ToObjectChecked();
 
-  Object* ok =
-      obj->SetElement(0, *first, kNonStrictMode, true)->ToObjectChecked();
-
-  ok = obj->SetElement(1, *second, kNonStrictMode, true)->ToObjectChecked();
+  obj->SetElement(0, *first, NONE, kNonStrictMode)->ToObjectChecked();
+  obj->SetElement(1, *second, NONE, kNonStrictMode)->ToObjectChecked();
 
   // Make the clone.
   Handle<JSObject> clone = Copy(obj);
@@ -739,8 +737,8 @@
   clone->SetProperty(
       *second, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
 
-  ok = clone->SetElement(0, *second, kNonStrictMode, true)->ToObjectChecked();
-  ok = clone->SetElement(1, *first, kNonStrictMode, true)->ToObjectChecked();
+  clone->SetElement(0, *second, NONE, kNonStrictMode)->ToObjectChecked();
+  clone->SetElement(1, *first, NONE, kNonStrictMode)->ToObjectChecked();
 
   CHECK_EQ(obj->GetElement(1), clone->GetElement(0));
   CHECK_EQ(obj->GetElement(0), clone->GetElement(1));
@@ -813,7 +811,7 @@
 
   // Allocate a JS array to OLD_POINTER_SPACE and NEW_SPACE
   objs[next_objs_index++] = FACTORY->NewJSArray(10);
-  objs[next_objs_index++] = FACTORY->NewJSArray(10, TENURED);
+  objs[next_objs_index++] = FACTORY->NewJSArray(10, FAST_ELEMENTS, TENURED);
 
   // Allocate a small string to OLD_DATA_SPACE and NEW_SPACE
   objs[next_objs_index++] =
@@ -822,7 +820,7 @@
       FACTORY->NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
 
   // Allocate a large string (for large object space).
-  int large_size = HEAP->MaxObjectSizeInPagedSpace() + 1;
+  int large_size = Page::kMaxNonCodeHeapObjectSize + 1;
   char* str = new char[large_size];
   for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
   str[large_size - 1] = '\0';
@@ -838,49 +836,6 @@
 }
 
 
-TEST(LargeObjectSpaceContains) {
-  InitializeVM();
-
-  HEAP->CollectGarbage(NEW_SPACE);
-
-  Address current_top = HEAP->new_space()->top();
-  Page* page = Page::FromAddress(current_top);
-  Address current_page = page->address();
-  Address next_page = current_page + Page::kPageSize;
-  int bytes_to_page = static_cast<int>(next_page - current_top);
-  if (bytes_to_page <= FixedArray::kHeaderSize) {
-    // Alas, need to cross another page to be able to
-    // put desired value.
-    next_page += Page::kPageSize;
-    bytes_to_page = static_cast<int>(next_page - current_top);
-  }
-  CHECK(bytes_to_page > FixedArray::kHeaderSize);
-
-  intptr_t* flags_ptr = &Page::FromAddress(next_page)->flags_;
-  Address flags_addr = reinterpret_cast<Address>(flags_ptr);
-
-  int bytes_to_allocate =
-      static_cast<int>(flags_addr - current_top) + kPointerSize;
-
-  int n_elements = (bytes_to_allocate - FixedArray::kHeaderSize) /
-      kPointerSize;
-  CHECK_EQ(bytes_to_allocate, FixedArray::SizeFor(n_elements));
-  FixedArray* array = FixedArray::cast(
-      HEAP->AllocateFixedArray(n_elements)->ToObjectChecked());
-
-  int index = n_elements - 1;
-  CHECK_EQ(flags_ptr,
-           HeapObject::RawField(array, FixedArray::OffsetOfElementAt(index)));
-  array->set(index, Smi::FromInt(0));
-  // This chould have turned next page into LargeObjectPage:
-  // CHECK(Page::FromAddress(next_page)->IsLargeObjectPage());
-
-  HeapObject* addr = HeapObject::FromAddress(next_page + 2 * kPointerSize);
-  CHECK(HEAP->new_space()->Contains(addr));
-  CHECK(!HEAP->lo_space()->Contains(addr));
-}
-
-
 TEST(EmptyHandleEscapeFrom) {
   InitializeVM();
 
@@ -907,8 +862,7 @@
   InitializeVM();
 
   // Increase the chance of 'bump-the-pointer' allocation in old space.
-  bool force_compaction = true;
-  HEAP->CollectAllGarbage(force_compaction);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   v8::HandleScope scope;
 
@@ -975,12 +929,6 @@
     return;
   }
   CHECK(HEAP->old_pointer_space()->Contains(clone->address()));
-
-  // Step 5: verify validity of region dirty marks.
-  Address clone_addr = clone->address();
-  Page* page = Page::FromAddress(clone_addr);
-  // Check that region covering inobject property 1 is marked dirty.
-  CHECK(page->IsRegionDirty(clone_addr + (object_size - kPointerSize)));
 }
 
 
@@ -1010,17 +958,18 @@
   Handle<JSFunction> function(JSFunction::cast(func_value));
   CHECK(function->shared()->is_compiled());
 
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
+  // TODO(1609): The incremental marker does not yet support code flushing.
+  HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
 
   CHECK(function->shared()->is_compiled());
 
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
 
   // foo should no longer be in the compilation cache
   CHECK(!function->shared()->is_compiled() || function->IsOptimized());
@@ -1109,7 +1058,7 @@
     }
 
     // Mark compact handles the weak references.
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
 
     // Get rid of f3 and f5 in the same way.
@@ -1118,21 +1067,21 @@
       HEAP->PerformScavenge();
       CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
     }
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
     CompileRun("f5=null");
     for (int j = 0; j < 10; j++) {
       HEAP->PerformScavenge();
       CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
     }
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctions(ctx[i]));
 
     ctx[i]->Exit();
   }
 
   // Force compilation cache cleanup.
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   // Dispose the global contexts one by one.
   for (int i = 0; i < kNumTestContexts; i++) {
@@ -1146,7 +1095,7 @@
     }
 
     // Mark compact handles the weak references.
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     CHECK_EQ(kNumTestContexts - i - 1, CountGlobalContexts());
   }
 
@@ -1161,7 +1110,7 @@
   Handle<Object> object(HEAP->global_contexts_list());
   while (!object->IsUndefined()) {
     count++;
-    if (count == n) HEAP->CollectAllGarbage(true);
+    if (count == n) HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     object =
         Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK));
   }
@@ -1180,7 +1129,7 @@
   while (object->IsJSFunction() &&
          !Handle<JSFunction>::cast(object)->IsBuiltin()) {
     count++;
-    if (count == n) HEAP->CollectAllGarbage(true);
+    if (count == n) HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     object = Handle<Object>(
         Object::cast(JSFunction::cast(*object)->next_function_link()));
   }
@@ -1238,92 +1187,413 @@
 }
 
 
+TEST(TestSizeOfObjects) {
+  v8::V8::Initialize();
+
+  // Get initial heap size after several full GCs, which will stabilize
+  // the heap size and return with sweeping finished completely.
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  CHECK(HEAP->old_pointer_space()->IsSweepingComplete());
+  int initial_size = static_cast<int>(HEAP->SizeOfObjects());
+
+  {
+    // Allocate objects on several different old-space pages so that
+    // lazy sweeping kicks in for subsequent GC runs.
+    AlwaysAllocateScope always_allocate;
+    int filler_size = static_cast<int>(FixedArray::SizeFor(8192));
+    for (int i = 1; i <= 100; i++) {
+      HEAP->AllocateFixedArray(8192, TENURED)->ToObjectChecked();
+      CHECK_EQ(initial_size + i * filler_size,
+               static_cast<int>(HEAP->SizeOfObjects()));
+    }
+  }
+
+  // The heap size should go back to initial size after a full GC, even
+  // though sweeping didn't finish yet.
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  CHECK(!HEAP->old_pointer_space()->IsSweepingComplete());
+  CHECK_EQ(initial_size, static_cast<int>(HEAP->SizeOfObjects()));
+
+  // Advancing the sweeper step-wise should not change the heap size.
+  while (!HEAP->old_pointer_space()->IsSweepingComplete()) {
+    HEAP->old_pointer_space()->AdvanceSweeper(KB);
+    CHECK_EQ(initial_size, static_cast<int>(HEAP->SizeOfObjects()));
+  }
+}
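
In compressed form, the accounting this test pins down (filler_size is FixedArray::SizeFor(8192), as above):

  // After the stabilizing GCs:   SizeOfObjects() == initial_size.
  // After i tenured allocations: SizeOfObjects() == initial_size
  //                                                 + i * filler_size.
  // After one more full GC:      SizeOfObjects() == initial_size again, even
  //                              though lazy sweeping has not finished, and
  //                              AdvanceSweeper steps leave it unchanged.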
+
+
 TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
   InitializeVM();
+  HEAP->EnsureHeapIsIterable();
   intptr_t size_of_objects_1 = HEAP->SizeOfObjects();
-  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+  HeapIterator iterator;
   intptr_t size_of_objects_2 = 0;
   for (HeapObject* obj = iterator.next();
        obj != NULL;
        obj = iterator.next()) {
     size_of_objects_2 += obj->Size();
   }
-  // Delta must be within 1% of the larger result.
+  // Delta must be within 5% of the larger result.
+  // TODO(gc): Tighten this up by distinguishing between byte
+  // arrays that are real and those that merely mark free space
+  // on the heap.
   if (size_of_objects_1 > size_of_objects_2) {
     intptr_t delta = size_of_objects_1 - size_of_objects_2;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
            "Iterator: %" V8_PTR_PREFIX "d, "
            "delta: %" V8_PTR_PREFIX "d\n",
            size_of_objects_1, size_of_objects_2, delta);
-    CHECK_GT(size_of_objects_1 / 100, delta);
+    CHECK_GT(size_of_objects_1 / 20, delta);
   } else {
     intptr_t delta = size_of_objects_2 - size_of_objects_1;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
            "Iterator: %" V8_PTR_PREFIX "d, "
            "delta: %" V8_PTR_PREFIX "d\n",
            size_of_objects_1, size_of_objects_2, delta);
-    CHECK_GT(size_of_objects_2 / 100, delta);
+    CHECK_GT(size_of_objects_2 / 20, delta);
   }
 }
 
 
-class HeapIteratorTestHelper {
- public:
-  HeapIteratorTestHelper(Object* a, Object* b)
-      : a_(a), b_(b), a_found_(false), b_found_(false) {}
-  bool a_found() { return a_found_; }
-  bool b_found() { return b_found_; }
-  void IterateHeap(HeapIterator::HeapObjectsFiltering mode) {
-    HeapIterator iterator(mode);
-    for (HeapObject* obj = iterator.next();
-         obj != NULL;
-         obj = iterator.next()) {
-      if (obj == a_)
-        a_found_ = true;
-      else if (obj == b_)
-        b_found_ = true;
-    }
+static void FillUpNewSpace(NewSpace* new_space) {
+  // Fill up the new space until it is completely full, and make sure that
+  // the scavenger does not undo the filling.
+  v8::HandleScope scope;
+  AlwaysAllocateScope always_allocate;
+  intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
+  intptr_t number_of_fillers = (available / FixedArray::SizeFor(1000)) - 10;
+  for (intptr_t i = 0; i < number_of_fillers; i++) {
+    CHECK(HEAP->InNewSpace(*FACTORY->NewFixedArray(1000, NOT_TENURED)));
   }
- private:
-  Object* a_;
-  Object* b_;
-  bool a_found_;
-  bool b_found_;
-};
+}
 
-TEST(HeapIteratorFilterUnreachable) {
+
+TEST(GrowAndShrinkNewSpace) {
+  InitializeVM();
+  NewSpace* new_space = HEAP->new_space();
+
+  // Explicitly growing should double the space capacity.
+  intptr_t old_capacity, new_capacity;
+  old_capacity = new_space->Capacity();
+  new_space->Grow();
+  new_capacity = new_space->Capacity();
+  CHECK(2 * old_capacity == new_capacity);
+
+  old_capacity = new_space->Capacity();
+  FillUpNewSpace(new_space);
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == new_capacity);
+
+  // Explicitly shrinking should not affect capacity while the space is in
+  // use.
+  old_capacity = new_space->Capacity();
+  new_space->Shrink();
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == new_capacity);
+
+  // Let the scavenger empty the new space.
+  HEAP->CollectGarbage(NEW_SPACE);
+  CHECK_LE(new_space->Size(), old_capacity);
+
+  // Explicitly shrinking should halve the space capacity.
+  old_capacity = new_space->Capacity();
+  new_space->Shrink();
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == 2 * new_capacity);
+
+  // Consecutive shrinking should not affect space capacity.
+  old_capacity = new_space->Capacity();
+  new_space->Shrink();
+  new_space->Shrink();
+  new_space->Shrink();
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == new_capacity);
+}
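
As a concrete trace of the capacity rules checked above (the 1 MB starting figure is purely illustrative; the test does not assume it):

  // Capacity() == 1 MB initially.
  // Grow()                          -> 2 MB (doubled).
  // FillUpNewSpace(), then Shrink() -> 2 MB (no-op while objects are live).
  // CollectGarbage(NEW_SPACE), then Shrink()
  //                                 -> 1 MB (halved once the space is empty).
  // Shrink() repeatedly             -> 1 MB (already at the minimum).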
+
+
+TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
   InitializeVM();
   v8::HandleScope scope;
-  CompileRun("a = {}; b = {};");
-  v8::Handle<Object> a(ISOLATE->context()->global()->GetProperty(
-      *FACTORY->LookupAsciiSymbol("a"))->ToObjectChecked());
-  v8::Handle<Object> b(ISOLATE->context()->global()->GetProperty(
-      *FACTORY->LookupAsciiSymbol("b"))->ToObjectChecked());
-  CHECK_NE(*a, *b);
-  {
-    HeapIteratorTestHelper helper(*a, *b);
-    helper.IterateHeap(HeapIterator::kFilterUnreachable);
-    CHECK(helper.a_found());
-    CHECK(helper.b_found());
+  NewSpace* new_space = HEAP->new_space();
+  intptr_t old_capacity, new_capacity;
+  old_capacity = new_space->Capacity();
+  new_space->Grow();
+  new_capacity = new_space->Capacity();
+  CHECK(2 * old_capacity == new_capacity);
+  FillUpNewSpace(new_space);
+  HEAP->CollectAllAvailableGarbage();
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == new_capacity);
+}
+
+
+static int NumberOfGlobalObjects() {
+  int count = 0;
+  HeapIterator iterator;
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+    if (obj->IsGlobalObject()) count++;
   }
-  CHECK(ISOLATE->context()->global()->DeleteProperty(
-      *FACTORY->LookupAsciiSymbol("a"), JSObject::FORCE_DELETION));
-  // We ensure that GC will not happen, so our raw pointer stays valid.
-  AssertNoAllocation no_alloc;
-  Object* a_saved = *a;
-  a.Clear();
-  // Verify that "a" object still resides in the heap...
+  return count;
+}
+
+
+// Test that we don't embed maps from foreign contexts into
+// optimized code.
+TEST(LeakGlobalContextViaMap) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope outer_scope;
+  v8::Persistent<v8::Context> ctx1 = v8::Context::New();
+  v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+  ctx1->Enter();
+
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(4, NumberOfGlobalObjects());
+
   {
-    HeapIteratorTestHelper helper(a_saved, *b);
-    helper.IterateHeap(HeapIterator::kNoFiltering);
-    CHECK(helper.a_found());
-    CHECK(helper.b_found());
+    v8::HandleScope inner_scope;
+    CompileRun("var v = {x: 42}");
+    v8::Local<v8::Value> v = ctx1->Global()->Get(v8_str("v"));
+    ctx2->Enter();
+    ctx2->Global()->Set(v8_str("o"), v);
+    v8::Local<v8::Value> res = CompileRun(
+        "function f() { return o.x; }"
+        "for (var i = 0; i < 10; ++i) f();"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f();");
+    CHECK_EQ(42, res->Int32Value());
+    ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
+    ctx2->Exit();
+    ctx1->Exit();
+    ctx1.Dispose();
   }
-  // ...but is now unreachable.
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(2, NumberOfGlobalObjects());
+  ctx2.Dispose();
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(0, NumberOfGlobalObjects());
+}
+
+
+// Test that we don't embed functions from foreign contexts into
+// optimized code.
+TEST(LeakGlobalContextViaFunction) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope outer_scope;
+  v8::Persistent<v8::Context> ctx1 = v8::Context::New();
+  v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+  ctx1->Enter();
+
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(4, NumberOfGlobalObjects());
+
   {
-    HeapIteratorTestHelper helper(a_saved, *b);
-    helper.IterateHeap(HeapIterator::kFilterUnreachable);
-    CHECK(!helper.a_found());
-    CHECK(helper.b_found());
+    v8::HandleScope inner_scope;
+    CompileRun("var v = function() { return 42; }");
+    v8::Local<v8::Value> v = ctx1->Global()->Get(v8_str("v"));
+    ctx2->Enter();
+    ctx2->Global()->Set(v8_str("o"), v);
+    v8::Local<v8::Value> res = CompileRun(
+        "function f(x) { return x(); }"
+        "for (var i = 0; i < 10; ++i) f(o);"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f(o);");
+    CHECK_EQ(42, res->Int32Value());
+    ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
+    ctx2->Exit();
+    ctx1->Exit();
+    ctx1.Dispose();
   }
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(2, NumberOfGlobalObjects());
+  ctx2.Dispose();
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(0, NumberOfGlobalObjects());
+}
+
+
+TEST(LeakGlobalContextViaMapKeyed) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope outer_scope;
+  v8::Persistent<v8::Context> ctx1 = v8::Context::New();
+  v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+  ctx1->Enter();
+
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(4, NumberOfGlobalObjects());
+
+  {
+    v8::HandleScope inner_scope;
+    CompileRun("var v = [42, 43]");
+    v8::Local<v8::Value> v = ctx1->Global()->Get(v8_str("v"));
+    ctx2->Enter();
+    ctx2->Global()->Set(v8_str("o"), v);
+    v8::Local<v8::Value> res = CompileRun(
+        "function f() { return o[0]; }"
+        "for (var i = 0; i < 10; ++i) f();"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f();");
+    CHECK_EQ(42, res->Int32Value());
+    ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
+    ctx2->Exit();
+    ctx1->Exit();
+    ctx1.Dispose();
+  }
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(2, NumberOfGlobalObjects());
+  ctx2.Dispose();
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(0, NumberOfGlobalObjects());
+}
+
+
+TEST(LeakGlobalContextViaMapProto) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope outer_scope;
+  v8::Persistent<v8::Context> ctx1 = v8::Context::New();
+  v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+  ctx1->Enter();
+
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(4, NumberOfGlobalObjects());
+
+  {
+    v8::HandleScope inner_scope;
+    CompileRun("var v = { y: 42}");
+    v8::Local<v8::Value> v = ctx1->Global()->Get(v8_str("v"));
+    ctx2->Enter();
+    ctx2->Global()->Set(v8_str("o"), v);
+    v8::Local<v8::Value> res = CompileRun(
+        "function f() {"
+        "  var p = {x: 42};"
+        "  p.__proto__ = o;"
+        "  return p.x;"
+        "}"
+        "for (var i = 0; i < 10; ++i) f();"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f();");
+    CHECK_EQ(42, res->Int32Value());
+    ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
+    ctx2->Exit();
+    ctx1->Exit();
+    ctx1.Dispose();
+  }
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(2, NumberOfGlobalObjects());
+  ctx2.Dispose();
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(0, NumberOfGlobalObjects());
+}
+
+
+TEST(InstanceOfStubWriteBarrier) {
+  i::FLAG_allow_natives_syntax = true;
+#ifdef DEBUG
+  i::FLAG_verify_heap = true;
+#endif
+  InitializeVM();
+  if (!i::V8::UseCrankshaft()) return;
+  v8::HandleScope outer_scope;
+
+  {
+    v8::HandleScope scope;
+    CompileRun(
+        "function foo () { }"
+        "function mkbar () { return new (new Function(\"\")) (); }"
+        "function f (x) { return (x instanceof foo); }"
+        "function g () { f(mkbar()); }"
+        "f(new foo()); f(new foo());"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f(new foo()); g();");
+  }
+
+  IncrementalMarking* marking = HEAP->incremental_marking();
+  marking->Abort();
+  marking->Start();
+
+  Handle<JSFunction> f =
+      v8::Utils::OpenHandle(
+          *v8::Handle<v8::Function>::Cast(
+              v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+
+  CHECK(f->IsOptimized());
+
+  while (!Marking::IsBlack(Marking::MarkBitFrom(f->code())) &&
+         !marking->IsStopped()) {
+    marking->Step(MB);
+  }
+
+  CHECK(marking->IsMarking());
+
+  // Discard any pending GC requests; otherwise we will get a GC when we
+  // enter the code below.
+  if (ISOLATE->stack_guard()->IsGCRequest()) {
+    ISOLATE->stack_guard()->Continue(GC_REQUEST);
+  }
+
+  {
+    v8::HandleScope scope;
+    v8::Handle<v8::Object> global = v8::Context::GetCurrent()->Global();
+    v8::Handle<v8::Function> g =
+        v8::Handle<v8::Function>::Cast(global->Get(v8_str("g")));
+    g->Call(global, 0, NULL);
+  }
+
+  HEAP->incremental_marking()->set_should_hurry(true);
+  HEAP->CollectGarbage(OLD_POINTER_SPACE);
+}
+
+
+TEST(PrototypeTransitionClearing) {
+  InitializeVM();
+  v8::HandleScope scope;
+
+  CompileRun(
+      "var base = {};"
+      "var live = [];"
+      "for (var i = 0; i < 10; i++) {"
+      "  var object = {};"
+      "  var prototype = {};"
+      "  object.__proto__ = prototype;"
+      "  if (i >= 3) live.push(object, prototype);"
+      "}");
+
+  Handle<JSObject> baseObject =
+      v8::Utils::OpenHandle(
+          *v8::Handle<v8::Object>::Cast(
+              v8::Context::GetCurrent()->Global()->Get(v8_str("base"))));
+
+  // Verify that only dead prototype transitions are cleared.
+  CHECK_EQ(10, baseObject->map()->NumberOfProtoTransitions());
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  CHECK_EQ(10 - 3, baseObject->map()->NumberOfProtoTransitions());
+
+  // Verify that prototype transitions array was compacted.
+  FixedArray* trans = baseObject->map()->prototype_transitions();
+  for (int i = 0; i < 10 - 3; i++) {
+    int j = Map::kProtoTransitionHeaderSize +
+        i * Map::kProtoTransitionElementsPerEntry;
+    CHECK(trans->get(j + Map::kProtoTransitionMapOffset)->IsMap());
+    CHECK(trans->get(j + Map::kProtoTransitionPrototypeOffset)->IsJSObject());
+  }
+
+  // Make sure the next prototype lands on an old-space evacuation candidate.
+  Handle<JSObject> prototype;
+  PagedSpace* space = HEAP->old_pointer_space();
+  do {
+    prototype = FACTORY->NewJSArray(32 * KB, FAST_ELEMENTS, TENURED);
+  } while (space->FirstPage() == space->LastPage() ||
+      !space->LastPage()->Contains(prototype->address()));
+
+  // Add a prototype on an evacuation candidate and verify that transition
+  // clearing correctly records slots in prototype transition array.
+  i::FLAG_always_compact = true;
+  Handle<Map> map(baseObject->map());
+  CHECK(!space->LastPage()->Contains(map->prototype_transitions()->address()));
+  CHECK(space->LastPage()->Contains(prototype->address()));
+  baseObject->SetPrototype(*prototype, false)->ToObjectChecked();
+  CHECK(map->GetPrototypeTransition(*prototype)->IsMap());
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  CHECK(map->GetPrototypeTransition(*prototype)->IsMap());
 }
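
The compaction loop above relies on the flat layout of the prototype transitions array: a fixed-size header followed by fixed-size entries, each pairing a prototype with the map to transition to. A restatement of the indexing for entry i, using the same constants as the test:

  int entry = Map::kProtoTransitionHeaderSize +
      i * Map::kProtoTransitionElementsPerEntry;
  Object* target_map = trans->get(entry + Map::kProtoTransitionMapOffset);
  Object* prototype = trans->get(entry + Map::kProtoTransitionPrototypeOffset);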
diff --git a/test/cctest/test-lockers.cc b/test/cctest/test-lockers.cc
index 7360da5..5035f87 100644
--- a/test/cctest/test-lockers.cc
+++ b/test/cctest/test-lockers.cc
@@ -204,7 +204,11 @@
 
 // Run many threads all locking on the same isolate
 TEST(IsolateLockingStress) {
+#ifdef V8_TARGET_ARCH_MIPS
+  const int kNThreads = 50;
+#else
   const int kNThreads = 100;
+#endif
   i::List<JoinableThread*> threads(kNThreads);
   v8::Isolate* isolate = v8::Isolate::New();
   for (int i = 0; i < kNThreads; i++) {
@@ -237,7 +241,7 @@
 
 // Run many threads each accessing its own isolate without locking
 TEST(MultithreadedParallelIsolates) {
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
   const int kNThreads = 10;
 #else
   const int kNThreads = 50;
@@ -275,7 +279,11 @@
 
 // Run many threads with nested locks
 TEST(IsolateNestedLocking) {
+#ifdef V8_TARGET_ARCH_MIPS
+  const int kNThreads = 50;
+#else
   const int kNThreads = 100;
+#endif
   v8::Isolate* isolate = v8::Isolate::New();
   i::List<JoinableThread*> threads(kNThreads);
   for (int i = 0; i < kNThreads; i++) {
@@ -311,7 +319,7 @@
 
 // Run parallel threads that lock and access different isolates in parallel
 TEST(SeparateIsolatesLocksNonexclusive) {
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
   const int kNThreads = 50;
 #else
   const int kNThreads = 100;
@@ -385,7 +393,7 @@
 
 // Use unlocker inside of a Locker, multiple threads.
 TEST(LockerUnlocker) {
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
   const int kNThreads = 50;
 #else
   const int kNThreads = 100;
@@ -438,7 +446,7 @@
 
 // Use Unlocker inside two Lockers.
 TEST(LockTwiceAndUnlock) {
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
   const int kNThreads = 50;
 #else
   const int kNThreads = 100;
@@ -559,7 +567,11 @@
 
 // Locker inside an Unlocker inside a Locker.
 TEST(LockUnlockLockMultithreaded) {
+#ifdef V8_TARGET_ARCH_MIPS
+  const int kNThreads = 50;
+#else
   const int kNThreads = 100;
+#endif
   v8::Isolate* isolate = v8::Isolate::New();
   Persistent<v8::Context> context;
   {
@@ -606,7 +618,11 @@
 
 // Locker inside an Unlocker inside a Locker for default isolate.
 TEST(LockUnlockLockDefaultIsolateMultithreaded) {
+#ifdef V8_TARGET_ARCH_MIPS
+  const int kNThreads = 50;
+#else
   const int kNThreads = 100;
+#endif
   Persistent<v8::Context> context;
   {
     v8::Locker locker_;
@@ -639,3 +655,68 @@
     isolate->Dispose();
   }
 }
+
+
+static const char* kSimpleExtensionSource =
+  "(function Foo() {"
+  "  return 4;"
+  "})() ";
+
+class IsolateGenesisThread : public JoinableThread {
+ public:
+  IsolateGenesisThread(int count, const char* extension_names[])
+    : JoinableThread("IsolateGenesisThread"),
+      count_(count),
+      extension_names_(extension_names)
+  {}
+
+  virtual void Run() {
+    v8::Isolate* isolate = v8::Isolate::New();
+    {
+      v8::Isolate::Scope isolate_scope(isolate);
+      CHECK(!i::Isolate::Current()->has_installed_extensions());
+      v8::ExtensionConfiguration extensions(count_, extension_names_);
+      v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
+      CHECK(i::Isolate::Current()->has_installed_extensions());
+      context.Dispose();
+    }
+    isolate->Dispose();
+  }
+ private:
+  int count_;
+  const char** extension_names_;
+};
+
+// Test installing extensions in separate isolates concurrently.
+// http://code.google.com/p/v8/issues/detail?id=1821
+TEST(ExtensionsRegistration) {
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
+  const int kNThreads = 10;
+#else
+  const int kNThreads = 40;
+#endif
+  v8::RegisterExtension(new v8::Extension("test0",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test1",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test2",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test3",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test4",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test5",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test6",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test7",
+                                          kSimpleExtensionSource));
+  const char* extension_names[] = { "test0", "test1",
+                                    "test2", "test3", "test4",
+                                    "test5", "test6", "test7" };
+  i::List<JoinableThread*> threads(kNThreads);
+  for (int i = 0; i < kNThreads; i++) {
+    threads.Add(new IsolateGenesisThread(8, extension_names));
+  }
+  StartJoinAndDeleteThreads(threads);
+}
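
For a single isolate, the registration pattern the threads race on reduces to three steps: register an extension once, name it in a v8::ExtensionConfiguration, and hand that configuration to v8::Context::New. A minimal single-threaded sketch using the same calls as the test (the extension name "mini" is illustrative):

  v8::RegisterExtension(new v8::Extension("mini", kSimpleExtensionSource));
  const char* names[] = { "mini" };
  v8::ExtensionConfiguration extensions(1, names);
  v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
  // ... use the context ...
  context.Dispose();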
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index f536e6b..6847ef7 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -307,6 +307,7 @@
   // Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace"
   int base = 0;
   CHECK_GT(sample.frames_count, base + 1);
+
   CHECK(IsAddressWithinFuncCode("JSFuncDoTrace", sample.stack[base + 0]));
   CHECK(IsAddressWithinFuncCode("JSTrace", sample.stack[base + 1]));
 }
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 72e663c..6f2324d 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -494,7 +494,7 @@
       "    (function a(j) { return function b() { return j; } })(100);\n"
       "})(this);");
   v8::V8::PauseProfiler();
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
   LOGGER->StringEvent("test-logging-done", "");
 
   // Iterate heap to find compiled functions, will write to log.
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index dcb51a0..973af19 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,6 +27,14 @@
 
 #include <stdlib.h>
 
+#ifdef __linux__
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#endif
+
 #include "v8.h"
 
 #include "global-handles.h"
@@ -44,21 +52,21 @@
 }
 
 
-TEST(MarkingStack) {
+TEST(MarkingDeque) {
   int mem_size = 20 * kPointerSize;
   byte* mem = NewArray<byte>(20*kPointerSize);
   Address low = reinterpret_cast<Address>(mem);
   Address high = low + mem_size;
-  MarkingStack s;
+  MarkingDeque s;
   s.Initialize(low, high);
 
   Address address = NULL;
-  while (!s.is_full()) {
-    s.Push(HeapObject::FromAddress(address));
+  while (!s.IsFull()) {
+    s.PushBlack(HeapObject::FromAddress(address));
     address += kPointerSize;
   }
 
-  while (!s.is_empty()) {
+  while (!s.IsEmpty()) {
     Address value = s.Pop()->address();
     address -= kPointerSize;
     CHECK_EQ(address, value);
@@ -78,7 +86,7 @@
   // from new space.
   FLAG_gc_global = true;
   FLAG_always_compact = true;
-  HEAP->ConfigureHeap(2*256*KB, 4*MB, 4*MB);
+  HEAP->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
 
   InitializeVM();
 
@@ -86,7 +94,7 @@
 
   // Allocate a fixed array in the new space.
   int array_size =
-      (HEAP->MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
+      (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
       (kPointerSize * 4);
   Object* obj = HEAP->AllocateFixedArray(array_size)->ToObjectChecked();
 
@@ -104,7 +112,7 @@
 
 
 TEST(NoPromotion) {
-  HEAP->ConfigureHeap(2*256*KB, 4*MB, 4*MB);
+  HEAP->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
 
   // Test the situation that some objects in new space are promoted to
   // the old space
@@ -116,9 +124,12 @@
   HEAP->CollectGarbage(OLD_POINTER_SPACE);
 
   // Allocate a big Fixed array in the new space.
-  int size = (HEAP->MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
-      kPointerSize;
-  Object* obj = HEAP->AllocateFixedArray(size)->ToObjectChecked();
+  int max_size =
+      Min(Page::kMaxNonCodeHeapObjectSize, HEAP->MaxObjectSizeInNewSpace());
+
+  int length = (max_size - FixedArray::kHeaderSize) / (2*kPointerSize);
+  Object* obj = i::Isolate::Current()->heap()->AllocateFixedArray(length)->
+      ToObjectChecked();
 
   Handle<FixedArray> array(FixedArray::cast(obj));
 
@@ -139,9 +150,6 @@
 
   // Call mark compact GC, and it should pass.
   HEAP->CollectGarbage(OLD_POINTER_SPACE);
-
-  // array should not be promoted because the old space is full.
-  CHECK(HEAP->InSpace(*array, NEW_SPACE));
 }
 
 
@@ -228,6 +236,8 @@
 }
 
 
+// TODO(1600): Compaction of the map space is temporarily removed from GC.
+#if 0
 static Handle<Map> CreateMap() {
   return FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
 }
@@ -252,11 +262,11 @@
   // be able to trigger map compaction.
   // To give an additional chance to fail, try to force compaction which
   // should be impossible right now.
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kForceCompactionMask);
   // And now map pointers should be encodable again.
   CHECK(HEAP->map_space()->MapPointersEncodable());
 }
-
+#endif
 
 static int gc_starts = 0;
 static int gc_ends = 0;
@@ -442,3 +452,100 @@
   global_handles->AddImplicitReferences(
         Handle<HeapObject>::cast(object).location(), NULL, 0);
 }
+
+
+// Here is a memory use test that uses /proc, and is therefore Linux-only.  We
+// do not care how much memory the simulator uses, since it is only there for
+// debugging purposes.
+#if defined(__linux__) && !defined(USE_SIMULATOR)
+
+
+static uintptr_t ReadLong(char* buffer, intptr_t* position, int base) {
+  char* end_address = buffer + *position;
+  uintptr_t result = strtoul(buffer + *position, &end_address, base);
+  CHECK(result != ULONG_MAX || errno != ERANGE);
+  CHECK(end_address > buffer + *position);
+  *position = end_address - buffer;
+  return result;
+}
+
+
+static intptr_t MemoryInUse() {
+  intptr_t memory_use = 0;
+
+  int fd = open("/proc/self/maps", O_RDONLY);
+  if (fd < 0) return -1;
+
+  const int kBufSize = 10000;
+  char buffer[kBufSize];
+  int length = read(fd, buffer, kBufSize);
+  intptr_t line_start = 0;
+    CHECK_LT(length, kBufSize);  // If this fails, make the buffer bigger.
+  CHECK_GT(length, 0);  // We have to find some data in the file.
+  while (line_start < length) {
+    if (buffer[line_start] == '\n') {
+      line_start++;
+      continue;
+    }
+    intptr_t position = line_start;
+    uintptr_t start = ReadLong(buffer, &position, 16);
+    CHECK_EQ(buffer[position++], '-');
+    uintptr_t end = ReadLong(buffer, &position, 16);
+    CHECK_EQ(buffer[position++], ' ');
+    CHECK(buffer[position] == '-' || buffer[position] == 'r');
+    bool read_permission = (buffer[position++] == 'r');
+    CHECK(buffer[position] == '-' || buffer[position] == 'w');
+    bool write_permission = (buffer[position++] == 'w');
+    CHECK(buffer[position] == '-' || buffer[position] == 'x');
+    bool execute_permission = (buffer[position++] == 'x');
+    CHECK(buffer[position] == '-' || buffer[position] == 'p');
+    bool private_mapping = (buffer[position++] == 'p');
+    CHECK_EQ(buffer[position++], ' ');
+    uintptr_t offset = ReadLong(buffer, &position, 16);
+    USE(offset);
+    CHECK_EQ(buffer[position++], ' ');
+    uintptr_t major = ReadLong(buffer, &position, 16);
+    USE(major);
+    CHECK_EQ(buffer[position++], ':');
+    uintptr_t minor = ReadLong(buffer, &position, 16);
+    USE(minor);
+    CHECK_EQ(buffer[position++], ' ');
+    uintptr_t inode = ReadLong(buffer, &position, 10);
+    while (position < length && buffer[position] != '\n') position++;
+    if ((read_permission || write_permission || execute_permission) &&
+        private_mapping && inode == 0) {
+      memory_use += (end - start);
+    }
+
+    line_start = position;
+  }
+  close(fd);
+  return memory_use;
+}
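
For reference, the parser above walks /proc/self/maps lines of this shape (the sample line is illustrative):

  // b7fd9000-b7fda000 rw-p 00000000 00:00 0
  // ^start   ^end     ^permissions (rwxp), then offset, device major:minor,
  // and inode; only private mappings with inode 0 (anonymous memory) and at
  // least one of r/w/x set are added to the total.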
+
+
+TEST(BootUpMemoryUse) {
+  intptr_t initial_memory = MemoryInUse();
+  FLAG_crankshaft = false;  // Avoid flakiness.
+  // The /proc filesystem exists only on Linux, and only if it is mounted.
+  // If it's not there we just skip the test.
+  if (initial_memory >= 0) {
+    InitializeVM();
+    intptr_t booted_memory = MemoryInUse();
+    if (sizeof(initial_memory) == 8) {
+      if (v8::internal::Snapshot::IsEnabled()) {
+        CHECK_LE(booted_memory - initial_memory, 6686 * 1024);  // 6476.
+      } else {
+        CHECK_LE(booted_memory - initial_memory, 6809 * 1024);  // 6628.
+      }
+    } else {
+      if (v8::internal::Snapshot::IsEnabled()) {
+        CHECK_LE(booted_memory - initial_memory, 6532 * 1024);  // 6388.
+      } else {
+        CHECK_LE(booted_memory - initial_memory, 6940 * 1024);  // 6456.
+      }
+    }
+  }
+}
+
+#endif  // __linux__ and !USE_SIMULATOR
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
index d98b675..6bcae7c 100755
--- a/test/cctest/test-parsing.cc
+++ b/test/cctest/test-parsing.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,6 +32,7 @@
 #include "v8.h"
 
 #include "cctest.h"
+#include "compiler.h"
 #include "execution.h"
 #include "isolate.h"
 #include "parser.h"
@@ -62,18 +63,19 @@
     int length = i::StrLength(key_token.keyword);
     CHECK(static_cast<int>(sizeof(buffer)) >= length);
     {
-      i::Utf8ToUC16CharacterStream stream(keyword, length);
-      i::JavaScriptScanner scanner(&unicode_cache);
-      // The scanner should parse 'let' as Token::LET for this test.
-      scanner.SetHarmonyBlockScoping(true);
+      i::Utf8ToUtf16CharacterStream stream(keyword, length);
+      i::Scanner scanner(&unicode_cache);
+      // The scanner should parse Harmony keywords for this test.
+      scanner.SetHarmonyScoping(true);
+      scanner.SetHarmonyModules(true);
       scanner.Initialize(&stream);
       CHECK_EQ(key_token.token, scanner.Next());
       CHECK_EQ(i::Token::EOS, scanner.Next());
     }
     // Removing characters will make keyword matching fail.
     {
-      i::Utf8ToUC16CharacterStream stream(keyword, length - 1);
-      i::JavaScriptScanner scanner(&unicode_cache);
+      i::Utf8ToUtf16CharacterStream stream(keyword, length - 1);
+      i::Scanner scanner(&unicode_cache);
       scanner.Initialize(&stream);
       CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
       CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -83,8 +85,8 @@
     for (int j = 0; j < static_cast<int>(ARRAY_SIZE(chars_to_append)); ++j) {
       memmove(buffer, keyword, length);
       buffer[length] = chars_to_append[j];
-      i::Utf8ToUC16CharacterStream stream(buffer, length + 1);
-      i::JavaScriptScanner scanner(&unicode_cache);
+      i::Utf8ToUtf16CharacterStream stream(buffer, length + 1);
+      i::Scanner scanner(&unicode_cache);
       scanner.Initialize(&stream);
       CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
       CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -93,8 +95,8 @@
     {
       memmove(buffer, keyword, length);
       buffer[length - 1] = '_';
-      i::Utf8ToUC16CharacterStream stream(buffer, length);
-      i::JavaScriptScanner scanner(&unicode_cache);
+      i::Utf8ToUtf16CharacterStream stream(buffer, length);
+      i::Scanner scanner(&unicode_cache);
       scanner.Initialize(&stream);
       CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
       CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -229,7 +231,7 @@
   CHECK_EQ(11, error_location.end_pos);
   // Should not crash.
   const char* message = pre_impl->BuildMessage();
-  i::Vector<const char*> args = pre_impl->BuildArgs();
+  pre_impl->BuildArgs();
   CHECK_GT(strlen(message), 0);
 }
 
@@ -253,17 +255,18 @@
   uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
   for (int i = 0; programs[i]; i++) {
     const char* program = programs[i];
-    i::Utf8ToUC16CharacterStream stream(
+    i::Utf8ToUtf16CharacterStream stream(
         reinterpret_cast<const i::byte*>(program),
         static_cast<unsigned>(strlen(program)));
     i::CompleteParserRecorder log;
-    i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+    i::Scanner scanner(i::Isolate::Current()->unicode_cache());
     scanner.Initialize(&stream);
 
+    int flags = i::kAllowLazy | i::kAllowNativesSyntax;
     v8::preparser::PreParser::PreParseResult result =
         v8::preparser::PreParser::PreParseProgram(&scanner,
                                                   &log,
-                                                  true,
+                                                  flags,
                                                   stack_limit);
     CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
     i::ScriptDataImpl data(log.ExtractData());
@@ -272,6 +275,43 @@
 }
 
 
+TEST(StandAlonePreParserNoNatives) {
+  v8::V8::Initialize();
+
+  int marker;
+  i::Isolate::Current()->stack_guard()->SetStackLimit(
+      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+  const char* programs[] = {
+      "%ArgleBargle(glop);",
+      "var x = %_IsSmi(42);",
+      NULL
+  };
+
+  uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
+  for (int i = 0; programs[i]; i++) {
+    const char* program = programs[i];
+    i::Utf8ToUtf16CharacterStream stream(
+        reinterpret_cast<const i::byte*>(program),
+        static_cast<unsigned>(strlen(program)));
+    i::CompleteParserRecorder log;
+    i::Scanner scanner(i::Isolate::Current()->unicode_cache());
+    scanner.Initialize(&stream);
+
+    // Flags don't allow natives syntax.
+    v8::preparser::PreParser::PreParseResult result =
+        v8::preparser::PreParser::PreParseProgram(&scanner,
+                                                  &log,
+                                                  i::kAllowLazy,
+                                                  stack_limit);
+    CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
+    i::ScriptDataImpl data(log.ExtractData());
+    // Data contains syntax error.
+    CHECK(data.has_error());
+  }
+}
+
+
 TEST(RegressChromium62639) {
   v8::V8::Initialize();
 
@@ -286,8 +326,9 @@
   // and then used the invalid currently scanned literal. This always
   // failed in debug mode, and sometimes crashed in release mode.
 
-  i::Utf8ToUC16CharacterStream stream(reinterpret_cast<const i::byte*>(program),
-                                      static_cast<unsigned>(strlen(program)));
+  i::Utf8ToUtf16CharacterStream stream(
+      reinterpret_cast<const i::byte*>(program),
+      static_cast<unsigned>(strlen(program)));
   i::ScriptDataImpl* data =
       i::ParserApi::PreParse(&stream, NULL, false);
   CHECK(data->HasError());
@@ -310,17 +351,17 @@
       "try { } catch (e) { var foo = function () { /* first */ } }"
       "var bar = function () { /* second */ }";
 
-  i::Utf8ToUC16CharacterStream stream(reinterpret_cast<const i::byte*>(program),
-                                      static_cast<unsigned>(strlen(program)));
-  i::ScriptDataImpl* data =
-      i::ParserApi::PartialPreParse(&stream, NULL, false);
+  v8::HandleScope handles;
+  i::Handle<i::String> source(
+      FACTORY->NewStringFromAscii(i::CStrVector(program)));
+  i::ScriptDataImpl* data = i::ParserApi::PartialPreParse(source, NULL, false);
   CHECK(!data->HasError());
 
   data->Initialize();
 
   int first_function =
       static_cast<int>(strstr(program, "function") - program);
-  int first_lbrace = first_function + static_cast<int>(strlen("function () "));
+  int first_lbrace = first_function + i::StrLength("function () ");
   CHECK_EQ('{', program[first_lbrace]);
   i::FunctionEntry entry1 = data->GetFunctionEntry(first_lbrace);
   CHECK(!entry1.is_valid());
@@ -328,7 +369,7 @@
   int second_function =
       static_cast<int>(strstr(program + first_lbrace, "function") - program);
   int second_lbrace =
-      second_function + static_cast<int>(strlen("function () "));
+      second_function + i::StrLength("function () ");
   CHECK_EQ('{', program[second_lbrace]);
   i::FunctionEntry entry2 = data->GetFunctionEntry(second_lbrace);
   CHECK(entry2.is_valid());
@@ -352,11 +393,11 @@
 
   uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
 
-  i::Utf8ToUC16CharacterStream stream(
+  i::Utf8ToUtf16CharacterStream stream(
       reinterpret_cast<const i::byte*>(*program),
       static_cast<unsigned>(kProgramSize));
   i::CompleteParserRecorder log;
-  i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+  i::Scanner scanner(i::Isolate::Current()->unicode_cache());
   scanner.Initialize(&stream);
 
 
@@ -409,10 +450,10 @@
   i::Handle<i::String> uc16_string(
       FACTORY->NewExternalStringFromTwoByte(&resource));
 
-  i::ExternalTwoByteStringUC16CharacterStream uc16_stream(
+  i::ExternalTwoByteStringUtf16CharacterStream uc16_stream(
       i::Handle<i::ExternalTwoByteString>::cast(uc16_string), start, end);
-  i::GenericStringUC16CharacterStream string_stream(ascii_string, start, end);
-  i::Utf8ToUC16CharacterStream utf8_stream(
+  i::GenericStringUtf16CharacterStream string_stream(ascii_string, start, end);
+  i::Utf8ToUtf16CharacterStream utf8_stream(
       reinterpret_cast<const i::byte*>(ascii_source), end);
   utf8_stream.SeekForward(start);
 
@@ -535,12 +576,14 @@
   char buffer[kAllUtf8CharsSizeU];
   unsigned cursor = 0;
   for (int i = 0; i <= kMaxUC16Char; i++) {
-    cursor += unibrow::Utf8::Encode(buffer + cursor, i);
+    cursor += unibrow::Utf8::Encode(buffer + cursor,
+                                    i,
+                                    unibrow::Utf16::kNoPreviousCharacter);
   }
   ASSERT(cursor == kAllUtf8CharsSizeU);
 
-  i::Utf8ToUC16CharacterStream stream(reinterpret_cast<const i::byte*>(buffer),
-                                      kAllUtf8CharsSizeU);
+  i::Utf8ToUtf16CharacterStream stream(reinterpret_cast<const i::byte*>(buffer),
+                                       kAllUtf8CharsSizeU);
   for (int i = 0; i <= kMaxUC16Char; i++) {
     CHECK_EQU(i, stream.pos());
     int32_t c = stream.Advance();
@@ -570,11 +613,11 @@
 
 #undef CHECK_EQU
 
-void TestStreamScanner(i::UC16CharacterStream* stream,
+void TestStreamScanner(i::Utf16CharacterStream* stream,
                        i::Token::Value* expected_tokens,
                        int skip_pos = 0,  // Zero means not skipping.
                        int skip_to = 0) {
-  i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+  i::Scanner scanner(i::Isolate::Current()->unicode_cache());
   scanner.Initialize(stream);
 
   int i = 0;
@@ -593,8 +636,8 @@
   v8::V8::Initialize();
 
   const char* str1 = "{ foo get for : */ <- \n\n /*foo*/ bib";
-  i::Utf8ToUC16CharacterStream stream1(reinterpret_cast<const i::byte*>(str1),
-                                       static_cast<unsigned>(strlen(str1)));
+  i::Utf8ToUtf16CharacterStream stream1(reinterpret_cast<const i::byte*>(str1),
+                                        static_cast<unsigned>(strlen(str1)));
   i::Token::Value expectations1[] = {
       i::Token::LBRACE,
       i::Token::IDENTIFIER,
@@ -612,8 +655,8 @@
   TestStreamScanner(&stream1, expectations1, 0, 0);
 
   const char* str2 = "case default const {THIS\nPART\nSKIPPED} do";
-  i::Utf8ToUC16CharacterStream stream2(reinterpret_cast<const i::byte*>(str2),
-                                       static_cast<unsigned>(strlen(str2)));
+  i::Utf8ToUtf16CharacterStream stream2(reinterpret_cast<const i::byte*>(str2),
+                                        static_cast<unsigned>(strlen(str2)));
   i::Token::Value expectations2[] = {
       i::Token::CASE,
       i::Token::DEFAULT,
@@ -643,7 +686,7 @@
   for (int i = 0; i <= 4; i++) {
      expectations3[6 - i] = i::Token::ILLEGAL;
      expectations3[5 - i] = i::Token::EOS;
-     i::Utf8ToUC16CharacterStream stream3(
+     i::Utf8ToUtf16CharacterStream stream3(
          reinterpret_cast<const i::byte*>(str3),
          static_cast<unsigned>(strlen(str3)));
      TestStreamScanner(&stream3, expectations3, 1, 1 + i);
@@ -652,10 +695,10 @@
 
 
 void TestScanRegExp(const char* re_source, const char* expected) {
-  i::Utf8ToUC16CharacterStream stream(
+  i::Utf8ToUtf16CharacterStream stream(
        reinterpret_cast<const i::byte*>(re_source),
        static_cast<unsigned>(strlen(re_source)));
-  i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+  i::Scanner scanner(i::Isolate::Current()->unicode_cache());
   scanner.Initialize(&stream);
 
   i::Token::Value start = scanner.peek();
@@ -708,25 +751,320 @@
 }
 
 
-void TestParserSync(i::Handle<i::String> source, bool allow_lazy) {
+static int Utf8LengthHelper(const char* s) {
+  int len = i::StrLength(s);
+  int character_length = len;
+  for (int i = 0; i < len; i++) {
+    unsigned char c = s[i];
+    int input_offset = 0;
+    int output_adjust = 0;
+    if (c > 0x7f) {
+      if (c < 0xc0) continue;
+      if (c >= 0xf0) {
+        if (c >= 0xf8) {
+          // 5 and 6 byte UTF-8 sequences turn into a kBadChar for each UTF-8
+          // byte.
+          continue;  // Handle first UTF-8 byte.
+        }
+        if ((c & 7) == 0 && ((s[i + 1] & 0x30) == 0)) {
+          // This 4 byte sequence could have been coded as a 3 byte sequence.
+          // Record a single kBadChar for the first byte and continue.
+          continue;
+        }
+        input_offset = 3;
+        // 4 bytes of UTF-8 turn into 2 UTF-16 code units.
+        character_length -= 2;
+      } else if (c >= 0xe0) {
+        if ((c & 0xf) == 0 && ((s[i + 1] & 0x20) == 0)) {
+          // This 3 byte sequence could have been coded as a 2 byte sequence.
+          // Record a single kBadChar for the first byte and continue.
+          continue;
+        }
+        input_offset = 2;
+        // 3 bytes of UTF-8 turn into 1 UTF-16 code unit.
+        output_adjust = 2;
+      } else {
+        if ((c & 0x1e) == 0) {
+          // This 2 byte sequence could have been coded as a 1 byte sequence.
+          // Record a single kBadChar for the first byte and continue.
+          continue;
+        }
+        input_offset = 1;
+        // 2 bytes of UTF-8 turn into 1 UTF-16 code unit.
+        output_adjust = 1;
+      }
+      bool bad = false;
+      for (int j = 1; j <= input_offset; j++) {
+        if ((s[i + j] & 0xc0) != 0x80) {
+          // Bad UTF-8 sequence turns the first in the sequence into kBadChar,
+          // which is a single UTF-16 code unit.
+          bad = true;
+          break;
+        }
+      }
+      if (!bad) {
+        i += input_offset;
+        character_length -= output_adjust;
+      }
+    }
+  }
+  return character_length;
+}
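
A few concrete values for the helper above, to pin down the UTF-8 to UTF-16 accounting (the octal escapes follow the same convention as the test data; the results fall directly out of the rules in the code):

  // Utf8LengthHelper("foo")                  == 3  (ASCII: 1 unit per byte)
  // Utf8LengthHelper("foo\303\277")          == 4  (2-byte U+00FF -> 1 unit)
  // Utf8LengthHelper("foo\340\277\277")      == 4  (3-byte U+0FFF -> 1 unit)
  // Utf8LengthHelper("foo\360\220\220\212")  == 5  (4-byte -> surrogate pair)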
+
+
+TEST(ScopePositions) {
+  // Test the parser for correctly setting the start and end positions
+  // of a scope. We check the scope positions of exactly one scope
+  // nested in the global scope of a program. 'inner source' is the
+  // source code that determines the part of the source belonging
+  // to the nested scope. 'outer_prefix' and 'outer_suffix' are
+  // parts of the source that belong to the global scope.
+  struct SourceData {
+    const char* outer_prefix;
+    const char* inner_source;
+    const char* outer_suffix;
+    i::ScopeType scope_type;
+    i::LanguageMode language_mode;
+  };
+
+  const SourceData source_data[] = {
+    { "  with ({}) ", "{ block; }", " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+    { "  with ({}) ", "{ block; }", "; more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+    { "  with ({}) ", "{\n"
+      "    block;\n"
+      "  }", "\n"
+      "  more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+    { "  with ({}) ", "statement;", " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+    { "  with ({}) ", "statement", "\n"
+      "  more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+    { "  with ({})\n"
+      "    ", "statement;", "\n"
+      "  more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+    { "  try {} catch ", "(e) { block; }", " more;",
+      i::CATCH_SCOPE, i::CLASSIC_MODE },
+    { "  try {} catch ", "(e) { block; }", "; more;",
+      i::CATCH_SCOPE, i::CLASSIC_MODE },
+    { "  try {} catch ", "(e) {\n"
+      "    block;\n"
+      "  }", "\n"
+      "  more;", i::CATCH_SCOPE, i::CLASSIC_MODE },
+    { "  try {} catch ", "(e) { block; }", " finally { block; } more;",
+      i::CATCH_SCOPE, i::CLASSIC_MODE },
+    { "  start;\n"
+      "  ", "{ let block; }", " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  start;\n"
+      "  ", "{ let block; }", "; more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  start;\n"
+      "  ", "{\n"
+      "    let block;\n"
+      "  }", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  start;\n"
+      "  function fun", "(a,b) { infunction; }", " more;",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    { "  start;\n"
+      "  function fun", "(a,b) {\n"
+      "    infunction;\n"
+      "  }", "\n"
+      "  more;", i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    { "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    { "  for ", "(let x = 1 ; x < 10; ++ x) { block; }", " more;",
+      i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x = 1 ; x < 10; ++ x) { block; }", "; more;",
+      i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x = 1 ; x < 10; ++ x) {\n"
+      "    block;\n"
+      "  }", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x = 1 ; x < 10; ++ x) statement;", " more;",
+      i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x = 1 ; x < 10; ++ x) statement", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x = 1 ; x < 10; ++ x)\n"
+      "    statement;", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x in {}) { block; }", " more;",
+      i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x in {}) { block; }", "; more;",
+      i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x in {}) {\n"
+      "    block;\n"
+      "  }", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x in {}) statement;", " more;",
+      i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x in {}) statement", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x in {})\n"
+      "    statement;", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    // Check that 6-byte and 4-byte encodings of UTF-8 strings do not throw
+    // the preparser off in terms of byte offsets.
+    // 6 byte encoding.
+    { "  'foo\355\240\201\355\260\211';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // 4 byte encoding.
+    { "  'foo\360\220\220\212';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // 3 byte encoding of \u0fff.
+    { "  'foo\340\277\277';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Broken 6 byte encoding with missing last byte.
+    { "  'foo\355\240\201\355\211';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Broken 3 byte encoding of \u0fff with missing last byte.
+    { "  'foo\340\277';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Broken 3 byte encoding of \u0fff with missing 2 last bytes.
+    { "  'foo\340';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Broken 3 byte encoding of \u00ff should be a 2 byte encoding.
+    { "  'foo\340\203\277';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Broken 3 byte encoding of \u007f should be a 2 byte encoding.
+    { "  'foo\340\201\277';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Unpaired lead surrogate.
+    { "  'foo\355\240\201';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Unpaired lead surrogate where following code point is a 3 byte sequence.
+    { "  'foo\355\240\201\340\277\277';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Unpaired lead surrogate where following code point is a 4 byte encoding
+    // of a trail surrogate.
+    { "  'foo\355\240\201\360\215\260\211';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Unpaired trail surrogate.
+    { "  'foo\355\260\211';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // 2 byte encoding of \u00ff.
+    { "  'foo\303\277';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Broken 2 byte encoding of \u00ff with missing last byte.
+    { "  'foo\303';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Broken 2 byte encoding of \u007f should be a 1 byte encoding.
+    { "  'foo\301\277';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Illegal 5 byte encoding.
+    { "  'foo\370\277\277\277\277';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Illegal 6 byte encoding.
+    { "  'foo\374\277\277\277\277\277';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Illegal 0xfe byte.
+    { "  'foo\376\277\277\277\277\277\277';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    // Illegal 0xff byte.
+    { "  'foo\377\277\277\277\277\277\277\277';\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    { "  'foo';\n"
+      "  (function fun", "(a,b) { 'bar\355\240\201\355\260\213'; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    { "  'foo';\n"
+      "  (function fun", "(a,b) { 'bar\360\220\220\214'; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    { NULL, NULL, NULL, i::EVAL_SCOPE, i::CLASSIC_MODE }
+  };
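+
+  // Note on the UTF-8 cases above: "\355\240\201\355\260\211" is the
+  // CESU-8-style 6-byte form of U+10409 (lead surrogate 0xD801 and trail
+  // surrogate 0xDC09, each encoded as its own 3-byte sequence), while
+  // "\360\220\220\212" is the standard 4-byte UTF-8 form of U+1040A.  Both
+  // occupy more source bytes than the two UTF-16 code units they decode to,
+  // which is exactly what stresses the byte-offset bookkeeping.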
+
+  v8::HandleScope handles;
+  v8::Persistent<v8::Context> context = v8::Context::New();
+  v8::Context::Scope context_scope(context);
+
+  int marker;
+  i::Isolate::Current()->stack_guard()->SetStackLimit(
+      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+  i::FLAG_harmony_scoping = true;
+
+  for (int i = 0; source_data[i].outer_prefix; i++) {
+    int kPrefixLen = Utf8LengthHelper(source_data[i].outer_prefix);
+    int kInnerLen = Utf8LengthHelper(source_data[i].inner_source);
+    int kSuffixLen = Utf8LengthHelper(source_data[i].outer_suffix);
+    int kPrefixByteLen = i::StrLength(source_data[i].outer_prefix);
+    int kInnerByteLen = i::StrLength(source_data[i].inner_source);
+    int kSuffixByteLen = i::StrLength(source_data[i].outer_suffix);
+    int kProgramSize = kPrefixLen + kInnerLen + kSuffixLen;
+    int kProgramByteSize = kPrefixByteLen + kInnerByteLen + kSuffixByteLen;
+    i::Vector<char> program = i::Vector<char>::New(kProgramByteSize + 1);
+    i::OS::SNPrintF(program, "%s%s%s",
+                             source_data[i].outer_prefix,
+                             source_data[i].inner_source,
+                             source_data[i].outer_suffix);
+
+    // Parse program source.
+    i::Handle<i::String> source(
+        FACTORY->NewStringFromUtf8(i::CStrVector(program.start())));
+    CHECK_EQ(source->length(), kProgramSize);
+    i::Handle<i::Script> script = FACTORY->NewScript(source);
+    i::Parser parser(script, i::kAllowLazy | i::EXTENDED_MODE, NULL, NULL);
+    i::CompilationInfo info(script);
+    info.MarkAsGlobal();
+    info.SetLanguageMode(source_data[i].language_mode);
+    i::FunctionLiteral* function = parser.ParseProgram(&info);
+    CHECK(function != NULL);
+
+    // Check scope types and positions.
+    i::Scope* scope = function->scope();
+    CHECK(scope->is_global_scope());
+    CHECK_EQ(scope->start_position(), 0);
+    CHECK_EQ(scope->end_position(), kProgramSize);
+    CHECK_EQ(scope->inner_scopes()->length(), 1);
+
+    i::Scope* inner_scope = scope->inner_scopes()->at(0);
+    CHECK_EQ(inner_scope->type(), source_data[i].scope_type);
+    CHECK_EQ(inner_scope->start_position(), kPrefixLen);
+    // The end position of a token is one position after the last
+    // character belonging to that token.
+    CHECK_EQ(inner_scope->end_position(), kPrefixLen + kInnerLen);
+  }
+}
+
+
+void TestParserSync(i::Handle<i::String> source, int flags) {
   uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
+  bool harmony_scoping = ((i::kLanguageModeMask & flags) == i::EXTENDED_MODE);
 
   // Preparse the data.
   i::CompleteParserRecorder log;
-  i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
-  i::GenericStringUC16CharacterStream stream(source, 0, source->length());
+  i::Scanner scanner(i::Isolate::Current()->unicode_cache());
+  i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
+  scanner.SetHarmonyScoping(harmony_scoping);
   scanner.Initialize(&stream);
   v8::preparser::PreParser::PreParseResult result =
       v8::preparser::PreParser::PreParseProgram(
-          &scanner, &log, allow_lazy, stack_limit);
+          &scanner, &log, flags, stack_limit);
   CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
   i::ScriptDataImpl data(log.ExtractData());
 
   // Parse the data
   i::Handle<i::Script> script = FACTORY->NewScript(source);
-  i::Parser parser(script, false, NULL, NULL);
-  i::FunctionLiteral* function =
-      parser.ParseProgram(source, true, i::kNonStrictMode);
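+  // The parser reads FLAG_harmony_scoping globally, so set it to match the
+  // requested language mode for this parse and restore it afterwards.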
+  bool save_harmony_scoping = i::FLAG_harmony_scoping;
+  i::FLAG_harmony_scoping = harmony_scoping;
+  i::Parser parser(script, flags, NULL, NULL);
+  i::CompilationInfo info(script);
+  info.MarkAsGlobal();
+  i::FunctionLiteral* function = parser.ParseProgram(&info);
+  i::FLAG_harmony_scoping = save_harmony_scoping;
 
   i::String* type_string = NULL;
   if (function == NULL) {
@@ -779,6 +1117,23 @@
 }
 
 
+void TestParserSyncWithFlags(i::Handle<i::String> source) {
+  static const int kFlagsCount = 6;
+  const int flags[kFlagsCount] = {
+    i::kNoParsingFlags | i::CLASSIC_MODE,
+    i::kNoParsingFlags | i::STRICT_MODE,
+    i::kNoParsingFlags | i::EXTENDED_MODE,
+    i::kAllowLazy | i::CLASSIC_MODE,
+    i::kAllowLazy | i::STRICT_MODE,
+    i::kAllowLazy | i::EXTENDED_MODE
+  };
+
+  for (int k = 0; k < kFlagsCount; ++k) {
+    TestParserSync(source, flags[k]);
+  }
+}
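+
+// (The six flag sets above are the cross product of {eager, lazy} parsing
+// with the three language modes, so every source is checked under each
+// configuration.)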
+
+
 TEST(ParserSync) {
   const char* context_data[][2] = {
     { "", "" },
@@ -876,8 +1231,7 @@
         CHECK(length == kProgramSize);
         i::Handle<i::String> source =
             FACTORY->NewStringFromAscii(i::CStrVector(program.start()));
-        TestParserSync(source, true);
-        TestParserSync(source, false);
+        TestParserSyncWithFlags(source);
       }
     }
   }
diff --git a/test/cctest/test-platform-linux.cc b/test/cctest/test-platform-linux.cc
index 756b947..2a8d497 100644
--- a/test/cctest/test-platform-linux.cc
+++ b/test/cctest/test-platform-linux.cc
@@ -67,7 +67,7 @@
 
 
 TEST(VirtualMemory) {
-  OS::Setup();
+  OS::SetUp();
   VirtualMemory* vm = new VirtualMemory(1 * MB);
   CHECK(vm->IsReserved());
   void* block_addr = vm->address();
diff --git a/test/cctest/test-platform-win32.cc b/test/cctest/test-platform-win32.cc
index 9bd0014..36b30aa 100644
--- a/test/cctest/test-platform-win32.cc
+++ b/test/cctest/test-platform-win32.cc
@@ -13,7 +13,7 @@
 
 
 TEST(VirtualMemory) {
-  OS::Setup();
+  OS::SetUp();
   VirtualMemory* vm = new VirtualMemory(1 * MB);
   CHECK(vm->IsReserved());
   void* block_addr = vm->address();
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index 76fd244..def829c 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -52,7 +52,7 @@
     CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
   }
   CHECK(!i::TokenEnumeratorTester::token_removed(&te)->at(2));
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK(i::TokenEnumeratorTester::token_removed(&te)->at(2));
   CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
   CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
diff --git a/test/cctest/test-random.cc b/test/cctest/test-random.cc
new file mode 100644
index 0000000..a1f4931
--- /dev/null
+++ b/test/cctest/test-random.cc
@@ -0,0 +1,109 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "compiler.h"
+#include "execution.h"
+#include "isolate.h"
+
+
+using namespace v8::internal;
+
+static v8::Persistent<v8::Context> env;
+
+
+void SetSeeds(Handle<ByteArray> seeds, uint32_t state0, uint32_t state1) {
+  for (int i = 0; i < 4; i++) {
+    seeds->set(i, static_cast<byte>(state0 >> (i * kBitsPerByte)));
+    seeds->set(i + 4, static_cast<byte>(state1 >> (i * kBitsPerByte)));
+  }
+}
+
+
+void TestSeeds(Handle<JSFunction> fun,
+               Handle<Context> context,
+               uint32_t state0,
+               uint32_t state1) {
+  bool has_pending_exception;
+  Handle<JSObject> global(context->global());
+  Handle<ByteArray> seeds(context->random_seed());
+
+  SetSeeds(seeds, state0, state1);
+  Handle<Object> value =
+      Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  CHECK(value->IsHeapNumber());
+  CHECK(fun->IsOptimized());
+  double crankshaft_value = HeapNumber::cast(*value)->value();
+
+  SetSeeds(seeds, state0, state1);
+  V8::FillHeapNumberWithRandom(*value, *context);
+  double runtime_value = HeapNumber::cast(*value)->value();
+  CHECK_EQ(runtime_value, crankshaft_value);
+}
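+
+// Math.random() here is effectively a two-state multiply-with-carry
+// generator over state0/state1.  A sketch of the recurrence (the constants
+// are illustrative, not necessarily the exact ones V8 uses):
+//   state0 = 18273 * (state0 & 0xFFFF) + (state0 >> 16);
+//   state1 = 36969 * (state1 & 0xFFFF) + (state1 >> 16);
+//   random = (state0 << 14) + (state1 & 0x3FFFF);
+// Since the next value is a pure function of (state0, state1), seeding both
+// states identically makes the crankshaft and runtime paths comparable.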
+
+
+TEST(CrankshaftRandom) {
+  if (env.IsEmpty()) env = v8::Context::New();
+  // Skip test if crankshaft is disabled.
+  if (!V8::UseCrankshaft()) return;
+  v8::HandleScope scope;
+  env->Enter();
+
+  Handle<Context> context(Isolate::Current()->context());
+  Handle<JSObject> global(context->global());
+  Handle<ByteArray> seeds(context->random_seed());
+  bool has_pending_exception;
+
+  CompileRun("function f() { return Math.random(); }");
+
+  Object* symbol = FACTORY->LookupAsciiSymbol("f")->ToObjectChecked();
+  MaybeObject* fun_object =
+      context->global()->GetProperty(String::cast(symbol));
+  Handle<JSFunction> fun(JSFunction::cast(fun_object->ToObjectChecked()));
+
+  // Optimize function.
+  Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  if (!fun->IsOptimized()) fun->MarkForLazyRecompilation();
+
+  // Test with some random values.
+  TestSeeds(fun, context, 0xC0C0AFFE, 0x31415926);
+  TestSeeds(fun, context, 0x01020304, 0xFFFFFFFF);
+  TestSeeds(fun, context, 0x00000001, 0x00000000);
+
+  // Test that we bail out to runtime when seeds are uninitialized (zeros).
+  SetSeeds(seeds, 0, 0);
+  Handle<Object> value =
+      Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  CHECK(value->IsHeapNumber());
+  CHECK(fun->IsOptimized());
+  double crankshaft_value = HeapNumber::cast(*value)->value();
+  CHECK_NE(0.0, crankshaft_value);
+}
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index 89a9112..d941d0f 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -449,6 +449,7 @@
     case 0xA0:
     case 0x2028:
     case 0x2029:
+    case 0xFEFF:
       return true;
     default:
       return unibrow::Space::Is(c);
@@ -530,7 +531,7 @@
   typedef int Key;
   typedef int Value;
   static const int kNoKey;
-  static const int kNoValue;
+  static int NoValue() { return 0; }
   static inline int Compare(int a, int b) {
     if (a < b)
       return -1;
@@ -543,7 +544,6 @@
 
 
 const int TestConfig::kNoKey = 0;
-const int TestConfig::kNoValue = 0;
 
 
 static unsigned PseudoRandom(int i, int j) {
@@ -837,7 +837,8 @@
   Handle<Code> code = Handle<Code>::cast(code_object);
 
   int captures[4] = {42, 37, 87, 117};
-  const uc16 input_data[6] = {'f', 'o', 'o', 'f', 'o', '\xa0'};
+  const uc16 input_data[6] = {'f', 'o', 'o', 'f', 'o',
+                              static_cast<uc16>('\xa0')};
   Handle<String> input =
       factory->NewStringFromTwoByte(Vector<const uc16>(input_data, 6));
   Handle<SeqTwoByteString> seq_input = Handle<SeqTwoByteString>::cast(input);
@@ -857,7 +858,8 @@
   CHECK_EQ(-1, captures[2]);
   CHECK_EQ(-1, captures[3]);
 
-  const uc16 input_data2[9] = {'b', 'a', 'r', 'b', 'a', 'r', 'b', 'a', '\xa0'};
+  const uc16 input_data2[9] = {'b', 'a', 'r', 'b', 'a', 'r', 'b', 'a',
+                               static_cast<uc16>('\xa0')};
   input = factory->NewStringFromTwoByte(Vector<const uc16>(input_data2, 9));
   seq_input = Handle<SeqTwoByteString>::cast(input);
   start_adr = seq_input->GetCharsAddress();
diff --git a/test/cctest/test-reloc-info.cc b/test/cctest/test-reloc-info.cc
index 5bdc4c3..e638201 100644
--- a/test/cctest/test-reloc-info.cc
+++ b/test/cctest/test-reloc-info.cc
@@ -34,7 +34,7 @@
 
 static void WriteRinfo(RelocInfoWriter* writer,
                        byte* pc, RelocInfo::Mode mode, intptr_t data) {
-  RelocInfo rinfo(pc, mode, data);
+  RelocInfo rinfo(pc, mode, data, NULL);
   writer->Write(&rinfo);
 }
 
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 8e85444..e426e7b 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -114,10 +114,6 @@
       ExternalReference(isolate->counters()->keyed_load_function_prototype());
   CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
            encoder.Encode(keyed_load_function_prototype.address()));
-  ExternalReference the_hole_value_location =
-      ExternalReference::the_hole_value_location(isolate);
-  CHECK_EQ(make_code(UNCLASSIFIED, 2),
-           encoder.Encode(the_hole_value_location.address()));
   ExternalReference stack_limit_address =
       ExternalReference::address_of_stack_limit(isolate);
   CHECK_EQ(make_code(UNCLASSIFIED, 4),
@@ -127,14 +123,15 @@
   CHECK_EQ(make_code(UNCLASSIFIED, 5),
            encoder.Encode(real_stack_limit_address.address()));
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  CHECK_EQ(make_code(UNCLASSIFIED, 15),
+  CHECK_EQ(make_code(UNCLASSIFIED, 16),
            encoder.Encode(ExternalReference::debug_break(isolate).address()));
 #endif  // ENABLE_DEBUGGER_SUPPORT
   CHECK_EQ(make_code(UNCLASSIFIED, 10),
            encoder.Encode(
                ExternalReference::new_space_start(isolate).address()));
   CHECK_EQ(make_code(UNCLASSIFIED, 3),
-           encoder.Encode(ExternalReference::roots_address(isolate).address()));
+           encoder.Encode(
+               ExternalReference::roots_array_start(isolate).address()));
 }
 
 
@@ -157,15 +154,13 @@
            decoder.Decode(
                make_code(STATS_COUNTER,
                          Counters::k_keyed_load_function_prototype)));
-  CHECK_EQ(ExternalReference::the_hole_value_location(isolate).address(),
-           decoder.Decode(make_code(UNCLASSIFIED, 2)));
   CHECK_EQ(ExternalReference::address_of_stack_limit(isolate).address(),
            decoder.Decode(make_code(UNCLASSIFIED, 4)));
   CHECK_EQ(ExternalReference::address_of_real_stack_limit(isolate).address(),
            decoder.Decode(make_code(UNCLASSIFIED, 5)));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   CHECK_EQ(ExternalReference::debug_break(isolate).address(),
-           decoder.Decode(make_code(UNCLASSIFIED, 15)));
+           decoder.Decode(make_code(UNCLASSIFIED, 16)));
 #endif  // ENABLE_DEBUGGER_SUPPORT
   CHECK_EQ(ExternalReference::new_space_start(isolate).address(),
            decoder.Decode(make_code(UNCLASSIFIED, 10)));
@@ -365,8 +360,8 @@
       Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
     }
   }
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   Object* raw_foo;
   {
@@ -490,7 +485,7 @@
   }
   // If we don't do this then we end up with a stray root pointing at the
   // context even after we have disposed of env.
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
   Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
@@ -563,16 +558,20 @@
 TEST(LinearAllocation) {
   v8::V8::Initialize();
   int new_space_max = 512 * KB;
+  int paged_space_max = Page::kMaxNonCodeHeapObjectSize;
+  int code_space_max = HEAP->code_space()->AreaSize();
 
   for (int size = 1000; size < 5 * MB; size += size >> 1) {
+    size &= ~8;  // Clear bit 3 of the size.
     int new_space_size = (size < new_space_max) ? size : new_space_max;
+    int paged_space_size = (size < paged_space_max) ? size : paged_space_max;
     HEAP->ReserveSpace(
         new_space_size,
-        size,              // Old pointer space.
-        size,              // Old data space.
-        size,              // Code space.
-        size,              // Map space.
-        size,              // Cell space.
+        paged_space_size,  // Old pointer space.
+        paged_space_size,  // Old data space.
+        HEAP->code_space()->RoundSizeDownToObjectAlignment(code_space_max),
+        HEAP->map_space()->RoundSizeDownToObjectAlignment(paged_space_size),
+        HEAP->cell_space()->RoundSizeDownToObjectAlignment(paged_space_size),
         size);             // Large object space.
     LinearAllocationScope linear_allocation_scope;
     const int kSmallFixedArrayLength = 4;
@@ -599,14 +598,14 @@
 
     Object* pointer_last = NULL;
     for (int i = 0;
-         i + kSmallFixedArraySize <= size;
+         i + kSmallFixedArraySize <= paged_space_size;
          i += kSmallFixedArraySize) {
       Object* obj = HEAP->AllocateFixedArray(kSmallFixedArrayLength,
                                              TENURED)->ToObjectChecked();
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
-          page_fullness > Page::kObjectAreaSize) {
+          page_fullness > HEAP->old_pointer_space()->AreaSize()) {
         i = RoundUp(i, Page::kPageSize);
         pointer_last = NULL;
       }
@@ -618,13 +617,15 @@
     }
 
     Object* data_last = NULL;
-    for (int i = 0; i + kSmallStringSize <= size; i += kSmallStringSize) {
+    for (int i = 0;
+         i + kSmallStringSize <= paged_space_size;
+         i += kSmallStringSize) {
       Object* obj = HEAP->AllocateRawAsciiString(kSmallStringLength,
                                                  TENURED)->ToObjectChecked();
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
-          page_fullness > Page::kObjectAreaSize) {
+          page_fullness > HEAP->old_data_space()->AreaSize()) {
         i = RoundUp(i, Page::kPageSize);
         data_last = NULL;
       }
@@ -636,13 +637,13 @@
     }
 
     Object* map_last = NULL;
-    for (int i = 0; i + kMapSize <= size; i += kMapSize) {
+    for (int i = 0; i + kMapSize <= paged_space_size; i += kMapSize) {
       Object* obj = HEAP->AllocateMap(JS_OBJECT_TYPE,
                                       42 * kPointerSize)->ToObjectChecked();
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kMapSize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
-          page_fullness > Page::kObjectAreaSize) {
+          page_fullness > HEAP->map_space()->AreaSize()) {
         i = RoundUp(i, Page::kPageSize);
         map_last = NULL;
       }
@@ -653,7 +654,7 @@
       map_last = obj;
     }
 
-    if (size > Page::kObjectAreaSize) {
+    if (size > Page::kMaxNonCodeHeapObjectSize) {
       // Support for reserving space in large object space is not there yet,
       // but using an always-allocate scope is fine for now.
       AlwaysAllocateScope always;
diff --git a/test/cctest/test-sockets.cc b/test/cctest/test-sockets.cc
index 4af55db..ad73540 100644
--- a/test/cctest/test-sockets.cc
+++ b/test/cctest/test-sockets.cc
@@ -129,7 +129,7 @@
   bool ok;
 
   // Initialize socket support.
-  ok = Socket::Setup();
+  ok = Socket::SetUp();
   CHECK(ok);
 
   // Send and receive some data.
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index 0f22ce1..92de2a6 100644
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,7 +32,9 @@
 
 using namespace v8::internal;
 
+#if 0
 static void VerifyRegionMarking(Address page_start) {
+#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
   Page* p = Page::FromAddress(page_start);
 
   p->SetRegionMarks(Page::kAllRegionsCleanMarks);
@@ -54,9 +56,13 @@
        addr += kPointerSize) {
     CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
   }
+#endif
 }
+#endif
 
 
+// TODO(gc): Pages can no longer be allocated like this; the details are
+// hidden.
+#if 0
 TEST(Page) {
   byte* mem = NewArray<byte>(2*Page::kPageSize);
   CHECK(mem != NULL);
@@ -89,6 +95,7 @@
 
   DeleteArray(mem);
 }
+#endif
 
 
 namespace v8 {
@@ -118,95 +125,76 @@
 
 
 TEST(MemoryAllocator) {
-  OS::Setup();
+  OS::SetUp();
   Isolate* isolate = Isolate::Current();
   isolate->InitializeLoggingAndCounters();
   Heap* heap = isolate->heap();
-  CHECK(heap->ConfigureHeapDefault());
-  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
-  CHECK(memory_allocator->Setup(heap->MaxReserved(),
-                                heap->MaxExecutableSize()));
-  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
+  CHECK(isolate->heap()->ConfigureHeapDefault());
 
+  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
+  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
+                                heap->MaxExecutableSize()));
+
+  int total_pages = 0;
   OldSpace faked_space(heap,
                        heap->MaxReserved(),
                        OLD_POINTER_SPACE,
                        NOT_EXECUTABLE);
-  int total_pages = 0;
-  int requested = MemoryAllocator::kPagesPerChunk;
-  int allocated;
-  // If we request n pages, we should get n or n - 1.
-  Page* first_page = memory_allocator->AllocatePages(
-      requested, &allocated, &faked_space);
-  CHECK(first_page->is_valid());
-  CHECK(allocated == requested || allocated == requested - 1);
-  total_pages += allocated;
+  Page* first_page =
+      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
 
-  Page* last_page = first_page;
-  for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
-    CHECK(memory_allocator->IsPageInSpace(p, &faked_space));
-    last_page = p;
+  first_page->InsertAfter(faked_space.anchor()->prev_page());
+  CHECK(first_page->is_valid());
+  CHECK(first_page->next_page() == faked_space.anchor());
+  total_pages++;
+
+  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
+    CHECK(p->owner() == &faked_space);
   }
 
-  // Again, we should get n or n - 1 pages.
+  // Allocate a second page and link it into the space.
-  Page* others = memory_allocator->AllocatePages(
-      requested, &allocated, &faked_space);
-  CHECK(others->is_valid());
-  CHECK(allocated == requested || allocated == requested - 1);
-  total_pages += allocated;
-
-  memory_allocator->SetNextPage(last_page, others);
+  Page* other =
+      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+  CHECK(other->is_valid());
+  total_pages++;
+  other->InsertAfter(first_page);
   int page_count = 0;
-  for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
-    CHECK(memory_allocator->IsPageInSpace(p, &faked_space));
+  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
+    CHECK(p->owner() == &faked_space);
     page_count++;
   }
   CHECK(total_pages == page_count);
 
   Page* second_page = first_page->next_page();
   CHECK(second_page->is_valid());
-
-  // Freeing pages at the first chunk starting at or after the second page
-  // should free the entire second chunk.  It will return the page it was passed
-  // (since the second page was in the first chunk).
-  Page* free_return = memory_allocator->FreePages(second_page);
-  CHECK(free_return == second_page);
-  memory_allocator->SetNextPage(first_page, free_return);
-
-  // Freeing pages in the first chunk starting at the first page should free
-  // the first chunk and return an invalid page.
-  Page* invalid_page = memory_allocator->FreePages(first_page);
-  CHECK(!invalid_page->is_valid());
-
+  memory_allocator->Free(first_page);
+  memory_allocator->Free(second_page);
   memory_allocator->TearDown();
   delete memory_allocator;
 }
 
 
 TEST(NewSpace) {
-  OS::Setup();
+  OS::SetUp();
   Isolate* isolate = Isolate::Current();
   isolate->InitializeLoggingAndCounters();
   Heap* heap = isolate->heap();
   CHECK(heap->ConfigureHeapDefault());
   MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
-  CHECK(memory_allocator->Setup(heap->MaxReserved(),
+  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                 heap->MaxExecutableSize()));
   TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
 
   NewSpace new_space(heap);
 
-  void* chunk =
-      memory_allocator->ReserveInitialChunk(4 * heap->ReservedSemiSpaceSize());
-  CHECK(chunk != NULL);
-  Address start = RoundUp(static_cast<Address>(chunk),
-                          2 * heap->ReservedSemiSpaceSize());
-  CHECK(new_space.Setup(start, 2 * heap->ReservedSemiSpaceSize()));
-  CHECK(new_space.HasBeenSetup());
+  CHECK(new_space.SetUp(HEAP->ReservedSemiSpaceSize(),
+                        HEAP->ReservedSemiSpaceSize()));
+  CHECK(new_space.HasBeenSetUp());
 
-  while (new_space.Available() >= Page::kMaxHeapObjectSize) {
+  while (new_space.Available() >= Page::kMaxNonCodeHeapObjectSize) {
     Object* obj =
-        new_space.AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
+        new_space.AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->
+        ToObjectUnchecked();
     CHECK(new_space.Contains(HeapObject::cast(obj)));
   }
 
@@ -217,13 +205,13 @@
 
 
 TEST(OldSpace) {
-  OS::Setup();
+  OS::SetUp();
   Isolate* isolate = Isolate::Current();
   isolate->InitializeLoggingAndCounters();
   Heap* heap = isolate->heap();
   CHECK(heap->ConfigureHeapDefault());
   MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
-  CHECK(memory_allocator->Setup(heap->MaxReserved(),
+  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                 heap->MaxExecutableSize()));
   TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
 
@@ -233,16 +221,10 @@
                              NOT_EXECUTABLE);
   CHECK(s != NULL);
 
-  void* chunk = memory_allocator->ReserveInitialChunk(
-      4 * heap->ReservedSemiSpaceSize());
-  CHECK(chunk != NULL);
-  Address start = static_cast<Address>(chunk);
-  size_t size = RoundUp(start, 2 * heap->ReservedSemiSpaceSize()) - start;
-
-  CHECK(s->Setup(start, size));
+  CHECK(s->SetUp());
 
   while (s->Available() > 0) {
-    s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
+    s->AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->ToObjectUnchecked();
   }
 
   s->TearDown();
@@ -258,14 +240,12 @@
   LargeObjectSpace* lo = HEAP->lo_space();
   CHECK(lo != NULL);
 
-  Map* faked_map = reinterpret_cast<Map*>(HeapObject::FromAddress(0));
   int lo_size = Page::kPageSize;
 
-  Object* obj = lo->AllocateRaw(lo_size)->ToObjectUnchecked();
+  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->ToObjectUnchecked();
   CHECK(obj->IsHeapObject());
 
   HeapObject* ho = HeapObject::cast(obj);
-  ho->set_map(faked_map);
 
   CHECK(lo->Contains(HeapObject::cast(obj)));
 
@@ -275,14 +255,13 @@
 
   while (true) {
     intptr_t available = lo->Available();
-    { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size);
+    { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
       if (!maybe_obj->ToObject(&obj)) break;
     }
-    HeapObject::cast(obj)->set_map(faked_map);
     CHECK(lo->Available() < available);
   };
 
   CHECK(!lo->IsEmpty());
 
-  CHECK(lo->AllocateRaw(lo_size)->IsFailure());
+  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
 }
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 55c2141..e11349b 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 // Check that we can traverse very deep stacks of ConsStrings using
 // StringInputBuffer.  Check that Get(int) works on very deep stacks
@@ -355,7 +355,7 @@
 
   // Make sure we cover all always-flat lengths and at least one above.
   static const int kMaxLength = 20;
-  CHECK_GT(kMaxLength, i::String::kMinNonFlatLength);
+  CHECK_GT(kMaxLength, i::ConsString::kMinLength);
 
   // Allocate two JavaScript arrays for holding short strings.
   v8::Handle<v8::Array> ascii_external_strings =
@@ -502,6 +502,35 @@
 }
 
 
+class AsciiVectorResource : public v8::String::ExternalAsciiStringResource {
+ public:
+  explicit AsciiVectorResource(i::Vector<const char> vector)
+      : data_(vector) {}
+  virtual ~AsciiVectorResource() {}
+  virtual size_t length() const { return data_.length(); }
+  virtual const char* data() const { return data_.start(); }
+ private:
+  i::Vector<const char> data_;
+};
+
+
+TEST(SliceFromExternal) {
+  FLAG_string_slices = true;
+  InitializeVM();
+  v8::HandleScope scope;
+  AsciiVectorResource resource(
+      i::Vector<const char>("abcdefghijklmnopqrstuvwxyz", 26));
+  Handle<String> string = FACTORY->NewExternalStringFromAscii(&resource);
+  CHECK(string->IsExternalString());
+  Handle<String> slice = FACTORY->NewSubString(string, 1, 25);
+  CHECK(slice->IsSlicedString());
+  CHECK(string->IsExternalString());
+  CHECK_EQ(SlicedString::cast(*slice)->parent(), *string);
+  CHECK(SlicedString::cast(*slice)->parent()->IsExternalString());
+  CHECK(slice->IsFlat());
+}
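+
+// A sliced string is essentially a (parent, offset, length) triple, so the
+// checks above verify that slicing an external string keeps the external
+// parent referenced rather than copying the characters out.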
+
+
 TEST(TrivialSlice) {
   // This tests whether a slice that contains the entire parent string
   // actually creates a new string (it should not).
diff --git a/test/cctest/test-threads.cc b/test/cctest/test-threads.cc
index 985b9e5..713d1e8 100644
--- a/test/cctest/test-threads.cc
+++ b/test/cctest/test-threads.cc
@@ -63,7 +63,7 @@
 static Turn turn = FILL_CACHE;
 
 
-class ThreadA: public v8::internal::Thread {
+class ThreadA : public v8::internal::Thread {
  public:
   ThreadA() : Thread("ThreadA") { }
   void Run() {
@@ -99,7 +99,7 @@
 };
 
 
-class ThreadB: public v8::internal::Thread {
+class ThreadB : public v8::internal::Thread {
  public:
   ThreadB() : Thread("ThreadB") { }
   void Run() {
@@ -111,7 +111,7 @@
           v8::Context::Scope context_scope(v8::Context::New());
 
           // Clear the caches by forcing major GC.
-          HEAP->CollectAllGarbage(false);
+          HEAP->CollectAllGarbage(v8::internal::Heap::kNoGCFlags);
           turn = SECOND_TIME_FILL_CACHE;
           break;
         }
@@ -190,3 +190,19 @@
     delete threads[i];
   }
 }
+
+
+class ThreadC : public v8::internal::Thread {
+ public:
+  ThreadC() : Thread("ThreadC") { }
+  void Run() {
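+    // Joining the current thread from inside Run() must not deadlock.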
+    Join();
+  }
+};
+
+
+TEST(ThreadJoinSelf) {
+  ThreadC thread;
+  thread.Start();
+  thread.Join();
+}
diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc
index e4f70df..df8ff72 100644
--- a/test/cctest/test-utils.cc
+++ b/test/cctest/test-utils.cc
@@ -105,7 +105,7 @@
 
 TEST(MemCopy) {
   v8::V8::Initialize();
-  OS::Setup();
+  OS::SetUp();
   const int N = OS::kMinComplexMemCopy + 128;
   Vector<byte> buffer1 = Vector<byte>::New(N);
   Vector<byte> buffer2 = Vector<byte>::New(N);
diff --git a/test/cctest/test-weakmaps.cc b/test/cctest/test-weakmaps.cc
index db4db25..56d5936 100644
--- a/test/cctest/test-weakmaps.cc
+++ b/test/cctest/test-weakmaps.cc
@@ -50,7 +50,7 @@
                            Handle<JSObject> key,
                            int value) {
   Handle<ObjectHashTable> table = PutIntoObjectHashTable(
-      Handle<ObjectHashTable>(weakmap->table()),
+      Handle<ObjectHashTable>(ObjectHashTable::cast(weakmap->table())),
       Handle<JSObject>(JSObject::cast(*key)),
       Handle<Smi>(Smi::FromInt(value)));
   weakmap->set_table(*table);
@@ -85,13 +85,14 @@
     v8::HandleScope scope;
     PutIntoWeakMap(weakmap, Handle<JSObject>(JSObject::cast(*key)), 23);
   }
-  CHECK_EQ(1, weakmap->table()->NumberOfElements());
+  CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
 
   // Force a full GC.
   HEAP->CollectAllGarbage(false);
   CHECK_EQ(0, NumberOfWeakCalls);
-  CHECK_EQ(1, weakmap->table()->NumberOfElements());
-  CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
 
   // Make the global reference to the key weak.
   {
@@ -107,12 +108,14 @@
   // weak references whereas the second one will also clear weak maps.
   HEAP->CollectAllGarbage(false);
   CHECK_EQ(1, NumberOfWeakCalls);
-  CHECK_EQ(1, weakmap->table()->NumberOfElements());
-  CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
   HEAP->CollectAllGarbage(false);
   CHECK_EQ(1, NumberOfWeakCalls);
-  CHECK_EQ(0, weakmap->table()->NumberOfElements());
-  CHECK_EQ(1, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(0, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      1, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
 }
 
 
@@ -122,7 +125,7 @@
   Handle<JSWeakMap> weakmap = AllocateJSWeakMap();
 
   // Check initial capacity.
-  CHECK_EQ(32, weakmap->table()->Capacity());
+  CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->Capacity());
 
   // Fill up weak map to trigger capacity change.
   {
@@ -135,15 +138,17 @@
   }
 
   // Check increased capacity.
-  CHECK_EQ(128, weakmap->table()->Capacity());
+  CHECK_EQ(128, ObjectHashTable::cast(weakmap->table())->Capacity());
 
   // Force a full GC.
-  CHECK_EQ(32, weakmap->table()->NumberOfElements());
-  CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
   HEAP->CollectAllGarbage(false);
-  CHECK_EQ(0, weakmap->table()->NumberOfElements());
-  CHECK_EQ(32, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(0, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      32, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
 
   // Check shrunk capacity.
-  CHECK_EQ(32, weakmap->table()->Capacity());
+  CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->Capacity());
 }
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index d095a24..12ebf90 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -41,16 +41,6 @@
 # We are compatible with Safari and Firefox.
 chapter11/11.1/11.1.5: UNIMPLEMENTED
 
-# We do not have a global object called 'global' as required by tests.
-chapter15/15.1: FAIL_OK
-
-# NaN is writable. We are compatible with JSC.
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-178: FAIL_OK
-# Infinity is writable. We are compatible with JSC.
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-179: FAIL_OK
-# undefined is writable. We are compatible with JSC.
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-180: FAIL_OK
-
 # Our Function object has an "arguments" property which is used as a
 # non-property in the test.
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-183: FAIL_OK
@@ -106,9 +96,6 @@
 # SUBSETFAIL
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-11: FAIL_OK
 
-# We do not implement all methods on RegExp.
-chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-13: FAIL
-
 # SUBSETFAIL
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-14: FAIL_OK
 
@@ -196,27 +183,6 @@
 # have no effect on the actual array on which reduceRight is called.
 chapter15/15.4/15.4.4/15.4.4.22/15.4.4.22-9-7: FAIL_OK
 
-# We do not correctly recognize \uFEFF as whitespace
-chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-10: FAIL
-chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-18: FAIL
-chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-34: FAIL
-
-# RegExp.prototype is not of type RegExp - we are bug compatible with JSC.
-chapter15/15.10/15.10.6/15.10.6: FAIL_OK
-
-# We do not have the properties of a RegExp instance on RegExp.prototype.
-# The spec says we should - but we are currently bug compatible with JSC.
-chapter15/15.10/15.10.7/15.10.7.1/15.10.7.1-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.1/15.10.7.1-2: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.2/15.10.7.2-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.2/15.10.7.2-2: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.3/15.10.7.3-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.3/15.10.7.3-2: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.4/15.10.7.4-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.4/15.10.7.4-2: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-2: FAIL_OK
-
 ##############################################################################
 # Unimplemented parts of strict mode
 # Setting expectations to fail only so that the tests trigger as soon as
@@ -348,8 +314,3 @@
 # Array.prototype.reduce - null passed as thisValue to strict callbackfn
 # Invalid test case: http://es5conform.codeplex.com/workitem/29085
 chapter15/15.4/15.4.4/15.4.4.21/15.4.4.21-9-c-ii-4-s: FAIL
-
-[ $arch == mips ]
-
-# Skip all tests on MIPS.
-*: SKIP
diff --git a/test/message/message.status b/test/message/message.status
index 70354ce..fc2896b 100644
--- a/test/message/message.status
+++ b/test/message/message.status
@@ -29,10 +29,3 @@
 
 # All tests in the bug directory are expected to fail.
 bugs: FAIL
-
-
-##############################################################################
-[ $arch == mips ]
-
-# Skip all tests on MIPS.
-*: SKIP
diff --git a/test/mjsunit/apply.js b/test/mjsunit/apply.js
index c166110..413ee93 100644
--- a/test/mjsunit/apply.js
+++ b/test/mjsunit/apply.js
@@ -190,3 +190,10 @@
     "moreseper-prime");
 
 delete(Array.prototype["1"]);
+
+// Check correct handling of non-array argument lists.
+assertSame(this, f0.apply(this, {}), "non-array-1");
+assertSame(this, f0.apply(this, { length:1 }), "non-array-2");
+assertEquals(void 0, f1.apply(this, { length:1 }), "non-array-3");
+assertEquals(void 0, f1.apply(this, { 0:"foo" }), "non-array-4");
+assertEquals("foo", f1.apply(this, { length:1, 0:"foo" }), "non-array-5");
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/array-construct-transition.js
similarity index 76%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/array-construct-transition.js
index d3f2e35..577e321 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/array-construct-transition.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax --smi-only-arrays
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+support_smi_only_arrays = %HasFastSmiOnlyElements(new Array(1,2,3,4,5,6,7,8));
 
-assertThrows('eval(delete eval)');
-
+if (support_smi_only_arrays) {
+  var a = new Array(0, 1, 2);
+  assertTrue(%HasFastSmiOnlyElements(a));
+  var b = new Array(0.5, 1.2, 2.3);
+  assertTrue(%HasFastDoubleElements(b));
+  var c = new Array(0.5, 1.2, new Object());
+  assertTrue(%HasFastElements(c));
+}
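+
+// new Array(...) picks the most specific elements kind that can represent
+// the initial values: all smis -> smi-only elements, any non-smi number ->
+// double elements, anything else -> tagged fast elements.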
diff --git a/test/mjsunit/array-join.js b/test/mjsunit/array-join.js
index 5c837a5..c08c182 100644
--- a/test/mjsunit/array-join.js
+++ b/test/mjsunit/array-join.js
@@ -75,10 +75,10 @@
   Array.prototype.toString = oldToString;
 }
 
-var a = new Array(123123123);
-assertEquals(123123122, String(a).length);
-assertEquals(123123122, a.join(",").length);
-assertEquals(246246244, a.join("oo").length);
+var a = new Array(123123);
+assertEquals(123122, String(a).length);
+assertEquals(123122, a.join(",").length);
+assertEquals(246244, a.join("oo").length);
 
 a = new Array(Math.pow(2,32) - 1);  // Max length.
 assertEquals("", a.join(""));
@@ -90,4 +90,4 @@
 for (var i = 0; i < a.length; i++) a[i] = undefined;
 a[5] = "ab";
 a[90000] = "cd";
-assertEquals("abcd", a.join(""));  // Must not throw.
\ No newline at end of file
+assertEquals("abcd", a.join(""));  // Must not throw.
diff --git a/test/mjsunit/array-literal-transitions.js b/test/mjsunit/array-literal-transitions.js
new file mode 100644
index 0000000..f657525
--- /dev/null
+++ b/test/mjsunit/array-literal-transitions.js
@@ -0,0 +1,210 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Test the elements kinds of objects.
+// Since --smi-only-arrays affects builtins, the value it has at compile time
+// is baked in when building with a snapshot.  If --smi-only-arrays is off by
+// default, only a no-snapshot build actually has smi-only arrays enabled in
+// this test case.  The test detects at runtime whether smi-only arrays are
+// enabled and takes the appropriate code path.
+
+support_smi_only_arrays = %HasFastSmiOnlyElements([1,2,3,4,5,6,7,8,9,10]);
+
+if (support_smi_only_arrays) {
+  print("Tests include smi-only arrays.");
+} else {
+  print("Tests do NOT include smi-only arrays.");
+}
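+
+// Elements kinds only transition forward: FAST_SMI_ONLY -> FAST_DOUBLE ->
+// FAST (tagged).  A literal moves to a more general kind as soon as it
+// stores a value the current kind cannot represent, and never moves back.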
+
+// IC and Crankshaft support for smi-only elements in dynamic array literals.
+function get(foo) { return foo; }  // Used to generate dynamic values.
+
+function array_literal_test() {
+  var a0 = [1, 2, 3];
+  assertTrue(%HasFastSmiOnlyElements(a0));
+  var a1 = [get(1), get(2), get(3)];
+  assertTrue(%HasFastSmiOnlyElements(a1));
+
+  var b0 = [1, 2, get("three")];
+  assertTrue(%HasFastElements(b0));
+  var b1 = [get(1), get(2), get("three")];
+  assertTrue(%HasFastElements(b1));
+
+  var c0 = [1, 2, get(3.5)];
+  assertTrue(%HasFastDoubleElements(c0));
+  assertEquals(3.5, c0[2]);
+  assertEquals(2, c0[1]);
+  assertEquals(1, c0[0]);
+
+  var c1 = [1, 2, 3.5];
+  assertTrue(%HasFastDoubleElements(c1));
+  assertEquals(3.5, c1[2]);
+  assertEquals(2, c1[1]);
+  assertEquals(1, c1[0]);
+
+  var c2 = [get(1), get(2), get(3.5)];
+  assertTrue(%HasFastDoubleElements(c2));
+  assertEquals(3.5, c2[2]);
+  assertEquals(2, c2[1]);
+  assertEquals(1, c2[0]);
+
+  var object = new Object();
+  var d0 = [1, 2, object];
+  assertTrue(%HasFastElements(d0));
+  assertEquals(object, d0[2]);
+  assertEquals(2, d0[1]);
+  assertEquals(1, d0[0]);
+
+  var e0 = [1, 2, 3.5];
+  assertTrue(%HasFastDoubleElements(e0));
+  assertEquals(3.5, e0[2]);
+  assertEquals(2, e0[1]);
+  assertEquals(1, e0[0]);
+
+  var f0 = [1, 2, [1, 2]];
+  assertTrue(%HasFastElements(f0));
+  assertEquals([1,2], f0[2]);
+  assertEquals(2, f0[1]);
+  assertEquals(1, f0[0]);
+}
+
+if (support_smi_only_arrays) {
+  for (var i = 0; i < 3; i++) {
+    array_literal_test();
+  }
+  %OptimizeFunctionOnNextCall(array_literal_test);
+  array_literal_test();
+
+  function test_large_literal() {
+
+    function d() {
+      gc();
+      return 2.5;
+    }
+
+    function o() {
+      gc();
+      return new Object();
+    }
+
+    large =
+        [ 0, 1, 2, 3, 4, 5, d(), d(), d(), d(), d(), d(), o(), o(), o(), o() ];
+    assertFalse(%HasDictionaryElements(large));
+    assertFalse(%HasFastSmiOnlyElements(large));
+    assertFalse(%HasFastDoubleElements(large));
+    assertTrue(%HasFastElements(large));
+    assertEquals(large,
+                 [0, 1, 2, 3, 4, 5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5,
+                  new Object(), new Object(), new Object(), new Object()]);
+  }
+
+  for (var i = 0; i < 3; i++) {
+    test_large_literal();
+  }
+  %OptimizeFunctionOnNextCall(test_large_literal);
+  test_large_literal();
+
+  function deopt_array(use_literal) {
+    if (use_literal) {
+      return [.5, 3, 4];
+    } else {
+      return new Array();
+    }
+  }
+
+  deopt_array(false);
+  deopt_array(false);
+  deopt_array(false);
+  %OptimizeFunctionOnNextCall(deopt_array);
+  var array = deopt_array(false);
+  assertTrue(2 != %GetOptimizationStatus(deopt_array));
+  deopt_array(true);
+  assertTrue(2 != %GetOptimizationStatus(deopt_array));
+  array = deopt_array(false);
+  assertTrue(2 != %GetOptimizationStatus(deopt_array));
+
+  // Check that unexpected changes in the objects stored into the boilerplate
+  // also force a deopt.
+  function deopt_array_literal_all_smis(a) {
+    return [0, 1, a];
+  }
+
+  deopt_array_literal_all_smis(2);
+  deopt_array_literal_all_smis(3);
+  deopt_array_literal_all_smis(4);
+  array = deopt_array_literal_all_smis(4);
+  assertEquals(0, array[0]);
+  assertEquals(1, array[1]);
+  assertEquals(4, array[2]);
+  %OptimizeFunctionOnNextCall(deopt_array_literal_all_smis);
+  array = deopt_array_literal_all_smis(5);
+  array = deopt_array_literal_all_smis(6);
+  assertTrue(2 != %GetOptimizationStatus(deopt_array_literal_all_smis));
+  assertEquals(0, array[0]);
+  assertEquals(1, array[1]);
+  assertEquals(6, array[2]);
+
+  array = deopt_array_literal_all_smis(.5);
+  assertTrue(1 != %GetOptimizationStatus(deopt_array_literal_all_smis));
+  assertEquals(0, array[0]);
+  assertEquals(1, array[1]);
+  assertEquals(.5, array[2]);
+
+  function deopt_array_literal_all_doubles(a) {
+    return [0.5, 1, a];
+  }
+
+  deopt_array_literal_all_doubles(.5);
+  deopt_array_literal_all_doubles(.5);
+  deopt_array_literal_all_doubles(.5);
+  array = deopt_array_literal_all_doubles(0.5);
+  assertEquals(0.5, array[0]);
+  assertEquals(1, array[1]);
+  assertEquals(0.5, array[2]);
+  %OptimizeFunctionOnNextCall(deopt_array_literal_all_doubles);
+  array = deopt_array_literal_all_doubles(5);
+  array = deopt_array_literal_all_doubles(6);
+  assertTrue(2 != %GetOptimizationStatus(deopt_array_literal_all_doubles));
+  assertEquals(0.5, array[0]);
+  assertEquals(1, array[1]);
+  assertEquals(6, array[2]);
+
+  var foo = new Object();
+  array = deopt_array_literal_all_doubles(foo);
+  assertTrue(1 != %GetOptimizationStatus(deopt_array_literal_all_doubles));
+  assertEquals(0.5, array[0]);
+  assertEquals(1, array[1]);
+  assertEquals(foo, array[2]);
+}
+
+(function literals_after_osr() {
+  var color = [0];
+  // Trigger OSR.
+  while (%GetOptimizationStatus(literals_after_osr) == 2) {}
+  return [color[0]];
+})();
diff --git a/test/mjsunit/array-store-and-grow.js b/test/mjsunit/array-store-and-grow.js
new file mode 100644
index 0000000..131d4eb
--- /dev/null
+++ b/test/mjsunit/array-store-and-grow.js
@@ -0,0 +1,183 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Verifies that the KeyedStoreIC correctly handles out-of-bounds stores
+// that grow an array by a single element.  Test functions are called
+// twice to make sure that the IC is used; the first call is handled
+// by the runtime in the miss stub.
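+//
+// The warm-up pattern is therefore: the first call misses and the runtime
+// installs a grow-capable stub; the second call exercises the stub itself,
+// and only then do we assert on the results.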
+
+function array_store_1(a,b,c) {
+  return (a[b] = c);
+}
+
+// Check handling of the empty array.
+var a = [];
+array_store_1(a, 0, 1);
+a = [];
+array_store_1(a, 0, 1);
+assertEquals(1, a[0]);
+assertEquals(1, array_store_1([], 0, 1));
+
+a = [];
+for (x=0;x<100000;++x) {
+  assertEquals(x, array_store_1(a, x, x));
+}
+
+for (x=0;x<100000;++x) {
+  assertEquals(x, array_store_1([], 0, x));
+}
+
+function array_store_2(a,b,c) {
+  return (a[b] = c);
+}
+
+a = [];
+array_store_2(a, 0, 0.5);
+a = [];
+array_store_2(a, 0, 0.5);
+assertEquals(0.5, a[0]);
+assertEquals(0.5, array_store_2([], 0, 0.5));
+
+function array_store_3(a,b,c) {
+  return (a[b] = c);
+}
+
+x = new Object();
+a = [];
+array_store_3(a, 0, x);
+a = [];
+array_store_3(a, 0, x);
+assertEquals(x, a[0]);
+assertEquals(x, array_store_3([], 0, x));
+
+// Check the handling of COW (copy-on-write) arrays.
+function makeCOW() {
+  return [1];
+}
+
+function array_store_4(a,b,c) {
+  return (a[b] = c);
+}
+
+a = makeCOW();
+array_store_4(a, 1, 1);
+a = makeCOW();
+array_store_4(a, 1, 1);
+assertEquals(1, a[1]);
+assertEquals(1, array_store_4([], 1, 1));
+
+function array_store_5(a,b,c) {
+  return (a[b] = c);
+}
+
+a = makeCOW();
+array_store_5(a, 1, 0.5);
+a = makeCOW();
+array_store_5(a, 1, 0.5);
+assertEquals(0.5, a[1]);
+assertEquals(0.5, array_store_5([], 1, 0.5));
+
+function array_store_6(a,b,c) {
+  return (a[b] = c);
+}
+
+a = makeCOW();
+array_store_6(a, 1, x);
+a = makeCOW();
+array_store_6(a, 1, x);
+assertEquals(x, a[1]);
+assertEquals(x, array_store_6([], 1, x));
+
+// Check the handling of mutable arrays.
+a = new Array(1,2,3);
+array_store_4(a, 3, 1);
+a = new Array(1,2,3);
+array_store_4(a, 3, 1);
+assertEquals(1, a[3]);
+assertEquals(1, array_store_4([], 3, 1));
+
+function array_store_5(a,b,c) {
+  return (a[b] = c);
+}
+
+a = new Array(1,2,3);
+array_store_5(a, 3, 0.5);
+a = new Array(1,2,3);
+array_store_5(a, 3, 0.5);
+assertEquals(0.5, a[3]);
+assertEquals(0.5, array_store_5([], 3, 0.5));
+
+function array_store_6(a,b,c) {
+  return (a[b] = c);
+}
+
+a = new Array(1,2,3);
+array_store_6(a, 3, x);
+a = new Array(1,2,3);
+array_store_6(a, 3, x);
+assertEquals(x, a[3]);
+assertEquals(x, array_store_6([], 3, x));
+
+function array_store_7(a,b,c) {
+  return (a[b] = c);
+}
+
+// Check the handling of mutable arrays of doubles.
+var a = new Array(0.5, 1.5);
+array_store_7(a, 2, .5);
+a = new Array(0.5, 1.5);
+array_store_7(a, 2, .5);
+assertEquals(0.5, a[2]);
+a = new Array(0.5, 1.5);
+assertEquals(0.5, array_store_7(a, 2, 0.5));
+
+for (x=0;x<100000;++x) {
+  a = new Array(0.5, 1.5);
+  assertEquals(x, array_store_7(a, 2, x));
+}
+
+function array_store_8(a,b,c) {
+  return (a[b] = c);
+}
+
+var a = new Array(0.5, 1.5);
+array_store_8(a, 2, .5);
+a = new Array(0.5, 1.5);
+array_store_8(a, 10, .5);
+assertEquals(0.5, a[10]);
+
+// Grow the empty array with a double store.
+function array_store_9(a,b,c) {
+  return (a[b] = c);
+}
+
+var a = [];
+array_store_9(a, 0, 0.5);
+a = [];
+array_store_1(a, 0, 0.5);
+assertEquals(0.5, a[0]);
+assertEquals(0.5, array_store_1([], 0, 0.5));
diff --git a/test/mjsunit/array-tostring.js b/test/mjsunit/array-tostring.js
new file mode 100644
index 0000000..6708657
--- /dev/null
+++ b/test/mjsunit/array-tostring.js
@@ -0,0 +1,159 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Array's toString should call the object's own join method if one exists
+// and is callable; otherwise it falls back to Object.prototype.toString.
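+//
+// A small illustration of that dispatch (a sketch relying only on the
+// standard behavior the tests below verify):
+assertEquals("1,2", [1, 2].toString());  // callable join is used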
+
+var success = "[test success]";
+var expectedThis;
+function testJoin() {
+  assertEquals(0, arguments.length);
+  assertSame(expectedThis, this);
+  return success;
+}
+
+
+// On an Array object.
+
+// Default case.
+var a1 = [1, 2, 3];
+assertEquals(a1.join(), a1.toString());
+
+// Non-standard "join" function is called correctly.
+var a2 = [1, 2, 3];
+a2.join = testJoin;
+expectedThis = a2;
+assertEquals(success, a2.toString());
+
+// Non-callable join function is ignored and Object.prototype.toString is
+// used instead.
+var a3 = [1, 2, 3];
+a3.join = "not callable";
+assertEquals("[object Array]", a3.toString());
+
+// Non-existing join function is treated same as non-callable.
+var a4 = [1, 2, 3];
+a4.__proto__ = { toString: Array.prototype.toString };
+// No join on Array.
+assertEquals("[object Array]", a4.toString());
+
+
+// On a non-Array object.
+
+// Default looks-like-an-array case.
+var o1 = {length: 3, 0: 1, 1: 2, 2: 3,
+          toString: Array.prototype.toString,
+          join: Array.prototype.join};
+assertEquals(o1.join(), o1.toString());
+
+
+// Non-standard join is called correctly.
+// Check that we don't read, e.g., length before calling join.
+var o2 = {toString : Array.prototype.toString,
+          join: testJoin,
+          get length() { assertUnreachable(); },
+          get 0() { assertUnreachable(); }};
+expectedThis = o2;
+assertEquals(success, o2.toString());
+
+// Non-standard join is called even if it looks like an array.
+var o3 = {length: 3, 0: 1, 1: 2, 2: 3,
+          toString: Array.prototype.toString,
+          join: testJoin};
+expectedThis = o3;
+assertEquals(success, o3.toString());
+
+// Non-callable join works same as for Array.
+var o4 = {length: 3, 0: 1, 1: 2, 2: 3,
+          toString: Array.prototype.toString,
+          join: "not callable"};
+assertEquals("[object Object]", o4.toString());
+
+
+// Non-existing join works same as for Array.
+var o5 = {length: 3, 0: 1, 1: 2, 2: 3,
+          toString: Array.prototype.toString
+          /* no join */};
+assertEquals("[object Object]", o5.toString());
+
+
+// Test that ToObject is called before getting "join", so the instance
+// that "join" is read from is the same one passed as receiver later.
+var called_before = false;
+expectedThis = null;
+Object.defineProperty(Number.prototype, "join", {get: function() {
+            assertFalse(called_before);
+            called_before = true;
+            expectedThis = this;
+            return testJoin;
+        }});
+Number.prototype.arrayToString = Array.prototype.toString;
+assertEquals(success, (42).arrayToString());
+
+// ----------------------------------------------------------
+// Testing Array.prototype.toLocaleString
+
+// Ensure that it never uses Array.prototype.toString for anything.
+Array.prototype.toString = function() { assertUnreachable(); };
+
+// Default case.
+var la1 = [1, [2, 3], 4];
+assertEquals("1,2,3,4", la1.toLocaleString());
+
+// Used on a string (which looks like an array of characters).
+String.prototype.toLocaleString = Array.prototype.toLocaleString;
+assertEquals("1,2,3,4", "1234".toLocaleString());
+
+// If toLocaleString of element is not callable, throw a TypeError.
+var la2 = [1, {toLocaleString: "not callable"}, 3];
+assertThrows(function() { la2.toLocaleString(); }, TypeError);
+
+// If toLocaleString of element is callable, call it.
+var la3 = [1, {toLocaleString: function() { return "XX";}}, 3];
+assertEquals("1,XX,3", la3.toLocaleString());
+
+// Omitted elements, as well as undefined and null, become empty string.
+var la4 = [1, null, 3, undefined, 5,, 7];
+assertEquals("1,,3,,5,,7", la4.toLocaleString());
+
+
+// ToObject is called first, and the same object is used for the rest
+// of the operations.
+Object.defineProperty(Number.prototype, "length", {
+    get: function() {
+      expectedThis = this;
+      return 3;
+    }});
+for (var i = 0; i < 3; i++) {
+  Object.defineProperty(Number.prototype, i, {
+      get: function() {
+        assertEquals(expectedThis, this);
+        return +this;
+      }});
+}
+Number.prototype.arrayToLocaleString = Array.prototype.toLocaleString;
+assertEquals("42,42,42", (42).arrayToLocaleString());
\ No newline at end of file
diff --git a/test/mjsunit/assert-opt-and-deopt.js b/test/mjsunit/assert-opt-and-deopt.js
index c9adb5b..51cb99a 100644
--- a/test/mjsunit/assert-opt-and-deopt.js
+++ b/test/mjsunit/assert-opt-and-deopt.js
@@ -150,11 +150,6 @@
 
 f(1);
 
-tracker.AssertOptCount(f, 0);
-tracker.AssertIsOptimized(f, false);
-tracker.AssertDeoptHappened(f, false);
-tracker.AssertDeoptCount(f, 0);
-
 %OptimizeFunctionOnNextCall(f);
 f(1);
 
@@ -172,6 +167,7 @@
 
 // Let's trigger optimization for another type.
 for (var i = 0; i < 5; i++) f("a");
+
 %OptimizeFunctionOnNextCall(f);
 f("b");
 
diff --git a/test/mjsunit/bugs/bug-618.js b/test/mjsunit/bugs/bug-618.js
index ae84326..0513f87 100644
--- a/test/mjsunit/bugs/bug-618.js
+++ b/test/mjsunit/bugs/bug-618.js
@@ -42,4 +42,4 @@
 
 assertEquals(23, new C().x);
 C.prototype.__defineSetter__('x', function(value) { this.y = 23; });
-assertEquals(void 0, new C().x));
+assertEquals(void 0, new C().x);
diff --git a/test/mjsunit/bugs/harmony/debug-blockscopes.js b/test/mjsunit/bugs/harmony/debug-blockscopes.js
index a407c53..fda32eb 100644
--- a/test/mjsunit/bugs/harmony/debug-blockscopes.js
+++ b/test/mjsunit/bugs/harmony/debug-blockscopes.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug --harmony-block-scoping
+// Flags: --expose-debug-as debug --harmony-scoping
 // The functions used for testing backtraces. They are at the top to make the
 // testing of source line/column easier.
 
diff --git a/test/mjsunit/builtins.js b/test/mjsunit/builtins.js
index f2ad544..e43b589 100644
--- a/test/mjsunit/builtins.js
+++ b/test/mjsunit/builtins.js
@@ -27,8 +27,7 @@
 
 // Flags: --expose-natives-as=builtins
 
-// Checks that all function properties of the builtin object are neither
-// writable nor configurable. Also, theose functions that are actually
+// Checks that all function properties of the builtin object that are actually
 // constructors (recognized by having properties on their .prototype object),
 // have only unconfigurable properties on the prototype, and the methods
 // are also non-writable.
@@ -75,8 +74,6 @@
   assertTrue(desc.hasOwnProperty("value"));
   var value = desc.value;
   if (isFunction(value)) {
-    assertFalse(desc.writable, name);
-    assertFalse(desc.configurable, name);
     checkConstructor(value, name);
   }
 }
diff --git a/test/mjsunit/comparison-ops-and-undefined.js b/test/mjsunit/comparison-ops-and-undefined.js
new file mode 100644
index 0000000..06db076
--- /dev/null
+++ b/test/mjsunit/comparison-ops-and-undefined.js
@@ -0,0 +1,128 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function test_helper_for_ics(func, b1, b2, b3, b4) {
+  assertEquals(b1, func(.5, .5));
+  assertEquals(b2, func(.5, undefined));
+  assertEquals(b3, func(undefined, .5));
+  assertEquals(b4, func(undefined, undefined));
+}
+
+function test_helper_for_crankshaft(func, b1, b2, b3, b4) {
+  assertEquals(b1, func(.5, .5));
+  %OptimizeFunctionOnNextCall(func);
+  assertEquals(b1, func(.5, .5));
+  assertEquals(b2, func(.5, undefined));
+  assertEquals(b3, func(undefined, .5));
+  assertEquals(b4, func(undefined, undefined));
+}
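+
+// In both helpers, b1..b4 are the expected results for the argument pairs
+// (.5, .5), (.5, undefined), (undefined, .5), and (undefined, undefined).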
+
+function less_1(a, b) {
+  return a < b;
+}
+
+test_helper_for_ics(less_1, false, false, false, false);
+
+function less_2(a, b) {
+  return a < b;
+}
+
+test_helper_for_crankshaft(less_2, false, false, false, false);
+
+function greater_1(a, b) {
+  return a > b;
+}
+
+test_helper_for_ics(greater_1, false, false, false, false);
+
+function greater_2(a, b) {
+  return a > b;
+}
+
+test_helper_for_crankshaft(greater_2, false, false, false, false);
+
+function less_equal_1(a, b) {
+  return a <= b;
+}
+
+test_helper_for_ics(less_equal_1, true, false, false, false);
+
+function less_equal_2(a, b) {
+  return a <= b;
+}
+
+test_helper_for_crankshaft(less_equal_2, true, false, false, false);
+
+function greater_equal_1(a, b) {
+  return a >= b;
+}
+
+test_helper_for_ics(greater_equal_1, true, false, false, false);
+
+function greater_equal_2(a, b) {
+  return a >= b;
+}
+
+test_helper_for_crankshaft(greater_equal_2, true, false, false, false);
+
+function equal_1(a, b) {
+  return a == b;
+}
+
+test_helper_for_ics(equal_1, true, false, false, true);
+
+function equal_2(a, b) {
+  return a == b;
+}
+
+test_helper_for_crankshaft(equal_2, true, false, false, true);
+
+function strict_equal_1(a, b) {
+  return a === b;
+}
+
+test_helper_for_ics(strict_equal_1, true, false, false, true);
+
+function strict_equal_2(a, b) {
+  return a === b;
+}
+
+test_helper_for_crankshaft(strict_equal_2, true, false, false, true);
+
+function not_equal_1(a, b) {
+  return a != b;
+}
+
+test_helper_for_ics(not_equal_1, false, true, true, false);
+
+function not_equal_2(a, b) {
+  return a != b;
+}
+
+test_helper_for_crankshaft(not_equal_2, false, true, true, false);
diff --git a/test/mjsunit/compiler/alloc-object-huge.js b/test/mjsunit/compiler/alloc-object-huge.js
new file mode 100644
index 0000000..d6d9f1b
--- /dev/null
+++ b/test/mjsunit/compiler/alloc-object-huge.js
@@ -0,0 +1,308 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --inline-construct --nolimit-inlining
+
+// Test that huge constructors (with more than 256 assignments to 'this')
+// are handled correctly.
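+// With 257 properties, the constructor below ends one assignment past that
+// limit, so both the boundary and the overflow path are covered.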
+
+// Test huge constructor when being inlined into hydrogen.
+function test() {
+  return new huge();
+}
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+var o = test();
+assertEquals(1, o.foo1);
+assertEquals(257, o.foo257);
+
+// Test huge constructor with specialized constructor stub.
+var o = new huge();
+assertEquals(1, o.foo1);
+assertEquals(257, o.foo257);
+
+// The huge constructor, nothing interesting beyond this point.
+function huge() {
+  this.foo1 = 1;
+  this.foo2 = 2;
+  this.foo3 = 3;
+  this.foo4 = 4;
+  this.foo5 = 5;
+  this.foo6 = 6;
+  this.foo7 = 7;
+  this.foo8 = 8;
+  this.foo9 = 9;
+  this.foo10 = 10;
+  this.foo11 = 11;
+  this.foo12 = 12;
+  this.foo13 = 13;
+  this.foo14 = 14;
+  this.foo15 = 15;
+  this.foo16 = 16;
+  this.foo17 = 17;
+  this.foo18 = 18;
+  this.foo19 = 19;
+  this.foo20 = 20;
+  this.foo21 = 21;
+  this.foo22 = 22;
+  this.foo23 = 23;
+  this.foo24 = 24;
+  this.foo25 = 25;
+  this.foo26 = 26;
+  this.foo27 = 27;
+  this.foo28 = 28;
+  this.foo29 = 29;
+  this.foo30 = 30;
+  this.foo31 = 31;
+  this.foo32 = 32;
+  this.foo33 = 33;
+  this.foo34 = 34;
+  this.foo35 = 35;
+  this.foo36 = 36;
+  this.foo37 = 37;
+  this.foo38 = 38;
+  this.foo39 = 39;
+  this.foo40 = 40;
+  this.foo41 = 41;
+  this.foo42 = 42;
+  this.foo43 = 43;
+  this.foo44 = 44;
+  this.foo45 = 45;
+  this.foo46 = 46;
+  this.foo47 = 47;
+  this.foo48 = 48;
+  this.foo49 = 49;
+  this.foo50 = 50;
+  this.foo51 = 51;
+  this.foo52 = 52;
+  this.foo53 = 53;
+  this.foo54 = 54;
+  this.foo55 = 55;
+  this.foo56 = 56;
+  this.foo57 = 57;
+  this.foo58 = 58;
+  this.foo59 = 59;
+  this.foo60 = 60;
+  this.foo61 = 61;
+  this.foo62 = 62;
+  this.foo63 = 63;
+  this.foo64 = 64;
+  this.foo65 = 65;
+  this.foo66 = 66;
+  this.foo67 = 67;
+  this.foo68 = 68;
+  this.foo69 = 69;
+  this.foo70 = 70;
+  this.foo71 = 71;
+  this.foo72 = 72;
+  this.foo73 = 73;
+  this.foo74 = 74;
+  this.foo75 = 75;
+  this.foo76 = 76;
+  this.foo77 = 77;
+  this.foo78 = 78;
+  this.foo79 = 79;
+  this.foo80 = 80;
+  this.foo81 = 81;
+  this.foo82 = 82;
+  this.foo83 = 83;
+  this.foo84 = 84;
+  this.foo85 = 85;
+  this.foo86 = 86;
+  this.foo87 = 87;
+  this.foo88 = 88;
+  this.foo89 = 89;
+  this.foo90 = 90;
+  this.foo91 = 91;
+  this.foo92 = 92;
+  this.foo93 = 93;
+  this.foo94 = 94;
+  this.foo95 = 95;
+  this.foo96 = 96;
+  this.foo97 = 97;
+  this.foo98 = 98;
+  this.foo99 = 99;
+  this.foo100 = 100;
+  this.foo101 = 101;
+  this.foo102 = 102;
+  this.foo103 = 103;
+  this.foo104 = 104;
+  this.foo105 = 105;
+  this.foo106 = 106;
+  this.foo107 = 107;
+  this.foo108 = 108;
+  this.foo109 = 109;
+  this.foo110 = 110;
+  this.foo111 = 111;
+  this.foo112 = 112;
+  this.foo113 = 113;
+  this.foo114 = 114;
+  this.foo115 = 115;
+  this.foo116 = 116;
+  this.foo117 = 117;
+  this.foo118 = 118;
+  this.foo119 = 119;
+  this.foo120 = 120;
+  this.foo121 = 121;
+  this.foo122 = 122;
+  this.foo123 = 123;
+  this.foo124 = 124;
+  this.foo125 = 125;
+  this.foo126 = 126;
+  this.foo127 = 127;
+  this.foo128 = 128;
+  this.foo129 = 129;
+  this.foo130 = 130;
+  this.foo131 = 131;
+  this.foo132 = 132;
+  this.foo133 = 133;
+  this.foo134 = 134;
+  this.foo135 = 135;
+  this.foo136 = 136;
+  this.foo137 = 137;
+  this.foo138 = 138;
+  this.foo139 = 139;
+  this.foo140 = 140;
+  this.foo141 = 141;
+  this.foo142 = 142;
+  this.foo143 = 143;
+  this.foo144 = 144;
+  this.foo145 = 145;
+  this.foo146 = 146;
+  this.foo147 = 147;
+  this.foo148 = 148;
+  this.foo149 = 149;
+  this.foo150 = 150;
+  this.foo151 = 151;
+  this.foo152 = 152;
+  this.foo153 = 153;
+  this.foo154 = 154;
+  this.foo155 = 155;
+  this.foo156 = 156;
+  this.foo157 = 157;
+  this.foo158 = 158;
+  this.foo159 = 159;
+  this.foo160 = 160;
+  this.foo161 = 161;
+  this.foo162 = 162;
+  this.foo163 = 163;
+  this.foo164 = 164;
+  this.foo165 = 165;
+  this.foo166 = 166;
+  this.foo167 = 167;
+  this.foo168 = 168;
+  this.foo169 = 169;
+  this.foo170 = 170;
+  this.foo171 = 171;
+  this.foo172 = 172;
+  this.foo173 = 173;
+  this.foo174 = 174;
+  this.foo175 = 175;
+  this.foo176 = 176;
+  this.foo177 = 177;
+  this.foo178 = 178;
+  this.foo179 = 179;
+  this.foo180 = 180;
+  this.foo181 = 181;
+  this.foo182 = 182;
+  this.foo183 = 183;
+  this.foo184 = 184;
+  this.foo185 = 185;
+  this.foo186 = 186;
+  this.foo187 = 187;
+  this.foo188 = 188;
+  this.foo189 = 189;
+  this.foo190 = 190;
+  this.foo191 = 191;
+  this.foo192 = 192;
+  this.foo193 = 193;
+  this.foo194 = 194;
+  this.foo195 = 195;
+  this.foo196 = 196;
+  this.foo197 = 197;
+  this.foo198 = 198;
+  this.foo199 = 199;
+  this.foo200 = 200;
+  this.foo201 = 201;
+  this.foo202 = 202;
+  this.foo203 = 203;
+  this.foo204 = 204;
+  this.foo205 = 205;
+  this.foo206 = 206;
+  this.foo207 = 207;
+  this.foo208 = 208;
+  this.foo209 = 209;
+  this.foo210 = 210;
+  this.foo211 = 211;
+  this.foo212 = 212;
+  this.foo213 = 213;
+  this.foo214 = 214;
+  this.foo215 = 215;
+  this.foo216 = 216;
+  this.foo217 = 217;
+  this.foo218 = 218;
+  this.foo219 = 219;
+  this.foo220 = 220;
+  this.foo221 = 221;
+  this.foo222 = 222;
+  this.foo223 = 223;
+  this.foo224 = 224;
+  this.foo225 = 225;
+  this.foo226 = 226;
+  this.foo227 = 227;
+  this.foo228 = 228;
+  this.foo229 = 229;
+  this.foo230 = 230;
+  this.foo231 = 231;
+  this.foo232 = 232;
+  this.foo233 = 233;
+  this.foo234 = 234;
+  this.foo235 = 235;
+  this.foo236 = 236;
+  this.foo237 = 237;
+  this.foo238 = 238;
+  this.foo239 = 239;
+  this.foo240 = 240;
+  this.foo241 = 241;
+  this.foo242 = 242;
+  this.foo243 = 243;
+  this.foo244 = 244;
+  this.foo245 = 245;
+  this.foo246 = 246;
+  this.foo247 = 247;
+  this.foo248 = 248;
+  this.foo249 = 249;
+  this.foo250 = 250;
+  this.foo251 = 251;
+  this.foo252 = 252;
+  this.foo253 = 253;
+  this.foo254 = 254;
+  this.foo255 = 255;
+  this.foo256 = 256;
+  this.foo257 = 257;
+}
diff --git a/test/mjsunit/compiler/alloc-object.js b/test/mjsunit/compiler/alloc-object.js
new file mode 100644
index 0000000..1d44efb
--- /dev/null
+++ b/test/mjsunit/compiler/alloc-object.js
@@ -0,0 +1,90 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc --inline-construct
+
+// Test that inlined object allocation works for different layouts of
+// objects (e.g. in-object properties, slack tracking in progress, or
+// changing function prototypes).
+
+function test_helper(construct, a, b) {
+  return new construct(a, b);
+}
+
+function test(construct) {
+  %DeoptimizeFunction(test);
+  test_helper(construct, 0, 0);
+  test_helper(construct, 0, 0);
+  %OptimizeFunctionOnNextCall(test_helper);
+  // Test adding a new property after allocation was inlined.
+  var o = test_helper(construct, 1, 2);
+  o.z = 3;
+  assertEquals(1, o.x);
+  assertEquals(2, o.y);
+  assertEquals(3, o.z);
+  // Test changing the prototype after allocation was inlined.
+  construct.prototype = { z:6 };
+  var o = test_helper(construct, 4, 5);
+  assertEquals(4, o.x);
+  assertEquals(5, o.y);
+  assertEquals(6, o.z);
+  %DeoptimizeFunction(test_helper);
+  gc();  // Makes V8 forget about type information for test_helper.
+}
+
+function finalize_slack_tracking(construct) {
+  // Value chosen based on kGenerousAllocationCount = 8.
+  for (var i = 0; i < 8; i++) {
+    new construct(0, 0);
+  }
+}
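+
+// Slack tracking lets new objects start with generous in-object space that
+// is trimmed after the first few instances are built; allocating
+// kGenerousAllocationCount instances up front (assumed to still be 8, per
+// the comment above) forces that process to finish.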
+
+
+// Both properties are pre-allocated in object properties.
+function ConstructInObjectPreAllocated(a, b) {
+  this.x = a;
+  this.y = b;
+}
+finalize_slack_tracking(ConstructInObjectPreAllocated);
+test(ConstructInObjectPreAllocated);
+
+
+// Both properties are unused in object properties.
+function ConstructInObjectUnused(a, b) {
+  this.x = a < 0 ? 0 : a;
+  this.y = b > 0 ? b : 0;
+}
+finalize_slack_tracking(ConstructInObjectUnused);
+test(ConstructInObjectUnused);
+
+
+// Test inlined allocation while slack tracking is still in progress.
+function ConstructWhileSlackTracking(a, b) {
+  this.x = a;
+  this.y = b;
+}
+test(ConstructWhileSlackTracking);
diff --git a/test/mjsunit/compiler/compare.js b/test/mjsunit/compiler/compare.js
index 3f96087..460b0ab 100644
--- a/test/mjsunit/compiler/compare.js
+++ b/test/mjsunit/compiler/compare.js
@@ -83,9 +83,9 @@
 }
 
 TestNonPrimitive("xy", MaxLT);
-TestNonPrimitive("yx", MaxLE);
+TestNonPrimitive("xy", MaxLE);
 TestNonPrimitive("xy", MaxGE);
-TestNonPrimitive("yx", MaxGT);
+TestNonPrimitive("xy", MaxGT);
 
 // Test compare in case of aliased registers.
 function CmpX(x) { if (x == x) return 42; }
diff --git a/test/mjsunit/compiler/inline-arguments.js b/test/mjsunit/compiler/inline-arguments.js
index 532fc26..b6adf7f 100644
--- a/test/mjsunit/compiler/inline-arguments.js
+++ b/test/mjsunit/compiler/inline-arguments.js
@@ -27,11 +27,89 @@
 
 // Flags: --allow-natives-syntax
 
-// Test inlining functions that use arguments.
-function f() { return g(1, 2, 3); }
+function A() {
+}
 
-function g(x, y, z) { return %_ArgumentsLength(); }
+A.prototype.X = function (a, b, c) {
+  assertTrue(this instanceof A);
+  assertEquals(1, a);
+  assertEquals(2, b);
+  assertEquals(3, c);
+};
 
-for (var i = 0; i < 5; ++i) f();
-%OptimizeFunctionOnNextCall(f);
-assertEquals(3, f());
+A.prototype.Y = function () {
+  this.X.apply(this, arguments);
+};
+
+A.prototype.Z = function () {
+  this.Y(1,2,3);
+};
+
+var a = new A();
+a.Z(4,5,6);
+a.Z(4,5,6);
+%OptimizeFunctionOnNextCall(a.Z);
+a.Z(4,5,6);
+A.prototype.X.apply = function (receiver, args) {
+  return Function.prototype.apply.call(this, receiver, args);
+};
+a.Z(4,5,6);
+
+
+// Ensure that HArgumentsObject is inserted in the right place and
+// dominates all of its uses.
+function F1() { }
+function F2() { F1.apply(this, arguments); }
+function F3(x, y) {
+  if (x) {
+    F2(y);
+  }
+}
+
+function F31() {
+  return F1.apply(this, arguments);
+}
+
+function F4() {
+  F3(true, false);
+  return F31(1);
+}
+
+F4(1);
+F4(1);
+F4(1);
+%OptimizeFunctionOnNextCall(F4);
+F4(1);
+
+
+// Test correct adaptation of arguments.
+// Strict mode prevents arguments object from shadowing parameters.
+(function () {
+  "use strict";
+
+  function G2() {
+    assertArrayEquals([1,2], arguments);
+  }
+
+  function G4() {
+    assertArrayEquals([1,2,3,4], arguments);
+  }
+
+  function adapt2to4(a, b, c, d) {
+    G2.apply(this, arguments);
+  }
+
+  function adapt4to2(a, b) {
+    G4.apply(this, arguments);
+  }
+
+  function test_adaptation() {
+    adapt2to4(1, 2);
+    adapt4to2(1, 2, 3, 4);
+  }
+
+  test_adaptation();
+  test_adaptation();
+  %OptimizeFunctionOnNextCall(test_adaptation);
+  test_adaptation();
+})();
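+
+// A minimal standalone sketch of the aliasing rule exercised above
+// (illustrative only): a sloppy-mode arguments object aliases the
+// parameters, while strict mode breaks that link.
+function sloppy_alias(a) { a = 2; return arguments[0]; }
+assertEquals(2, sloppy_alias(1));  // aliased in sloppy mode
+function strict_alias(a) { "use strict"; a = 2; return arguments[0]; }
+assertEquals(1, strict_alias(1));  // strict mode: no aliasing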
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/compiler/inline-arity-mismatch.js
similarity index 70%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/compiler/inline-arity-mismatch.js
index d3f2e35..4a61fa3 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/compiler/inline-arity-mismatch.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,38 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+// Test inlining at call sites with mismatched arity.
 
-assertThrows('eval(delete eval)');
+function f(a) {
+  return a.x;
+}
 
+function g(a, b) {
+  return a.x;
+}
+
+function h1(a, b) {
+  return f(a, a) * g(b);
+}
+
+function h2(a, b) {
+  return f(a, a) * g(b);
+}
+
+
+var o = {x: 2};
+
+assertEquals(4, h1(o, o));
+assertEquals(4, h1(o, o));
+assertEquals(4, h2(o, o));
+assertEquals(4, h2(o, o));
+%OptimizeFunctionOnNextCall(h1);
+%OptimizeFunctionOnNextCall(h2);
+assertEquals(4, h1(o, o));
+assertEquals(4, h2(o, o));
+
+var u = {y:0, x:1};
+assertEquals(2, h1(u, o));
+assertEquals(2, h2(o, u));
diff --git a/test/mjsunit/compiler/inline-construct.js b/test/mjsunit/compiler/inline-construct.js
new file mode 100644
index 0000000..af9e69c
--- /dev/null
+++ b/test/mjsunit/compiler/inline-construct.js
@@ -0,0 +1,152 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc --inline-construct
+
+// Test inlining of constructor calls.
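+// Each constructor is exercised in value, test, and effect contexts below,
+// since the optimizer treats the result of 'new' differently in each.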
+
+function TestInlinedConstructor(closure) {
+  var result;
+  var counter = { value:0 };
+  result = closure(11, 12, counter);
+  assertEquals(23, result);
+  assertEquals(1, counter.value);
+  result = closure(23, 19, counter);
+  assertEquals(42, result);
+  assertEquals(2, counter.value);
+  %OptimizeFunctionOnNextCall(closure);
+  result = closure(1, 42, counter);
+  assertEquals(43, result);
+  assertEquals(3, counter.value);
+  result = closure("foo", "bar", counter);
+  assertEquals("foobar", result);
+  assertEquals(4, counter.value);
+}
+
+function TestInAllContexts(constructor) {
+  function value_context(a, b, counter) {
+    var obj = new constructor(a, b, counter);
+    return obj.x;
+  }
+  function test_context(a, b, counter) {
+    if (!new constructor(a, b, counter)) {
+      assertUnreachable("should not happen");
+    }
+    return a + b;
+  }
+  function effect_context(a, b, counter) {
+    new constructor(a, b, counter);
+    return a + b;
+  }
+  TestInlinedConstructor(value_context);
+  TestInlinedConstructor(test_context);
+  TestInlinedConstructor(effect_context);
+  %DeoptimizeFunction(value_context);
+  %DeoptimizeFunction(test_context);
+  %DeoptimizeFunction(effect_context);
+  gc();  // Makes V8 forget about type information for *_context.
+}
+
+
+// Test constructor returning nothing in all contexts.
+function c1(a, b, counter) {
+  this.x = a + b;
+  counter.value++;
+}
+TestInAllContexts(c1);
+
+
+// Test constructor returning an object in all contexts.
+function c2(a, b, counter) {
+  var obj = new Object();
+  obj.x = a + b;
+  counter.value++;
+  return obj;
+}
+TestInAllContexts(c2);
+
+
+// Test constructor returning a primitive value in all contexts.
+function c3(a, b, counter) {
+  this.x = a + b;
+  counter.value++;
+  return "not an object";
+}
+TestInAllContexts(c3);
+
+
+// Test constructor called with too many arguments.
+function c_too_many(a, b) {
+  this.x = a + b;
+}
+function f_too_many(a, b, c) {
+  var obj = new c_too_many(a, b, c);
+  return obj.x;
+}
+assertEquals(23, f_too_many(11, 12, 1));
+assertEquals(42, f_too_many(23, 19, 1));
+%OptimizeFunctionOnNextCall(f_too_many);
+assertEquals(43, f_too_many(1, 42, 1));
+assertEquals("foobar", f_too_many("foo", "bar", "baz"))
+
+
+// Test constructor called with too few arguments.
+function c_too_few(a, b) {
+  assertSame(undefined, b);
+  this.x = a + 1;
+}
+function f_too_few(a) {
+  var obj = new c_too_few(a);
+  return obj.x;
+}
+assertEquals(12, f_too_few(11));
+assertEquals(24, f_too_few(23));
+%OptimizeFunctionOnNextCall(f_too_few);
+assertEquals(2, f_too_few(1));
+assertEquals("foo1", f_too_few("foo"))
+
+
+// Test constructor that cannot be inlined.
+function c_unsupported_syntax(a, b, counter) {
+  try {
+    this.x = a + b;
+    counter.value++;
+  } catch(e) {
+    throw new Error();
+  }
+}
+TestInAllContexts(c_unsupported_syntax);
+
+
+// Regression test: Inlined constructors called as functions do not get their
+// implicit receiver object set to undefined, even in strict mode.
+function c_strict(a, b, counter) {
+  "use strict";
+  this.x = a + b;
+  counter.value++;
+}
+TestInAllContexts(c_strict);
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/compiler/inline-context-slots.js
similarity index 78%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/compiler/inline-context-slots.js
index 2502b53..d0e907b 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/compiler/inline-context-slots.js
@@ -25,22 +25,25 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Test inlining of functions with context slots.
 
-var e = new Error();
-assertEquals('Error', e + '');
+// Flags: --allow-natives-syntax
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+// Caller/callee without a local context.
+
+(function() {
+  var X = 5;
+  var Y = 10;
+  function F() {}
+  F.prototype.max = function() {
+    return X > Y ? X : Y;
+  };
+  F.prototype.run = function() {
+    return this.max();
+  };
+  var f = new F();
+  for (var i = 0; i < 5; i++) f.run();
+  %OptimizeFunctionOnNextCall(f.run);
+  assertEquals(10, f.run());
+})();
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/compiler/inline-literals.js
similarity index 70%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/compiler/inline-literals.js
index d3f2e35..f78abe8 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/compiler/inline-literals.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,26 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+// Test that we can inline functions containing materialized literals.
 
-assertThrows('eval(delete eval)');
+function o2(b, c) {
+  return { 'b':b, 'c':c, 'y':b + c };
+}
 
+function o1(a, b, c) {
+  return { 'a':a, 'x':o2(b, c) };
+}
+
+function TestObjectLiteral(a, b, c) {
+  var expected = { 'a':a, 'x':{ 'b':b, 'c':c, 'y':b + c } };
+  var result = o1(a, b, c);
+  assertEquals(expected, result, "TestObjectLiteral");
+}
+
+TestObjectLiteral(1, 2, 3);
+TestObjectLiteral(1, 2, 3);
+%OptimizeFunctionOnNextCall(TestObjectLiteral);
+TestObjectLiteral(1, 2, 3);
+TestObjectLiteral('a', 'b', 'c');
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/compiler/lazy-const-lookup.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/compiler/lazy-const-lookup.js
index d3f2e35..b4f15a1 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/compiler/lazy-const-lookup.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+function outer() {
+  const x = 1;
+  function inner() {
+    return x;
+  }
+  inner();
+  %OptimizeFunctionOnNextCall(inner);
+  inner();
+}
 
-assertThrows('eval(delete eval)');
+outer();
 
diff --git a/test/mjsunit/compiler/literals-optimized.js b/test/mjsunit/compiler/literals-optimized.js
new file mode 100644
index 0000000..049e21a
--- /dev/null
+++ b/test/mjsunit/compiler/literals-optimized.js
@@ -0,0 +1,121 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test optimized versions of array and object literals.
+
+function TestOptimizedLiteral(create, verify) {
+  verify(create(1, 2, 3), 1, 2, 3);
+  verify(create(3, 5, 7), 3, 5, 7);
+  %OptimizeFunctionOnNextCall(create);
+  verify(create(11, 23, 42), 11, 23, 42);
+}
+
+
+// Test shallow array literal.
+function create_arr_shallow(a, b, c) {
+  return [0, a, 0, b, 0, c];
+}
+function verify_arr_shallow(array, a, b, c) {
+  assertSame(6, array.length);
+  assertSame(0, array[0]);
+  assertSame(a, array[1]);
+  assertSame(0, array[2]);
+  assertSame(b, array[3]);
+  assertSame(0, array[4]);
+  assertSame(c, array[5]);
+}
+TestOptimizedLiteral(create_arr_shallow, verify_arr_shallow);
+
+
+// Test nested array literal.
+function create_arr_nested(a, b, c) {
+  return [[0, a], [b, c], [1, 2], 3];
+}
+function verify_arr_nested(array, a, b, c) {
+  assertSame(4, array.length);
+  assertSame(2, array[0].length);
+  assertSame(0, array[0][0]);
+  assertSame(a, array[0][1]);
+  assertSame(2, array[1].length);
+  assertSame(b, array[1][0]);
+  assertSame(c, array[1][1]);
+  assertSame(2, array[2].length);
+  assertSame(1, array[2][0]);
+  assertSame(2, array[2][1]);
+  assertSame(3, array[3]);
+}
+TestOptimizedLiteral(create_arr_nested, verify_arr_nested);
+
+
+// Test shallow object literal.
+function create_obj_shallow(a, b, c) {
+  return { x:a, y:b, z:c, v:'foo', 9:'bar' };
+}
+function verify_obj_shallow(object, a, b, c) {
+  assertSame(a, object.x);
+  assertSame(b, object.y);
+  assertSame(c, object.z);
+  assertSame('foo', object.v);
+  assertSame('bar', object[9]);
+}
+TestOptimizedLiteral(create_obj_shallow, verify_obj_shallow);
+
+
+// Test nested object literal.
+function create_obj_nested(a, b, c) {
+  return { x:{ v:a, w:b }, y:{ v:1, w:2 }, z:c, v:'foo', 9:'bar' };
+}
+function verify_obj_nested(object, a, b, c) {
+  assertSame(a, object.x.v);
+  assertSame(b, object.x.w);
+  assertSame(1, object.y.v);
+  assertSame(2, object.y.w);
+  assertSame(c, object.z);
+  assertSame('foo', object.v);
+  assertSame('bar', object[9]);
+}
+TestOptimizedLiteral(create_obj_nested, verify_obj_nested);
+
+
+// Test mixed array and object literal.
+function create_mixed_nested(a, b, c) {
+  return { x:[1, 2], y:[a, b], z:c, v:{ v:'foo' }, 9:'bar' };
+}
+function verify_mixed_nested(object, a, b, c) {
+  assertSame(2, object.x.length);
+  assertSame(1, object.x[0]);
+  assertSame(2, object.x[1]);
+  assertSame(2, object.y.length);
+  assertSame(a, object.y[0]);
+  assertSame(b, object.y[1]);
+  assertSame(c, object.z);
+  assertSame('foo', object.v.v);
+  assertSame('bar', object[9]);
+}
+TestOptimizedLiteral(create_mixed_nested, verify_mixed_nested);
diff --git a/test/mjsunit/compiler/math-floor-global.js b/test/mjsunit/compiler/math-floor-global.js
new file mode 100644
index 0000000..9ec183f
--- /dev/null
+++ b/test/mjsunit/compiler/math-floor-global.js
@@ -0,0 +1,161 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --max-new-space-size=256 --allow-natives-syntax
+
+// Test inlining of Math.floor when assigned to a global.
+var flo = Math.floor;
+var test_id = 0;
+
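+// Each generated function embeds the unique test_id string so that it does
+// not share cached compiled code (and hence IC state) with the others.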
+function testFloor(expect, input) {
+  var test = new Function('n',
+                          '"' + (test_id++) + '";return flo(n)');
+  assertEquals(expect, test(input));
+  assertEquals(expect, test(input));
+  assertEquals(expect, test(input));
+  %OptimizeFunctionOnNextCall(test);
+  assertEquals(expect, test(input));
+}
+
+function zero() {
+  var x = 0.5;
+  return (function() { return x - 0.5; })();
+}
+
+function test() {
+  testFloor(0, 0);
+  testFloor(0, zero());
+  testFloor(-0, -0);
+  testFloor(Infinity, Infinity);
+  testFloor(-Infinity, -Infinity);
+  testFloor(NaN, NaN);
+
+  // Ensure that a negative zero coming from Math.floor is properly handled
+  // by other operations.
+  function ifloor(x) {
+    return 1 / Math.floor(x);
+  }
+  assertEquals(-Infinity, ifloor(-0));
+  assertEquals(-Infinity, ifloor(-0));
+  assertEquals(-Infinity, ifloor(-0));
+  %OptimizeFunctionOnNextCall(ifloor);
+  assertEquals(-Infinity, ifloor(-0));
+
+  testFloor(0, 0.1);
+  testFloor(0, 0.49999999999999994);
+  testFloor(0, 0.5);
+  testFloor(0, 0.7);
+  testFloor(-1, -0.1);
+  testFloor(-1, -0.49999999999999994);
+  testFloor(-1, -0.5);
+  testFloor(-1, -0.7);
+  testFloor(1, 1);
+  testFloor(1, 1.1);
+  testFloor(1, 1.5);
+  testFloor(1, 1.7);
+  testFloor(-1, -1);
+  testFloor(-2, -1.1);
+  testFloor(-2, -1.5);
+  testFloor(-2, -1.7);
+
+  testFloor(0, Number.MIN_VALUE);
+  testFloor(-1, -Number.MIN_VALUE);
+  testFloor(Number.MAX_VALUE, Number.MAX_VALUE);
+  testFloor(-Number.MAX_VALUE, -Number.MAX_VALUE);
+  testFloor(Infinity, Infinity);
+  testFloor(-Infinity, -Infinity);
+
+  // 2^30 is a smi boundary.
+  var two_30 = 1 << 30;
+
+  testFloor(two_30, two_30);
+  testFloor(two_30, two_30 + 0.1);
+  testFloor(two_30, two_30 + 0.5);
+  testFloor(two_30, two_30 + 0.7);
+
+  testFloor(two_30 - 1, two_30 - 1);
+  testFloor(two_30 - 1, two_30 - 1 + 0.1);
+  testFloor(two_30 - 1, two_30 - 1 + 0.5);
+  testFloor(two_30 - 1, two_30 - 1 + 0.7);
+
+  testFloor(-two_30, -two_30);
+  testFloor(-two_30, -two_30 + 0.1);
+  testFloor(-two_30, -two_30 + 0.5);
+  testFloor(-two_30, -two_30 + 0.7);
+
+  testFloor(-two_30 + 1, -two_30 + 1);
+  testFloor(-two_30 + 1, -two_30 + 1 + 0.1);
+  testFloor(-two_30 + 1, -two_30 + 1 + 0.5);
+  testFloor(-two_30 + 1, -two_30 + 1 + 0.7);
+
+  // 2^52 is a precision boundary.
+  var two_52 = (1 << 30) * (1 << 22);
+
+  testFloor(two_52, two_52);
+  testFloor(two_52, two_52 + 0.1);
+  assertEquals(two_52, two_52 + 0.5);
+  testFloor(two_52, two_52 + 0.5);
+  assertEquals(two_52 + 1, two_52 + 0.7);
+  testFloor(two_52 + 1, two_52 + 0.7);
+
+  testFloor(two_52 - 1, two_52 - 1);
+  testFloor(two_52 - 1, two_52 - 1 + 0.1);
+  testFloor(two_52 - 1, two_52 - 1 + 0.5);
+  testFloor(two_52 - 1, two_52 - 1 + 0.7);
+
+  testFloor(-two_52, -two_52);
+  testFloor(-two_52, -two_52 + 0.1);
+  testFloor(-two_52, -two_52 + 0.5);
+  testFloor(-two_52, -two_52 + 0.7);
+
+  testFloor(-two_52 + 1, -two_52 + 1);
+  testFloor(-two_52 + 1, -two_52 + 1 + 0.1);
+  testFloor(-two_52 + 1, -two_52 + 1 + 0.5);
+  testFloor(-two_52 + 1, -two_52 + 1 + 0.7);
+}
+
+
+// Test in a loop to cover the custom IC and GC-related issues.
+for (var i = 0; i < 50; i++) {
+  test();
+}
+
+
+// Regression test for a bug where a negative zero coming from Math.floor
+// was not properly handled by other operations.
+function floorsum(i, n) {
+  var ret = Math.floor(n);
+  while (--i > 0) {
+    ret += Math.floor(n);
+  }
+  return ret;
+}
+assertEquals(-0, floorsum(1, -0));
+%OptimizeFunctionOnNextCall(floorsum);
+// The optimized function will deopt.  Run it with enough iterations to try
+// to optimize via OSR (triggering the bug).
+assertEquals(-0, floorsum(100000, -0));
diff --git a/test/mjsunit/compiler/math-floor-local.js b/test/mjsunit/compiler/math-floor-local.js
new file mode 100644
index 0000000..e44b15c
--- /dev/null
+++ b/test/mjsunit/compiler/math-floor-local.js
@@ -0,0 +1,161 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --max-new-space-size=256 --allow-natives-syntax
+
+// Test inlining of Math.floor when assigned to a local.
+var test_id = 0;
+
+function testFloor(expect, input) {
+  var test = new Function('n',
+                          '"' + (test_id++) +
+                          '";var f = Math.floor; return f(n)');
+  assertEquals(expect, test(input));
+  assertEquals(expect, test(input));
+  assertEquals(expect, test(input));
+  %OptimizeFunctionOnNextCall(test);
+  assertEquals(expect, test(input));
+}
+
+function zero() {
+  var x = 0.5;
+  return (function() { return x - 0.5; })();
+}
+
+function test() {
+  testFloor(0, 0);
+  testFloor(0, zero());
+  testFloor(-0, -0);
+  testFloor(Infinity, Infinity);
+  testFloor(-Infinity, -Infinity);
+  testFloor(NaN, NaN);
+
+  // Ensure that a negative zero coming from Math.floor is properly handled
+  // by other operations.
+  function ifloor(x) {
+    return 1 / Math.floor(x);
+  }
+  assertEquals(-Infinity, ifloor(-0));
+  assertEquals(-Infinity, ifloor(-0));
+  assertEquals(-Infinity, ifloor(-0));
+  %OptimizeFunctionOnNextCall(ifloor);
+  assertEquals(-Infinity, ifloor(-0));
+
+  testFloor(0, 0.1);
+  testFloor(0, 0.49999999999999994);
+  testFloor(0, 0.5);
+  testFloor(0, 0.7);
+  testFloor(-1, -0.1);
+  testFloor(-1, -0.49999999999999994);
+  testFloor(-1, -0.5);
+  testFloor(-1, -0.7);
+  testFloor(1, 1);
+  testFloor(1, 1.1);
+  testFloor(1, 1.5);
+  testFloor(1, 1.7);
+  testFloor(-1, -1);
+  testFloor(-2, -1.1);
+  testFloor(-2, -1.5);
+  testFloor(-2, -1.7);
+
+  testFloor(0, Number.MIN_VALUE);
+  testFloor(-1, -Number.MIN_VALUE);
+  testFloor(Number.MAX_VALUE, Number.MAX_VALUE);
+  testFloor(-Number.MAX_VALUE, -Number.MAX_VALUE);
+  testFloor(Infinity, Infinity);
+  testFloor(-Infinity, -Infinity);
+
+  // 2^30 is a smi boundary.
+  var two_30 = 1 << 30;
+
+  testFloor(two_30, two_30);
+  testFloor(two_30, two_30 + 0.1);
+  testFloor(two_30, two_30 + 0.5);
+  testFloor(two_30, two_30 + 0.7);
+
+  testFloor(two_30 - 1, two_30 - 1);
+  testFloor(two_30 - 1, two_30 - 1 + 0.1);
+  testFloor(two_30 - 1, two_30 - 1 + 0.5);
+  testFloor(two_30 - 1, two_30 - 1 + 0.7);
+
+  testFloor(-two_30, -two_30);
+  testFloor(-two_30, -two_30 + 0.1);
+  testFloor(-two_30, -two_30 + 0.5);
+  testFloor(-two_30, -two_30 + 0.7);
+
+  testFloor(-two_30 + 1, -two_30 + 1);
+  testFloor(-two_30 + 1, -two_30 + 1 + 0.1);
+  testFloor(-two_30 + 1, -two_30 + 1 + 0.5);
+  testFloor(-two_30 + 1, -two_30 + 1 + 0.7);
+
+  // 2^52 is a precision boundary.
+  var two_52 = (1 << 30) * (1 << 22);
+
+  testFloor(two_52, two_52);
+  testFloor(two_52, two_52 + 0.1);
+  assertEquals(two_52, two_52 + 0.5);
+  testFloor(two_52, two_52 + 0.5);
+  assertEquals(two_52 + 1, two_52 + 0.7);
+  testFloor(two_52 + 1, two_52 + 0.7);
+
+  testFloor(two_52 - 1, two_52 - 1);
+  testFloor(two_52 - 1, two_52 - 1 + 0.1);
+  testFloor(two_52 - 1, two_52 - 1 + 0.5);
+  testFloor(two_52 - 1, two_52 - 1 + 0.7);
+
+  testFloor(-two_52, -two_52);
+  testFloor(-two_52, -two_52 + 0.1);
+  testFloor(-two_52, -two_52 + 0.5);
+  testFloor(-two_52, -two_52 + 0.7);
+
+  testFloor(-two_52 + 1, -two_52 + 1);
+  testFloor(-two_52 + 1, -two_52 + 1 + 0.1);
+  testFloor(-two_52 + 1, -two_52 + 1 + 0.5);
+  testFloor(-two_52 + 1, -two_52 + 1 + 0.7);
+}
+
+
+// Test in a loop to cover the custom IC and GC-related issues.
+for (var i = 0; i < 50; i++) {
+  test();
+}
+
+
+// Regression test for a bug where a negative zero coming from Math.floor
+// was not properly handled by other operations.
+function floorsum(i, n) {
+  var ret = Math.floor(n);
+  while (--i > 0) {
+    ret += Math.floor(n);
+  }
+  return ret;
+}
+assertEquals(-0, floorsum(1, -0));
+%OptimizeFunctionOnNextCall(floorsum);
+// The optimized function will deopt.  Run it with enough iterations to try
+// to optimize via OSR (triggering the bug).
+assertEquals(-0, floorsum(100000, -0));
diff --git a/test/mjsunit/compiler/optimized-for-in.js b/test/mjsunit/compiler/optimized-for-in.js
new file mode 100644
index 0000000..cb8c66d
--- /dev/null
+++ b/test/mjsunit/compiler/optimized-for-in.js
@@ -0,0 +1,300 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --optimize-for-in --allow-natives-syntax
+
+// Test for-in support in Crankshaft.  For simplicity this test assumes a
+// certain fixed iteration order for properties and will have to be adjusted
+// if V8 stops following insertion order.
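+//
+// A quick illustration of that assumption (a sketch relying on the same
+// insertion order as the tests below):
+var order_seen = [];
+for (var order_key in { a: 0, b: 0 }) order_seen.push(order_key);
+assertEquals("ab", order_seen.join(""));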
+
+
+function a(t) {
+  var result = [];
+  for (var i in t) {
+    result.push(i + t[i]);
+  }
+  return result.join('');
+}
+
+// Check that we correctly deoptimize on map check.
+function b(t) {
+  var result = [];
+  for (var i in t) {
+    result.push(i + t[i]);
+    delete t[i];
+  }
+  return result.join('');
+}
+
+// Check that we correctly deoptimize during preparation step.
+function c(t) {
+  var result = [];
+  for (var i in t) {
+    result.push(i + t[i]);
+  }
+  return result.join('');
+}
+
+// Check that we deoptimize to the place after side effect in the right state.
+function d(t) {
+  var result = [];
+  var o;
+  for (var i in (o = t())) {
+    result.push(i + o[i]);
+  }
+  return result.join('');
+}
+
+// Check that we correctly deoptimize on map check inserted for fused load.
+function e(t) {
+  var result = [];
+  for (var i in t) {
+    delete t[i];
+    t[i] = i;
+    result.push(i + t[i]);
+  }
+  return result.join('');
+}
+
+// Nested for-in loops.
+function f(t) {
+  var result = [];
+  for (var i in t) {
+    for (var j in t) {
+      result.push(i + j + t[i] + t[j]);
+    }
+  }
+  return result.join('');
+}
+
+// Deoptimization from the inner for-in loop.
+function g(t) {
+  var result = [];
+  for (var i in t) {
+    for (var j in t) {
+      result.push(i + j + t[i] + t[j]);
+      var v = t[i];
+      delete t[i];
+      t[i] = v;
+    }
+  }
+  return result.join('');
+}
+
+
+// Break from the inner for-in loop.
+function h(t, deopt) {
+  var result = [];
+  for (var i in t) {
+    for (var j in t) {
+      result.push(i + j + t[i] + t[j]);
+      break;
+    }
+  }
+  deopt.deopt;
+  return result.join('');
+}
+
+// Continue in the inner loop.
+function j(t, deopt) {
+  var result = [];
+  for (var i in t) {
+    for (var j in t) {
+      result.push(i + j + t[i] + t[j]);
+      continue;
+    }
+  }
+  deopt.deopt;
+  return result.join('');
+}
+
+// Continue the outer loop.
+function k(t, deopt) {
+  var result = [];
+  outer: for (var i in t) {
+    for (var j in t) {
+      result.push(i + j + t[i] + t[j]);
+      continue outer;
+    }
+  }
+  deopt.deopt;
+  return result.join('');
+}
+
+// Break out of the outer loop.
+function l(t, deopt) {
+  var result = [];
+  outer: for (var i in t) {
+    for (var j in t) {
+      result.push(i + j + t[i] + t[j]);
+      break outer;
+    }
+  }
+  deopt.deopt;
+  return result.join('');
+}
+
+// Test deoptimization from inlined frame (currently it is not inlined).
+function m0(t, deopt) {
+  for (var i in t) {
+    for (var j in t) {
+      deopt.deopt;
+      return i + j + t[i] + t[j];
+    }
+  }
+}
+
+function m(t, deopt) {
+  return m0(t, deopt);
+}
+
+
+function tryFunction(s, mkT, f) {
+  var d = {deopt: false};
+  assertEquals(s, f(mkT(), d));
+  assertEquals(s, f(mkT(), d));
+  assertEquals(s, f(mkT(), d));
+  %OptimizeFunctionOnNextCall(f);
+  assertEquals(s, f(mkT(), d));
+  assertEquals(s, f(mkT(), {}));
+}
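+
+// tryFunction runs f three times to gather type feedback, optimizes it,
+// re-checks the result, and finally passes an object without a 'deopt'
+// property -- apparently so that, in the functions above that read
+// 'deopt.deopt', the load misses its expected map and forces a deopt.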
+
+var s = "a1b2c3d4";
+function mkTable() { return { a: "1", b: "2", c: "3", d: "4" }; }
+
+
+tryFunction(s, mkTable, a);
+tryFunction(s, mkTable, b);
+tryFunction("0a1b2c3d", function () { return "abcd"; }, c);
+tryFunction("0a1b2c3d", function () {
+  var cnt = false;
+  return function () {
+    cnt = true;
+    return "abcd";
+  }
+}, d);
+tryFunction("aabbccdd", mkTable, e);
+
+function mkSmallTable() { return { a: "1", b: "2" }; }
+
+tryFunction("aa11ab12ba21bb22", mkSmallTable, f);
+tryFunction("aa11ab12bb22ba21", mkSmallTable, g);
+tryFunction("aa11ba21", mkSmallTable, h);
+tryFunction("aa11ab12ba21bb22", mkSmallTable, j);
+tryFunction("aa11ba21", mkSmallTable, h);
+tryFunction("aa11ba21", mkSmallTable, k);
+tryFunction("aa11", mkSmallTable, l);
+tryFunction("aa11", mkSmallTable, m);
+
+// Test handling of null.
+tryFunction("", function () {
+  return function () { return null; }
+}, function (t) {
+  for (var i in t()) { return i; }
+  return "";
+});
+
+// Test smis.
+tryFunction("", function () {
+  return function () { return 11; }
+}, function (t) {
+  for (var i in t()) { return i; }
+  return "";
+});
+
+// Test LoadFieldByIndex for out of object properties.
+function O() { this.a = 1; }
+for (var i = 0; i < 10; i++) new O();
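+// (The warm-up loop above presumably lets in-object slack tracking finish,
+// so the extra properties added below end up out-of-object and exercise
+// LoadFieldByIndex on the properties backing store.)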
+tryFunction("a1b2c3d4e5f6", function () {
+  var o = new O();
+  o.b = 2;
+  o.c = 3;
+  o.d = 4;
+  o.e = 5;
+  o.f = 6;
+  return o;
+}, function (t) {
+  var r = [];
+  for (var i in t) r.push(i + t[i]);
+  return r.join('');
+});
+
+// Test OSR inside for-in.
+function osr_inner(t, limit) {
+  var r = 1;
+  for (var x in t) {
+    if (t.hasOwnProperty(x)) {
+      for (var i = 0; i < t[x].length; i++) {
+        r += t[x][i];
+        if (i === limit) {
+          %OptimizeFunctionOnNextCall(osr_inner, "osr");
+        }
+      }
+      r += x;
+    }
+  }
+  return r;
+}
+
+function osr_outer(t, osr_after) {
+  var r = 1;
+  for (var x in t) {
+    for (var i = 0; i < t[x].length; i++) {
+      r += t[x][i];
+    }
+    if (x === osr_after) {
+      %OptimizeFunctionOnNextCall(osr_outer, "osr");
+    }
+    r += x;
+  }
+  return r;
+}
+
+function osr_outer_and_deopt(t, osr_after) {
+  var r = 1;
+  for (var x in t) {
+    r += x;
+    if (x == osr_after) {
+      %OptimizeFunctionOnNextCall(osr_outer_and_deopt, "osr");
+    }
+  }
+  return r;
+}
+
+function test_osr() {
+  with ({}) {}  // Disable optimizations of this function.
+  var arr = new Array(20);
+  for (var i = 0; i < arr.length; i++) {
+    arr[i] = i + 1;
+  }
+  arr.push(":");  // Force deopt at the end of the loop.
+  assertEquals("211:x1234567891011121314151617181920:y", osr_inner({x: arr, y: arr}, (arr.length / 2) | 0));
+  assertEquals("7x456y", osr_outer({x: [1,2,3], y: [4,5,6]}, "x"));
+  assertEquals("101234567", osr_outer_and_deopt([1,2,3,4,5,6,7,8], "5"));
+}
+
+test_osr();
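+
+// (OSR -- on-stack replacement -- swaps a running unoptimized frame for an
+// optimized one at a loop back edge.  The %OptimizeFunctionOnNextCall calls
+// with the "osr" hint request exactly that, and pushing ":" onto arr forces
+// a deopt once the non-numeric element is added to r.)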
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/compiler/regress-96989.js
similarity index 81%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/compiler/regress-96989.js
index d3f2e35..aedeb24 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/compiler/regress-96989.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+// Flags: --allow-natives-syntax
 
-assertThrows('eval(delete eval)');
+// Test correct handling of uninitialized const.
 
+function test() {
+  for (var i = 41; i < 42; i++) {
+    var c = t ^ i;
+  }
+  const t;
+  return c;
+}
+
+for (var i=0; i<10; i++) test();
+%OptimizeFunctionOnNextCall(test);
+assertEquals(41, test());
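+// (With the pre-harmony const semantics tested here, 't' is hoisted and is
+// still undefined when 't ^ i' runs; ToInt32(undefined) is 0, so
+// c = 0 ^ 41 = 41, which is what the assert above expects even after
+// optimization.)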
diff --git a/src/extensions/experimental/i18n-extension.h b/test/mjsunit/compiler/regress-deopt-call-as-function.js
similarity index 66%
copy from src/extensions/experimental/i18n-extension.h
copy to test/mjsunit/compiler/regress-deopt-call-as-function.js
index 5401f25..c408096 100644
--- a/src/extensions/experimental/i18n-extension.h
+++ b/test/mjsunit/compiler/regress-deopt-call-as-function.js
@@ -25,30 +25,38 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+// Test deoptimization after inlined call.
 
-#include "include/v8.h"
+function bar(a, b) {try { return a; } finally { } }
 
-namespace v8 {
-namespace internal {
+function test_context() {
+  function foo(x) { return 42; }
+  var s, t;
+  for (var i = 0x7fff0000; i < 0x80000000; i++) {
+    bar(t = foo(i) ? bar(42 + i - i) : bar(0), s = i + t);
+  }
+  return s;
+}
+assertEquals(0x7fffffff + 42, test_context());
 
 
-class I18NExtension : public v8::Extension {
- public:
-  I18NExtension();
+function value_context() {
+  function foo(x) { return 42; }
+  var s, t;
+  for (var i = 0x7fff0000; i < 0x80000000; i++) {
+    bar(t = foo(i), s = i + t);
+  }
+  return s;
+}
+assertEquals(0x7fffffff + 42, value_context());
 
-  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
-      v8::Handle<v8::String> name);
 
-  // V8 code prefers Register, while Chrome and WebKit use get kind of methods.
-  static void Register();
-  static I18NExtension* get();
-
- private:
-  static I18NExtension* extension_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+function effect_context() {
+  function foo(x) { return 42; }
+  var s, t;
+  for (var i = 0x7fff0000; i < 0x80000000; i++) {
+    bar(foo(i), s = i + 42);
+  }
+  return s;
+}
+assertEquals(0x7fffffff + 42, effect_context());
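+
+// (In all three contexts the loop's final iteration has i = 0x7fffffff, so
+// s = i + 42 overflows the int32 range right after the inlined call to foo --
+// presumably the point where the deopt must restore s correctly, hence the
+// expected value 0x7fffffff + 42.)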
diff --git a/test/mjsunit/compiler/regress-funarguments.js b/test/mjsunit/compiler/regress-funarguments.js
index cea40bc..c913bd9 100644
--- a/test/mjsunit/compiler/regress-funarguments.js
+++ b/test/mjsunit/compiler/regress-funarguments.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 // Test function.arguments.
 
 function A() {}
@@ -60,13 +62,16 @@
   return o.g(x, "z");
 }
 
-function stress() {
-  for (var i=0; i<5000000; i++) o.g(i, "g");
-  for (var j=0; j<5000000; j++) hej(j);
+function opt() {
+  for (var k=0; k<2; k++) {
+    for (var i=0; i<5; i++) o.g(i, "g");
+    for (var j=0; j<5; j++) hej(j);
+  }
+  %OptimizeFunctionOnNextCall(o.g);
+  %OptimizeFunctionOnNextCall(hej);
 }
 
-stress();
-
+opt();
 assertArrayEquals([0, "g"], o.g(0, "g"));
 assertArrayEquals([1, "f"], o.g(1, "g"));
 assertArrayEquals([0, "h"], hej(0));
@@ -74,8 +79,7 @@
 
 o = new B();
 
-stress();
-
+opt();
 assertArrayEquals([0, "f"], o.g(0, "g"));
 assertArrayEquals([1, "g"], o.g(1, "g"));
 assertArrayEquals([0, "f"], hej(0));
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/compiler/regress-inline-callfunctionstub.js
similarity index 80%
rename from test/mjsunit/cyclic-error-to-string.js
rename to test/mjsunit/compiler/regress-inline-callfunctionstub.js
index 2502b53..a39d26d 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/compiler/regress-inline-callfunctionstub.js
@@ -25,22 +25,22 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Flags: --allow-natives-syntax
 
-var e = new Error();
-assertEquals('Error', e + '');
+// Test inlining of calls-as-function two levels deep.
+function f() { return 42; }
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+var o = {g : function () { return f(); } }
+function main(func) {
+  var v=0;
+  for (var i=0; i<1; i++) {
+    if (func()) v = 42;
+  }
+}
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+main(o.g);
+main(o.g);
+main(o.g);
+%OptimizeFunctionOnNextCall(main);
+main(o.g);
+
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/compiler/regress-toint32.js
similarity index 81%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/compiler/regress-toint32.js
index d3f2e35..54c2f76 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/compiler/regress-toint32.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,21 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax --noenable-sse3
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+var a = new Int32Array(1);
+var G = 0x80000000;
 
-assertThrows('eval(delete eval)');
+function f(x) {
+  var v = x;
+  v = v + 1;
+  a[0] = v;
+  v = v - 1;
+  return v;
+}
+
+assertEquals(G, f(G));
+assertEquals(G, f(G));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(G, f(G));
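+
+// (0x80000000 does not fit in a signed 32-bit integer, so v = G + 1 must
+// survive the truncating ToInt32 store into the Int32Array and still come
+// back as the full double G after v - 1; --noenable-sse3 presumably forces
+// the non-SSE3 conversion path that had the bug.)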
 
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/compiler/strict-recompile.js
similarity index 77%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/compiler/strict-recompile.js
index 2502b53..96e8bca 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/compiler/strict-recompile.js
@@ -25,22 +25,27 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Flags: --allow-natives-syntax
 
-var e = new Error();
-assertEquals('Error', e + '');
+function foo() {
+  try {
+    var o = {};
+    Object.defineProperty(o, 'x', {value: 12, writable: false});
+    o.x = 13;
+  } catch(e) {
+    return true;
+  }
+  return false;
+}
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+assertFalse(foo());
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+function do_eval(str) {
+  "use strict";
+  return eval(str);
+}
+
+var eval_foo = do_eval('(' + foo + ')');
+for (var i = 0; i < 5; i++) assertTrue(eval_foo());
+%OptimizeFunctionOnNextCall(eval_foo);
+assertTrue(eval_foo());
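+
+// (foo is sloppy, so the write to the read-only property is silently
+// ignored and it returns false; eval_foo is compiled inside a strict
+// direct eval, so the same write throws a TypeError and it returns true.
+// The test checks this still holds after recompilation by the optimizer.)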
diff --git a/test/mjsunit/const-redecl.js b/test/mjsunit/const-redecl.js
index 9459708..c0b97e6 100644
--- a/test/mjsunit/const-redecl.js
+++ b/test/mjsunit/const-redecl.js
@@ -98,7 +98,8 @@
   var msg = s;
   if (opt_e) { e = opt_e; msg += "; " + opt_e; }
   assertEquals(expected, TestLocal(s,e), "local:'" + msg + "'");
-  assertEquals(expected, TestGlobal(s,e), "global:'" + msg + "'");
+  // Redeclarations of global consts do not throw, they are silently ignored.
+  assertEquals(42, TestGlobal(s, 42), "global:'" + msg + "'");
   assertEquals(expected, TestContext(s,e), "context:'" + msg + "'");
 }
 
@@ -218,3 +219,62 @@
 // Test that const inside with behaves correctly.
 TestAll(87, "with ({x:42}) { const x = 87; }", "x");
 TestAll(undefined, "with ({x:42}) { const x; }", "x");
+
+
+// Additional tests for how various combinations of re-declarations affect
+// the values of the var/const in question.
+try {
+  eval("var undefined;");
+} catch (ex) {
+  assertUnreachable("undefined (1) has thrown");
+}
+
+var original_undef = undefined;
+var undefined = 1;  // Should be silently ignored.
+assertEquals(original_undef, undefined, "undefined got overwritten");
+undefined = original_undef;
+
+var a; const a; const a = 1;
+assertEquals(1, a, "a has wrong value");
+a = 2;
+assertEquals(2, a, "a should be writable");
+
+var b = 1; const b = 2;
+assertEquals(2, b, "b has wrong value");
+
+var c = 1; const c = 2; const c = 3;
+assertEquals(3, c, "c has wrong value");
+
+const d = 1; const d = 2;
+assertEquals(1, d, "d has wrong value");
+
+const e = 1; var e = 2;
+assertEquals(1, e, "e has wrong value");
+
+const f = 1; const f;
+assertEquals(1, f, "f has wrong value");
+
+var g; const g = 1;
+assertEquals(1, g, "g has wrong value");
+g = 2;
+assertEquals(2, g, "g should be writable");
+
+const h; var h = 1;
+assertEquals(undefined, h, "h has wrong value");
+
+eval("Object.defineProperty(this, 'i', { writable: true });"
+   + "const i = 7;"
+   + "assertEquals(7, i, \"i has wrong value\");");
+
+var global = this;
+assertThrows(function() {
+  Object.defineProperty(global, 'j', { writable: true })
+}, TypeError);
+const j = 2;  // This is what makes the function above throw, because the
+// const declaration gets hoisted and makes the property non-configurable.
+assertEquals(2, j, "j has wrong value");
+
+var k = 1; const k;
+// You could argue about the expected result here. For now, the winning
+// argument is that "const k;" is equivalent to "const k = undefined;".
+assertEquals(undefined, k, "k has wrong value");
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/count-based-osr.js
similarity index 79%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/count-based-osr.js
index d3f2e35..125c4e2 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/count-based-osr.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,14 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --count-based-interrupts --interrupt-budget=10 --weighted-back-edges --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+// Test that OSR works properly when using count-based interrupting/profiling.
 
-assertThrows('eval(delete eval)');
-
+function osr_this() {
+  var a = 1;
+  // Trigger OSR.
+  while (%GetOptimizationStatus(osr_this) == 2) {}
+  return a;
+}
+assertEquals(1, osr_this());
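+// (Per the convention cited elsewhere in these tests -- see runtime.cc --
+// %GetOptimizationStatus returning 2 means "not optimized", so the empty
+// loop spins until the count-based profiler triggers OSR and the status
+// changes.)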
diff --git a/test/mjsunit/d8-os.js b/test/mjsunit/d8-os.js
index 5640326..239938c 100644
--- a/test/mjsunit/d8-os.js
+++ b/test/mjsunit/d8-os.js
@@ -54,6 +54,8 @@
 
 
 if (this.os && os.system) {
+  // Ensure that we have a valid working directory.
+  os.chdir("/tmp");
   try {
     // Delete the dir if it is lying around from last time.
     os.system("ls", [TEST_DIR]);
@@ -61,52 +63,53 @@
   } catch (e) {
   }
   os.mkdirp(TEST_DIR);
-  os.chdir(TEST_DIR);
   try {
     // Check the chdir worked.
     os.system('ls', [TEST_DIR]);
     // Simple create dir.
-    os.mkdirp("dir");
+    os.mkdirp(TEST_DIR + "/dir");
     // Create dir in dir.
-    os.mkdirp("dir/foo");
+    os.mkdirp(TEST_DIR + "/dir/foo");
     // Check that they are there.
-    os.system('ls', ['dir/foo']);
+    os.system('ls', [TEST_DIR + '/dir/foo']);
     // Check that we can detect when something is not there.
-    assertThrows("os.system('ls', ['dir/bar']);", "dir not there");
+    assertThrows("os.system('ls', [TEST_DIR + '/dir/bar']);", "dir not there");
     // Check that mkdirp makes intermediate directories.
-    os.mkdirp("dir2/foo");
-    os.system("ls", ["dir2/foo"]);
+    os.mkdirp(TEST_DIR + "/dir2/foo");
+    os.system("ls", [TEST_DIR + "/dir2/foo"]);
     // Check that mkdirp doesn't mind if the dir is already there.
-    os.mkdirp("dir2/foo");
-    os.mkdirp("dir2/foo/");
+    os.mkdirp(TEST_DIR + "/dir2/foo");
+    os.mkdirp(TEST_DIR + "/dir2/foo/");
     // Check that mkdirp can cope with trailing /
-    os.mkdirp("dir3/");
-    os.system("ls", ["dir3"]);
+    os.mkdirp(TEST_DIR + "/dir3/");
+    os.system("ls", [TEST_DIR + "/dir3"]);
     // Check that we get an error if the name is taken by a file.
-    os.system("sh", ["-c", "echo foo > file1"]);
-    os.system("ls", ["file1"]);
-    assertThrows("os.mkdirp('file1');", "mkdir over file1");
-    assertThrows("os.mkdirp('file1/foo');", "mkdir over file2");
-    assertThrows("os.mkdirp('file1/');", "mkdir over file3");
-    assertThrows("os.mkdirp('file1/foo/');", "mkdir over file4");
+    os.system("sh", ["-c", "echo foo > " + TEST_DIR + "/file1"]);
+    os.system("ls", [TEST_DIR + "/file1"]);
+    assertThrows("os.mkdirp(TEST_DIR + '/file1');", "mkdir over file1");
+    assertThrows("os.mkdirp(TEST_DIR + '/file1/foo');", "mkdir over file2");
+    assertThrows("os.mkdirp(TEST_DIR + '/file1/');", "mkdir over file3");
+    assertThrows("os.mkdirp(TEST_DIR + '/file1/foo/');", "mkdir over file4");
     // Create a dir we cannot read.
-    os.mkdirp("dir4", 0);
+    os.mkdirp(TEST_DIR + "/dir4", 0);
     // This test fails if you are root since root can read any dir.
-    assertThrows("os.chdir('dir4');", "chdir dir4 I");
-    os.rmdir("dir4");
-    assertThrows("os.chdir('dir4');", "chdir dir4 II");
-    // Set umask.
+    assertThrows("os.chdir(TEST_DIR + '/dir4');", "chdir dir4 I");
+    os.rmdir(TEST_DIR + "/dir4");
+    assertThrows("os.chdir(TEST_DIR + '/dir4');", "chdir dir4 II");
+
+    // Set umask.  This changes the umask for the whole process and is
+    // the reason why the test cannot be run multi-threaded.
     var old_umask = os.umask(0777);
     // Create a dir we cannot read.
-    os.mkdirp("dir5");
+    os.mkdirp(TEST_DIR + "/dir5");
     // This test fails if you are root since root can read any dir.
-    assertThrows("os.chdir('dir5');", "cd dir5 I");
-    os.rmdir("dir5");
-    assertThrows("os.chdir('dir5');", "chdir dir5 II");
+    assertThrows("os.chdir(TEST_DIR + '/dir5');", "cd dir5 I");
+    os.rmdir(TEST_DIR + "/dir5");
+    assertThrows("os.chdir(TEST_DIR + '/dir5');", "chdir dir5 II");
     os.umask(old_umask);
 
-    os.mkdirp("hest/fisk/../fisk/ged");
-    os.system("ls", ["hest/fisk/ged"]);
+    os.mkdirp(TEST_DIR + "/hest/fisk/../fisk/ged");
+    os.system("ls", [TEST_DIR + "/hest/fisk/ged"]);
 
     os.setenv("FOO", "bar");
     var environment = os.system("printenv");
@@ -143,42 +146,43 @@
       assertEquals("baz\n", os.system("echo", ["baz"]));
       //}
     }
+
+    // Too few args.
+    arg_error("os.umask();");
+    arg_error("os.system();");
+    arg_error("os.mkdirp();");
+    arg_error("os.chdir();");
+    arg_error("os.setenv();");
+    arg_error("os.rmdir();");
+
+    // Too many args.
+    arg_error("os.setenv('FOO=bar');");
+    arg_error("os.umask(0, 0);");
+    arg_error("os.system('ls', [], -1, -1, -1);");
+    arg_error("os.mkdirp('foo', 0, 0)");
+    arg_error("os.chdir('foo', 'bar')");
+    arg_error("os.rmdir('foo', 'bar');");
+
+    // Wrong kind of args.
+    arg_error("os.umask([]);");
+    arg_error("os.system('ls', 'foo');");
+    arg_error("os.system('ls', 123);");
+    arg_error("os.system('ls', [], 'foo');");
+    arg_error("os.system('ls', [], -1, 'foo');");
+    arg_error("os.mkdirp('foo', 'bar');");
+
+    // Test broken toString().
+    str_error("os.system(e);");
+    str_error("os.system('ls', [e]);");
+    str_error("os.system('ls', ['.', e]);");
+    str_error("os.system('ls', [e, '.']);");
+    str_error("os.mkdirp(e);");
+    str_error("os.setenv(e, 'goo');");
+    str_error("os.setenv('goo', e);");
+    str_error("os.chdir(e);");
+    str_error("os.rmdir(e);");
+
   } finally {
     os.system("rm", ["-r", TEST_DIR]);
   }
-
-  // Too few args.
-  arg_error("os.umask();");
-  arg_error("os.system();");
-  arg_error("os.mkdirp();");
-  arg_error("os.chdir();");
-  arg_error("os.setenv();");
-  arg_error("os.rmdir();");
-
-  // Too many args.
-  arg_error("os.setenv('FOO=bar');");
-  arg_error("os.umask(0, 0);");
-  arg_error("os.system('ls', [], -1, -1, -1);");
-  arg_error("os.mkdirp('foo', 0, 0)");
-  arg_error("os.chdir('foo', 'bar')");
-  arg_error("os.rmdir('foo', 'bar');");
-
-  // Wrong kind of args.
-  arg_error("os.umask([]);");
-  arg_error("os.system('ls', 'foo');");
-  arg_error("os.system('ls', 123);");
-  arg_error("os.system('ls', [], 'foo');");
-  arg_error("os.system('ls', [], -1, 'foo');");
-  arg_error("os.mkdirp('foo', 'bar');");
-
-  // Test broken toString().
-  str_error("os.system(e);");
-  str_error("os.system('ls', [e]);");
-  str_error("os.system('ls', ['.', e]);");
-  str_error("os.system('ls', [e, '.']);");
-  str_error("os.mkdirp(e);");
-  str_error("os.setenv(e, 'goo');");
-  str_error("os.setenv('goo', e);");
-  str_error("os.chdir(e);");
-  str_error("os.rmdir(e);");
 }
diff --git a/test/mjsunit/date.js b/test/mjsunit/date.js
index a7f6cfa..3e153ab 100644
--- a/test/mjsunit/date.js
+++ b/test/mjsunit/date.js
@@ -157,7 +157,7 @@
 // Test that -0 is treated correctly in MakeDay.
 var d = new Date();
 assertDoesNotThrow("d.setDate(-0)");
-assertDoesNotThrow("new Date(-0, -0, -0, -0, -0, -0. -0)");
+assertDoesNotThrow("new Date(-0, -0, -0, -0, -0, -0, -0)");
 assertDoesNotThrow("new Date(0x40000000, 0x40000000, 0x40000000," +
                    "0x40000000, 0x40000000, 0x40000000, 0x40000000)")
 assertDoesNotThrow("new Date(-0x40000001, -0x40000001, -0x40000001," +
@@ -178,7 +178,7 @@
 assertTrue(isNaN(Date.UTC(-271821, 3, 19)));
 
 
-// Test creation of large date values.
+// Test creation with large date values.
 d = new Date(1969, 12, 1, 99999999999);
 assertTrue(isNaN(d.getTime()));
 d = new Date(1969, 12, 1, -99999999999);
@@ -187,6 +187,18 @@
 assertTrue(isNaN(d.getTime()));
 d = new Date(1969, 12, 1, -Infinity);
 assertTrue(isNaN(d.getTime()));
+d = new Date(1969, 12, 1, 0);
+d.setTime(Math.pow(2, 64));
+assertTrue(isNaN(d.getTime()));
+d = new Date(1969, 12, 1, 0);
+d.setTime(Math.pow(-2, 64));
+assertTrue(isNaN(d.getTime()));
+
+
+// Test creation with obscure date values.
+assertEquals(8640000000000000, Date.UTC(1970, 0, 1 + 100000001, -24));
+assertEquals(-8640000000000000, Date.UTC(1970, 0, 1 - 100000001, 24));
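+// (Worked out: Date.UTC(1970, 0, 1 + 100000001, -24) is day 100000001
+// after the epoch minus 24 hours, i.e. exactly 100000000 days =
+// 8640000000000000 ms, the maximum valid time value; the intermediate day
+// count overflows the range, but the hour offset brings it back.)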
+
 
 // Parsing ES5 ISO-8601 dates.
 // When TZ is omitted, it defaults to 'Z' meaning UTC.
diff --git a/test/mjsunit/debug-break-inline.js b/test/mjsunit/debug-break-inline.js
new file mode 100644
index 0000000..4418fa8
--- /dev/null
+++ b/test/mjsunit/debug-break-inline.js
@@ -0,0 +1,100 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// This test checks that deoptimization due to debug breaks works for
+// inlined functions where the full-code is generated before the
+// debugger is attached.
+//
+// See http://code.google.com/p/chromium/issues/detail?id=105375
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+var count = 0;
+var break_count = 0;
+
+// Debug event listener which sets a breakpoint the first time it is hit
+// and otherwise counts break points hit and checks that the expected
+// state is reached.
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    break_count++;
+    if (break_count == 1) {
+      Debug.setBreakPoint(g, 3);
+
+      for (var i = 0; i < exec_state.frameCount(); i++) {
+        var frame = exec_state.frame(i);
+        // When function f is optimized (1 means YES, see runtime.cc) we
+        // expect an optimized frame for f and g.
+        if (%GetOptimizationStatus(f) == 1) {
+          if (i == 1) {
+            assertTrue(frame.isOptimizedFrame());
+            assertTrue(frame.isInlinedFrame());
+            assertEquals(4 - i, frame.inlinedFrameIndex());
+          } else if (i == 2) {
+            assertTrue(frame.isOptimizedFrame());
+            assertFalse(frame.isInlinedFrame());
+          } else {
+            assertFalse(frame.isOptimizedFrame());
+            assertFalse(frame.isInlinedFrame());
+          }
+        }
+      }
+    }
+  }
+}
+
+function f() {
+  g();
+}
+
+function g() {
+  count++;
+  h();
+  var b = 1;  // Break point is set here.
+}
+
+function h() {
+  debugger;
+}
+
+f();f();f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+f();
+
+assertEquals(5, count);
+assertEquals(2, break_count);
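+
+// (count is 5 because g runs once per call to f: three warm-up calls, one
+// optimized call, and the final call with the listener attached.  In that
+// last call the debugger statement in h breaks first, and the breakpoint
+// the listener sets on g then hits at 'var b = 1', giving break_count 2.)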
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/test/mjsunit/debug-evaluate-locals-optimized-double.js b/test/mjsunit/debug-evaluate-locals-optimized-double.js
index 8447df5..cf25c0c 100644
--- a/test/mjsunit/debug-evaluate-locals-optimized-double.js
+++ b/test/mjsunit/debug-evaluate-locals-optimized-double.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --expose-gc --allow-natives-syntax --inline-construct
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug
 
@@ -34,6 +34,27 @@
 
 var testingConstructCall = false;
 
+var input = [
+  {a: 1, b: 2},
+  {a: 3, b: 4},
+  {a: 5, b: 6},
+  {a: 7, b: 8},
+  {a: 9, b: 10}
+];
+
+var expected = [
+  { locals: {a0: 1.01, b0: 2.02}, args: { names: ["i", "x0", "y0"], values: [0, 3.03, 4.04] } },
+  { locals: {a1: 3.03, b1: 4.04}, args: { names: ["i", "x1", "y1"], values: [1, 5.05, 6.06] } },
+  { locals: {a2: 5.05, b2: 6.06}, args: { names: ["i"], values: [2] } },
+  { locals: {a3: 7.07, b3: 8.08}, args: { names: ["i", "x3", "y3", "z3"],
+                                          values: [3, 9.09, 10.10, undefined] }
+  },
+  { locals: {a4: 9.09, b4: 10.10}, args: { names: ["i", "x4", "y4"], values: [4, 11.11, 12.12] } }
+];
+
+function arraySum(arr) {
+  return arr.reduce(function (a, b) { return a + b; }, 0);
+}
 
 function listener(event, exec_state, event_data, data) {
   try {
@@ -44,42 +65,63 @@
       for (var i = 0; i < exec_state.frameCount(); i++) {
         var frame = exec_state.frame(i);
         if (i < exec_state.frameCount() - 1) {
-          var expected_a = i * 2 + 1 + (i * 2 + 1) / 100;
-          var expected_b = i * 2 + 2 + (i * 2 + 2) / 100;
-          var expected_x = (i + 1) * 2 + 1 + ((i + 1) * 2 + 1) / 100;
-          var expected_y = (i + 1) * 2 + 2 + ((i + 1) * 2 + 2) / 100;
+          var expected_args = expected[i].args;
+          var expected_locals = expected[i].locals;
 
-          // All frames except the bottom one has normal variables a and b.
-          var a = ('a' === frame.localName(0)) ? 0 : 1;
-          var b = 1 - a;
-          assertEquals('a', frame.localName(a));
-          assertEquals('b', frame.localName(b));
-          assertEquals(expected_a, frame.localValue(a).value());
-          assertEquals(expected_b, frame.localValue(b).value());
+          // All frames except the bottom one have expected locals.
+          var locals = {};
+          for (var j = 0; j < frame.localCount(); j++) {
+            locals[frame.localName(j)] = frame.localValue(j).value();
+          }
+          assertPropertiesEqual(expected_locals, locals);
 
-          // All frames except the bottom one has arguments variables x and y.
-          assertEquals('x', frame.argumentName(0));
-          assertEquals('y', frame.argumentName(1));
-          assertEquals(expected_x, frame.argumentValue(0).value());
-          assertEquals(expected_y, frame.argumentValue(1).value());
+          // All frames except the bottom one have expected arguments.
+          for (var j = 0; j < expected_args.names.length; j++) {
+            assertEquals(expected_args.names[j], frame.argumentName(j));
+            assertEquals(expected_args.values[j], frame.argumentValue(j).value());
+          }
 
           // All frames except the bottom one have two scopes.
           assertEquals(2, frame.scopeCount());
           assertEquals(debug.ScopeType.Local, frame.scope(0).scopeType());
           assertEquals(debug.ScopeType.Global, frame.scope(1).scopeType());
-          assertEquals(expected_a, frame.scope(0).scopeObject().value()['a']);
-          assertEquals(expected_b, frame.scope(0).scopeObject().value()['b']);
-          assertEquals(expected_x, frame.scope(0).scopeObject().value()['x']);
-          assertEquals(expected_y, frame.scope(0).scopeObject().value()['y']);
+
+          Object.keys(expected_locals).forEach(function (name) {
+            assertEquals(expected_locals[name], frame.scope(0).scopeObject().value()[name]);
+          });
+
+          for (var j = 0; j < expected_args.names.length; j++) {
+            var arg_name = expected_args.names[j];
+            var arg_value = expected_args.values[j];
+            assertEquals(arg_value, frame.scope(0).scopeObject().value()[arg_name]);
+          }
 
           // Evaluate in the inlined frame.
-          assertEquals(expected_a, frame.evaluate('a').value());
-          assertEquals(expected_x, frame.evaluate('x').value());
-          assertEquals(expected_x, frame.evaluate('arguments[0]').value());
-          assertEquals(expected_a + expected_b + expected_x + expected_y,
-                       frame.evaluate('a + b + x + y').value());
-          assertEquals(expected_x + expected_y,
-                       frame.evaluate('arguments[0] + arguments[1]').value());
+          Object.keys(expected_locals).forEach(function (name) {
+            assertEquals(expected_locals[name], frame.evaluate(name).value());
+          });
+
+          for (var j = 0; j < expected_args.names.length; j++) {
+            var arg_name = expected_args.names[j];
+            var arg_value = expected_args.values[j];
+            assertEquals(arg_value, frame.evaluate(arg_name).value());
+            assertEquals(arg_value, frame.evaluate('arguments['+j+']').value());
+          }
+
+          var expected_args_sum = arraySum(expected_args.values);
+          var expected_locals_sum =
+              arraySum(Object.keys(expected_locals).
+                       map(function (k) { return expected_locals[k]; }));
+
+          assertEquals(expected_locals_sum + expected_args_sum,
+                       frame.evaluate(Object.keys(expected_locals).join('+') + ' + ' +
+                                      expected_args.names.join('+')).value());
+
+          var arguments_sum = expected_args.names.map(function(_, idx) {
+            return "arguments[" + idx + "]";
+          }).join('+');
+          assertEquals(expected_args_sum,
+                       frame.evaluate(arguments_sum).value());
         } else {
           // The bottom frame only have the global scope.
           assertEquals(1, frame.scopeCount());
@@ -98,7 +140,13 @@
         }
 
         // Check for construct call.
-        assertEquals(testingConstructCall && i == 4, frame.isConstructCall());
+        if (i == 4) {
+          assertEquals(testingConstructCall, frame.isConstructCall());
+        } else if (i == 2) {
+          assertTrue(frame.isConstructCall());
+        } else {
+          assertFalse(frame.isConstructCall());
+        }
 
         // When function f is optimized (1 means YES, see runtime.cc) we
         // expect an optimized frame for f with g1, g2 and g3 inlined.
@@ -121,65 +169,70 @@
       listenerComplete = true;
     }
   } catch (e) {
-    exception = e
+    exception = e.toString() + e.stack;
   };
 };
 
-f();f();f();
+for (var i = 0; i < 4; i++) f(input.length - 1, 11.11, 12.12);
 %OptimizeFunctionOnNextCall(f);
-f();
+f(input.length - 1, 11.11, 12.12);
 
 // Add the debug event listener.
 Debug.setListener(listener);
 
-function h(x, y) {
-  var a = 1;
-  var b = 2;
-  a = a + a / 100;
-  b = b + b / 100;
+function h(i, x0, y0) {
+  var a0 = input[i].a;
+  var b0 = input[i].b;
+  a0 = a0 + a0 / 100;
+  b0 = b0 + b0 / 100;
   debugger;  // Breakpoint.
 };
 
-function g3(x, y) {
-  var a = 3;
-  var b = 4;
-  a = a + a / 100;
-  b = b + b / 100;
-  h(a, b);
-  return a+b;
+function g3(i, x1, y1) {
+  var a1 = input[i].a;
+  var b1 = input[i].b;
+  a1 = a1 + a1 / 100;
+  b1 = b1 + b1 / 100;
+  h(i - 1, a1, b1);
+  return a1+b1;
 };
 
-function g2(x, y) {
-  var a = 5;
-  var b = 6;
-  a = a + a / 100;
-  b = b + b / 100;
-  g3(a, b);
+function g2(i) {
+  var a2 = input[i].a;
+  var b2 = input[i].b;
+  a2 = a2 + a2 / 100;
+  b2 = b2 + b2 / 100;
+  g3(i - 1, a2, b2);
 };
 
-function g1(x, y) {
-  var a = 7;
-  var b = 8;
-  a = a + a / 100;
-  b = b + b / 100;
-  g2(a, b);
+function g1(i, x3, y3, z3) {
+  var a3 = input[i].a;
+  var b3 = input[i].b;
+  a3 = a3 + a3 / 100;
+  b3 = b3 + b3 / 100;
+  new g2(i - 1, a3, b3);
 };
 
-function f(x, y) {
-  var a = 9;
-  var b = 10;
-  a = a + a / 100;
-  b = b + b / 100;
-  g1(a, b);
+function f(i, x4, y4) {
+  var a4 = input[i].a;
+  var b4 = input[i].b;
+  a4 = a4 + a4 / 100;
+  b4 = b4 + b4 / 100;
+  g1(i - 1, a4, b4);
 };
 
 // Test calling f normally and as a constructor.
-f(11.11, 12.12);
+f(input.length - 1, 11.11, 12.12);
+f(input.length - 1, 11.11, 12.12, "");
 testingConstructCall = true;
-new f(11.11, 12.12);
+new f(input.length - 1, 11.11, 12.12);
+new f(input.length - 1, 11.11, 12.12, "");
 
-// Make sure that the debug event listener vas invoked.
+// Make sure that the debug event listener was invoked.
 assertFalse(exception, "exception in listener " + exception)
 assertTrue(listenerComplete);
 
+// Throw away type information for next run.
+gc();
+
 Debug.setListener(null);
diff --git a/test/mjsunit/debug-evaluate-locals-optimized.js b/test/mjsunit/debug-evaluate-locals-optimized.js
index c3cd5eb..c88a683 100644
--- a/test/mjsunit/debug-evaluate-locals-optimized.js
+++ b/test/mjsunit/debug-evaluate-locals-optimized.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --expose-gc --allow-natives-syntax --inline-construct
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug
 
@@ -34,6 +34,17 @@
 
 var testingConstructCall = false;
 
+var expected = [
+  { locals: {a0: 1, b0: 2}, args: { names: ["i", "x0", "y0"], values: [0, 3, 4] } },
+  { locals: {a1: 3, b1: 4}, args: { names: ["i", "x1", "y1"], values: [1, 5, 6] } },
+  { locals: {a2: 5, b2: 6}, args: { names: ["i"], values: [2] } },
+  { locals: {a3: 7, b3: 8}, args: { names: ["i", "x3", "y3", "z3"], values: [3, 9, 10, undefined] } },
+  { locals: {a4: 9, b4: 10}, args: { names: ["i", "x4", "y4"], values: [4, 11, 12] } }
+];
+
+function arraySum(arr) {
+  return arr.reduce(function (a, b) { return a + b; }, 0);
+}
 
 function listener(event, exec_state, event_data, data) {
   try {
@@ -44,42 +55,63 @@
       for (var i = 0; i < exec_state.frameCount(); i++) {
         var frame = exec_state.frame(i);
         if (i < exec_state.frameCount() - 1) {
-          var expected_a = i * 2 + 1;
-          var expected_b = i * 2 + 2;
-          var expected_x = (i + 1) * 2 + 1;
-          var expected_y = (i + 1) * 2 + 2;
+          var expected_args = expected[i].args;
+          var expected_locals = expected[i].locals;
 
-          // All frames except the bottom one has normal variables a and b.
-          var a = ('a' === frame.localName(0)) ? 0 : 1;
-          var b = 1 - a;
-          assertEquals('a', frame.localName(a));
-          assertEquals('b', frame.localName(b));
-          assertEquals(expected_a, frame.localValue(a).value());
-          assertEquals(expected_b, frame.localValue(b).value());
+          // All frames except the bottom one have expected locals.
+          var locals = {};
+          for (var j = 0; j < frame.localCount(); j++) {
+            locals[frame.localName(j)] = frame.localValue(j).value();
+          }
+          assertPropertiesEqual(expected_locals, locals);
 
-          // All frames except the bottom one has arguments variables x and y.
-          assertEquals('x', frame.argumentName(0));
-          assertEquals('y', frame.argumentName(1));
-          assertEquals(expected_x, frame.argumentValue(0).value());
-          assertEquals(expected_y, frame.argumentValue(1).value());
+          // All frames except the bottom one have expected arguments.
+          for (var j = 0; j < expected_args.names.length; j++) {
+            assertEquals(expected_args.names[j], frame.argumentName(j));
+            assertEquals(expected_args.values[j], frame.argumentValue(j).value());
+          }
 
           // All frames except the bottom one have two scopes.
           assertEquals(2, frame.scopeCount());
           assertEquals(debug.ScopeType.Local, frame.scope(0).scopeType());
           assertEquals(debug.ScopeType.Global, frame.scope(1).scopeType());
-          assertEquals(expected_a, frame.scope(0).scopeObject().value()['a']);
-          assertEquals(expected_b, frame.scope(0).scopeObject().value()['b']);
-          assertEquals(expected_x, frame.scope(0).scopeObject().value()['x']);
-          assertEquals(expected_y, frame.scope(0).scopeObject().value()['y']);
+
+          Object.keys(expected_locals).forEach(function (name) {
+            assertEquals(expected_locals[name], frame.scope(0).scopeObject().value()[name]);
+          });
+
+          for (var j = 0; j < expected_args.names.length; j++) {
+            var arg_name = expected_args.names[j];
+            var arg_value = expected_args.values[j];
+            assertEquals(arg_value, frame.scope(0).scopeObject().value()[arg_name]);
+          }
 
           // Evaluate in the inlined frame.
-          assertEquals(expected_a, frame.evaluate('a').value());
-          assertEquals(expected_x, frame.evaluate('x').value());
-          assertEquals(expected_x, frame.evaluate('arguments[0]').value());
-          assertEquals(expected_a + expected_b + expected_x + expected_y,
-                       frame.evaluate('a + b + x + y').value());
-          assertEquals(expected_x + expected_y,
-                       frame.evaluate('arguments[0] + arguments[1]').value());
+          Object.keys(expected_locals).forEach(function (name) {
+            assertEquals(expected_locals[name], frame.evaluate(name).value());
+          });
+
+          for (var j = 0; j < expected_args.names.length; j++) {
+            var arg_name = expected_args.names[j];
+            var arg_value = expected_args.values[j];
+            assertEquals(arg_value, frame.evaluate(arg_name).value());
+            assertEquals(arg_value, frame.evaluate('arguments['+j+']').value());
+          }
+
+          var expected_args_sum = arraySum(expected_args.values);
+          var expected_locals_sum =
+              arraySum(Object.keys(expected_locals).
+                       map(function (k) { return expected_locals[k]; }));
+
+          assertEquals(expected_locals_sum + expected_args_sum,
+                       frame.evaluate(Object.keys(expected_locals).join('+') + ' + ' +
+                                      expected_args.names.join('+')).value());
+
+          var arguments_sum = expected_args.names.map(function(_, idx) {
+            return "arguments[" + idx + "]";
+          }).join('+');
+          assertEquals(expected_args_sum,
+                       frame.evaluate(arguments_sum).value());
         } else {
           // The bottom frame only have the global scope.
           assertEquals(1, frame.scopeCount());
@@ -98,7 +130,13 @@
         }
 
         // Check for construct call.
-        assertEquals(testingConstructCall && i == 4, frame.isConstructCall());
+        if (i == 4) {
+          assertEquals(testingConstructCall, frame.isConstructCall());
+        } else if (i == 2) {
+          assertTrue(frame.isConstructCall());
+        } else {
+          assertFalse(frame.isConstructCall());
+        }
 
         // When function f is optimized (1 means YES, see runtime.cc) we
         // expect an optimized frame for f with g1, g2 and g3 inlined.
@@ -121,54 +159,59 @@
       listenerComplete = true;
     }
   } catch (e) {
-    exception = e.stack;
+    exception = e.toString() + e.stack;
   };
 };
 
-f();f();f();
+for (var i = 0; i < 4; i++) f(expected.length - 1, 11, 12);
 %OptimizeFunctionOnNextCall(f);
-f();
+f(expected.length - 1, 11, 12);
 
 // Add the debug event listener.
 Debug.setListener(listener);
 
-function h(x, y) {
-  var a = 1;
-  var b = 2;
+function h(i, x0, y0) {
+  var a0 = expected[i].locals.a0;
+  var b0 = expected[i].locals.b0;
   debugger;  // Breakpoint.
-};
+}
 
-function g3(x, y) {
-  var a = 3;
-  var b = 4;
-  h(a, b);
-};
+function g3(i, x1, y1) {
+  var a1 = expected[i].locals.a1;
+  var b1 = expected[i].locals.b1;
+  h(i - 1, a1, b1);
+}
 
-function g2(x, y) {
-  var a = 5;
-  var b = 6;
-  g3(a, b);
-};
+function g2(i) {
+  var a2 = expected[i].locals.a2;
+  var b2 = expected[i].locals.b2;
+  g3(i - 1, a2, b2);
+}
 
-function g1(x, y) {
-  var a = 7;
-  var b = 8;
-  g2(a, b);
-};
+function g1(i, x3, y3, z3) {
+  var a3 = expected[i].locals.a3;
+  var b3 = expected[i].locals.b3;
+  new g2(i - 1, a3, b3);
+}
 
-function f(x, y) {
-  var a = 9;
-  var b = 10;
-  g1(a, b);
-};
+function f(i, x4, y4) {
+  var a4 = expected[i].locals.a4;
+  var b4 = expected[i].locals.b4;
+  g1(i - 1, a4, b4);
+}
 
 // Test calling f normally and as a constructor.
-f(11, 12);
+f(expected.length - 1, 11, 12);
+f(expected.length - 1, 11, 12, 0);
 testingConstructCall = true;
-new f(11, 12);
+new f(expected.length - 1, 11, 12);
+new f(expected.length - 1, 11, 12, 0);
 
-// Make sure that the debug event listener vas invoked.
+// Make sure that the debug event listener was invoked.
 assertFalse(exception, "exception in listener " + exception)
 assertTrue(listenerComplete);
 
+// Throw away type information for next run.
+gc();
+
 Debug.setListener(null);
diff --git a/test/mjsunit/debug-scopes.js b/test/mjsunit/debug-scopes.js
index 1c23b0b..942bd2b 100644
--- a/test/mjsunit/debug-scopes.js
+++ b/test/mjsunit/debug-scopes.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --allow-natives-syntax
 // The functions used for testing backtraces. They are at the top to make the
 // testing of source line/column easier.
 
-
 // Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug;
+var Debug = debug.Debug;
 
 var test_name;
 var listener_delegate;
@@ -439,6 +438,26 @@
 EndTest();
 
 
+// With block in function that is marked for optimization while being executed.
+BeginTest("With 7");
+
+function with_7() {
+  with({}) {
+    %OptimizeFunctionOnNextCall(with_7);
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+};
+with_7();
+EndTest();
+
+
 // Simple closure formed by returning an inner function referering the outer
 // functions arguments.
 BeginTest("Closure 1");
@@ -950,6 +969,28 @@
 EndTest();
 
 
+// Catch block in function that is marked for optimization while being executed.
+BeginTest("Catch block 7");
+function catch_block_7() {
+  %OptimizeFunctionOnNextCall(catch_block_7);
+  try {
+    throw 'Exception';
+  } catch (e) {
+    debugger;
+  }
+};
+
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Catch,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({e:'Exception'}, 0, exec_state);
+};
+catch_block_7();
+EndTest();
+
+
 assertEquals(begin_test_count, break_count,
              'one or more tests did not enter the debugger');
 assertEquals(begin_test_count, end_test_count,
diff --git a/test/mjsunit/debug-set-script-source.js b/test/mjsunit/debug-set-script-source.js
new file mode 100644
index 0000000..34ae848
--- /dev/null
+++ b/test/mjsunit/debug-set-script-source.js
@@ -0,0 +1,64 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var script_number = 0;
+var script_names = [];
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.BeforeCompile) {
+    event_data.script().setSource(event_data.script().source() +
+        " //@ sourceURL=proper_location_" + (++script_number));
+  } else if (event == Debug.DebugEvent.AfterCompile) {
+    try {
+      event_data.script().setSource("a=1 //@ sourceURL=wrong_location");
+    } catch(e) {
+      exception = e;
+    }
+    script_names.push(event_data.script().name());
+  }
+};
+
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Compile different sources.
+eval('a=1');
+eval('(function(){})');
+
+assertEquals(2, script_names.length);
+assertEquals("proper_location_1", script_names[0]);
+assertEquals("proper_location_2", script_names[1]);
+
+assertEquals("illegal access", exception);
+
+Debug.setListener(null);
diff --git a/test/mjsunit/debug-step-3.js b/test/mjsunit/debug-step-3.js
new file mode 100644
index 0000000..9cac0f5
--- /dev/null
+++ b/test/mjsunit/debug-step-3.js
@@ -0,0 +1,94 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// This test checks that full code compiled without debug break slots
+// is recompiled with debug break slots when debugging is started.
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var bp;
+var done = false;
+var step_count = 0;
+var set_bp = false;
+
+// Debug event listener which steps until the global variable done is true.
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    if (!done) exec_state.prepareStep(Debug.StepAction.StepNext);
+    step_count++;
+  }
+};
+
+// Set the global variable state to prepare the stepping test.
+function prepare_step_test() {
+  done = false;
+  step_count = 0;
+}
+
+// Test function to step through.
+function f() {
+  var a = 0;
+  if (set_bp) { bp = Debug.setBreakPoint(f, 3); }
+  var i = 1;
+  var j = 2;
+  done = true;
+};
+
+prepare_step_test();
+f();
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Make f set a breakpoint with an activation on the stack.
+prepare_step_test();
+set_bp = true;
+f();
+// TODO(1782): Fix issue to bring back this assert.
+//assertEquals(4, step_count);
+Debug.clearBreakPoint(bp);
+
+// Set a breakpoint on the 'var i = 1' statement (line 3).
+set_bp = false;
+bp = Debug.setBreakPoint(f, 3);
+
+// Step through the function ensuring that the var statements are hit as well.
+prepare_step_test();
+f();
+assertEquals(4, step_count);
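+// (The expected 4 steps presumably are: the initial break at 'var i = 1'
+// where the breakpoint sits, then StepNext landing on 'var j = 2',
+// 'done = true', and finally the function's return.)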
+
+// Clear the breakpoint and check that no stepping happens.
+Debug.clearBreakPoint(bp);
+prepare_step_test();
+f();
+assertEquals(0, step_count);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/test/mjsunit/debug-stepin-accessor.js b/test/mjsunit/debug-stepin-accessor.js
index 2c9c8c3..70acd5e 100644
--- a/test/mjsunit/debug-stepin-accessor.js
+++ b/test/mjsunit/debug-stepin-accessor.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -112,8 +112,8 @@
 function testGetter1_3() {
   expected_function_name = 'getter1';
   expected_source_line_text = '    return this.name;  // getter 1';
-  debugger;
   for (var i = 1; i < 2; i++) {
+    debugger;
     var x = c['getter' + i];
   }
 }
diff --git a/test/mjsunit/debug-stepin-function-call.js b/test/mjsunit/debug-stepin-function-call.js
index 385fcb2..3b5240c 100644
--- a/test/mjsunit/debug-stepin-function-call.js
+++ b/test/mjsunit/debug-stepin-function-call.js
@@ -135,8 +135,15 @@
   var yetAnotherLocal = 10;
 }
 
+// Test step into bound function.
+function bind1() {
+  var bound = g.bind(null, 3);
+  debugger;
+  bound();
+}
+
 var testFunctions =
-    [call1, call2, call3, call4, apply1, apply2, apply3, apply4];
+    [call1, call2, call3, call4, apply1, apply2, apply3, apply4, bind1];
 
 for (var i = 0; i < testFunctions.length; i++) {
   state = 0;
@@ -145,5 +152,13 @@
   assertEquals(3, state);
 }
 
+// Test global bound function.
+state = 0;
+var globalBound = g.bind(null, 3);
+debugger;
+globalBound();
+assertNull(exception);
+assertEquals(3, state);
+
 // Get rid of the debug event listener.
 Debug.setListener(null);
\ No newline at end of file
diff --git a/test/mjsunit/debug-stepout-scope.js b/test/mjsunit/debug-stepout-scope.js
new file mode 100644
index 0000000..9c040da
--- /dev/null
+++ b/test/mjsunit/debug-stepout-scope.js
@@ -0,0 +1,423 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --expose-natives-as=builtins
+
+// Check that the ScopeIterator can properly recreate the scope at
+// every point when stepping through functions.
+
+var Debug = debug.Debug;
+
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    // Access scope details.
+    var scope_count = exec_state.frame().scopeCount();
+    for (var i = 0; i < scope_count; i++) {
+      var scope = exec_state.frame().scope(i);
+      // assertTrue(scope.isScope());
+      scope.scopeType();
+      scope.scopeObject();
+    }
+
+    // Do steps until we reach the global scope again.
+    if (true) {
+      exec_state.prepareStep(Debug.StepAction.StepInMin, 1);
+    }
+  }
+}
+
+Debug.setListener(listener);
+
+
+function test1() {
+  debugger;
+  with ({x:1}) {
+    x = 2;
+  }
+}
+test1();
+
+
+function test2() {
+  if (true) {
+    with ({}) {
+      debugger;
+    }
+  } else {
+    with ({}) {
+      return 10;
+    }
+  }
+}
+test2();
+
+
+function test3() {
+  if (true) {
+    debugger;
+  } else {
+    with ({}) {
+      return 10;
+    }
+  }
+}
+test3();
+
+
+function test4() {
+  debugger;
+  with ({x:1}) x = 1
+}
+test4();
+
+
+function test5() {
+  debugger;
+  var dummy = 1;
+  with ({}) {
+    with ({}) {
+      dummy = 2;
+    }
+  }
+  dummy = 3;
+}
+test5();
+
+
+function test6() {
+  debugger;
+  try {
+    throw 'stuff';
+  } catch (e) {
+    e = 1;
+  }
+}
+test6();
+
+
+function test7() {
+  debugger;
+  function foo() {}
+}
+test7();
+
+
+function test8() {
+  debugger;
+  (function foo() {})();
+}
+test8();
+
+
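+// The snippets below pair each prefix with each body and are eval'd (and
+// passed to the Function constructor) to cover many statement variants.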
+var q = 42;
+var prefixes = [ "debugger; ",
+                 "if (false) { try { throw 0; } catch(x) { return x; } }; debugger; " ];
+var bodies = [ "1",
+               "1 ",
+               "1;",
+               "1; ",
+               "q",
+               "q ",
+               "q;",
+               "q; ",
+               "try { throw 'stuff' } catch (e) { e = 1; }",
+               "try { throw 'stuff' } catch (e) { e = 1; } ",
+               "try { throw 'stuff' } catch (e) { e = 1; };",
+               "try { throw 'stuff' } catch (e) { e = 1; }; " ];
+var with_bodies = [ "with ({}) {}",
+                    "with ({x:1}) x",
+                    "with ({x:1}) x = 1",
+                    "with ({x:1}) x ",
+                    "with ({x:1}) x = 1 ",
+                    "with ({x:1}) x;",
+                    "with ({x:1}) x = 1;",
+                    "with ({x:1}) x; ",
+                    "with ({x:1}) x = 1; " ];
+
+
+function test9() {
+  debugger;
+  for (var i = 0; i < prefixes.length; ++i) {
+    var pre = prefixes[i];
+    for (var j = 0; j < bodies.length; ++j) {
+      var body = bodies[j];
+      eval(pre + body);
+      eval("'use strict'; " + pre + body);
+    }
+    for (var j = 0; j < with_bodies.length; ++j) {
+      var body = with_bodies[j];
+      eval(pre + body);
+    }
+  }
+}
+test9();
+
+
+function test10() {
+  debugger;
+  with ({}) {
+    return 10;
+  }
+}
+test10();
+
+
+function test11() {
+  debugger;
+  try {
+    throw 'stuff';
+  } catch (e) {
+    return 10;
+  }
+}
+test11();
+
+
+// Test global eval and function constructor.
+for (var i = 0; i < prefixes.length; ++i) {
+  var pre = prefixes[i];
+  for (var j = 0; j < bodies.length; ++j) {
+    var body = bodies[j];
+    eval(pre + body);
+    eval("'use strict'; " + pre + body);
+    Function(pre + body)();
+  }
+  for (var j = 0; j < with_bodies.length; ++j) {
+    var body = with_bodies[j];
+    eval(pre + body);
+    Function(pre + body)();
+  }
+}
+
+
+try {
+  with({}) {
+    debugger;
+    eval("{}$%:^");
+  }
+} catch(e) {
+  nop();
+}
+
+// Return from function constructed with Function constructor.
+var anon = 12;
+for (var i = 0; i < prefixes.length; ++i) {
+  var pre = prefixes[i];
+  Function(pre + "return 42")();
+  Function(pre + "return 42 ")();
+  Function(pre + "return 42;")();
+  Function(pre + "return 42; ")();
+  Function(pre + "return anon")();
+  Function(pre + "return anon ")();
+  Function(pre + "return anon;")();
+  Function(pre + "return anon; ")();
+}
+
+
+function nop() {}
+
+
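+// Stress stepping through with-scopes combined with labeled breaks and
+// continues, throw/catch/finally, and returns from nested functions.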
+function stress() {
+  debugger;
+
+  L: with ({x:12}) {
+    break L;
+  }
+
+
+  with ({x: 'outer'}) {
+    label: {
+      with ({x: 'inner'}) {
+        break label;
+      }
+    }
+  }
+
+
+  with ({x: 'outer'}) {
+    label: {
+      with ({x: 'inner'}) {
+        break label;
+      }
+    }
+    nop();
+  }
+
+
+  with ({x: 'outer'}) {
+    label: {
+      with ({x: 'middle'}) {
+        with ({x: 'inner'}) {
+          break label;
+        }
+      }
+    }
+  }
+
+
+  with ({x: 'outer'}) {
+    label: {
+      with ({x: 'middle'}) {
+        with ({x: 'inner'}) {
+          break label;
+        }
+      }
+    }
+    nop();
+  }
+
+
+  with ({x: 'outer'}) {
+    for (var i = 0; i < 3; ++i) {
+      with ({x: 'inner' + i}) {
+        continue;
+      }
+    }
+  }
+
+
+  with ({x: 'outer'}) {
+    label: for (var i = 0; i < 3; ++i) {
+      with ({x: 'middle' + i}) {
+        for (var j = 0; j < 3; ++j) {
+          with ({x: 'inner' + j}) {
+            continue label;
+          }
+        }
+      }
+    }
+  }
+
+
+  with ({x: 'outer'}) {
+    try {
+      with ({x: 'inner'}) {
+        throw 0;
+      }
+    } catch (e) {
+    }
+  }
+
+
+  with ({x: 'outer'}) {
+    try {
+      with ({x: 'inner'}) {
+        throw 0;
+      }
+    } catch (e) {
+      nop();
+    }
+  }
+
+
+  with ({x: 'outer'}) {
+    try {
+      with ({x: 'middle'}) {
+        with ({x: 'inner'}) {
+          throw 0;
+        }
+      }
+    } catch (e) {
+    }
+  }
+
+
+  try {
+    with ({x: 'outer'}) {
+      try {
+        with ({x: 'inner'}) {
+          throw 0;
+        }
+      } finally {
+      }
+    }
+  } catch (e) {
+  }
+
+
+  try {
+    with ({x: 'outer'}) {
+      try {
+        with ({x: 'inner'}) {
+          throw 0;
+        }
+      } finally {
+        nop();
+      }
+    }
+  } catch (e) {
+  }
+
+
+  function stress1() {
+    with ({x:12}) {
+      return x;
+    }
+  }
+  stress1();
+
+
+  function stress2() {
+    with ({x: 'outer'}) {
+      with ({x: 'inner'}) {
+        return x;
+      }
+    }
+  }
+  stress2();
+
+  function stress3() {
+    try {
+      with ({x: 'inner'}) {
+        throw 0;
+      }
+    } catch (e) {
+      return e;
+    }
+  }
+  stress3();
+
+
+  function stress4() {
+    try {
+      with ({x: 'inner'}) {
+        throw 0;
+      }
+    } catch (e) {
+      with ({x: 'inner'}) {
+        return e;
+      }
+    }
+  }
+  stress4();
+
+}
+stress();
+
+
+// With block as the last(!) statement in global code.
+with ({}) { debugger; }
\ No newline at end of file
diff --git a/test/mjsunit/element-kind.js b/test/mjsunit/element-kind.js
deleted file mode 100644
index 48a029f..0000000
--- a/test/mjsunit/element-kind.js
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-// Test element kind of objects
-
-var element_kind = {
-  fast_elements                     :  1,
-  fast_double_elements              :  2,
-  dictionary_elements               :  3,
-  external_byte_elements            :  4,
-  external_unsigned_byte_elements   :  5,
-  external_short_elements           :  6,
-  external_unsigned_short_elements  :  7,
-  external_int_elements             :  8,
-  external_unsigned_int_elements    :  9,
-  external_float_elements           : 10,
-  external_double_elements          : 11,
-  external_pixel_elements           : 12
-}
-
-// We expect an object to only be of one element kind.
-function assertKind(expected, obj){
-  assertEquals(expected == element_kind.fast_elements,
-               %HasFastElements(obj));
-  assertEquals(expected == element_kind.fast_double_elements,
-               %HasFastDoubleElements(obj));
-  assertEquals(expected == element_kind.dictionary_elements,
-               %HasDictionaryElements(obj));
-  assertEquals(expected == element_kind.external_byte_elements,
-               %HasExternalByteElements(obj));
-  assertEquals(expected == element_kind.external_unsigned_byte_elements,
-               %HasExternalUnsignedByteElements(obj));
-  assertEquals(expected == element_kind.external_short_elements,
-               %HasExternalShortElements(obj));
-  assertEquals(expected == element_kind.external_unsigned_short_elements,
-               %HasExternalUnsignedShortElements(obj));
-  assertEquals(expected == element_kind.external_int_elements,
-               %HasExternalIntElements(obj));
-  assertEquals(expected == element_kind.external_unsigned_int_elements,
-               %HasExternalUnsignedIntElements(obj));
-  assertEquals(expected == element_kind.external_float_elements,
-               %HasExternalFloatElements(obj));
-  assertEquals(expected == element_kind.external_double_elements,
-               %HasExternalDoubleElements(obj));
-  assertEquals(expected == element_kind.external_pixel_elements,
-               %HasExternalPixelElements(obj));
-  // every external kind is also an external array
-  assertEquals(expected >= element_kind.external_byte_elements,
-               %HasExternalArrayElements(obj));
-}
-
-var me = {};
-assertKind(element_kind.fast_elements, me);
-me.dance = 0xD15C0;
-me.drink = 0xC0C0A;
-assertKind(element_kind.fast_elements, me);
-
-var you = new Array();
-for(i = 0; i < 1337; i++) {
-  you[i] = i;
-}
-assertKind(element_kind.fast_elements, you);
-
-assertKind(element_kind.dictionary_elements, new Array(0xC0C0A));
-
-// fast_double_elements not yet available
-
-
-assertKind(element_kind.external_byte_elements,           new Int8Array(9001));
-assertKind(element_kind.external_unsigned_byte_elements,  new Uint8Array(007));
-assertKind(element_kind.external_short_elements,          new Int16Array(666));
-assertKind(element_kind.external_unsigned_short_elements, new Uint16Array(42));
-assertKind(element_kind.external_int_elements,            new Int32Array(0xF));
-assertKind(element_kind.external_unsigned_int_elements,   new Uint32Array(23));
-assertKind(element_kind.external_float_elements,          new Float32Array(7));
-assertKind(element_kind.external_double_elements,         new Float64Array(0));
-assertKind(element_kind.external_pixel_elements,          new PixelArray(512));
diff --git a/src/extensions/experimental/i18n-extension.h b/test/mjsunit/elements-kind-depends.js
similarity index 68%
rename from src/extensions/experimental/i18n-extension.h
rename to test/mjsunit/elements-kind-depends.js
index 5401f25..82f188b 100644
--- a/src/extensions/experimental/i18n-extension.h
+++ b/test/mjsunit/elements-kind-depends.js
@@ -25,30 +25,50 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+// Flags: --allow-natives-syntax --smi-only-arrays
 
-#include "include/v8.h"
+function burn() {
+  var a = new Array(3);
+  a[0] = 10;
+  a[1] = 15.5;
+  a[2] = 20;
+  return a;
+}
 
-namespace v8 {
-namespace internal {
+function check(a) {
+  assertEquals(10, a[0]);
+  assertEquals(15.5, a[1]);
+  assertEquals(20, a[2]);
+}
+
+var b;
+for (var i = 0; i < 3; ++i) {
+  b = burn();
+  check(b);  // all OK
+}
+%OptimizeFunctionOnNextCall(burn);
+b = burn();
+check(b);  // Failed before the fix this test covers.
 
 
-class I18NExtension : public v8::Extension {
- public:
-  I18NExtension();
+function loop_test(x) {
+  for (var i = 0; i < 3; i++) {
+    x[i] = (i + 1) * 0.5;
+  }
+}
 
-  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
-      v8::Handle<v8::String> name);
+function check2(b) {
+  assertEquals(0.5, b[0]);
+  assertEquals(1.0, b[1]);
+  assertEquals(1.5, b[2]);
+}
 
-  // V8 code prefers Register, while Chrome and WebKit use get kind of methods.
-  static void Register();
-  static I18NExtension* get();
-
- private:
-  static I18NExtension* extension_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+for (var i = 0; i < 3; ++i) {
+  b = [0,1,2];
+  loop_test(b);
+  check2(b);
+}
+%OptimizeFunctionOnNextCall(loop_test);
+b = [0,1,2];
+loop_test(b);
+check2(b);
diff --git a/test/mjsunit/elements-kind.js b/test/mjsunit/elements-kind.js
new file mode 100644
index 0000000..4aa79de
--- /dev/null
+++ b/test/mjsunit/elements-kind.js
@@ -0,0 +1,346 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+
+// Test element kind of objects.
+// Since --smi-only-arrays affects builtins, its default setting at compile
+// time sticks if built with snapshot.  If --smi-only-arrays is deactivated
+// by default, only a no-snapshot build actually has smi-only arrays enabled
+// in this test case.  Depending on whether smi-only arrays are actually
+// enabled, this test takes the appropriate code path to check smi-only arrays.
+
+support_smi_only_arrays = %HasFastSmiOnlyElements(new Array(1,2,3,4,5,6,7,8));
+
+if (support_smi_only_arrays) {
+  print("Tests include smi-only arrays.");
+} else {
+  print("Tests do NOT include smi-only arrays.");
+}
+
+var elements_kind = {
+  fast_smi_only            :  'fast smi only elements',
+  fast                     :  'fast elements',
+  fast_double              :  'fast double elements',
+  dictionary               :  'dictionary elements',
+  external_byte            :  'external byte elements',
+  external_unsigned_byte   :  'external unsigned byte elements',
+  external_short           :  'external short elements',
+  external_unsigned_short  :  'external unsigned short elements',
+  external_int             :  'external int elements',
+  external_unsigned_int    :  'external unsigned int elements',
+  external_float           :  'external float elements',
+  external_double          :  'external double elements',
+  external_pixel           :  'external pixel elements'
+}
+
+function getKind(obj) {
+  if (%HasFastSmiOnlyElements(obj)) return elements_kind.fast_smi_only;
+  if (%HasFastElements(obj)) return elements_kind.fast;
+  if (%HasFastDoubleElements(obj)) return elements_kind.fast_double;
+  if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
+  // Every external kind is also an external array.
+  assertTrue(%HasExternalArrayElements(obj));
+  if (%HasExternalByteElements(obj)) {
+    return elements_kind.external_byte;
+  }
+  if (%HasExternalUnsignedByteElements(obj)) {
+    return elements_kind.external_unsigned_byte;
+  }
+  if (%HasExternalShortElements(obj)) {
+    return elements_kind.external_short;
+  }
+  if (%HasExternalUnsignedShortElements(obj)) {
+    return elements_kind.external_unsigned_short;
+  }
+  if (%HasExternalIntElements(obj)) {
+    return elements_kind.external_int;
+  }
+  if (%HasExternalUnsignedIntElements(obj)) {
+    return elements_kind.external_unsigned_int;
+  }
+  if (%HasExternalFloatElements(obj)) {
+    return elements_kind.external_float;
+  }
+  if (%HasExternalDoubleElements(obj)) {
+    return elements_kind.external_double;
+  }
+  if (%HasExternalPixelElements(obj)) {
+    return elements_kind.external_pixel;
+  }
+}
+
+function assertKind(expected, obj, name_opt) {
+  if (!support_smi_only_arrays &&
+      expected == elements_kind.fast_smi_only) {
+    expected = elements_kind.fast;
+  }
+  assertEquals(expected, getKind(obj), name_opt);
+}
+
+var me = {};
+assertKind(elements_kind.fast, me);
+me.dance = 0xD15C0;
+me.drink = 0xC0C0A;
+assertKind(elements_kind.fast, me);
+
+if (support_smi_only_arrays) {
+  var too = [1,2,3];
+  assertKind(elements_kind.fast_smi_only, too);
+  too.dance = 0xD15C0;
+  too.drink = 0xC0C0A;
+  assertKind(elements_kind.fast_smi_only, too);
+}
+
+// Make sure the element kind transitions from smi-only when a non-smi is stored.
+var you = new Array();
+assertKind(elements_kind.fast_smi_only, you);
+for (var i = 0; i < 1337; i++) {
+  var val = i;
+  if (i == 1336) {
+    assertKind(elements_kind.fast_smi_only, you);
+    val = new Object();
+  }
+  you[i] = val;
+}
+assertKind(elements_kind.fast, you);
+
+assertKind(elements_kind.dictionary, new Array(0xDECAF));
+
+var fast_double_array = new Array(0xDECAF);
+for (var i = 0; i < 0xDECAF; i++) fast_double_array[i] = i / 2;
+assertKind(elements_kind.fast_double, fast_double_array);
+
+assertKind(elements_kind.external_byte,           new Int8Array(9001));
+assertKind(elements_kind.external_unsigned_byte,  new Uint8Array(007));
+assertKind(elements_kind.external_short,          new Int16Array(666));
+assertKind(elements_kind.external_unsigned_short, new Uint16Array(42));
+assertKind(elements_kind.external_int,            new Int32Array(0xF));
+assertKind(elements_kind.external_unsigned_int,   new Uint32Array(23));
+assertKind(elements_kind.external_float,          new Float32Array(7));
+assertKind(elements_kind.external_double,         new Float64Array(0));
+assertKind(elements_kind.external_pixel,          new PixelArray(512));
+
+// Crankshaft support for smi-only array elements.
+function monomorphic(array) {
+  assertKind(elements_kind.fast_smi_only, array);
+  for (var i = 0; i < 3; i++) {
+    array[i] = i + 10;
+  }
+  assertKind(elements_kind.fast_smi_only, array);
+  for (var i = 0; i < 3; i++) {
+    var a = array[i];
+    assertEquals(i + 10, a);
+  }
+}
+var smi_only = new Array(1, 2, 3);
+assertKind(elements_kind.fast_smi_only, smi_only);
+for (var i = 0; i < 3; i++) monomorphic(smi_only);
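+// The unoptimized calls above collect type feedback before optimizing.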
+%OptimizeFunctionOnNextCall(monomorphic);
+monomorphic(smi_only);
+
+if (support_smi_only_arrays) {
+  function construct_smis() {
+    var a = [0, 0, 0];
+    a[0] = 0;  // Send the COW array map to the steak house.
+    assertKind(elements_kind.fast_smi_only, a);
+    return a;
+  }
+  function construct_doubles() {
+    var a = construct_smis();
+    a[0] = 1.5;
+    assertKind(elements_kind.fast_double, a);
+    return a;
+  }
+  function construct_objects() {
+    var a = construct_smis();
+    a[0] = "one";
+    assertKind(elements_kind.fast, a);
+    return a;
+  }
+
+  // Test crankshafted transition SMI->DOUBLE.
+  function convert_to_double(array) {
+    array[1] = 2.5;
+    assertKind(elements_kind.fast_double, array);
+    assertEquals(2.5, array[1]);
+  }
+  var smis = construct_smis();
+  for (var i = 0; i < 3; i++) convert_to_double(smis);
+  %OptimizeFunctionOnNextCall(convert_to_double);
+  smis = construct_smis();
+  convert_to_double(smis);
+  // Test crankshafted transitions SMI->FAST and DOUBLE->FAST.
+  function convert_to_fast(array) {
+    array[1] = "two";
+    assertKind(elements_kind.fast, array);
+    assertEquals("two", array[1]);
+  }
+  smis = construct_smis();
+  for (var i = 0; i < 3; i++) convert_to_fast(smis);
+  var doubles = construct_doubles();
+  for (var i = 0; i < 3; i++) convert_to_fast(doubles);
+  smis = construct_smis();
+  doubles = construct_doubles();
+  %OptimizeFunctionOnNextCall(convert_to_fast);
+  convert_to_fast(smis);
+  convert_to_fast(doubles);
+  // Test transition chain SMI->DOUBLE->FAST (crankshafted function will
+  // transition to FAST directly).
+  function convert_mixed(array, value, kind) {
+    array[1] = value;
+    assertKind(kind, array);
+    assertEquals(value, array[1]);
+  }
+  smis = construct_smis();
+  for (var i = 0; i < 3; i++) {
+    convert_mixed(smis, 1.5, elements_kind.fast_double);
+  }
+  doubles = construct_doubles();
+  for (var i = 0; i < 3; i++) {
+    convert_mixed(doubles, "three", elements_kind.fast);
+  }
+  smis = construct_smis();
+  doubles = construct_doubles();
+  %OptimizeFunctionOnNextCall(convert_mixed);
+  convert_mixed(smis, 1, elements_kind.fast);
+  convert_mixed(doubles, 1, elements_kind.fast);
+  assertTrue(%HaveSameMap(smis, doubles));
+}
+
+// Crankshaft support for smi-only elements in dynamic array literals.
+function get(foo) { return foo; }  // Used to generate dynamic values.
+
+function crankshaft_test() {
+  if (support_smi_only_arrays) {
+    var a1 = [get(1), get(2), get(3)];
+    assertKind(elements_kind.fast_smi_only, a1);
+  }
+  var a2 = new Array(get(1), get(2), get(3));
+  assertKind(elements_kind.fast_smi_only, a2);
+  var b = [get(1), get(2), get("three")];
+  assertKind(elements_kind.fast, b);
+  var c = [get(1), get(2), get(3.5)];
+  if (support_smi_only_arrays) {
+    assertKind(elements_kind.fast_double, c);
+  }
+}
+for (var i = 0; i < 3; i++) {
+  crankshaft_test();
+}
+%OptimizeFunctionOnNextCall(crankshaft_test);
+crankshaft_test();
+
+// Elements_kind transitions for arrays.
+
+// A map can have three different elements_kind transitions: SMI->DOUBLE,
+// DOUBLE->OBJECT, and SMI->OBJECT. No matter in which order these three are
+// created, they must always end up with the same FAST map.
+
+// This test is meaningless without FAST_SMI_ONLY_ELEMENTS.
+if (support_smi_only_arrays) {
+  // Preparation: create one pair of identical objects for each case.
+  var a = [1, 2, 3];
+  var b = [1, 2, 3];
+  assertTrue(%HaveSameMap(a, b));
+  assertKind(elements_kind.fast_smi_only, a);
+  var c = [1, 2, 3];
+  c["case2"] = true;
+  var d = [1, 2, 3];
+  d["case2"] = true;
+  assertTrue(%HaveSameMap(c, d));
+  assertFalse(%HaveSameMap(a, c));
+  assertKind(elements_kind.fast_smi_only, c);
+  var e = [1, 2, 3];
+  e["case3"] = true;
+  var f = [1, 2, 3];
+  f["case3"] = true;
+  assertTrue(%HaveSameMap(e, f));
+  assertFalse(%HaveSameMap(a, e));
+  assertFalse(%HaveSameMap(c, e));
+  assertKind(elements_kind.fast_smi_only, e);
+  // Case 1: SMI->DOUBLE, DOUBLE->OBJECT, SMI->OBJECT.
+  a[0] = 1.5;
+  assertKind(elements_kind.fast_double, a);
+  a[0] = "foo";
+  assertKind(elements_kind.fast, a);
+  b[0] = "bar";
+  assertTrue(%HaveSameMap(a, b));
+  // Case 2: SMI->DOUBLE, SMI->OBJECT, DOUBLE->OBJECT.
+  c[0] = 1.5;
+  assertKind(elements_kind.fast_double, c);
+  assertFalse(%HaveSameMap(c, d));
+  d[0] = "foo";
+  assertKind(elements_kind.fast, d);
+  assertFalse(%HaveSameMap(c, d));
+  c[0] = "bar";
+  assertTrue(%HaveSameMap(c, d));
+  // Case 3: SMI->OBJECT, SMI->DOUBLE, DOUBLE->OBJECT.
+  e[0] = "foo";
+  assertKind(elements_kind.fast, e);
+  assertFalse(%HaveSameMap(e, f));
+  f[0] = 1.5;
+  assertKind(elements_kind.fast_double, f);
+  assertFalse(%HaveSameMap(e, f));
+  f[0] = "bar";
+  assertKind(elements_kind.fast, f);
+  assertTrue(%HaveSameMap(e, f));
+}
+
+// Test if Array.concat() works correctly with DOUBLE elements.
+if (support_smi_only_arrays) {
+  var a = [1, 2];
+  assertKind(elements_kind.fast_smi_only, a);
+  var b = [4.5, 5.5];
+  assertKind(elements_kind.fast_double, b);
+  var c = a.concat(b);
+  assertEquals([1, 2, 4.5, 5.5], c);
+  // TODO(1810): Change implementation so that we get DOUBLE elements here?
+  assertKind(elements_kind.fast, c);
+}
+
+// Test that Array.push() correctly handles SMI elements.
+if (support_smi_only_arrays) {
+  var a = [1, 2];
+  assertKind(elements_kind.fast_smi_only, a);
+  a.push(3, 4, 5);
+  assertKind(elements_kind.fast_smi_only, a);
+  assertEquals([1, 2, 3, 4, 5], a);
+}
+
+// Test that Array.splice() and Array.slice() return correct ElementsKinds.
+if (support_smi_only_arrays) {
+  var a = ["foo", "bar"];
+  assertKind(elements_kind.fast, a);
+  var b = a.splice(0, 1);
+  assertKind(elements_kind.fast, b);
+  var c = a.slice(0, 1);
+  assertKind(elements_kind.fast, c);
+}
+
+// Throw away type information in the ICs for next stress run.
+gc();
diff --git a/test/mjsunit/elements-transition-hoisting.js b/test/mjsunit/elements-transition-hoisting.js
new file mode 100644
index 0000000..5e78f10
--- /dev/null
+++ b/test/mjsunit/elements-transition-hoisting.js
@@ -0,0 +1,211 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+
+// Ensure that ElementsKind transitions in various situations are hoisted (or
+// not hoisted) correctly, don't change the semantics of programs, and don't
+// trigger deopts through hoisting in important situations.
+
+support_smi_only_arrays = %HasFastSmiOnlyElements(new Array(1,2,3,4,5,6));
+
+if (support_smi_only_arrays) {
+  print("Tests include smi-only arrays.");
+} else {
+  print("Tests do NOT include smi-only arrays.");
+}
+
+// Force existing ICs from previous stress runs to be flushed, otherwise the
+// assumptions in this test about when deoptimizations get triggered are not
+// valid.
+gc();
+
+if (support_smi_only_arrays) {
+  // Make sure that a simple elements array transition inside a loop before
+  // stores to an array gets hoisted in a way that doesn't generate a deopt
+  // in simple cases.
+  function testDoubleConversion4(a) {
+    var object = new Object();
+    a[0] = 0;
+    var count = 3;
+    do {
+      a[0] = object;
+    } while (--count > 0);
+  }
+
+  testDoubleConversion4(new Array(5));
+  %OptimizeFunctionOnNextCall(testDoubleConversion4);
+  testDoubleConversion4(new Array(5));
+  testDoubleConversion4(new Array(5));
+  assertTrue(2 != %GetOptimizationStatus(testDoubleConversion4));
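+  // A %GetOptimizationStatus result of 2 means "not optimized", so the
+  // assert above checks that hoisting did not cause a deopt.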
+
+  // Make sure that non-element related map checks that are not preceded by
+  // transitions in a loop still get hoisted in a way that doesn't generate a
+  // deopt in simple cases.
+  function testExactMapHoisting(a) {
+    var object = new Object();
+    a.foo = 0;
+    a[0] = 0;
+    a[1] = 1;
+    var count = 3;
+    do {
+      a.foo = object; // This map check should be hoistable
+      a[1] = object;
+      result = a.foo == object && a[1] == object;
+    } while (--count > 0);
+  }
+
+  testExactMapHoisting(new Array(5));
+  %OptimizeFunctionOnNextCall(testExactMapHoisting);
+  testExactMapHoisting(new Array(5));
+  testExactMapHoisting(new Array(5));
+  assertTrue(2 != %GetOptimizationStatus(testExactMapHoisting));
+
+  // Make sure that non-element related map checks do NOT get hoisted if they
+  // depend on an elements transition before them and it's not possible to hoist
+  // that transition.
+  function testExactMapHoisting2(a) {
+    var object = new Object();
+    a.foo = 0;
+    a[0] = 0;
+    a[1] = 1;
+    var count = 3;
+    do {
+      if (a.bar === undefined) {
+        a[1] = 2.5;
+      }
+      a.foo = object; // This map check should NOT be hoistable because it
+                      // includes a check for the FAST_ELEMENTS map as well as
+                      // the FAST_DOUBLE_ELEMENTS map, which depends on the
+                      // double transition above in the if, which cannot be
+                      // hoisted.
+    } while (--count > 0);
+  }
+
+  testExactMapHoisting2(new Array(5));
+  %OptimizeFunctionOnNextCall(testExactMapHoisting2);
+  testExactMapHoisting2(new Array(5));
+  testExactMapHoisting2(new Array(5));
+  assertTrue(2 != %GetOptimizationStatus(testExactMapHoisting2));
+
+  // Make sure that non-element related map checks do get hoisted if they use
+  // the transitioned map for the check and all transitions that they depend
+  // upon can be hoisted, too.
+  function testExactMapHoisting3(a) {
+    var object = new Object();
+    a.foo = 0;
+    a[0] = 0;
+    a[1] = 1;
+    var count = 3;
+    do {
+      a[1] = 2.5;
+      a.foo = object; // This map check should be hoistable because all elements
+                      // transitions in the loop can also be hoisted.
+    } while (--count > 0);
+  }
+
+  var add_transition = new Array(5);
+  add_transition.foo = 0;
+  add_transition[0] = new Object(); // For FAST_ELEMENT transition to be created
+  testExactMapHoisting3(new Array(5));
+  %OptimizeFunctionOnNextCall(testExactMapHoisting3);
+  testExactMapHoisting3(new Array(5));
+  testExactMapHoisting3(new Array(5));
+  assertTrue(2 != %GetOptimizationStatus(testExactMapHoisting3));
+
+  function testDominatingTransitionHoisting1(a) {
+    var object = new Object();
+    a[0] = 0;
+    var count = 3;
+    do {
+      if (a.baz != true) {
+        a[1] = 2.5;
+      }
+      a[0] = object;
+    } while (--count > 3);
+  }
+
+  testDominatingTransitionHoisting1(new Array(5));
+  %OptimizeFunctionOnNextCall(testDominatingTransitionHoisting1);
+  testDominatingTransitionHoisting1(new Array(5));
+  testDominatingTransitionHoisting1(new Array(5));
+  assertTrue(2 != %GetOptimizationStatus(testDominatingTransitionHoisting1));
+
+  function testHoistingWithSideEffect(a) {
+    var object = new Object();
+    a[0] = 0;
+    var count = 3;
+    do {
+      assertTrue(true);
+      a[0] = object;
+    } while (--count > 3);
+  }
+
+  testHoistingWithSideEffect(new Array(5));
+  %OptimizeFunctionOnNextCall(testHoistingWithSideEffect);
+  testHoistingWithSideEffect(new Array(5));
+  testHoistingWithSideEffect(new Array(5));
+  assertTrue(2 != %GetOptimizationStatus(testHoistingWithSideEffect));
+
+  function testStraightLineDupeElinination(a,b,c,d,e,f) {
+    var count = 3;
+    do {
+      assertTrue(true);
+      a[0] = b;
+      a[1] = c;
+      a[2] = d;
+      assertTrue(true);
+      a[3] = e; // TransitionElementsKind should be eliminated despite call.
+      a[4] = f;
+    } while (--count > 3);
+  }
+
+  testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),0,0,0,0,.5);
+  testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),0,0,0,.5,0);
+  testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),0,0,.5,0,0);
+  testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),0,.5,0,0,0);
+  testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),.5,0,0,0,0);
+  testStraightLineDupeElinination(new Array(.1,.1,.1,.1,.1),0,0,0,0,.5);
+  testStraightLineDupeElinination(new Array(.1,.1,.1,.1,.1),0,0,0,.5,0);
+  testStraightLineDupeElinination(new Array(.1,.1,.1,.1,.1),0,0,.5,0,0);
+  testStraightLineDupeElinination(new Array(.1,.1,.1,.1,.1),0,.5,0,0,0);
+  testStraightLineDupeElinination(new Array(.1,.1,.1,.1,.1),.5,0,0,0,0);
+  testStraightLineDupeElinination(new Array(5),.5,0,0,0,0);
+  testStraightLineDupeElinination(new Array(5),0,.5,0,0,0);
+  testStraightLineDupeElinination(new Array(5),0,0,.5,0,0);
+  testStraightLineDupeElinination(new Array(5),0,0,0,.5,0);
+  testStraightLineDupeElinination(new Array(5),0,0,0,0,.5);
+  testStraightLineDupeElinination(new Array(5),.5,0,0,0,0);
+  testStraightLineDupeElinination(new Array(5),0,.5,0,0,0);
+  testStraightLineDupeElinination(new Array(5),0,0,.5,0,0);
+  testStraightLineDupeElinination(new Array(5),0,0,0,.5,0);
+  testStraightLineDupeElinination(new Array(5),0,0,0,0,.5);
+  %OptimizeFunctionOnNextCall(testStraightLineDupeElinination);
+  testStraightLineDupeElinination(new Array(5));
+  testStraightLineDupeElinination(new Array(5));
+  assertTrue(2 != %GetOptimizationStatus(testStraightLineDupeElinination));
+}
diff --git a/test/mjsunit/elements-transition.js b/test/mjsunit/elements-transition.js
new file mode 100644
index 0000000..60e051b
--- /dev/null
+++ b/test/mjsunit/elements-transition.js
@@ -0,0 +1,113 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --smi-only-arrays
+
+support_smi_only_arrays = %HasFastSmiOnlyElements(new Array(1,2,3,4,5,6,7,8));
+
+if (support_smi_only_arrays) {
+  print("Tests include smi-only arrays.");
+} else {
+  print("Tests do NOT include smi-only arrays.");
+}
+
+if (support_smi_only_arrays) {
+  function test(test_double, test_object, set, length) {
+    // We apply the same operations to two identical arrays.  The first array
+    // triggers an IC miss, upon which the conversion stub is generated, but the
+    // actual conversion is done in runtime.  The second array, arriving at
+    // the previously patched IC, is then converted using the conversion stub.
+    var array_1 = new Array(length);
+    var array_2 = new Array(length);
+
+    assertTrue(%HasFastSmiOnlyElements(array_1));
+    assertTrue(%HasFastSmiOnlyElements(array_2));
+    for (var i = 0; i < length; i++) {
+      if (i == length - 5 && test_double) {
+        // Trigger conversion to fast double elements at length-5.
+        set(array_1, i, 0.5);
+        set(array_2, i, 0.5);
+        assertTrue(%HasFastDoubleElements(array_1));
+        assertTrue(%HasFastDoubleElements(array_2));
+      } else if (i == length - 3 && test_object) {
+        // Trigger conversion to fast object elements at length-3.
+        set(array_1, i, 'object');
+        set(array_2, i, 'object');
+        assertTrue(%HasFastElements(array_1));
+        assertTrue(%HasFastElements(array_2));
+      } else if (i != length - 7) {
+        // Set the element to an integer but leave a hole at length-7.
+        set(array_1, i, 2*i+1);
+        set(array_2, i, 2*i+1);
+      }
+    }
+
+    for (var i = 0; i < length; i++) {
+      if (i == length - 5 && test_double) {
+        assertEquals(0.5, array_1[i]);
+        assertEquals(0.5, array_2[i]);
+      } else if (i == length - 3 && test_object) {
+        assertEquals('object', array_1[i]);
+        assertEquals('object', array_2[i]);
+      } else if (i != length - 7) {
+        assertEquals(2*i+1, array_1[i]);
+        assertEquals(2*i+1, array_2[i]);
+      } else {
+        assertEquals(undefined, array_1[i]);
+        assertEquals(undefined, array_2[i]);
+      }
+    }
+
+    assertEquals(length, array_1.length);
+    assertEquals(length, array_2.length);
+  }
+
+  test(false, false, function(a,i,v){ a[i] = v; }, 20);
+  test(true,  false, function(a,i,v){ a[i] = v; }, 20);
+  test(false, true,  function(a,i,v){ a[i] = v; }, 20);
+  test(true,  true,  function(a,i,v){ a[i] = v; }, 20);
+
+  test(false, false, function(a,i,v){ a[i] = v; }, 10000);
+  test(true,  false, function(a,i,v){ a[i] = v; }, 10000);
+  test(false, true,  function(a,i,v){ a[i] = v; }, 10000);
+  test(true,  true,  function(a,i,v){ a[i] = v; }, 10000);
+
+  // Check COW arrays
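+  // (COW arrays share a copy-on-write backing store until first written.)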
+  function get_cow() { return [1, 2, 3]; }
+
+  function transition(x) { x[0] = 1.5; }
+
+  var ignore = get_cow();
+  transition(ignore);  // Handled by runtime.
+  var a = get_cow();
+  var b = get_cow();
+  transition(a);  // Handled by IC.
+  assertEquals(1.5, a[0]);
+  assertEquals(1, b[0]);
+} else {
+  print("Test skipped because smi only arrays are not supported.");
+}
diff --git a/test/mjsunit/error-tostring.js b/test/mjsunit/error-tostring.js
new file mode 100644
index 0000000..a285641
--- /dev/null
+++ b/test/mjsunit/error-tostring.js
@@ -0,0 +1,85 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Test default string representation of an Error object.
+
+var e = new Error();
+assertEquals('Error', e.toString());
+
+
+// Test printing of cyclic errors which return the empty string for
+// compatibility with Safari and Firefox.
+
+e = new Error();
+e.name = e;
+e.message = e;
+e.stack = "Does not occur in output";
+e.arguments = "Does not occur in output";
+e.type = "Does not occur in output";
+assertEquals('', e.toString());
+
+e = new Error();
+e.name = [ e ];
+e.message = [ e ];
+e.stack = "Does not occur in output";
+e.arguments = "Does not occur in output";
+e.type = "Does not occur in output";
+assertEquals('', e.toString());
+
+
+// Test the sequence in which getters and toString operations are called
+// on a given Error object.  Verify the produced string representation.
+
+function testErrorToString(nameValue, messageValue) {
+  var seq = [];
+  var e = {
+    get name() {
+      seq.push(1);
+      return (nameValue === undefined) ? nameValue : {
+        toString: function() { seq.push(2); return nameValue; }
+      };
+    },
+    get message() {
+      seq.push(3);
+      return (messageValue === undefined) ? messageValue : {
+        toString: function() { seq.push(4); return messageValue; }
+      };
+    }
+  };
+  var string = Error.prototype.toString.call(e);
+  return [string,seq];
+}
+
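+// The seq array records call order: 1 = name getter, 2 = name's toString,
+// 3 = message getter, 4 = message's toString.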
+assertEquals(["Error",[1,3]], testErrorToString(undefined, undefined));
+assertEquals(["e1",[1,2,3]], testErrorToString("e1", undefined));
+assertEquals(["e1: null",[1,2,3,4]], testErrorToString("e1", null));
+assertEquals(["e1",[1,2,3,4]], testErrorToString("e1", ""));
+assertEquals(["Error: e2",[1,3,4]], testErrorToString(undefined, "e2"));
+assertEquals(["null: e2",[1,2,3,4]], testErrorToString(null, "e2"));
+assertEquals(["e2",[1,2,3,4]], testErrorToString("", "e2"));
+assertEquals(["e1: e2",[1,2,3,4]], testErrorToString("e1", "e2"));
diff --git a/test/mjsunit/eval.js b/test/mjsunit/eval.js
index b6284ba..100f216 100644
--- a/test/mjsunit/eval.js
+++ b/test/mjsunit/eval.js
@@ -39,7 +39,7 @@
 
 try {
   eval('hest 7 &*^*&^');
-  assertTrue(false, 'Did not throw on syntax error.');
+  assertUnreachable('Did not throw on syntax error.');
 } catch (e) {
   assertEquals('SyntaxError', e.name);
 }
@@ -108,6 +108,7 @@
 result =
   (function() {
     var foo = 2;
+    // Should be non-direct call.
     return x.eval('foo');
   })();
 assertEquals(0, result);
@@ -115,12 +116,33 @@
 foo = 0;
 result =
   (function() {
+    var foo = 2;
+    // Should be non-direct call.
+    return (1,eval)('foo');
+  })();
+assertEquals(0, result);
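+// The comma expression yields eval as a plain value, so the call above is
+// indirect and 'foo' resolves in the global scope.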
+
+foo = 0;
+result =
+  (function() {
     var eval = function(x) { return x; };
     var foo = eval(2);
+    // Should be non-direct call.
     return e('foo');
   })();
 assertEquals(0, result);
 
+foo = 0;
+result =
+  (function() {
+    var foo = 2;
+    // Should be direct call.
+    with ({ eval : e }) {
+      return eval('foo');
+    }
+  })();
+assertEquals(2, result);
+
 result =
   (function() {
     var eval = function(x) { return 2 * x; };
@@ -135,19 +157,17 @@
   })();
 assertEquals(this, result);
 
-result =
-  (function() {
-    var obj = { f: function(eval) { return eval("this"); } };
-    return obj.f(eval);
-  })();
-assertEquals(this, result);
+(function() {
+  var obj = { f: function(eval) { return eval("this"); } };
+  result = obj.f(eval);
+  assertEquals(obj, result);
+})();
 
-result =
-  (function() {
-    var obj = { f: function(eval) { arguments; return eval("this"); } };
-    return obj.f(eval);
-  })();
-assertEquals(this, result);
+(function() {
+  var obj = { f: function(eval) { arguments; return eval("this"); } };
+  result = obj.f(eval);
+  assertEquals(obj, result);
+})();
 
 eval = function(x) { return 2 * x; };
 result =
@@ -156,6 +176,9 @@
   })();
 assertEquals(4, result);
 
+
+
+
 // Regression test: calling a function named eval found in a context that is
 // not the global context should get the global object as receiver.
 result =
diff --git a/test/mjsunit/external-array.js b/test/mjsunit/external-array.js
index 81c6cfe..32f78a7 100644
--- a/test/mjsunit/external-array.js
+++ b/test/mjsunit/external-array.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,6 +43,50 @@
 assertEquals(0, a[0]);
 assertEquals(0, a[1]);
 
+// No-parameter constructor should fail right now.
+function abfunc1() {
+  return new ArrayBuffer();
+}
+assertThrows(abfunc1);
+
+// Test derivation from an ArrayBuffer
+var ab = new ArrayBuffer(12);
+var derived_uint8 = new Uint8Array(ab);
+assertEquals(12, derived_uint8.length);
+var derived_uint32 = new Uint32Array(ab);
+assertEquals(3, derived_uint32.length);
+var derived_uint32_2 = new Uint32Array(ab,4);
+assertEquals(2, derived_uint32_2.length);
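+// Without an explicit length the view spans the rest of the buffer:
+// (12 - 4) / 4 = 2 elements here.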
+var derived_uint32_3 = new Uint32Array(ab,4,1);
+assertEquals(1, derived_uint32_3.length);
+
+// If a given byteOffset and length reference an area beyond the end of the
+// ArrayBuffer, an exception is raised.
+function abfunc3() {
+  new Uint32Array(ab,4,3);
+}
+assertThrows(abfunc3);
+function abfunc4() {
+  new Uint32Array(ab,16);
+}
+assertThrows(abfunc4);
+
+// The given byteOffset must be a multiple of the element size of the specific
+// type, otherwise an exception is raised.
+function abfunc5() {
+  new Uint32Array(ab,5);
+}
+assertThrows(abfunc5);
+
+// If length is not explicitly specified, the length of the ArrayBuffer minus
+// the byteOffset must be a multiple of the element size of the specific type,
+// or an exception is raised.
+var ab2 = new ArrayBuffer(13);
+function abfunc6() {
+  new Uint32Array(ab2,4);
+}
+assertThrows(abfunc6);
+
 // Test the correct behavior of the |BYTES_PER_ELEMENT| property (which is
 // "constant", but not read-only).
 a = new Int32Array(2);
@@ -273,3 +317,37 @@
   %DeoptimizeFunction(array_load_set_smi_check2);
   gc();  // Makes V8 forget about type information for array_load_set_smi_check.
 }
+
+// Check handling of undefined in 32- and 64-bit external float arrays.
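+// Storing undefined must produce NaN whether the store is handled by the
+// runtime, by ICs, or by optimized code.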
+
+function store_float32_undefined(ext_array) {
+  ext_array[0] = undefined;
+}
+
+var float32_array = new Float32Array(1);
+// Make sure runtime does it right
+store_float32_undefined(float32_array);
+assertTrue(isNaN(float32_array[0]));
+// Make sure the ICs do it right
+store_float32_undefined(float32_array);
+assertTrue(isNaN(float32_array[0]));
+// Make sure that Crankshaft does it right.
+%OptimizeFunctionOnNextCall(store_float32_undefined);
+store_float32_undefined(float32_array);
+assertTrue(isNaN(float32_array[0]));
+
+function store_float64_undefined(ext_array) {
+  ext_array[0] = undefined;
+}
+
+var float64_array = new Float64Array(1);
+// Make sure runtime does it right
+store_float64_undefined(float64_array);
+assertTrue(isNaN(float64_array[0]));
+// Make sure the ICs do it right
+store_float64_undefined(float64_array);
+assertTrue(isNaN(float64_array[0]));
+// Make sure that Crankshaft does it right.
+%OptimizeFunctionOnNextCall(store_float64_undefined);
+store_float64_undefined(float64_array);
+assertTrue(isNaN(float64_array[0]));
diff --git a/test/mjsunit/function-bind.js b/test/mjsunit/function-bind.js
index e9d0221..4a8f2d2 100644
--- a/test/mjsunit/function-bind.js
+++ b/test/mjsunit/function-bind.js
@@ -29,29 +29,31 @@
 
 // Simple tests.
 function foo(x, y, z) {
-  return x + y + z;
+  return [this, arguments.length, x];
 }
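+// foo reports its receiver, actual argument count, and first argument, so
+// each assert can check all three at once.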
 
+assertEquals(3, foo.length);
+
 var f = foo.bind(foo);
-assertEquals(3, f(1, 1, 1));
+assertEquals([foo, 3, 1], f(1, 2, 3));
 assertEquals(3, f.length);
 
-f = foo.bind(foo, 2);
-assertEquals(4, f(1, 1));
+f = foo.bind(foo, 1);
+assertEquals([foo, 3, 1], f(2, 3));
 assertEquals(2, f.length);
 
-f = foo.bind(foo, 2, 2);
-assertEquals(5, f(1));
+f = foo.bind(foo, 1, 2);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo, 2, 2, 2);
-assertEquals(6, f());
+f = foo.bind(foo, 1, 2, 3);
+assertEquals([foo, 3, 1], f());
 assertEquals(0, f.length);
 
 // Test that length works correctly even if more than the actual number
 // of arguments are given when binding.
 f = foo.bind(foo, 1, 2, 3, 4, 5, 6, 7, 8, 9);
-assertEquals(6, f());
+assertEquals([foo, 9, 1], f());
 assertEquals(0, f.length);
 
 // Use a different bound object.
@@ -78,65 +80,98 @@
 // When only giving the thisArg, any number of binds should have
 // the same effect.
 f = foo.bind(foo);
-assertEquals(3, f(1, 1, 1));
-f = foo.bind(foo).bind(foo).bind(foo).bind(foo);
-assertEquals(3, f(1, 1, 1));
+assertEquals([foo, 3, 1], f(1, 2, 3));
+
+var not_foo = {};
+f = foo.bind(foo).bind(not_foo).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(1, 2, 3));
 assertEquals(3, f.length);
 
 // Giving bound parameters should work at any place in the chain.
-f = foo.bind(foo, 1).bind(foo).bind(foo).bind(foo);
-assertEquals(3, f(1, 1));
+f = foo.bind(foo, 1).bind(not_foo).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(2, 3));
 assertEquals(2, f.length);
 
-f = foo.bind(foo).bind(foo, 1).bind(foo).bind(foo);
-assertEquals(3, f(1, 1));
+f = foo.bind(foo).bind(not_foo, 1).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(2, 3));
 assertEquals(2, f.length);
 
-f = foo.bind(foo).bind(foo).bind(foo,1 ).bind(foo);
-assertEquals(3, f(1, 1));
+f = foo.bind(foo).bind(not_foo).bind(not_foo,1 ).bind(not_foo);
+assertEquals([foo, 3, 1], f(2, 3));
 assertEquals(2, f.length);
 
-f = foo.bind(foo).bind(foo).bind(foo).bind(foo, 1);
-assertEquals(3, f(1, 1));
+f = foo.bind(foo).bind(not_foo).bind(not_foo).bind(not_foo, 1);
+assertEquals([foo, 3, 1], f(2, 3));
 assertEquals(2, f.length);
 
-// Several parameters can be given, and given in different bind invokations.
-f = foo.bind(foo, 1, 1).bind(foo).bind(foo).bind(foo);
-assertEquals(3, f(1));
+// Several parameters can be given, and given in different bind invocations.
+f = foo.bind(foo, 1, 2).bind(not_foo).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo).bind(foo, 1, 1).bind(foo).bind(foo);
-assertEquals(3, f(1));
+f = foo.bind(foo).bind(not_foo, 1, 2).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(1));
 assertEquals(1, f.length);
 
-f = foo.bind(foo).bind(foo, 1, 1).bind(foo).bind(foo);
-assertEquals(3, f(1));
+f = foo.bind(foo).bind(not_foo, 1, 2).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo).bind(foo).bind(foo, 1, 1).bind(foo);
-assertEquals(3, f(1));
+f = foo.bind(foo).bind(not_foo).bind(not_foo, 1, 2).bind(not_foo);
+assertEquals([foo, 3, 1], f(1));
 assertEquals(1, f.length);
 
-f = foo.bind(foo).bind(foo).bind(foo).bind(foo, 1, 1);
-assertEquals(3, f(1));
+f = foo.bind(foo).bind(not_foo).bind(not_foo).bind(not_foo, 1, 2);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo, 1).bind(foo, 1).bind(foo).bind(foo);
-assertEquals(3, f(1));
+f = foo.bind(foo, 1).bind(not_foo, 2).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo, 1).bind(foo).bind(foo, 1).bind(foo);
-assertEquals(3, f(1));
+f = foo.bind(foo, 1).bind(not_foo).bind(not_foo, 2).bind(not_foo);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo, 1).bind(foo).bind(foo).bind(foo, 1);
-assertEquals(3, f(1));
+f = foo.bind(foo, 1).bind(not_foo).bind(not_foo).bind(not_foo, 2);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo).bind(foo, 1).bind(foo).bind(foo, 1);
-assertEquals(3, f(1));
+f = foo.bind(foo).bind(not_foo, 1).bind(not_foo).bind(not_foo, 2);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
+// The wrong number of arguments can be given to bound functions too.
+f = foo.bind(foo);
+assertEquals(3, f.length);
+assertEquals([foo, 0, undefined], f());
+assertEquals([foo, 1, 1], f(1));
+assertEquals([foo, 2, 1], f(1, 2));
+assertEquals([foo, 3, 1], f(1, 2, 3));
+assertEquals([foo, 4, 1], f(1, 2, 3, 4));
+
+f = foo.bind(foo, 1);
+assertEquals(2, f.length);
+assertEquals([foo, 1, 1], f());
+assertEquals([foo, 2, 1], f(2));
+assertEquals([foo, 3, 1], f(2, 3));
+assertEquals([foo, 4, 1], f(2, 3, 4));
+
+f = foo.bind(foo, 1, 2);
+assertEquals(1, f.length);
+assertEquals([foo, 2, 1], f());
+assertEquals([foo, 3, 1], f(3));
+assertEquals([foo, 4, 1], f(3, 4));
+
+f = foo.bind(foo, 1, 2, 3);
+assertEquals(0, f.length);
+assertEquals([foo, 3, 1], f());
+assertEquals([foo, 4, 1], f(4));
+
+f = foo.bind(foo, 1, 2, 3, 4);
+assertEquals(0, f.length);
+assertEquals([foo, 4, 1], f());
+
 // Test constructor calls.
 
 function bar(x, y, z) {
@@ -171,13 +206,91 @@
 
 
 // Test bind chains when used as a constructor.
-
 f = bar.bind(bar, 1).bind(bar, 2).bind(bar, 3);
 obj2 = new f();
 assertEquals(1, obj2.x);
 assertEquals(2, obj2.y);
 assertEquals(3, obj2.z);
 
-// Test instanceof obj2 is bar, not f.
+// Test that obj2 is an instance of both bar and f.
 assertTrue(obj2 instanceof bar);
-assertFalse(obj2 instanceof f);
+assertTrue(obj2 instanceof f);
+
+// This-args are not relevant to instanceof.
+f = bar.bind(foo.prototype, 1).
+    bind(String.prototype, 2).
+    bind(Function.prototype, 3);
+var obj3 = new f();
+assertTrue(obj3 instanceof bar);
+assertTrue(obj3 instanceof f);
+assertFalse(obj3 instanceof foo);
+assertFalse(obj3 instanceof Function);
+assertFalse(obj3 instanceof String);
+
+// thisArg is converted to an object.
+f = foo.bind(undefined);
+assertEquals([this, 0, undefined], f());
+
+f = foo.bind(null);
+assertEquals([this, 0, undefined], f());
+
+f = foo.bind(42);
+assertEquals([Object(42), 0, undefined], f());
+
+f = foo.bind("foo");
+assertEquals([Object("foo"), 0, undefined], f());
+
+f = foo.bind(true);
+assertEquals([Object(true), 0, undefined], f());
+
+// Strict functions don't convert thisArg.
+function soo(x, y, z) {
+  "use strict";
+  return [this, arguments.length, x];
+}
+
+var s = soo.bind(undefined);
+assertEquals([undefined, 0, undefined], s());
+
+s = soo.bind(null);
+assertEquals([null, 0, undefined], s());
+
+s = soo.bind(42);
+assertEquals([42, 0, undefined], s());
+
+s = soo.bind("foo");
+assertEquals(["foo", 0, undefined], s());
+
+s = soo.bind(true);
+assertEquals([true, 0, undefined], s());
+
+// Test that .arguments and .caller are poisoned according to the ES5 spec.
+
+// Check that property descriptors are correct (unconfigurable, unenumerable,
+// and both get and set are the ThrowTypeError function).
+var cdesc = Object.getOwnPropertyDescriptor(f, "caller");
+var adesc = Object.getOwnPropertyDescriptor(f, "arguments");
+
+assertFalse(cdesc.enumerable);
+assertFalse(cdesc.configurable);
+
+assertFalse(adesc.enumerable);
+assertFalse(adesc.configurable);
+
+assertSame(cdesc.get, cdesc.set);
+assertSame(cdesc.get, adesc.get);
+assertSame(cdesc.get, adesc.set);
+
+assertTrue(cdesc.get instanceof Function);
+assertEquals(0, cdesc.get.length);
+assertThrows(cdesc.get, TypeError);
+
+assertThrows(function() { return f.caller; }, TypeError);
+assertThrows(function() { f.caller = 42; }, TypeError);
+assertThrows(function() { return f.arguments; }, TypeError);
+assertThrows(function() { f.arguments = 42; }, TypeError);
+
+// Shouldn't throw. Accessing a function's caller must throw if the caller
+// is strict and the callee isn't. A bound function is built-in, but not
+// considered strict.
+(function foo() { return foo.caller; }).bind()();
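
A minimal standalone sketch of the chain semantics the assertions above rely
on, assuming the mjsunit harness (assertEquals) is loaded; the names report
and receiver are illustrative only:

// Only the innermost bind fixes `this`; outer binds can merely prepend
// arguments, which accumulate innermost-first.
function report(x, y) { return [this, x, y]; }
var receiver = {};
var g = report.bind(receiver, 1).bind({ignored: true}, 2);
assertEquals([receiver, 1, 2], g());  // this is receiver; args are [1, 2]
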
diff --git a/test/mjsunit/function-call.js b/test/mjsunit/function-call.js
index 06479ad..26890ed 100644
--- a/test/mjsunit/function-call.js
+++ b/test/mjsunit/function-call.js
@@ -68,8 +68,7 @@
      String.prototype.toUpperCase,
      String.prototype.toLocaleUpperCase,
      String.prototype.trim,
-     Number.prototype.toLocaleString,
-     Error.prototype.toString];
+     Number.prototype.toLocaleString];
 
 // Non generic natives do not work on any input other than the specific
 // type, but since this change will allow call to be invoked with undefined
@@ -134,7 +133,8 @@
      Date.prototype.toJSON,
      RegExp.prototype.exec,
      RegExp.prototype.test,
-     RegExp.prototype.toString];
+     RegExp.prototype.toString,
+     Error.prototype.toString];
 
 
 // Mapping functions.
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/function-named-self-reference.js
similarity index 82%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/function-named-self-reference.js
index d3f2e35..5b03b09 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/function-named-self-reference.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,21 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+var fn = function fn(val) {
+  if (val) return val;
 
-assertThrows('eval(delete eval)');
+  %OptimizeFunctionOnNextCall(fn);
 
+  function run(val) {
+    var res = fn((val + 1) << 1);
+
+    return res;
+  }
+
+  return run(0);
+}
+
+var res = fn();
+assertEquals(2, res);
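
The test above hinges on a named function expression seeing its own name; a
smaller sketch of that binding without the natives syntax (mjsunit harness
assumed, names illustrative):

var alias = function inner(n) {
  // `inner` resolves to the function itself, even though only `alias` is
  // visible in the enclosing scope.
  return n > 0 ? n : inner(n + 2);
};
assertEquals(1, alias(-1));
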
diff --git a/test/mjsunit/fuzz-natives.js b/test/mjsunit/fuzz-natives.js
index ff6677e..2965e74 100644
--- a/test/mjsunit/fuzz-natives.js
+++ b/test/mjsunit/fuzz-natives.js
@@ -163,6 +163,9 @@
   "PromoteScheduledException": true,
   "DeleteHandleScopeExtensions": true,
 
+  // Vararg with minimum number > 0.
+  "Call": true,
+
   // Requires integer arguments to be non-negative.
   "Apply": true,
 
@@ -181,8 +184,9 @@
   "RegExpConstructResult": true,
   "_RegExpConstructResult": true,
 
-  // This function performs some checks compile time (it requires its first
-  // argument to be a compile time smi).
+  // This functions perform some checks compile time (they require one of their
+  // arguments to be a compile time smi).
+  "_DateField": true,
   "_GetFromCache": true,
 
   // This function expects its first argument to be a non-smi.
diff --git a/test/mjsunit/get-own-property-descriptor.js b/test/mjsunit/get-own-property-descriptor.js
index abb2420..159c63b 100644
--- a/test/mjsunit/get-own-property-descriptor.js
+++ b/test/mjsunit/get-own-property-descriptor.js
@@ -73,7 +73,7 @@
 var a = new String('foobar');
 for (var i = 0; i < a.length; i++) {
   var descStringObject = Object.getOwnPropertyDescriptor(a, i);
-  assertFalse(descStringObject.enumerable);
+  assertTrue(descStringObject.enumerable);
   assertFalse(descStringObject.configurable);
   assertFalse(descStringObject.writable);
   assertEquals(descStringObject.value, a.substring(i, i+1));
diff --git a/test/mjsunit/getter-in-value-prototype.js b/test/mjsunit/getter-in-value-prototype.js
index b55320a..abe2cb1 100644
--- a/test/mjsunit/getter-in-value-prototype.js
+++ b/test/mjsunit/getter-in-value-prototype.js
@@ -31,5 +31,5 @@
 // JSObject.
 
 String.prototype.__defineGetter__('x', function() { return this; });
-assertEquals('asdf', 'asdf'.x);
+assertEquals(Object('asdf'), 'asdf'.x);
 
diff --git a/test/mjsunit/global-const-var-conflicts.js b/test/mjsunit/global-const-var-conflicts.js
index d38d0ee..2fca96f 100644
--- a/test/mjsunit/global-const-var-conflicts.js
+++ b/test/mjsunit/global-const-var-conflicts.js
@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Check that dynamically introducing conflicting consts/vars
-// leads to exceptions.
+// is silently ignored (and does not lead to exceptions).
 
 var caught = 0;
 
@@ -46,12 +46,12 @@
 try { eval("const c"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
 assertTrue(typeof c == 'undefined');
 try { eval("const c = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertTrue(typeof c == 'undefined');
+assertEquals(1, c);
 
 eval("var d = 0");
 try { eval("const d"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(0, d);
+assertEquals(undefined, d);
 try { eval("const d = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(0, d);
+assertEquals(1, d);
 
-assertEquals(8, caught);
+assertEquals(0, caught);
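
A compact restatement of the behavior this test pins down (mjsunit harness
assumed, name illustrative): a const that conflicts with a dynamically
introduced var is accepted silently, and its initializer takes effect.

eval("var conflicting = 0");
try {
  eval("const conflicting = 1");  // conflicts with the var, but does not throw
} catch (e) {
  assertUnreachable();
}
assertEquals(1, conflicting);
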
diff --git a/test/mjsunit/harmony/block-conflicts.js b/test/mjsunit/harmony/block-conflicts.js
index 8d3de6f..8388504 100644
--- a/test/mjsunit/harmony/block-conflicts.js
+++ b/test/mjsunit/harmony/block-conflicts.js
@@ -25,10 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-block-scoping
+// Flags: --harmony-scoping
 
 // Test for conflicting variable bindings.
 
+// TODO(ES6): properly activate extended mode
+"use strict";
+
 function CheckException(e) {
   var string = e.toString();
   assertTrue(string.indexOf("has already been declared") >= 0 ||
@@ -80,6 +83,11 @@
                  "let x = function() {}",
                  "let x, y",
                  "let y, x",
+                 "const x = 0",
+                 "const x = undefined",
+                 "const x = function() {}",
+                 "const x = 2, y = 3",
+                 "const y = 4, x = 5",
                  ];
 var varbinds = [ "var x",
                  "var x = 0",
@@ -122,5 +130,5 @@
 
 // Test conflicting parameter/var bindings.
 for (var v = 0; v < varbinds.length; ++v) {
-  TestConflict('(function (x) { ' + varbinds[v] + '; })()');
+  TestNoConflict('(function (x) { ' + varbinds[v] + '; })()');
 }
diff --git a/test/mjsunit/harmony/block-const-assign.js b/test/mjsunit/harmony/block-const-assign.js
new file mode 100644
index 0000000..8297a55
--- /dev/null
+++ b/test/mjsunit/harmony/block-const-assign.js
@@ -0,0 +1,131 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-scoping
+
+// Test that we throw early syntax errors in harmony mode
+// when using an immutable binding in an assignment or with
+// prefix/postfix decrement/increment operators.
+// TODO(ES6): properly activate extended mode
+"use strict";
+
+
+// Function local const.
+function constDecl0(use) {
+  return "(function() { const constvar = 1; " + use + "; });";
+}
+
+
+function constDecl1(use) {
+  return "(function() { " + use + "; const constvar = 1; });";
+}
+
+
+// Function local const, assign from eval.
+function constDecl2(use) {
+  use = "eval('(function() { " + use + " })')";
+  return "(function() { const constvar = 1; " + use + "; })();";
+}
+
+
+function constDecl3(use) {
+  use = "eval('(function() { " + use + " })')";
+  return "(function() { " + use + "; const constvar = 1; })();";
+}
+
+
+// Block local const.
+function constDecl4(use) {
+  return "(function() { { const constvar = 1; " + use + "; } });";
+}
+
+
+function constDecl5(use) {
+  return "(function() { { " + use + "; const constvar = 1; } });";
+}
+
+
+// Block local const, assign from eval.
+function constDecl6(use) {
+  use = "eval('(function() {" + use + "})')";
+  return "(function() { { const constvar = 1; " + use + "; } })();";
+}
+
+
+function constDecl7(use) {
+  use = "eval('(function() {" + use + "})')";
+  return "(function() { { " + use + "; const constvar = 1; } })();";
+}
+
+
+// Function expression name.
+function constDecl8(use) {
+  return "(function constvar() { " + use + "; });";
+}
+
+
+// Function expression name, assign from eval.
+function constDecl9(use) {
+  use = "eval('(function(){" + use + "})')";
+  return "(function constvar() { " + use + "; })();";
+}
+
+let decls = [ constDecl0,
+              constDecl1,
+              constDecl2,
+              constDecl3,
+              constDecl4,
+              constDecl5,
+              constDecl6,
+              constDecl7,
+              constDecl8,
+              constDecl9
+              ];
+let uses = [ 'constvar = 1;',
+             'constvar += 1;',
+             '++constvar;',
+             'constvar++;'
+             ];
+
+function Test(d,u) {
+  'use strict';
+  try {
+    print(d(u));
+    eval(d(u));
+  } catch (e) {
+    assertInstanceof(e, SyntaxError);
+    assertTrue(e.toString().indexOf("Assignment to constant variable") >= 0);
+    return;
+  }
+  assertUnreachable();
+}
+
+for (var d = 0; d < decls.length; ++d) {
+  for (var u = 0; u < uses.length; ++u) {
+    Test(decls[d], uses[u]);
+  }
+}
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/harmony/block-early-errors.js
similarity index 72%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/harmony/block-early-errors.js
index 2502b53..791f001 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/harmony/block-early-errors.js
@@ -25,22 +25,31 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Flags: --harmony-scoping
 
-var e = new Error();
-assertEquals('Error', e + '');
+function CheckException(e) {
+  var string = e.toString();
+  assertInstanceof(e, SyntaxError);
+  assertTrue(string.indexOf("Illegal let") >= 0);
+}
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+function Check(str) {
+  try {
+    eval("(function () { " + str + " })");
+    assertUnreachable();
+  } catch (e) {
+    CheckException(e);
+  }
+  try {
+    eval("(function () { { " + str + " } })");
+    assertUnreachable();
+  } catch (e) {
+    CheckException(e);
+  }
+}
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+// Check for early syntax errors when using let
+// declarations outside of extended mode.
+Check("let x;");
+Check("let x = 1;");
+Check("let x, y;");
diff --git a/test/mjsunit/harmony/block-for.js b/test/mjsunit/harmony/block-for.js
new file mode 100644
index 0000000..e84f0d2
--- /dev/null
+++ b/test/mjsunit/harmony/block-for.js
@@ -0,0 +1,146 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-scoping
+
+// TODO(ES6): properly activate extended mode
+"use strict";
+
+function props(x) {
+  var array = [];
+  for (let p in x) array.push(p);
+  return array.sort();
+}
+
+assertEquals(0, props({}).length);
+assertEquals(1, props({x:1}).length);
+assertEquals(2, props({x:1, y:2}).length);
+
+assertArrayEquals(["x"], props({x:1}));
+assertArrayEquals(["x", "y"], props({x:1, y:2}));
+assertArrayEquals(["x", "y", "zoom"], props({x:1, y:2, zoom:3}));
+
+assertEquals(0, props([]).length);
+assertEquals(1, props([1]).length);
+assertEquals(2, props([1,2]).length);
+
+assertArrayEquals(["0"], props([1]));
+assertArrayEquals(["0", "1"], props([1,2]));
+assertArrayEquals(["0", "1", "2"], props([1,2,3]));
+
+var o = {};
+var a = [];
+let i = "outer_i";
+let s = "outer_s";
+for (let i = 0x0020; i < 0x01ff; i+=2) {
+  let s = 'char:' + String.fromCharCode(i);
+  a.push(s);
+  o[s] = i;
+}
+assertArrayEquals(a, props(o));
+assertEquals(i, "outer_i");
+assertEquals(s, "outer_s");
+
+var a = [];
+assertEquals(0, props(a).length);
+a[Math.pow(2,30)-1] = 0;
+assertEquals(1, props(a).length);
+a[Math.pow(2,31)-1] = 0;
+assertEquals(2, props(a).length);
+a[1] = 0;
+assertEquals(3, props(a).length);
+
+var result = '';
+for (let p in {a : [0], b : 1}) { result += p; }
+assertEquals('ab', result);
+
+var result = '';
+for (let p in {a : {v:1}, b : 1}) { result += p; }
+assertEquals('ab', result);
+
+var result = '';
+for (let p in { get a() {}, b : 1}) { result += p; }
+assertEquals('ab', result);
+
+var result = '';
+for (let p in { get a() {}, set a(x) {}, b : 1}) { result += p; }
+assertEquals('ab', result);
+
+
+// Check that there is exactly one variable without initializer
+// in a for-in statement with let variables.
+// TODO(ES6): properly activate extended mode
+assertThrows("function foo() { 'use strict'; for (let in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x = 3 in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x, y in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x = 3, y in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x, y = 4 in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x = 3, y = 4 in {}) { } }", SyntaxError);
+
+
+// In a normal for statement the iteration variable is not
+// freshly allocated for each iteration.
+function closures1() {
+  let a = [];
+  for (let i = 0; i < 5; ++i) {
+    a.push(function () { return i; });
+  }
+  for (let j = 0; j < 5; ++j) {
+    assertEquals(5, a[j]());
+  }
+}
+closures1();
+
+
+function closures2() {
+  let a = [], b = [];
+  for (let i = 0, j = 10; i < 5; ++i, ++j) {
+    a.push(function () { return i; });
+    b.push(function () { return j; });
+  }
+  for (let k = 0; k < 5; ++k) {
+    assertEquals(5, a[k]());
+    assertEquals(15, b[k]());
+  }
+}
+closures2();
+
+
+// In a for-in statement the iteration variable is fresh
+// for each iteration.
+function closures3(x) {
+  let a = [];
+  for (let p in x) {
+    a.push(function () { return p; });
+  }
+  let k = 0;
+  for (let q in x) {
+    assertEquals(q, a[k]());
+    ++k;
+  }
+}
+closures3({a : [0], b : 1, c : {v : 1}, get d() {}, set e(x) {}});
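
Taken together, closures1 and closures3 pin down the binding granularity
under --harmony-scoping: a plain for statement reuses one let binding for the
whole loop, while for-in allocates a fresh binding per iteration. A condensed
sketch of both (mjsunit harness assumed, names illustrative):

function captureBindings() {
  let shared = [];
  let fresh = [];
  for (let i = 0; i < 3; i++) shared.push(function () { return i; });
  for (let p in {a: 0, b: 0}) fresh.push(function () { return p; });
  // Every closure sees the final value of the single shared binding...
  assertEquals([3, 3, 3], [shared[0](), shared[1](), shared[2]()]);
  // ...but each for-in iteration captured its own key.
  assertEquals(['a', 'b'], [fresh[0](), fresh[1]()]);
}
captureBindings();
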
diff --git a/test/mjsunit/harmony/block-leave.js b/test/mjsunit/harmony/block-leave.js
index 73eaf29..a7f6b69 100644
--- a/test/mjsunit/harmony/block-leave.js
+++ b/test/mjsunit/harmony/block-leave.js
@@ -25,7 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-block-scoping
+// Flags: --harmony-scoping
+
+// TODO(ES6): properly activate extended mode
+"use strict";
 
 // We want to test the context chain shape.  In each of the tests cases
 // below, the outer with is to force a runtime lookup of the identifier 'x'
@@ -64,31 +67,30 @@
 } catch (e) {
   caught = true;
   assertEquals(25, e);
-  with ({y:19}) {
-    assertEquals(19, y);
+  (function () {
     try {
       // NOTE: This checks that the block scope containing xx has been
       // removed from the context chain.
-      xx;
+      eval('xx');
       assertTrue(false);  // should not reach here
     } catch (e2) {
       assertTrue(e2 instanceof ReferenceError);
     }
-  }
+  })();
 }
 assertTrue(caught);
 
 
-with ({x: 'outer'}) {
+(function(x) {
   label: {
     let x = 'inner';
     break label;
   }
-  assertEquals('outer', x);
-}
+  assertEquals('outer', eval('x'));
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   label: {
     let x = 'middle';
     {
@@ -96,20 +98,20 @@
       break label;
     }
   }
-  assertEquals('outer', x);
-}
+  assertEquals('outer', eval('x'));
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   for (var i = 0; i < 10; ++i) {
     let x = 'inner' + i;
     continue;
   }
-  assertEquals('outer', x);
-}
+  assertEquals('outer', eval('x'));
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   label: for (var i = 0; i < 10; ++i) {
     let x = 'middle' + i;
     for (var j = 0; j < 10; ++j) {
@@ -117,21 +119,21 @@
       continue label;
     }
   }
-  assertEquals('outer', x);
-}
+  assertEquals('outer', eval('x'));
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   try {
     let x = 'inner';
     throw 0;
   } catch (e) {
-    assertEquals('outer', x);
+    assertEquals('outer', eval('x'));
   }
-}
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   try {
     let x = 'middle';
     {
@@ -139,27 +141,27 @@
       throw 0;
     }
   } catch (e) {
-    assertEquals('outer', x);
+    assertEquals('outer', eval('x'));
   }
-}
+})('outer');
 
 
 try {
-  with ({x: 'outer'}) {
+  (function(x) {
     try {
       let x = 'inner';
       throw 0;
     } finally {
-      assertEquals('outer', x);
+      assertEquals('outer', eval('x'));
     }
-  }
+  })('outer');
 } catch (e) {
   if (e instanceof MjsUnitAssertionError) throw e;
 }
 
 
 try {
-  with ({x: 'outer'}) {
+  (function(x) {
     try {
       let x = 'middle';
       {
@@ -167,9 +169,9 @@
         throw 0;
       }
     } finally {
-      assertEquals('outer', x);
+      assertEquals('outer', eval('x'));
     }
-  }
+  })('outer');
 } catch (e) {
   if (e instanceof MjsUnitAssertionError) throw e;
 }
@@ -179,47 +181,47 @@
 // from with.
 function f() {}
 
-with ({x: 'outer'}) {
+(function(x) {
   label: {
     let x = 'inner';
     break label;
   }
   f();  // The context could be restored from the stack after the call.
-  assertEquals('outer', x);
-}
+  assertEquals('outer', eval('x'));
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   for (var i = 0; i < 10; ++i) {
     let x = 'inner';
     continue;
   }
   f();
-  assertEquals('outer', x);
-}
+  assertEquals('outer', eval('x'));
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   try {
     let x = 'inner';
     throw 0;
   } catch (e) {
     f();
-    assertEquals('outer', x);
+    assertEquals('outer', eval('x'));
   }
-}
+})('outer');
 
 
 try {
-  with ({x: 'outer'}) {
+  (function(x) {
     try {
       let x = 'inner';
       throw 0;
     } finally {
       f();
-      assertEquals('outer', x);
+      assertEquals('outer', eval('x'));
     }
-  }
+  })('outer');
 } catch (e) {
   if (e instanceof MjsUnitAssertionError) throw e;
 }
diff --git a/test/mjsunit/harmony/block-let-crankshaft.js b/test/mjsunit/harmony/block-let-crankshaft.js
index c2fb96b..1db1792 100644
--- a/test/mjsunit/harmony/block-let-crankshaft.js
+++ b/test/mjsunit/harmony/block-let-crankshaft.js
@@ -25,10 +25,209 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-block-scoping --allow-natives-syntax
+// Flags: --harmony-scoping --allow-natives-syntax
+
+// TODO(ES6): properly activate extended mode
+"use strict";
+
+// Check that the following functions are optimizable.
+var functions = [ f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
+                  f15, f16, f17, f18, f19, f20, f21, f22, f23 ];
+
+for (var i = 0; i < functions.length; ++i) {
+  var func = functions[i];
+  print("Testing:");
+  print(func);
+  for (var j = 0; j < 10; ++j) {
+    func(12);
+  }
+  %OptimizeFunctionOnNextCall(func);
+  func(12);
+  assertTrue(%GetOptimizationStatus(func) != 2);
+}
+
+function f1() { }
+
+function f2(x) { }
+
+function f3() {
+  let x;
+}
+
+function f4() {
+  function foo() {
+  }
+}
+
+function f5() {
+  let x = 1;
+}
+
+function f6() {
+  const x = 1;
+}
+
+function f7(x) {
+  return x;
+}
+
+function f8() {
+  let x;
+  return x;
+}
+
+function f9() {
+  function x() {
+  }
+  return x;
+}
+
+function f10(x) {
+  x = 1;
+}
+
+function f11() {
+  let x;
+  x = 1;
+}
+
+function f12() {
+  function x() {};
+  x = 1;
+}
+
+function f13(x) {
+  (function() { x; });
+}
+
+function f14() {
+  let x;
+  (function() { x; });
+}
+
+function f15() {
+  function x() {
+  }
+  (function() { x; });
+}
+
+function f16() {
+  let x = 1;
+  (function() { x; });
+}
+
+function f17() {
+  const x = 1;
+  (function() { x; });
+}
+
+function f18(x) {
+  return x;
+  (function() { x; });
+}
+
+function f19() {
+  let x;
+  return x;
+  (function() { x; });
+}
+
+function f20() {
+  function x() {
+  }
+  return x;
+  (function() { x; });
+}
+
+function f21(x) {
+  x = 1;
+  (function() { x; });
+}
+
+function f22() {
+  let x;
+  x = 1;
+  (function() { x; });
+}
+
+function f23() {
+  function x() { }
+  x = 1;
+  (function() { x; });
+}
+
 
 // Test that temporal dead zone semantics for function and block scoped
-// ket bindings are handled by the optimizing compiler.
+// let bindings are handled by the optimizing compiler.
+
+function TestFunctionLocal(s) {
+  'use strict';
+  var func = eval("(function baz(){" + s + "; })");
+  print("Testing:");
+  print(func);
+  for (var i = 0; i < 5; ++i) {
+    try {
+      func();
+      assertUnreachable();
+    } catch (e) {
+      assertInstanceof(e, ReferenceError);
+    }
+  }
+  %OptimizeFunctionOnNextCall(func);
+  try {
+    func();
+    assertUnreachable();
+  } catch (e) {
+    assertInstanceof(e, ReferenceError);
+  }
+}
+
+function TestFunctionContext(s) {
+  'use strict';
+  var func = eval("(function baz(){ " + s + "; (function() { x; }); })");
+  print("Testing:");
+  print(func);
+  for (var i = 0; i < 5; ++i) {
+    print(i);
+    try {
+      func();
+      assertUnreachable();
+    } catch (e) {
+      assertInstanceof(e, ReferenceError);
+    }
+  }
+  print("optimize");
+  %OptimizeFunctionOnNextCall(func);
+  try {
+    print("call");
+    func();
+    assertUnreachable();
+  } catch (e) {
+    print("catch");
+    assertInstanceof(e, ReferenceError);
+  }
+}
+
+function TestAll(s) {
+  TestFunctionLocal(s);
+  TestFunctionContext(s);
+}
+
+// Use before initialization in declaration statement.
+TestAll('let x = x + 1');
+TestAll('let x = x += 1');
+TestAll('let x = x++');
+TestAll('let x = ++x');
+TestAll('const x = x + 1');
+
+// Use before initialization in prior statement.
+TestAll('x + 1; let x;');
+TestAll('x = 1; let x;');
+TestAll('x += 1; let x;');
+TestAll('++x; let x;');
+TestAll('x++; let x;');
+TestAll('let y = x; const x = 1;');
+
 
 function f(x, b) {
   let y = (b ? y : x) + 42;
diff --git a/test/mjsunit/harmony/block-let-declaration.js b/test/mjsunit/harmony/block-let-declaration.js
index 49b6348..480e033 100644
--- a/test/mjsunit/harmony/block-let-declaration.js
+++ b/test/mjsunit/harmony/block-let-declaration.js
@@ -25,41 +25,113 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-block-scoping
+// Flags: --harmony-scoping
 
 // Test let declarations in various settings.
+// TODO(ES6): properly activate extended mode
+"use strict";
 
 // Global
 let x;
 let y = 2;
+const z = 4;
 
 // Block local
 {
   let y;
   let x = 3;
+  const z = 5;
 }
 
 assertEquals(undefined, x);
 assertEquals(2,y);
+assertEquals(4,z);
 
 if (true) {
   let y;
   assertEquals(undefined, y);
 }
 
+// Invalid declarations are early errors in harmony mode and thus should trigger
+// an exception in eval code during parsing, before the code is compiled or
+// executed. The generated function is therefore not called here.
 function TestLocalThrows(str, expect) {
-  assertThrows("(function(){" + str + "})()", expect);
+  assertThrows("(function(){ 'use strict'; " + str + "})", expect);
 }
 
 function TestLocalDoesNotThrow(str) {
-  assertDoesNotThrow("(function(){" + str + "})()");
+  assertDoesNotThrow("(function(){ 'use strict'; " + str + "})()");
 }
 
-// Unprotected statement
+// Test let declarations in statement positions.
 TestLocalThrows("if (true) let x;", SyntaxError);
+TestLocalThrows("if (true) {} else let x;", SyntaxError);
 TestLocalThrows("do let x; while (false)", SyntaxError);
 TestLocalThrows("while (false) let x;", SyntaxError);
+TestLocalThrows("label: let x;", SyntaxError);
+TestLocalThrows("for (;false;) let x;", SyntaxError);
+TestLocalThrows("switch (true) { case true: let x; }", SyntaxError);
+TestLocalThrows("switch (true) { default: let x; }", SyntaxError);
 
+// Test const declarations with initialisers in statement positions.
+TestLocalThrows("if (true) const x = 1;", SyntaxError);
+TestLocalThrows("if (true) {} else const x = 1;", SyntaxError);
+TestLocalThrows("do const x = 1; while (false)", SyntaxError);
+TestLocalThrows("while (false) const x = 1;", SyntaxError);
+TestLocalThrows("label: const x = 1;", SyntaxError);
+TestLocalThrows("for (;false;) const x = 1;", SyntaxError);
+TestLocalThrows("switch (true) { case true: const x = 1; }", SyntaxError);
+TestLocalThrows("switch (true) { default: const x = 1; }", SyntaxError);
+
+// Test const declarations without initialisers.
+TestLocalThrows("const x;", SyntaxError);
+TestLocalThrows("const x = 1, y;", SyntaxError);
+TestLocalThrows("const x, y = 1;", SyntaxError);
+
+// Test const declarations without initialisers in statement positions.
+TestLocalThrows("if (true) const x;", SyntaxError);
+TestLocalThrows("if (true) {} else const x;", SyntaxError);
+TestLocalThrows("do const x; while (false)", SyntaxError);
+TestLocalThrows("while (false) const x;", SyntaxError);
+TestLocalThrows("label: const x;", SyntaxError);
+TestLocalThrows("for (;false;) const x;", SyntaxError);
+TestLocalThrows("switch (true) { case true: const x; }", SyntaxError);
+TestLocalThrows("switch (true) { default: const x; }", SyntaxError);
+
+// Test var declarations in statement positions.
 TestLocalDoesNotThrow("if (true) var x;");
+TestLocalDoesNotThrow("if (true) {} else var x;");
 TestLocalDoesNotThrow("do var x; while (false)");
 TestLocalDoesNotThrow("while (false) var x;");
+TestLocalDoesNotThrow("label: var x;");
+TestLocalDoesNotThrow("for (;false;) var x;");
+TestLocalDoesNotThrow("switch (true) { case true: var x; }");
+TestLocalDoesNotThrow("switch (true) { default: var x; }");
+
+// Test function declarations in source element and
+// non-strict statement positions.
+function f() {
+  // Non-strict source element positions.
+  function g0() {
+    "use strict";
+    // Strict source element positions.
+    function h() { }
+    {
+      function h1() { }
+    }
+  }
+  {
+    function g1() { }
+  }
+}
+f();
+
+// Test function declarations in statement position in strict mode.
+TestLocalThrows("function f() { if (true) function g() {}", SyntaxError);
+TestLocalThrows("function f() { if (true) {} else function g() {}", SyntaxError);
+TestLocalThrows("function f() { do function g() {} while (false)", SyntaxError);
+TestLocalThrows("function f() { while (false) function g() {}", SyntaxError);
+TestLocalThrows("function f() { label: function g() {}", SyntaxError);
+TestLocalThrows("function f() { for (;false;) function g() {}", SyntaxError);
+TestLocalThrows("function f() { switch (true) { case true: function g() {} }", SyntaxError);
+TestLocalThrows("function f() { switch (true) { default: function g() {} }", SyntaxError);
diff --git a/test/mjsunit/harmony/block-let-semantics.js b/test/mjsunit/harmony/block-let-semantics.js
index 198c3b4..d14e7cd 100644
--- a/test/mjsunit/harmony/block-let-semantics.js
+++ b/test/mjsunit/harmony/block-let-semantics.js
@@ -25,7 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-block-scoping
+// Flags: --harmony-scoping
+
+// TODO(ES6): properly activate extended mode
+"use strict";
 
 // Test temporal dead zone semantics of let bound variables in
 // function and block scopes.
@@ -61,6 +64,7 @@
 TestAll('let x = x += 1');
 TestAll('let x = x++');
 TestAll('let x = ++x');
+TestAll('const x = x + 1');
 
 // Use before initialization in prior statement.
 TestAll('x + 1; let x;');
@@ -68,25 +72,36 @@
 TestAll('x += 1; let x;');
 TestAll('++x; let x;');
 TestAll('x++; let x;');
+TestAll('let y = x; const x = 1;');
 
 TestAll('f(); let x; function f() { return x + 1; }');
 TestAll('f(); let x; function f() { x = 1; }');
 TestAll('f(); let x; function f() { x += 1; }');
 TestAll('f(); let x; function f() { ++x; }');
 TestAll('f(); let x; function f() { x++; }');
+TestAll('f(); const x = 1; function f() { return x; }');
 
 TestAll('f()(); let x; function f() { return function() { return x + 1; } }');
 TestAll('f()(); let x; function f() { return function() { x = 1; } }');
 TestAll('f()(); let x; function f() { return function() { x += 1; } }');
 TestAll('f()(); let x; function f() { return function() { ++x; } }');
 TestAll('f()(); let x; function f() { return function() { x++; } }');
+TestAll('f()(); const x = 1; function f() { return function() { return x; } }');
 
-// Use in before initialization with a dynamic lookup.
+// Use before initialization with a dynamic lookup.
 TestAll('eval("x + 1;"); let x;');
 TestAll('eval("x = 1;"); let x;');
 TestAll('eval("x += 1;"); let x;');
 TestAll('eval("++x;"); let x;');
 TestAll('eval("x++;"); let x;');
+TestAll('eval("x"); const x = 1;');
+
+// Use before initialization with check for eval-shadowed bindings.
+TestAll('function f() { eval("var y = 2;"); x + 1; }; f(); let x;');
+TestAll('function f() { eval("var y = 2;"); x = 1; }; f(); let x;');
+TestAll('function f() { eval("var y = 2;"); x += 1; }; f(); let x;');
+TestAll('function f() { eval("var y = 2;"); ++x; }; f(); let x;');
+TestAll('function f() { eval("var y = 2;"); x++; }; f(); let x;');
 
 // Test that variables introduced by function declarations are created and
 // initialized upon entering a function / block scope.
@@ -115,7 +130,7 @@
 
 // Test that a function declaration sees the scope it resides in.
 function f2() {
-  let m, n;
+  let m, n, o, p;
   {
     m = g;
     function g() {
@@ -132,7 +147,44 @@
     function h() {
       return b + c;
     }
-    let b = 3;
+    let c = 3;
   }
   assertEquals(5, n());
+
+  {
+    o = i;
+    function i() {
+      return d;
+    }
+    let d = 4;
+  }
+  assertEquals(4, o());
+
+  try {
+    throw 5;
+  } catch(e) {
+    p = j;
+    function j() {
+      return e + f;
+    }
+    let f = 6;
+  }
+  assertEquals(11, p());
 }
+f2();
+
+// Test that resolution of let bound variables works with scopes that call eval.
+function outer() {
+  function middle() {
+    function inner() {
+      return x;
+    }
+    eval("1 + 1");
+    return x + inner();
+  }
+
+  let x = 1;
+  return middle();
+}
+
+assertEquals(2, outer());
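
The TestAll cases above reduce to one rule: a let or const binding exists
from the top of its scope but stays uninitialized until its declaration
executes, and any earlier access throws. A minimal inline instance in
extended mode (mjsunit harness assumed, names illustrative):

function tdzSketch() {
  try {
    victim;  // binding exists but is not yet initialized
    assertUnreachable();
  } catch (e) {
    assertInstanceof(e, ReferenceError);
  }
  let victim = 1;
  return victim;
}
assertEquals(1, tdzSketch());
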
diff --git a/test/mjsunit/harmony/block-scoping.js b/test/mjsunit/harmony/block-scoping.js
index 266e380..31194d9 100644
--- a/test/mjsunit/harmony/block-scoping.js
+++ b/test/mjsunit/harmony/block-scoping.js
@@ -25,9 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --harmony-block-scoping
+// Flags: --allow-natives-syntax --harmony-scoping
 // Test functionality of block scopes.
 
+// TODO(ES6): properly activate extended mode
+"use strict";
+
 // Hoisting of var declarations.
 function f1() {
   {
@@ -44,12 +47,16 @@
 function f2(one) {
   var x = one + 1;
   let y = one + 2;
+  const u = one + 4;
   {
     let z = one + 3;
+    const v = one + 5;
     assertEquals(1, eval('one'));
     assertEquals(2, eval('x'));
     assertEquals(3, eval('y'));
     assertEquals(4, eval('z'));
+    assertEquals(5, eval('u'));
+    assertEquals(6, eval('v'));
   }
 }
 f2(1);
@@ -59,12 +66,17 @@
 function f3(one) {
   var x = one + 1;
   let y = one + 2;
+  const u = one + 4;
   {
     let z = one + 3;
+    const v = one + 5;
     assertEquals(1, one);
     assertEquals(2, x);
     assertEquals(3, y);
     assertEquals(4, z);
+    assertEquals(5, u);
+    assertEquals(6, v);
+
   }
 }
 f3(1);
@@ -74,13 +86,17 @@
 function f4(one) {
   var x = one + 1;
   let y = one + 2;
+  const u = one + 4;
   {
     let z = one + 3;
+    const v = one + 5;
     function f() {
       assertEquals(1, eval('one'));
       assertEquals(2, eval('x'));
       assertEquals(3, eval('y'));
       assertEquals(4, eval('z'));
+      assertEquals(5, eval('u'));
+      assertEquals(6, eval('v'));
     };
   }
 }
@@ -91,13 +107,17 @@
 function f5(one) {
   var x = one + 1;
   let y = one + 2;
+  const u = one + 4;
   {
     let z = one + 3;
+    const v = one + 5;
     function f() {
       assertEquals(1, one);
       assertEquals(2, x);
       assertEquals(3, y);
       assertEquals(4, z);
+      assertEquals(5, u);
+      assertEquals(6, v);
     };
   }
 }
@@ -107,8 +127,10 @@
 // Return from block.
 function f6() {
   let x = 1;
+  const u = 3;
   {
     let y = 2;
+    const v = 4;
     return x + y;
   }
 }
@@ -120,13 +142,26 @@
   let b = 1;
   var c = 1;
   var d = 1;
-  { // let variables shadowing argument, let and var variables
+  const e = 1;
+  { // let variables shadowing argument, let, const and var variables
     let a = 2;
     let b = 2;
     let c = 2;
+    let e = 2;
     assertEquals(2,a);
     assertEquals(2,b);
     assertEquals(2,c);
+    assertEquals(2,e);
+  }
+  { // const variables shadowing argument, let, const and var variables
+    const a = 2;
+    const b = 2;
+    const c = 2;
+    const e = 2;
+    assertEquals(2,a);
+    assertEquals(2,b);
+    assertEquals(2,c);
+    assertEquals(2,e);
   }
   try {
     throw 'stuff1';
@@ -156,6 +191,12 @@
   } catch (c) {
     // catch variable shadowing var variable
     assertEquals('stuff3',c);
+    {
+      // const variable shadowing catch variable
+      const c = 3;
+      assertEquals(3,c);
+    }
+    assertEquals('stuff3',c);
     try {
       throw 'stuff4';
     } catch(c) {
@@ -178,14 +219,16 @@
     c = 2;
   }
   assertEquals(1,c);
-  (function(a,b,c) {
-    // arguments shadowing argument, let and var variable
+  (function(a,b,c,e) {
+    // arguments shadowing argument, let, const and var variable
     a = 2;
     b = 2;
     c = 2;
+    e = 2;
     assertEquals(2,a);
     assertEquals(2,b);
     assertEquals(2,c);
+    assertEquals(2,e);
     // var variable shadowing var variable
     var d = 2;
   })(1,1);
@@ -193,24 +236,30 @@
   assertEquals(1,b);
   assertEquals(1,c);
   assertEquals(1,d);
+  assertEquals(1,e);
 }
 f7(1);
 
 
-// Ensure let variables are block local and var variables function local.
+// Ensure let and const variables are block local
+// and var variables function local.
 function f8() {
   var let_accessors = [];
   var var_accessors = [];
+  var const_accessors = [];
   for (var i = 0; i < 10; i++) {
     let x = i;
     var y = i;
+    const z = i;
     let_accessors[i] = function() { return x; }
     var_accessors[i] = function() { return y; }
+    const_accessors[i] = function() { return z; }
   }
   for (var j = 0; j < 10; j++) {
     y = j + 10;
     assertEquals(j, let_accessors[j]());
     assertEquals(y, var_accessors[j]());
+    assertEquals(j, const_accessors[j]());
   }
 }
 f8();
diff --git a/test/mjsunit/harmony/collections.js b/test/mjsunit/harmony/collections.js
new file mode 100644
index 0000000..412e6f1
--- /dev/null
+++ b/test/mjsunit/harmony/collections.js
@@ -0,0 +1,314 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-collections --expose-gc
+
+
+// Test valid getter and setter calls on Sets.
+function TestValidSetCalls(m) {
+  assertDoesNotThrow(function () { m.add(new Object) });
+  assertDoesNotThrow(function () { m.has(new Object) });
+  assertDoesNotThrow(function () { m.delete(new Object) });
+}
+TestValidSetCalls(new Set);
+
+
+// Test valid getter and setter calls on Maps and WeakMaps
+function TestValidMapCalls(m) {
+  assertDoesNotThrow(function () { m.get(new Object) });
+  assertDoesNotThrow(function () { m.set(new Object) });
+  assertDoesNotThrow(function () { m.has(new Object) });
+  assertDoesNotThrow(function () { m.delete(new Object) });
+}
+TestValidMapCalls(new Map);
+TestValidMapCalls(new WeakMap);
+
+
+// Test invalid getter and setter calls for WeakMap only
+function TestInvalidCalls(m) {
+  assertThrows(function () { m.get(undefined) }, TypeError);
+  assertThrows(function () { m.set(undefined, 0) }, TypeError);
+  assertThrows(function () { m.get(null) }, TypeError);
+  assertThrows(function () { m.set(null, 0) }, TypeError);
+  assertThrows(function () { m.get(0) }, TypeError);
+  assertThrows(function () { m.set(0, 0) }, TypeError);
+  assertThrows(function () { m.get('a-key') }, TypeError);
+  assertThrows(function () { m.set('a-key', 0) }, TypeError);
+}
+TestInvalidCalls(new WeakMap);
+
+
+// Test expected behavior for Sets
+function TestSet(set, key) {
+  assertFalse(set.has(key));
+  set.add(key);
+  assertTrue(set.has(key));
+  set.delete(key);
+  assertFalse(set.has(key));
+}
+function TestSetBehavior(set) {
+  for (var i = 0; i < 20; i++) {
+    TestSet(set, new Object);
+    TestSet(set, i);
+    TestSet(set, i / 100);
+    TestSet(set, 'key-' + i);
+  }
+  var keys = [ +0, -0, +Infinity, -Infinity, true, false, null, undefined ];
+  for (var i = 0; i < keys.length; i++) {
+    TestSet(set, keys[i]);
+  }
+}
+TestSetBehavior(new Set);
+
+
+// Test expected mapping behavior for Maps and WeakMaps
+function TestMapping(map, key, value) {
+  map.set(key, value);
+  assertSame(value, map.get(key));
+}
+function TestMapBehavior1(m) {
+  TestMapping(m, new Object, 23);
+  TestMapping(m, new Object, 'the-value');
+  TestMapping(m, new Object, new Object);
+}
+TestMapBehavior1(new Map);
+TestMapBehavior1(new WeakMap);
+
+
+// Test expected mapping behavior for Maps only
+function TestMapBehavior2(m) {
+  for (var i = 0; i < 20; i++) {
+    TestMapping(m, i, new Object);
+    TestMapping(m, i / 10, new Object);
+    TestMapping(m, 'key-' + i, new Object);
+  }
+  var keys = [ +0, -0, +Infinity, -Infinity, true, false, null, undefined ];
+  for (var i = 0; i < keys.length; i++) {
+    TestMapping(m, keys[i], new Object);
+  }
+}
+TestMapBehavior2(new Map);
+
+
+// Test expected querying behavior of Maps and WeakMaps
+function TestQuery(m) {
+  var key = new Object;
+  TestMapping(m, key, 'to-be-present');
+  assertTrue(m.has(key));
+  assertFalse(m.has(new Object));
+  TestMapping(m, key, undefined);
+  assertFalse(m.has(key));
+  assertFalse(m.has(new Object));
+}
+TestQuery(new Map);
+TestQuery(new WeakMap);
+
+
+// Test expected deletion behavior of Maps and WeakMaps
+function TestDelete(m) {
+  var key = new Object;
+  TestMapping(m, key, 'to-be-deleted');
+  assertTrue(m.delete(key));
+  assertFalse(m.delete(key));
+  assertFalse(m.delete(new Object));
+  assertSame(m.get(key), undefined);
+}
+TestDelete(new Map);
+TestDelete(new WeakMap);
+
+
+// Test GC of Maps and WeakMaps with entry
+function TestGC1(m) {
+  var key = new Object;
+  m.set(key, 'not-collected');
+  gc();
+  assertSame('not-collected', m.get(key));
+}
+TestGC1(new Map);
+TestGC1(new WeakMap);
+
+
+// Test GC of Maps and WeakMaps with chained entries
+function TestGC2(m) {
+  var head = new Object;
+  for (key = head, i = 0; i < 10; i++, key = m.get(key)) {
+    m.set(key, new Object);
+  }
+  gc();
+  var count = 0;
+  for (key = head; key != undefined; key = m.get(key)) {
+    count++;
+  }
+  assertEquals(11, count);
+}
+TestGC2(new Map);
+TestGC2(new WeakMap);
+
+
+// Test property attribute [[Enumerable]]
+function TestEnumerable(func) {
+  function props(x) {
+    var array = [];
+    for (var p in x) array.push(p);
+    return array.sort();
+  }
+  assertArrayEquals([], props(func));
+  assertArrayEquals([], props(func.prototype));
+  assertArrayEquals([], props(new func()));
+}
+TestEnumerable(Set);
+TestEnumerable(Map);
+TestEnumerable(WeakMap);
+
+
+// Test arbitrary properties on Maps and WeakMaps
+function TestArbitrary(m) {
+  function TestProperty(map, property, value) {
+    map[property] = value;
+    assertEquals(value, map[property]);
+  }
+  for (var i = 0; i < 20; i++) {
+    TestProperty(m, i, 'val' + i);
+    TestProperty(m, 'foo' + i, 'bar' + i);
+  }
+  TestMapping(m, new Object, 'foobar');
+}
+TestArbitrary(new Map);
+TestArbitrary(new WeakMap);
+
+
+// Test direct constructor call
+assertTrue(Set() instanceof Set);
+assertTrue(Map() instanceof Map);
+assertTrue(WeakMap() instanceof WeakMap);
+
+
+// Test whether NaN values as keys are treated correctly.
+var s = new Set;
+assertFalse(s.has(NaN));
+assertFalse(s.has(NaN + 1));
+assertFalse(s.has(23));
+s.add(NaN);
+assertTrue(s.has(NaN));
+assertTrue(s.has(NaN + 1));
+assertFalse(s.has(23));
+var m = new Map;
+assertFalse(m.has(NaN));
+assertFalse(m.has(NaN + 1));
+assertFalse(m.has(23));
+m.set(NaN, 'a-value');
+assertTrue(m.has(NaN));
+assertTrue(m.has(NaN + 1));
+assertFalse(m.has(23));
+
+
+// Test some common JavaScript idioms for Sets
+var s = new Set;
+assertTrue(s instanceof Set);
+assertTrue(Set.prototype.add instanceof Function)
+assertTrue(Set.prototype.has instanceof Function)
+assertTrue(Set.prototype.delete instanceof Function)
+
+
+// Test some common JavaScript idioms for Maps
+var m = new Map;
+assertTrue(m instanceof Map);
+assertTrue(Map.prototype.set instanceof Function)
+assertTrue(Map.prototype.get instanceof Function)
+assertTrue(Map.prototype.has instanceof Function)
+assertTrue(Map.prototype.delete instanceof Function)
+
+
+// Test some common JavaScript idioms for WeakMaps
+var m = new WeakMap;
+assertTrue(m instanceof WeakMap);
+assertTrue(WeakMap.prototype.set instanceof Function)
+assertTrue(WeakMap.prototype.get instanceof Function)
+assertTrue(WeakMap.prototype.has instanceof Function)
+assertTrue(WeakMap.prototype.delete instanceof Function)
+
+
+// Regression test for WeakMap prototype.
+assertTrue(WeakMap.prototype.constructor === WeakMap)
+assertTrue(Object.getPrototypeOf(WeakMap.prototype) === Object.prototype)
+
+
+// Regression test for issue 1617: The prototype of the WeakMap constructor
+// needs to be unique (i.e. different from that of the Object constructor).
+assertFalse(WeakMap.prototype === Object.prototype);
+var o = Object.create({});
+assertFalse("get" in o);
+assertFalse("set" in o);
+assertEquals(undefined, o.get);
+assertEquals(undefined, o.set);
+var o = Object.create({}, { myValue: {
+  value: 10,
+  enumerable: false,
+  configurable: true,
+  writable: true
+}});
+assertEquals(10, o.myValue);
+
+
+// Regression test for issue 1884: Invoking any of the methods for Harmony
+// maps, sets, or weak maps with a wrong type of receiver should throw a
+// proper TypeError.
+var alwaysBogus = [ undefined, null, true, "x", 23, {} ];
+var bogusReceiversTestSet = [
+  { proto: Set.prototype,
+    funcs: [ 'add', 'has', 'delete' ],
+    receivers: alwaysBogus.concat([ new Map, new WeakMap ]),
+  },
+  { proto: Map.prototype,
+    funcs: [ 'get', 'set', 'has', 'delete' ],
+    receivers: alwaysBogus.concat([ new Set, new WeakMap ]),
+  },
+  { proto: WeakMap.prototype,
+    funcs: [ 'get', 'set', 'has', 'delete' ],
+    receivers: alwaysBogus.concat([ new Set, new Map ]),
+  },
+];
+function TestBogusReceivers(testSet) {
+  for (var i = 0; i < testSet.length; i++) {
+    var proto = testSet[i].proto;
+    var funcs = testSet[i].funcs;
+    var receivers = testSet[i].receivers;
+    for (var j = 0; j < funcs.length; j++) {
+      var func = proto[funcs[j]];
+      for (var k = 0; k < receivers.length; k++) {
+        assertThrows(function () { func.call(receivers[k], {}) }, TypeError);
+      }
+    }
+  }
+}
+TestBogusReceivers(bogusReceiversTestSet);
+
+
+// Stress Test
+// There is a proposed stress-test available at the es-discuss mailing list
+// which cannot be reasonably automated.  Check it out by hand if you like:
+// https://mail.mozilla.org/pipermail/es-discuss/2011-May/014096.html
\ No newline at end of file
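
The NaN checks above hinge on key normalization: Set and Map treat every NaN
value as the same key, so a freshly computed NaN still finds the stored
entry. A small sketch under the same --harmony-collections flag (mjsunit
harness assumed, name illustrative):

var nanMap = new Map;
nanMap.set(NaN, 'found');
assertEquals('found', nanMap.get(0 / 0));  // 0/0 is NaN: the same key
assertTrue(nanMap.has(Number.NaN));
assertFalse(nanMap.has(undefined));
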
diff --git a/test/mjsunit/harmony/debug-blockscopes.js b/test/mjsunit/harmony/debug-blockscopes.js
index 0230e84..10aac2d 100644
--- a/test/mjsunit/harmony/debug-blockscopes.js
+++ b/test/mjsunit/harmony/debug-blockscopes.js
@@ -25,13 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug --harmony-block-scoping
+// Flags: --expose-debug-as debug --harmony-scoping
 // The functions used for testing backtraces. They are at the top to make the
 // testing of source line/column easier.
 
+// TODO(ES6): properly activate extended mode
+"use strict";
 
 // Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug;
+var Debug = debug.Debug;
 
 var test_name;
 var listener_delegate;
@@ -76,6 +78,7 @@
   end_test_count++;
 }
 
+var global_object = this;
 
 // Check that the scope chain contains the expected types of scopes.
 function CheckScopeChain(scopes, exec_state) {
@@ -89,7 +92,7 @@
     if (scopes[i] == debug.ScopeType.Global) {
       // Objects don't have same class (one is "global", other is "Object",
       // so just check the properties directly.
-      assertPropertiesEqual(this, scope.scopeObject().value());
+      assertPropertiesEqual(global_object, scope.scopeObject().value());
     }
   }
 
@@ -329,114 +332,6 @@
 EndTest();
 
 
-// Single empty with block.
-BeginTest("With block 1");
-
-function with_block_1() {
-  with({}) {
-    debugger;
-  }
-}
-
-listener_delegate = function(exec_state) {
-  CheckScopeChain([debug.ScopeType.With,
-                   debug.ScopeType.Local,
-                   debug.ScopeType.Global], exec_state);
-  CheckScopeContent({}, 0, exec_state);
-  CheckScopeContent({}, 1, exec_state);
-};
-with_block_1();
-EndTest();
-
-
-// Nested empty with blocks.
-BeginTest("With block 2");
-
-function with_block_2() {
-  with({}) {
-    with({}) {
-      debugger;
-    }
-  }
-}
-
-listener_delegate = function(exec_state) {
-  CheckScopeChain([debug.ScopeType.With,
-                   debug.ScopeType.With,
-                   debug.ScopeType.Local,
-                   debug.ScopeType.Global], exec_state);
-  CheckScopeContent({}, 0, exec_state);
-  CheckScopeContent({}, 1, exec_state);
-  CheckScopeContent({}, 2, exec_state);
-};
-with_block_2();
-EndTest();
-
-
-// With block using an in-place object literal.
-BeginTest("With block 3");
-
-function with_block_3() {
-  with({a:1,b:2}) {
-    debugger;
-  }
-}
-
-listener_delegate = function(exec_state) {
-  CheckScopeChain([debug.ScopeType.With,
-                   debug.ScopeType.Local,
-                   debug.ScopeType.Global], exec_state);
-  CheckScopeContent({a:1,b:2}, 0, exec_state);
-};
-with_block_3();
-EndTest();
-
-
-// Nested with blocks using in-place object literals.
-BeginTest("With block 4");
-
-function with_block_4() {
-  with({a:1,b:2}) {
-    with({a:2,b:1}) {
-      debugger;
-    }
-  }
-}
-
-listener_delegate = function(exec_state) {
-  CheckScopeChain([debug.ScopeType.With,
-                   debug.ScopeType.With,
-                   debug.ScopeType.Local,
-                   debug.ScopeType.Global], exec_state);
-  CheckScopeContent({a:2,b:1}, 0, exec_state);
-  CheckScopeContent({a:1,b:2}, 1, exec_state);
-};
-with_block_4();
-EndTest();
-
-
-// With block and a block local variable.
-BeginTest("With block 5");
-
-function with_block_5() {
-  with({a:1}) {
-    let a = 2;
-    debugger;
-  }
-}
-
-listener_delegate = function(exec_state) {
-  CheckScopeChain([debug.ScopeType.Block,
-                   debug.ScopeType.With,
-                   debug.ScopeType.Local,
-                   debug.ScopeType.Global], exec_state);
-  CheckScopeContent({a:2}, 0, exec_state);
-  CheckScopeContent({a:1}, 1, exec_state);
-};
-with_block_5();
-EndTest();
-
-
 // Simple closure formed by returning an inner function referering to an outer
 // block local variable and an outer function's parameter.
 BeginTest("Closure 1");
@@ -464,3 +359,112 @@
 };
 closure_1(1)();
 EndTest();
+
+
+// Simple for-in loop over the keys of an object.
+BeginTest("For loop 1");
+
+function for_loop_1() {
+  for (let x in {y:undefined}) {
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:'y'}, 0, exec_state);
+  // The function scope contains a temporary iteration variable.
+  CheckScopeContent({x:'y'}, 1, exec_state);
+};
+for_loop_1();
+EndTest();
+
+
+// For-in loop over the keys of an object with a block scoped let variable
+// shadowing the iteration variable.
+BeginTest("For loop 2");
+
+function for_loop_2() {
+  for (let x in {y:undefined}) {
+    let x = 3;
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:3}, 0, exec_state);
+  CheckScopeContent({x:'y'}, 1, exec_state);
+  // The function scope contains a temporary iteration variable.
+  CheckScopeContent({x:'y'}, 2, exec_state);
+};
+for_loop_2();
+EndTest();
+
+
+// Simple for loop.
+BeginTest("For loop 3");
+
+function for_loop_3() {
+  for (let x = 3; x < 4; ++x) {
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:3}, 0, exec_state);
+  CheckScopeContent({}, 1, exec_state);
+};
+for_loop_3();
+EndTest();
+
+
+// For loop with a block-scoped let variable shadowing the iteration variable.
+BeginTest("For loop 4");
+
+function for_loop_4() {
+  for (let x = 3; x < 4; ++x) {
+    let x = 5;
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:5}, 0, exec_state);
+  CheckScopeContent({x:3}, 1, exec_state);
+  CheckScopeContent({}, 2, exec_state);
+};
+for_loop_4();
+EndTest();
+
+
+// For loop with two variable declarations.
+BeginTest("For loop 5");
+
+function for_loop_5() {
+  for (let x = 3, y = 5; x < 4; ++x) {
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:3,y:5}, 0, exec_state);
+  CheckScopeContent({}, 1, exec_state);
+};
+for_loop_5();
+EndTest();
diff --git a/test/mjsunit/harmony/debug-evaluate-blockscopes.js b/test/mjsunit/harmony/debug-evaluate-blockscopes.js
index 549960a..d6ce8b2 100644
--- a/test/mjsunit/harmony/debug-evaluate-blockscopes.js
+++ b/test/mjsunit/harmony/debug-evaluate-blockscopes.js
@@ -25,11 +25,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug --harmony-block-scoping
+// Flags: --expose-debug-as debug --harmony-scoping
 
 // Test debug evaluation for functions without local context, but with
 // nested catch contexts.
 
+// TODO(ES6): properly activate extended mode
+"use strict";
+
+var x;
+var result;
+
 function f() {
   {                   // Line 1.
     let i = 1;        // Line 2.
@@ -42,7 +48,7 @@
 };
 
 // Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
+var Debug = debug.Debug
 // Set breakpoint on line 6.
 var bp = Debug.setBreakPoint(f, 6);
 
diff --git a/test/mjsunit/harmony/module-parsing.js b/test/mjsunit/harmony/module-parsing.js
new file mode 100644
index 0000000..93e69e3
--- /dev/null
+++ b/test/mjsunit/harmony/module-parsing.js
@@ -0,0 +1,159 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-modules
+
+// Test basic module syntax, with and without automatic semicolon insertion.
+
+module A {}
+
+module A1 = A
+module A2 = A;
+module A3 = A2
+
+module B {
+  export vx
+  export vy, lz, c, f
+
+  var vx
+  var vx, vy;
+  var vx = 0, vy
+  let lx, ly
+  let lz = 1
+  const c = 9
+  function f() {}
+
+  module C0 {}
+
+  export module C {
+    let x
+    export module D { export let x }
+    let y
+  }
+
+  let zz = ""
+
+  export var x0
+  export var x1, x2 = 6, x3
+  export let y0
+  export let y1 = 0, y2
+  export const z0 = 0
+  export const z1 = 2, z2 = 3
+  export function f0() {}
+  export module M1 {}
+  export module M2 = C.D
+  export module M3 at "http://where"
+
+  import i0 from I
+  import i1, i2, i3, M from I
+  import i4, i5 from "http://where"
+}
+
+module I {
+  export let i0, i1, i2, i3;
+  export module M {}
+}
+
+module C1 = B.C;
+module D1 = B.C.D
+module D2 = C1.D
+module D3 = D2
+
+module E1 at "http://where"
+module E2 at "http://where";
+module E3 = E1.F
+
+// Check that ASI does not interfere.
+
+module X
+{
+let x
+}
+
+module Y
+=
+X
+
+module Z
+at
+"file://local"
+
+import
+x
+,
+y
+from
+"file://local"
+
+
+module Wrap {
+export
+x
+,
+y
+
+export
+var
+v1 = 1
+
+export
+let
+v2 = 2
+
+export
+const
+v3 = 3
+
+export
+function
+f
+(
+)
+{
+}
+
+export
+module V
+{
+}
+}
+
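+// Taken together, these cases pin down where line breaks matter: once the
+// module NAME follows on the same line, line terminators may precede the
+// "{", "=" or "at" part, so each split form above is a single declaration,
+// e.g.:
+//
+//   module Y
+//   =
+//   X          // one alias declaration, same as: module Y = X
+//
+// whereas a line break directly after the word "module" itself makes it
+// parse as a plain identifier (see the last section of this file).
+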
+export A, A1, A2, A3, B, I, C1, D1, D2, D3, E1, E2, E3, X, Y, Z, Wrap, x, y, UU
+
+
+
+// Check that 'module' still works as an identifier.
+
+var module
+module = {}
+module["a"] = 6
+function module() {}
+function f(module) { return module }
+try {} catch (module) {}
+
+module
+v = 20
diff --git a/test/mjsunit/harmony/module-resolution.js b/test/mjsunit/harmony/module-resolution.js
new file mode 100644
index 0000000..f9f492c
--- /dev/null
+++ b/test/mjsunit/harmony/module-resolution.js
@@ -0,0 +1,139 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-modules --harmony-scoping
+
+// Test basic module interface inference.
+
+"use strict";
+
+print("begin.")
+
+export let x = print("0")
+
+export module B = A.B
+
+export module A {
+  export let x = print("1")
+  export let f = function() { return B.x }
+  export module B {
+    module BB = B
+    export BB, x
+    let x = print("2")
+    let y = print("3")
+    let Ax = A.x
+    let ABx = A.B.x
+    let Ay = A.y
+    let BBx = BB.x
+    let Af = A.f
+    function f(x,y) { return x }
+  }
+  export let y = print("4")
+  let Ax = A.x
+  let Bx = B.x
+  let ABx = A.B.x
+  module C {
+    export let z = print("5")
+    export module D = B
+    // TODO(rossberg): turn these into proper negative test cases once we have
+    // suitable error messages.
+    // import C.z  // multiple declarations
+    import x from B
+  }
+  module D {
+    // TODO(rossberg): Handle import *.
+    // import A.*  // invalid forward import
+  }
+  module M {}
+  // TODO(rossberg): Handle import *.
+  // import M.*  // invalid forward import
+  let Cz = C.z
+  let CDx = C.D.x
+}
+
+export module Imports {
+  module A1 {
+    export module A2 {}
+  }
+  module B {
+    // TODO(rossberg): Handle import *.
+    // import A1.*
+    // import A2.*  // unbound variable A2
+  }
+}
+
+export module E {
+  export let xx = x
+  export y, B
+  let Bx = B.x
+  // TODO(rossberg): Handle import *.
+  // import A.*
+}
+
+export module M1 {
+  export module A2 = M2
+}
+export module M2 {
+  export module A1 = M1
+}
+
+// TODO(rossberg): turn these into proper negative test cases once we have
+// suitable error messages.
+// module W1 = W2.W
+// module W2 = { export module W = W3 }
+// module W3 = W1  // cyclic module definition
+
+// module W1 = W2.W3
+// module W2 = {
+//   export module W3 = W4
+//   export module W4 = W1
+// }  // cyclic module definition
+
+// TODO(rossberg): Handle import *.
+//module M3B = M3.B
+//export module M3 {
+//  export module B { export let x = "" }
+//  module C1 = { import M3.* }
+//  module C2 = { import M3.B.* }
+//  module C3 = { import M3B.* }
+//  module C4 = { export x import B.* }
+//// TODO(rossberg): turn these into proper negative test cases once we have
+//// suitable error messages.
+//// export module C5 = { import C5.* }  // invalid forward import
+//// export module C6 = { import M3.C6.* }  // invalid forward import
+//}
+
+export module External at "external.js"
+export module External1 = External
+export module ExternalA = External.A
+export module InnerExternal {
+  export module E at "external.js"
+}
+export module External2 = InnerExternal.E
+//export let xxx = InnerExternal.E.A.x
+
+print("end.")
diff --git a/test/mjsunit/harmony/proxies-example-membrane.js b/test/mjsunit/harmony/proxies-example-membrane.js
new file mode 100644
index 0000000..c6e7f9f
--- /dev/null
+++ b/test/mjsunit/harmony/proxies-example-membrane.js
@@ -0,0 +1,512 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+
+
+// A simple no-op handler. Adapted from:
+// http://wiki.ecmascript.org/doku.php?id=harmony:proxies#examplea_no-op_forwarding_proxy
+
+function createHandler(obj) {
+  return {
+    getOwnPropertyDescriptor: function(name) {
+      var desc = Object.getOwnPropertyDescriptor(obj, name);
+      if (desc !== undefined) desc.configurable = true;
+      return desc;
+    },
+    getPropertyDescriptor: function(name) {
+      var desc = Object.getOwnPropertyDescriptor(obj, name);
+      //var desc = Object.getPropertyDescriptor(obj, name);  // not in ES5
+      if (desc !== undefined) desc.configurable = true;
+      return desc;
+    },
+    getOwnPropertyNames: function() {
+      return Object.getOwnPropertyNames(obj);
+    },
+    getPropertyNames: function() {
+      return Object.getOwnPropertyNames(obj);
+      //return Object.getPropertyNames(obj);  // not in ES5
+    },
+    defineProperty: function(name, desc) {
+      Object.defineProperty(obj, name, desc);
+    },
+    delete: function(name) {
+      return delete obj[name];
+    },
+    fix: function() {
+      if (Object.isFrozen(obj)) {
+        var result = {};
+        Object.getOwnPropertyNames(obj).forEach(function(name) {
+          result[name] = Object.getOwnPropertyDescriptor(obj, name);
+        });
+        return result;
+      }
+      // As long as obj is not frozen, the proxy won't allow itself to be fixed
+      return undefined; // will cause a TypeError to be thrown
+    },
+    has: function(name) { return name in obj; },
+    hasOwn: function(name) { return ({}).hasOwnProperty.call(obj, name); },
+    get: function(receiver, name) { return obj[name]; },
+    set: function(receiver, name, val) {
+      obj[name] = val;  // bad behavior when set fails in non-strict mode
+      return true;
+    },
+    enumerate: function() {
+      var result = [];
+      for (var name in obj) { result.push(name); };
+      return result;
+    },
+    keys: function() { return Object.keys(obj); }
+  };
+}
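+
+// Usage sketch (names are illustrative): a forwarding proxy built from this
+// handler behaves like the wrapped object for every operation trapped above.
+// var target = {answer: 42};
+// var forwarder = Proxy.create(createHandler(target),
+//                              Object.getPrototypeOf(target));
+// forwarder.answer         // 42, via the "get" trap
+// "answer" in forwarder    // true, via the "has" trap
+// Object.keys(forwarder)   // ["answer"], via the "keys" trap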
+
+
+
+// Auxiliary definitions enabling tracking of object identity in output.
+
+var objectMap = new WeakMap;
+var objectCounter = 0;
+
+function registerObject(x, s) {
+  if (x === Object(x) && !objectMap.has(x))
+    objectMap.set(x, ++objectCounter + (s == undefined ? "" : ":" + s));
+}
+
+registerObject(this, "global");
+registerObject(Object.prototype, "Object.prototype");
+
+function str(x) {
+  if (x === Object(x)) return "[" + typeof x + " " + objectMap.get(x) + "]";
+  if (typeof x == "string") return "\"" + x + "\"";
+  return "" + x;
+}
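+
+// Tagging illustration (counter values are hypothetical): once registered,
+// an object prints as its number plus optional label.
+// var sample = {};
+// registerObject(sample, "sample");
+// print(str(sample));   // e.g. [object 3:sample]
+// print(str("hi"));     // "hi"
+// print(str(7));        // 7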
+
+
+
+// A simple membrane. Adapted from:
+// http://wiki.ecmascript.org/doku.php?id=harmony:proxies#a_simple_membrane
+
+function createSimpleMembrane(target) {
+  var enabled = true;
+
+  function wrap(obj) {
+    registerObject(obj);
+    print("wrap enter", str(obj));
+    try {
+      var x = wrap2(obj);
+      registerObject(x, "wrapped");
+      print("wrap exit", str(obj), "as", str(x));
+      return x;
+    } catch(e) {
+      print("wrap exception", str(e));
+      throw e;
+    }
+  }
+
+  function wrap2(obj) {
+    if (obj !== Object(obj)) {
+      return obj;
+    }
+
+    function wrapCall(fun, that, args) {
+      registerObject(that);
+      print("wrapCall enter", fun, str(that));
+      try {
+        var x = wrapCall2(fun, that, args);
+        print("wrapCall exit", fun, str(that), "returning", str(x));
+        return x;
+      } catch(e) {
+        print("wrapCall exception", fun, str(that), str(e));
+        throw e;
+      }
+    }
+
+    function wrapCall2(fun, that, args) {
+      if (!enabled) { throw new Error("disabled"); }
+      try {
+        return wrap(fun.apply(that, Array.prototype.map.call(args, wrap)));
+      } catch (e) {
+        throw wrap(e);
+      }
+    }
+
+    var baseHandler = createHandler(obj);
+    var handler = Proxy.create(Object.freeze({
+      get: function(receiver, name) {
+        return function() {
+          var arg = (name === "get" || name == "set") ? arguments[1] : "";
+          print("handler enter", name, arg);
+          var x = wrapCall(baseHandler[name], baseHandler, arguments);
+          print("handler exit", name, arg, "returning", str(x));
+          return x;
+        }
+      }
+    }));
+    registerObject(baseHandler, "basehandler");
+    registerObject(handler, "handler");
+
+    if (typeof obj === "function") {
+      function callTrap() {
+        print("call trap enter", str(obj), str(this));
+        var x = wrapCall(obj, wrap(this), arguments);
+        print("call trap exit", str(obj), str(this), "returning", str(x));
+        return x;
+      }
+      function constructTrap() {
+        if (!enabled) { throw new Error("disabled"); }
+        try {
+          function forward(args) { return obj.apply(this, args) }
+          return wrap(new forward(Array.prototype.map.call(arguments, wrap)));
+        } catch (e) {
+          throw wrap(e);
+        }
+      }
+      return Proxy.createFunction(handler, callTrap, constructTrap);
+    } else {
+      var prototype = wrap(Object.getPrototypeOf(obj));
+      return Proxy.create(handler, prototype);
+    }
+  }
+
+  var gate = Object.freeze({
+    enable: function() { enabled = true; },
+    disable: function() { enabled = false; }
+  });
+
+  return Object.freeze({
+    wrapper: wrap(target),
+    gate: gate
+  });
+}
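+
+// Gate semantics in brief (illustrative): once disabled, every trap
+// activation throws, but primitives that already crossed stay usable.
+// var mm = createSimpleMembrane({a: 1});
+// var v = mm.wrapper.a;    // 1, copied out as a primitive
+// mm.gate.disable();
+// v                        // still 1
+// mm.wrapper.a             // throws Error("disabled")
+// mm.gate.enable();        // forwarding works again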
+
+
+var o = {
+  a: 6,
+  b: {bb: 8},
+  f: function(x) { return x },
+  g: function(x) { return x.a },
+  h: function(x) { this.q = x }
+};
+o[2] = {c: 7};
+var m = createSimpleMembrane(o);
+var w = m.wrapper;
+print("o =", str(o))
+print("w =", str(w));
+
+var f = w.f;
+var x = f(66);
+var x = f({a: 1});
+var x = w.f({a: 1});
+var a = x.a;
+assertEquals(6, w.a);
+assertEquals(8, w.b.bb);
+assertEquals(7, w[2]["c"]);
+assertEquals(undefined, w.c);
+assertEquals(1, w.f(1));
+assertEquals(1, w.f({a: 1}).a);
+assertEquals(2, w.g({a: 2}));
+assertEquals(3, (w.r = {a: 3}).a);
+assertEquals(3, w.r.a);
+assertEquals(3, o.r.a);
+w.h(3);
+assertEquals(3, w.q);
+assertEquals(3, o.q);
+assertEquals(4, (new w.h(4)).q);
+
+var wb = w.b;
+var wr = w.r;
+var wf = w.f;
+var wf3 = w.f(3);
+var wfx = w.f({a: 6});
+var wgx = w.g({a: {aa: 7}});
+var wh4 = new w.h(4);
+m.gate.disable();
+assertEquals(3, wf3);
+assertThrows(function() { w.a }, Error);
+assertThrows(function() { w.r }, Error);
+assertThrows(function() { w.r = {a: 4} }, Error);
+assertThrows(function() { o.r.a }, Error);
+assertEquals("object", typeof o.r);
+assertEquals(5, (o.r = {a: 5}).a);
+assertEquals(5, o.r.a);
+assertThrows(function() { w[1] }, Error);
+assertThrows(function() { w.c }, Error);
+assertThrows(function() { wb.bb }, Error);
+assertThrows(function() { wr.a }, Error);
+assertThrows(function() { wf(4) }, Error);
+assertThrows(function() { wfx.a }, Error);
+assertThrows(function() { wgx.aa }, Error);
+assertThrows(function() { wh4.q }, Error);
+
+m.gate.enable();
+assertEquals(6, w.a);
+assertEquals(5, w.r.a);
+assertEquals(5, o.r.a);
+assertEquals(7, w.r = 7);
+assertEquals(7, w.r);
+assertEquals(7, o.r);
+assertEquals(8, w.b.bb);
+assertEquals(7, w[2]["c"]);
+assertEquals(undefined, w.c);
+assertEquals(8, wb.bb);
+assertEquals(3, wr.a);
+assertEquals(4, wf(4));
+assertEquals(3, wf3);
+assertEquals(6, wfx.a);
+assertEquals(7, wgx.aa);
+assertEquals(4, wh4.q);
+
+
+// An identity-preserving membrane. Adapted from:
+// http://wiki.ecmascript.org/doku.php?id=harmony:proxies#an_identity-preserving_membrane
+
+function createMembrane(wetTarget) {
+  var wet2dry = WeakMap();
+  var dry2wet = WeakMap();
+
+  function asDry(obj) {
+    registerObject(obj)
+    print("asDry enter", str(obj))
+    try {
+      var x = asDry2(obj);
+      registerObject(x, "dry");
+      print("asDry exit", str(obj), "as", str(x));
+      return x;
+    } catch(e) {
+      print("asDry exception", str(e));
+      throw e;
+    }
+  }
+  function asDry2(wet) {
+    if (wet !== Object(wet)) {
+      // primitives provide only irrevocable knowledge, so don't
+      // bother wrapping them.
+      return wet;
+    }
+    var dryResult = wet2dry.get(wet);
+    if (dryResult) { return dryResult; }
+
+    var wetHandler = createHandler(wet);
+    var dryRevokeHandler = Proxy.create(Object.freeze({
+      get: function(receiver, name) {
+        return function() {
+          var arg = (name === "get" || name == "set") ? arguments[1] : "";
+          print("dry handler enter", name, arg);
+          var optWetHandler = dry2wet.get(dryRevokeHandler);
+          try {
+            var x = asDry(optWetHandler[name].apply(
+              optWetHandler, Array.prototype.map.call(arguments, asWet)));
+            print("dry handler exit", name, arg, "returning", str(x));
+            return x;
+          } catch (eWet) {
+            var x = asDry(eWet);
+            print("dry handler exception", name, arg, "throwing", str(x));
+            throw x;
+          }
+        };
+      }
+    }));
+    dry2wet.set(dryRevokeHandler, wetHandler);
+
+    if (typeof wet === "function") {
+      function callTrap() {
+        print("dry call trap enter", str(this));
+        var x = asDry(wet.apply(
+          asWet(this), Array.prototype.map.call(arguments, asWet)));
+        print("dry call trap exit", str(this), "returning", str(x));
+        return x;
+      }
+      function constructTrap() {
+        function forward(args) { return wet.apply(this, args) }
+        return asDry(new forward(Array.prototype.map.call(arguments, asWet)));
+      }
+      dryResult =
+        Proxy.createFunction(dryRevokeHandler, callTrap, constructTrap);
+    } else {
+      dryResult =
+        Proxy.create(dryRevokeHandler, asDry(Object.getPrototypeOf(wet)));
+    }
+    wet2dry.set(wet, dryResult);
+    dry2wet.set(dryResult, wet);
+    return dryResult;
+  }
+
+  function asWet(obj) {
+    registerObject(obj)
+    print("asWet enter", str(obj))
+    try {
+      var x = asWet2(obj)
+      registerObject(x, "wet")
+      print("asWet exit", str(obj), "as", str(x))
+      return x
+    } catch(e) {
+      print("asWet exception", str(e))
+      throw e
+    }
+  }
+  function asWet2(dry) {
+    if (dry !== Object(dry)) {
+      // primitives provide only irrevocable knowledge, so don't
+      // bother wrapping them.
+      return dry;
+    }
+    var wetResult = dry2wet.get(dry);
+    if (wetResult) { return wetResult; }
+
+    var dryHandler = createHandler(dry);
+    var wetRevokeHandler = Proxy.create(Object.freeze({
+      get: function(receiver, name) {
+        return function() {
+          var arg = (name === "get" || name == "set") ? arguments[1] : "";
+          print("wet handler enter", name, arg);
+          var optDryHandler = wet2dry.get(wetRevokeHandler);
+          try {
+            var x = asWet(optDryHandler[name].apply(
+              optDryHandler, Array.prototype.map.call(arguments, asDry)));
+            print("wet handler exit", name, arg, "returning", str(x));
+            return x;
+          } catch (eDry) {
+            var x = asWet(eDry);
+            print("wet handler exception", name, arg, "throwing", str(x));
+            throw x;
+          }
+        };
+      }
+    }));
+    wet2dry.set(wetRevokeHandler, dryHandler);
+
+    if (typeof dry === "function") {
+      function callTrap() {
+        print("wet call trap enter", str(this));
+        var x = asWet(dry.apply(
+          asDry(this), Array.prototype.map.call(arguments, asDry)));
+        print("wet call trap exit", str(this), "returning", str(x));
+        return x;
+      }
+      function constructTrap() {
+        function forward(args) { return dry.apply(this, args) }
+        return asWet(new forward(Array.prototype.map.call(arguments, asDry)));
+      }
+      wetResult =
+        Proxy.createFunction(wetRevokeHandler, callTrap, constructTrap);
+    } else {
+      wetResult =
+        Proxy.create(wetRevokeHandler, asWet(Object.getPrototypeOf(dry)));
+    }
+    dry2wet.set(dry, wetResult);
+    wet2dry.set(wetResult, dry);
+    return wetResult;
+  }
+
+  var gate = Object.freeze({
+    revoke: function() {
+      dry2wet = wet2dry = Object.freeze({
+        get: function(key) { throw new Error("revoked"); },
+        set: function(key, val) { throw new Error("revoked"); }
+      });
+    }
+  });
+
+  return Object.freeze({ wrapper: asDry(wetTarget), gate: gate });
+}
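+
+// The wet2dry/dry2wet caches make wrapping idempotent, which is what makes
+// this membrane identity-preserving (see assertSame(w, w.f(w)) below), and
+// revoke() swaps both maps for a frozen object whose get/set always throw,
+// so every later crossing fails. Illustrative:
+// var mm = createMembrane({});
+// mm.gate.revoke();
+// mm.wrapper.anything      // throws Error("revoked")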
+
+
+var receiver
+var argument
+var o = {
+  a: 6,
+  b: {bb: 8},
+  f: function(x) { receiver = this; argument = x; return x },
+  g: function(x) { receiver = this; argument = x; return x.a },
+  h: function(x) { receiver = this; argument = x; this.q = x },
+  s: function(x) { receiver = this; argument = x; this.x = {y: x}; return this }
+}
+o[2] = {c: 7}
+var m = createMembrane(o)
+var w = m.wrapper
+print("o =", str(o))
+print("w =", str(w))
+
+var f = w.f
+var x = f(66)
+var x = f({a: 1})
+var x = w.f({a: 1})
+var a = x.a
+assertEquals(6, w.a)
+assertEquals(8, w.b.bb)
+assertEquals(7, w[2]["c"])
+assertEquals(undefined, w.c)
+assertEquals(1, w.f(1))
+assertSame(o, receiver)
+assertEquals(1, w.f({a: 1}).a)
+assertSame(o, receiver)
+assertEquals(2, w.g({a: 2}))
+assertSame(o, receiver)
+assertSame(w, w.f(w))
+assertSame(o, receiver)
+assertSame(o, argument)
+assertSame(o, w.f(o))
+assertSame(o, receiver)
+// Note that argument !== o, since o isn't dry, so gets wrapped wet again.
+assertEquals(3, (w.r = {a: 3}).a)
+assertEquals(3, w.r.a)
+assertEquals(3, o.r.a)
+w.h(3)
+assertEquals(3, w.q)
+assertEquals(3, o.q)
+assertEquals(4, (new w.h(4)).q)
+assertEquals(5, w.s(5).x.y)
+assertSame(o, receiver)
+
+var wb = w.b
+var wr = w.r
+var wf = w.f
+var wf3 = w.f(3)
+var wfx = w.f({a: 6})
+var wgx = w.g({a: {aa: 7}})
+var wh4 = new w.h(4)
+var ws5 = w.s(5)
+var ws5x = ws5.x
+m.gate.revoke()
+assertEquals(3, wf3)
+assertThrows(function() { w.a }, Error)
+assertThrows(function() { w.r }, Error)
+assertThrows(function() { w.r = {a: 4} }, Error)
+assertThrows(function() { o.r.a }, Error)
+assertEquals("object", typeof o.r)
+assertEquals(5, (o.r = {a: 5}).a)
+assertEquals(5, o.r.a)
+assertThrows(function() { w[1] }, Error)
+assertThrows(function() { w.c }, Error)
+assertThrows(function() { wb.bb }, Error)
+assertEquals(3, wr.a)
+assertThrows(function() { wf(4) }, Error)
+assertEquals(6, wfx.a)
+assertEquals(7, wgx.aa)
+assertThrows(function() { wh4.q }, Error)
+assertThrows(function() { ws5.x }, Error)
+assertThrows(function() { ws5x.y }, Error)
diff --git a/test/mjsunit/harmony/proxies-for.js b/test/mjsunit/harmony/proxies-for.js
new file mode 100644
index 0000000..3d419c6
--- /dev/null
+++ b/test/mjsunit/harmony/proxies-for.js
@@ -0,0 +1,168 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-proxies
+
+
+// Helper.
+
+function TestWithProxies(test, x, y, z) {
+  test(Proxy.create, x, y, z)
+  test(function(h) {return Proxy.createFunction(h, function() {})}, x, y, z)
+}
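+
+// Each test below therefore runs twice, roughly as:
+// var p1 = Proxy.create(handler)                          // object proxy
+// var p2 = Proxy.createFunction(handler, function() {})   // function proxy
+// so for-in is exercised on both proxy representations.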
+
+
+// Iterate over a proxy.
+
+function TestForIn(properties, handler) {
+  TestWithProxies(TestForIn2, properties, handler)
+}
+
+function TestForIn2(create, properties, handler) {
+  var p = create(handler)
+  var found = []
+  for (var x in p) found.push(x)
+  assertArrayEquals(properties, found)
+}
+
+TestForIn(["0", "a"], {
+  enumerate: function() { return [0, "a"] }
+})
+
+TestForIn(["null", "a"], {
+  enumerate: function() { return this.enumerate2() },
+  enumerate2: function() { return [null, "a"] }
+})
+
+TestForIn(["b", "d"], {
+  getPropertyNames: function() { return ["a", "b", "c", "d", "e"] },
+  getPropertyDescriptor: function(k) {
+    switch (k) {
+      case "a": return {enumerable: false, value: "3"};
+      case "b": return {enumerable: true, get get() {}};
+      case "c": return {value: 4};
+      case "d": return {get enumerable() { return true }};
+      default: return undefined;
+    }
+  }
+})
+
+TestForIn(["b", "a", "0", "c"], Proxy.create({
+  get: function(pr, pk) {
+    return function() { return ["b", "a", 0, "c"] }
+  }
+}))
+
+
+
+// Iterate over an object with a proxy prototype.
+
+function TestForInDerived(properties, handler) {
+  TestWithProxies(TestForInDerived2, properties, handler)
+}
+
+function TestForInDerived2(create, properties, handler) {
+  var p = create(handler)
+  var o = Object.create(p)
+  o.z = 0
+  var found = []
+  for (var x in o) found.push(x)
+  assertArrayEquals(["z"].concat(properties), found)
+
+  var oo = Object.create(o)
+  oo.y = 0
+  var found = []
+  for (var x in oo) found.push(x)
+  assertArrayEquals(["y", "z"].concat(properties), found)
+}
+
+TestForInDerived(["0", "a"], {
+  enumerate: function() { return [0, "a"] },
+  getPropertyDescriptor: function(k) {
+    return k == "0" || k == "a" ? {} : undefined
+  }
+})
+
+TestForInDerived(["null", "a"], {
+  enumerate: function() { return this.enumerate2() },
+  enumerate2: function() { return [null, "a"] },
+  getPropertyDescriptor: function(k) {
+    return k == "null" || k == "a" ? {} : undefined
+  }
+})
+
+TestForInDerived(["b", "d"], {
+  getPropertyNames: function() { return ["a", "b", "c", "d", "e"] },
+  getPropertyDescriptor: function(k) {
+    switch (k) {
+      case "a": return {enumerable: false, value: "3"};
+      case "b": return {enumerable: true, get get() {}};
+      case "c": return {value: 4};
+      case "d": return {get enumerable() { return true }};
+      default: return undefined;
+    }
+  }
+})
+
+
+
+// Throw exception in enumerate trap.
+
+function TestForInThrow(handler) {
+  TestWithProxies(TestForInThrow2, handler)
+}
+
+function TestForInThrow2(create, handler) {
+  var p = create(handler)
+  var o = Object.create(p)
+  assertThrows(function(){ for (var x in p) {} }, "myexn")
+  assertThrows(function(){ for (var x in o) {} }, "myexn")
+}
+
+TestForInThrow({
+  enumerate: function() { throw "myexn" }
+})
+
+TestForInThrow({
+  enumerate: function() { return this.enumerate2() },
+  enumerate2: function() { throw "myexn" }
+})
+
+TestForInThrow({
+  getPropertyNames: function() { throw "myexn" }
+})
+
+TestForInThrow({
+  getPropertyNames: function() { return ["a"] },
+  getPropertyDescriptor: function() { throw "myexn" }
+})
+
+TestForInThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function() { throw "myexn" }
+  }
+}))
diff --git a/test/mjsunit/harmony/proxies-function.js b/test/mjsunit/harmony/proxies-function.js
new file mode 100644
index 0000000..6b8d098
--- /dev/null
+++ b/test/mjsunit/harmony/proxies-function.js
@@ -0,0 +1,748 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-proxies --allow-natives-syntax
+
+
+// Helper.
+
+function CreateFrozen(handler, callTrap, constructTrap) {
+  if (handler.fix === undefined) handler.fix = function() { return {} }
+  var f = Proxy.createFunction(handler, callTrap, constructTrap)
+  Object.freeze(f)
+  return f
+}
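+
+// Under this proxy API, Object.freeze on a proxy invokes handler.fix(); a
+// returned descriptor map (here the empty {}) lets the freeze complete,
+// while returning undefined would make Object.freeze throw a TypeError.
+// Sketch:
+// var p = CreateFrozen({}, function() { return 1 })
+// p()                   // 1; the call trap still fires after fixing
+// Object.isFrozen(p)    // true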
+
+
+// Ensures that checking the "length" property of a function proxy doesn't
+// crash due to lack of a [[Get]] method.
+var handler = {
+  get : function(r, n) { return n == "length" ? 2 : undefined }
+}
+
+
+// Calling (call, Function.prototype.call, Function.prototype.apply,
+//          Function.prototype.bind).
+
+var global_object = this
+var receiver
+
+function TestCall(isStrict, callTrap) {
+  assertEquals(42, callTrap(5, 37))
+  // TODO(rossberg): strict mode seems to be broken on x64...
+  // assertSame(isStrict ? undefined : global_object, receiver)
+
+  var handler = {
+    get: function(r, k) {
+      return k == "length" ? 2 : Function.prototype[k]
+    }
+  }
+  var f = Proxy.createFunction(handler, callTrap)
+  var o = {f: f}
+  global_object.f = f
+
+  receiver = 333
+  assertEquals(42, f(11, 31))
+  // TODO(rossberg): strict mode seems to be broken on x64...
+  // assertSame(isStrict ? undefined : global_object, receiver)
+  receiver = 333
+  assertEquals(42, o.f(10, 32))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, o["f"](9, 33))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, (1, o).f(8, 34))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, (1, o)["f"](7, 35))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, f.call(o, 32, 10))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, f.call(undefined, 33, 9))
+  assertSame(isStrict ? undefined : global_object, receiver)
+  receiver = 333
+  assertEquals(42, f.call(null, 33, 9))
+  assertSame(isStrict ? null : global_object, receiver)
+  receiver = 333
+  assertEquals(44, f.call(2, 21, 23))
+  assertSame(2, receiver.valueOf())
+  receiver = 333
+  assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(43, Function.prototype.call.call(f, null, 20, 23))
+  assertSame(isStrict ? null : global_object, receiver)
+  assertEquals(44, Function.prototype.call.call(f, 2, 21, 23))
+  assertEquals(2, receiver.valueOf())
+  receiver = 333
+  assertEquals(32, f.apply(o, [16, 16]))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Call(o, 11, 31, f))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Call(null, 11, 31, f))
+  assertSame(isStrict ? null : global_object, receiver)
+  receiver = 333
+  assertEquals(42, %Apply(f, o, [11, 31], 0, 2))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Apply(f, null, [11, 31], 0, 2))
+  assertSame(isStrict ? null : global_object, receiver)
+  receiver = 333
+  assertEquals(42, %_CallFunction(o, 11, 31, f))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %_CallFunction(null, 11, 31, f))
+  assertSame(isStrict ? null : global_object, receiver)
+
+  var ff = Function.prototype.bind.call(f, o, 12)
+  assertTrue(ff.length <= 1)  // TODO(rossberg): Not spec'ed yet, be lax.
+  receiver = 333
+  assertEquals(42, ff(30))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(33, Function.prototype.call.call(ff, {}, 21))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(23, %Call({}, 11, ff))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(23, %Call({}, 11, 3, ff))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(24, %Apply(ff, {}, [12, 13], 0, 1))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(24, %Apply(ff, {}, [12, 13], 0, 2))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(34, %_CallFunction({}, 22, ff))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(34, %_CallFunction({}, 22, 3, ff))
+  assertSame(o, receiver)
+
+  var fff = Function.prototype.bind.call(ff, o, 30)
+  assertEquals(0, fff.length)
+  receiver = 333
+  assertEquals(42, fff())
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, Function.prototype.call.call(fff, {}))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, Function.prototype.apply.call(fff, {}))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Call({}, fff))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Call({}, 11, 3, fff))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Apply(fff, {}, [], 0, 0))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Apply(fff, {}, [12, 13], 0, 0))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Apply(fff, {}, [12, 13], 0, 2))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %_CallFunction({}, fff))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %_CallFunction({}, 3, 4, 5, fff))
+  assertSame(o, receiver)
+
+  var f = CreateFrozen({}, callTrap)
+  receiver = 333
+  assertEquals(42, f(11, 31))
+  assertSame(isStrict ? undefined : global_object, receiver)
+  var o = {f: f}
+  receiver = 333
+  assertEquals(42, o.f(10, 32))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, o["f"](9, 33))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, (1, o).f(8, 34))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, (1, o)["f"](7, 35))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(23, %Call(o, 11, 12, f))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(27, %Apply(f, o, [12, 13, 14], 1, 2))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %_CallFunction(o, 18, 24, f))
+  assertSame(o, receiver)
+}
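+
+// The %Call/%Apply/%_CallFunction forms above are V8-internal natives,
+// available here only because of --allow-natives-syntax in the Flags line;
+// they exercise V8's internal call paths rather than the JS-level wrappers.
+// Two routes to the same call, roughly:
+// f.call(o, 1, 2)       // through Function.prototype.call
+// %Call(o, 1, 2, f)     // through the native directly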
+
+TestCall(false, function(x, y) {
+  receiver = this
+  return x + y
+})
+
+TestCall(true, function(x, y) {
+  "use strict"
+  receiver = this
+  return x + y
+})
+
+TestCall(false, function() {
+  receiver = this
+  return arguments[0] + arguments[1]
+})
+
+TestCall(false, Proxy.createFunction(handler, function(x, y) {
+  receiver = this
+  return x + y
+}))
+
+TestCall(true, Proxy.createFunction(handler, function(x, y) {
+  "use strict"
+  receiver = this
+  return x + y
+}))
+
+TestCall(false, CreateFrozen(handler, function(x, y) {
+  receiver = this
+  return x + y
+}))
+
+
+
+// Using intrinsics as call traps.
+
+function TestCallIntrinsic(type, callTrap) {
+  var f = Proxy.createFunction({}, callTrap)
+  var x = f()
+  assertTrue(typeof x == type)
+}
+
+TestCallIntrinsic("boolean", Boolean)
+TestCallIntrinsic("number", Number)
+TestCallIntrinsic("string", String)
+TestCallIntrinsic("object", Object)
+TestCallIntrinsic("function", Function)
+
+
+
+// Throwing from call trap.
+
+function TestCallThrow(callTrap) {
+  var f = Proxy.createFunction({}, callTrap)
+  assertThrows(function(){ f(11) }, "myexn")
+  assertThrows(function(){ ({x: f}).x(11) }, "myexn")
+  assertThrows(function(){ ({x: f})["x"](11) }, "myexn")
+  assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
+  assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
+  assertThrows(function(){ %Call({}, f) }, "myexn")
+  assertThrows(function(){ %Call({}, 1, 2, f) }, "myexn")
+  assertThrows(function(){ %Apply({}, f, [], 3, 0) }, "myexn")
+  assertThrows(function(){ %Apply({}, f, [3, 4], 0, 1) }, "myexn")
+  assertThrows(function(){ %_CallFunction({}, f) }, "myexn")
+  assertThrows(function(){ %_CallFunction({}, 1, 2, f) }, "myexn")
+
+  var f = CreateFrozen({}, callTrap)
+  assertThrows(function(){ f(11) }, "myexn")
+  assertThrows(function(){ ({x: f}).x(11) }, "myexn")
+  assertThrows(function(){ ({x: f})["x"](11) }, "myexn")
+  assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
+  assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
+  assertThrows(function(){ %Call({}, f) }, "myexn")
+  assertThrows(function(){ %Call({}, 1, 2, f) }, "myexn")
+  assertThrows(function(){ %Apply({}, f, [], 3, 0) }, "myexn")
+  assertThrows(function(){ %Apply({}, f, [3, 4], 0, 1) }, "myexn")
+  assertThrows(function(){ %_CallFunction({}, f) }, "myexn")
+  assertThrows(function(){ %_CallFunction({}, 1, 2, f) }, "myexn")
+}
+
+TestCallThrow(function() { throw "myexn" })
+TestCallThrow(Proxy.createFunction({}, function() { throw "myexn" }))
+TestCallThrow(CreateFrozen({}, function() { throw "myexn" }))
+
+
+
+// Construction (new).
+
+var prototype = {myprop: 0}
+var receiver
+
+var handlerWithPrototype = {
+  fix: function() { return { prototype: { value: prototype } }; },
+  get: function(r, n) {
+    if (n == "length") return 2;
+    assertEquals("prototype", n);
+    return prototype;
+  }
+}
+
+var handlerSansPrototype = {
+  fix: function() { return { length: { value: 2 } } },
+  get: function(r, n) {
+    if (n == "length") return 2;
+    assertEquals("prototype", n);
+    return undefined;
+  }
+}
+
+function ReturnUndef(x, y) {
+  "use strict";
+  receiver = this;
+  this.sum = x + y;
+}
+
+function ReturnThis(x, y) {
+  "use strict";
+  receiver = this;
+  this.sum = x + y;
+  return this;
+}
+
+function ReturnNew(x, y) {
+  "use strict";
+  receiver = this;
+  return {sum: x + y};
+}
+
+function ReturnNewWithProto(x, y) {
+  "use strict";
+  receiver = this;
+  var result = Object.create(prototype);
+  result.sum = x + y;
+  return result;
+}
+
+function TestConstruct(proto, constructTrap) {
+  TestConstruct2(proto, constructTrap, handlerWithPrototype)
+  TestConstruct2(proto, constructTrap, handlerSansPrototype)
+}
+
+function TestConstruct2(proto, constructTrap, handler) {
+  var f = Proxy.createFunction(handler, function() {}, constructTrap)
+  var o = new f(11, 31)
+  assertEquals(undefined, receiver)
+  assertEquals(42, o.sum)
+  assertSame(proto, Object.getPrototypeOf(o))
+
+  var f = CreateFrozen(handler, function() {}, constructTrap)
+  var o = new f(11, 32)
+  assertEquals(undefined, receiver)
+  assertEquals(43, o.sum)
+  assertSame(proto, Object.getPrototypeOf(o))
+}
+
+TestConstruct(Object.prototype, ReturnNew)
+TestConstruct(prototype, ReturnNewWithProto)
+
+TestConstruct(Object.prototype, Proxy.createFunction(handler, ReturnNew))
+TestConstruct(prototype, Proxy.createFunction(handler, ReturnNewWithProto))
+
+TestConstruct(Object.prototype, CreateFrozen(handler, ReturnNew))
+TestConstruct(prototype, CreateFrozen(handler, ReturnNewWithProto))
+
+
+
+// Construction with derived construct trap.
+
+function TestConstructFromCall(proto, returnsThis, callTrap) {
+  TestConstructFromCall2(prototype, returnsThis, callTrap, handlerWithPrototype)
+  TestConstructFromCall2(proto, returnsThis, callTrap, handlerSansPrototype)
+}
+
+function TestConstructFromCall2(proto, returnsThis, callTrap, handler) {
+  // TODO(rossberg): handling of prototype for derived construct trap will be
+  // fixed in a separate change. Commenting out checks below for now.
+  var f = Proxy.createFunction(handler, callTrap)
+  var o = new f(11, 31)
+  if (returnsThis) assertEquals(o, receiver)
+  assertEquals(42, o.sum)
+  // assertSame(proto, Object.getPrototypeOf(o))
+
+  var g = CreateFrozen(handler, callTrap)
+  // assertSame(f.prototype, g.prototype)
+  var o = new g(11, 32)
+  if (returnsThis) assertEquals(o, receiver)
+  assertEquals(43, o.sum)
+  // assertSame(proto, Object.getPrototypeOf(o))
+}
+
+TestConstructFromCall(Object.prototype, true, ReturnUndef)
+TestConstructFromCall(Object.prototype, true, ReturnThis)
+TestConstructFromCall(Object.prototype, false, ReturnNew)
+TestConstructFromCall(prototype, false, ReturnNewWithProto)
+
+TestConstructFromCall(Object.prototype, true,
+                      Proxy.createFunction(handler, ReturnUndef))
+TestConstructFromCall(Object.prototype, true,
+                      Proxy.createFunction(handler, ReturnThis))
+TestConstructFromCall(Object.prototype, false,
+                      Proxy.createFunction(handler, ReturnNew))
+TestConstructFromCall(prototype, false,
+                      Proxy.createFunction(handler, ReturnNewWithProto))
+
+TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnUndef))
+TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnThis))
+TestConstructFromCall(Object.prototype, false, CreateFrozen({}, ReturnNew))
+TestConstructFromCall(prototype, false, CreateFrozen({}, ReturnNewWithProto))
+
+ReturnUndef.prototype = prototype
+ReturnThis.prototype = prototype
+ReturnNew.prototype = prototype
+ReturnNewWithProto.prototype = prototype
+
+TestConstructFromCall(prototype, true, ReturnUndef)
+TestConstructFromCall(prototype, true, ReturnThis)
+TestConstructFromCall(Object.prototype, false, ReturnNew)
+TestConstructFromCall(prototype, false, ReturnNewWithProto)
+
+TestConstructFromCall(Object.prototype, true,
+                      Proxy.createFunction(handler, ReturnUndef))
+TestConstructFromCall(Object.prototype, true,
+                      Proxy.createFunction(handler, ReturnThis))
+TestConstructFromCall(Object.prototype, false,
+                      Proxy.createFunction(handler, ReturnNew))
+TestConstructFromCall(prototype, false,
+                      Proxy.createFunction(handler, ReturnNewWithProto))
+
+TestConstructFromCall(prototype, true,
+                      Proxy.createFunction(handlerWithPrototype, ReturnUndef))
+TestConstructFromCall(prototype, true,
+                      Proxy.createFunction(handlerWithPrototype, ReturnThis))
+TestConstructFromCall(Object.prototype, false,
+                      Proxy.createFunction(handlerWithPrototype, ReturnNew))
+TestConstructFromCall(prototype, false,
+                      Proxy.createFunction(handlerWithPrototype,
+                                           ReturnNewWithProto))
+
+TestConstructFromCall(prototype, true,
+                      CreateFrozen(handlerWithPrototype, ReturnUndef))
+TestConstructFromCall(prototype, true,
+                      CreateFrozen(handlerWithPrototype, ReturnThis))
+TestConstructFromCall(Object.prototype, false,
+                      CreateFrozen(handlerWithPrototype, ReturnNew))
+TestConstructFromCall(prototype, false,
+                      CreateFrozen(handlerWithPrototype, ReturnNewWithProto))
+
+
+
+// Throwing from the construct trap.
+
+function TestConstructThrow(trap) {
+  TestConstructThrow2(Proxy.createFunction({ fix: function() {return {};} },
+                                           trap))
+  TestConstructThrow2(Proxy.createFunction({ fix: function() {return {};} },
+                                           function() {},
+                                           trap))
+}
+
+function TestConstructThrow2(f) {
+  assertThrows(function(){ new f(11) }, "myexn")
+  Object.freeze(f)
+  assertThrows(function(){ new f(11) }, "myexn")
+}
+
+TestConstructThrow(function() { throw "myexn" })
+TestConstructThrow(Proxy.createFunction({}, function() { throw "myexn" }))
+TestConstructThrow(CreateFrozen({}, function() { throw "myexn" }))
+
+
+
+// Using function proxies as getters and setters.
+
+var value
+var receiver
+
+function TestAccessorCall(getterCallTrap, setterCallTrap) {
+  var handler = { fix: function() { return {} } }
+  var pgetter = Proxy.createFunction(handler, getterCallTrap)
+  var psetter = Proxy.createFunction(handler, setterCallTrap)
+
+  var o = {}
+  var oo = Object.create(o)
+  Object.defineProperty(o, "a", {get: pgetter, set: psetter})
+  Object.defineProperty(o, "b", {get: pgetter})
+  Object.defineProperty(o, "c", {set: psetter})
+  Object.defineProperty(o, "3", {get: pgetter, set: psetter})
+  Object.defineProperty(oo, "a", {value: 43})
+
+  receiver = ""
+  assertEquals(42, o.a)
+  assertSame(o, receiver)
+  receiver = ""
+  assertEquals(42, o.b)
+  assertSame(o, receiver)
+  receiver = ""
+  assertEquals(undefined, o.c)
+  assertEquals("", receiver)
+  receiver = ""
+  assertEquals(42, o["a"])
+  assertSame(o, receiver)
+  receiver = ""
+  assertEquals(42, o[3])
+  assertSame(o, receiver)
+
+  receiver = ""
+  assertEquals(43, oo.a)
+  assertEquals("", receiver)
+  receiver = ""
+  assertEquals(42, oo.b)
+  assertSame(oo, receiver)
+  receiver = ""
+  assertEquals(undefined, oo.c)
+  assertEquals("", receiver)
+  receiver = ""
+  assertEquals(43, oo["a"])
+  assertEquals("", receiver)
+  receiver = ""
+  assertEquals(42, oo[3])
+  assertSame(oo, receiver)
+
+  receiver = ""
+  assertEquals(50, o.a = 50)
+  assertSame(o, receiver)
+  assertEquals(50, value)
+  receiver = ""
+  assertEquals(51, o.b = 51)
+  assertEquals("", receiver)
+  assertEquals(50, value)  // no setter
+  assertThrows(function() { "use strict"; o.b = 51 }, TypeError)
+  receiver = ""
+  assertEquals(52, o.c = 52)
+  assertSame(o, receiver)
+  assertEquals(52, value)
+  receiver = ""
+  assertEquals(53, o["a"] = 53)
+  assertSame(o, receiver)
+  assertEquals(53, value)
+  receiver = ""
+  assertEquals(54, o[3] = 54)
+  assertSame(o, receiver)
+  assertEquals(54, value)
+
+  value = 0
+  receiver = ""
+  assertEquals(60, oo.a = 60)
+  assertEquals("", receiver)
+  assertEquals(0, value)  // oo has own 'a'
+  assertEquals(61, oo.b = 61)
+  assertSame("", receiver)
+  assertEquals(0, value)  // no setter
+  assertThrows(function() { "use strict"; oo.b = 61 }, TypeError)
+  receiver = ""
+  assertEquals(62, oo.c = 62)
+  assertSame(oo, receiver)
+  assertEquals(62, value)
+  receiver = ""
+  assertEquals(63, oo["c"] = 63)
+  assertSame(oo, receiver)
+  assertEquals(63, value)
+  receiver = ""
+  assertEquals(64, oo[3] = 64)
+  assertSame(oo, receiver)
+  assertEquals(64, value)
+}
+
+TestAccessorCall(
+  function() { receiver = this; return 42 },
+  function(x) { receiver = this; value = x }
+)
+
+TestAccessorCall(
+  function() { "use strict"; receiver = this; return 42 },
+  function(x) { "use strict"; receiver = this; value = x }
+)
+
+TestAccessorCall(
+  Proxy.createFunction({}, function() { receiver = this; return 42 }),
+  Proxy.createFunction({}, function(x) { receiver = this; value = x })
+)
+
+TestAccessorCall(
+  CreateFrozen({}, function() { receiver = this; return 42 }),
+  CreateFrozen({}, function(x) { receiver = this; value = x })
+)
+
+
+
+// Passing a proxy function to higher-order library functions.
+
+function TestHigherOrder(f) {
+  assertEquals(6, [6, 2].map(f)[0])
+  assertEquals(4, [5, 2].reduce(f, 4))
+  assertTrue([1, 2].some(f))
+  assertEquals("a.b.c", "a.b.c".replace(".", f))
+}
+
+TestHigherOrder(function(x) { return x })
+TestHigherOrder(function(x) { "use strict"; return x })
+TestHigherOrder(Proxy.createFunction({}, function(x) { return x }))
+TestHigherOrder(CreateFrozen({}, function(x) { return x }))
+
+
+
+// TODO(rossberg): Ultimately, I want to have the following test function
+// run through, but it currently fails on so many cases (some not even
+// involving proxies) that I leave it for later...
+/*
+function TestCalls() {
+  var handler = {
+    get: function(r, k) {
+      return k == "length" ? 2 : Function.prototype[k]
+    }
+  }
+  var bind = Function.prototype.bind
+  var o = {}
+
+  var traps = [
+    function(x, y) {
+      return {receiver: this, result: x + y, strict: false}
+    },
+    function(x, y) { "use strict";
+      return {receiver: this, result: x + y, strict: true}
+    },
+    function() {
+      var x = arguments[0], y = arguments[1]
+      return {receiver: this, result: x + y, strict: false}
+    },
+    Proxy.createFunction(handler, function(x, y) {
+      return {receiver: this, result: x + y, strict: false}
+    }),
+    Proxy.createFunction(handler, function() {
+      var x = arguments[0], y = arguments[1]
+      return {receiver: this, result: x + y, strict: false}
+    }),
+    Proxy.createFunction(handler, function(x, y) { "use strict"
+      return {receiver: this, result: x + y, strict: true}
+    }),
+    CreateFrozen(handler, function(x, y) {
+      return {receiver: this, result: x + y, strict: false}
+    }),
+    CreateFrozen(handler, function(x, y) { "use strict"
+      return {receiver: this, result: x + y, strict: true}
+    }),
+  ]
+  var creates = [
+    function(trap) { return trap },
+    function(trap) { return CreateFrozen({}, trap) },
+    function(trap) { return Proxy.createFunction(handler, trap) },
+    function(trap) {
+      return Proxy.createFunction(handler, CreateFrozen({}, trap))
+    },
+    function(trap) {
+      return Proxy.createFunction(handler, Proxy.createFunction(handler, trap))
+    },
+  ]
+  var binds = [
+    function(f, o, x, y) { return f },
+    function(f, o, x, y) { return bind.call(f, o) },
+    function(f, o, x, y) { return bind.call(f, o, x) },
+    function(f, o, x, y) { return bind.call(f, o, x, y) },
+    function(f, o, x, y) { return bind.call(f, o, x, y, 5) },
+    function(f, o, x, y) { return bind.call(bind.call(f, o), {}, x, y) },
+    function(f, o, x, y) { return bind.call(bind.call(f, o, x), {}, y) },
+    function(f, o, x, y) { return bind.call(bind.call(f, o, x, y), {}, 5) },
+  ]
+  var calls = [
+    function(f, x, y) { return f(x, y) },
+    function(f, x, y) { var g = f; return g(x, y) },
+    function(f, x, y) { with ({}) return f(x, y) },
+    function(f, x, y) { var g = f; with ({}) return g(x, y) },
+    function(f, x, y, o) { with (o) return f(x, y) },
+    function(f, x, y, o) { return f.call(o, x, y) },
+    function(f, x, y, o) { return f.apply(o, [x, y]) },
+    function(f, x, y, o) { return Function.prototype.call.call(f, o, x, y) },
+    function(f, x, y, o) { return Function.prototype.apply.call(f, o, [x, y]) },
+    function(f, x, y, o) { return %_CallFunction(o, x, y, f) },
+    function(f, x, y, o) { return %Call(o, x, y, f) },
+    function(f, x, y, o) { return %Apply(f, o, [null, x, y, null], 1, 2) },
+    function(f, x, y, o) { return %Apply(f, o, arguments, 2, 2) },
+    function(f, x, y, o) { if (typeof o == "object") return o.f(x, y) },
+    function(f, x, y, o) { if (typeof o == "object") return o["f"](x, y) },
+    function(f, x, y, o) { if (typeof o == "object") return (1, o).f(x, y) },
+    function(f, x, y, o) { if (typeof o == "object") return (1, o)["f"](x, y) },
+  ]
+  var receivers = [o, global_object, undefined, null, 2, "bla", true]
+  var expectedNonStricts = [o, global_object, global_object, global_object]
+
+  for (var t = 0; t < traps.length; ++t) {
+    for (var i = 0; i < creates.length; ++i) {
+      for (var j = 0; j < binds.length; ++j) {
+        for (var k = 0; k < calls.length; ++k) {
+          for (var m = 0; m < receivers.length; ++m) {
+            for (var n = 0; n < receivers.length; ++n) {
+              var bound = receivers[m]
+              var receiver = receivers[n]
+              var func = binds[j](creates[i](traps[t]), bound, 31, 11)
+              var expected = j > 0 ? bound : receiver
+              var expectedNonStrict = expectedNonStricts[j > 0 ? m : n]
+              o.f = func
+              global_object.f = func
+              var x = calls[k](func, 11, 31, receiver)
+              if (x !== undefined) {
+                assertEquals(42, x.result)
+                if (calls[k].length < 4)
+                  assertSame(x.strict ? undefined : global_object, x.receiver)
+                else if (x.strict)
+                  assertSame(expected, x.receiver)
+                else if (expectedNonStrict === undefined)
+                  assertSame(expected, x.receiver.valueOf())
+                else
+                  assertSame(expectedNonStrict, x.receiver)
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+TestCalls()
+*/
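A minimal sketch of the function-proxy semantics the tests above exercise, assuming the pre-ES6 Harmony API behind --harmony-proxies: invoking a function proxy runs its call trap, while property reads go through the handler, so the proxy can be passed anywhere a plain function is expected.

  var handler = {
    get: function(receiver, name) { return Function.prototype[name] }
  };
  var f = Proxy.createFunction(handler, function(x, y) { return x + y });
  assertEquals(42, f(31, 11));   // invocation hits the call trap
  assertTrue([21, 21].some(f));  // usable as an ordinary function value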
diff --git a/test/mjsunit/harmony/proxies-hash.js b/test/mjsunit/harmony/proxies-hash.js
new file mode 100644
index 0000000..abfc0f5
--- /dev/null
+++ b/test/mjsunit/harmony/proxies-hash.js
@@ -0,0 +1,122 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-proxies --harmony-collections
+
+
+// Helper.
+
+function TestWithProxies(test, construct, handler) {
+  test(construct, handler, Proxy.create)
+  test(construct, handler, function(h) {
+    return Proxy.createFunction(h, function() {})
+  })
+}
+
+
+// Sets.
+
+function TestSet(construct, fix) {
+  TestWithProxies(TestSet2, construct, fix)
+}
+
+function TestSet2(construct, fix, create) {
+  var handler = {fix: function() { return {} }}
+  var p1 = create(handler)
+  var p2 = create(handler)
+  var p3 = create(handler)
+  fix(p3)
+
+  var s = construct();
+  s.add(p1);
+  s.add(p2);
+  assertTrue(s.has(p1));
+  assertTrue(s.has(p2));
+  assertFalse(s.has(p3));
+
+  fix(p1)
+  fix(p2)
+  assertTrue(s.has(p1));
+  assertTrue(s.has(p2));
+  assertFalse(s.has(p3));
+
+  s.delete(p2);
+  assertTrue(s.has(p1));
+  assertFalse(s.has(p2));
+  assertFalse(s.has(p3));
+}
+
+TestSet(Set, Object.seal)
+TestSet(Set, Object.freeze)
+TestSet(Set, Object.preventExtensions)
+
+
+// Maps and weak maps.
+
+function TestMap(construct, fix) {
+  TestWithProxies(TestMap2, construct, fix)
+}
+
+function TestMap2(construct, fix, create) {
+  var handler = {fix: function() { return {} }}
+  var p1 = create(handler)
+  var p2 = create(handler)
+  var p3 = create(handler)
+  fix(p3)
+
+  var m = construct();
+  m.set(p1, 123);
+  m.set(p2, 321);
+  assertTrue(m.has(p1));
+  assertTrue(m.has(p2));
+  assertFalse(m.has(p3));
+  assertSame(123, m.get(p1));
+  assertSame(321, m.get(p2));
+
+  fix(p1)
+  fix(p2)
+  assertTrue(m.has(p1));
+  assertTrue(m.has(p2));
+  assertFalse(m.has(p3));
+  assertSame(123, m.get(p1));
+  assertSame(321, m.get(p2));
+
+  m.delete(p2);
+  assertTrue(m.has(p1));
+  assertFalse(m.has(p2));
+  assertFalse(m.has(p3));
+  assertSame(123, m.get(p1));
+  assertSame(undefined, m.get(p2));
+}
+
+TestMap(Map, Object.seal)
+TestMap(Map, Object.freeze)
+TestMap(Map, Object.preventExtensions)
+
+TestMap(WeakMap, Object.seal)
+TestMap(WeakMap, Object.freeze)
+TestMap(WeakMap, Object.preventExtensions)
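These hash tests pivot on one Harmony-era rule: Object.seal, Object.freeze, and Object.preventExtensions on a proxy invoke its fix trap, the returned descriptor map becomes the object's final own properties, and the proxy's identity (hence its hash) is preserved. A condensed sketch under those semantics:

  var p = Proxy.create({fix: function() { return {} }});
  var s = new Set;    // requires --harmony-collections
  s.add(p);
  Object.freeze(p);   // runs the fix trap; p becomes an ordinary frozen object
  assertTrue(s.has(p));  // identity, and thus the hash, survives fixing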
diff --git a/test/mjsunit/harmony/proxies.js b/test/mjsunit/harmony/proxies.js
index 3c4e5f6..8d8f839 100644
--- a/test/mjsunit/harmony/proxies.js
+++ b/test/mjsunit/harmony/proxies.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,70 +28,167 @@
 // Flags: --harmony-proxies
 
 
-// TODO(rossberg): for-in for proxies not implemented.
-// TODO(rossberg): inheritance from proxies not implemented.
-// TODO(rossberg): function proxies as constructors not implemented.
-
-
 // Helper.
 
-function TestWithProxies(test, handler) {
-  test(handler, Proxy.create)
-  test(handler, function(h) {return Proxy.createFunction(h, function() {})})
+function TestWithProxies(test, x, y, z) {
+  test(Proxy.create, x, y, z)
+  test(function(h) {return Proxy.createFunction(h, function() {})}, x, y, z)
 }
 
 
-// Getters.
+
+// Getting property descriptors (Object.getOwnPropertyDescriptor).
+
+var key
+
+function TestGetOwnProperty(handler) {
+  TestWithProxies(TestGetOwnProperty2, handler)
+}
+
+function TestGetOwnProperty2(create, handler) {
+  var p = create(handler)
+  assertEquals(42, Object.getOwnPropertyDescriptor(p, "a").value)
+  assertEquals("a", key)
+  assertEquals(42, Object.getOwnPropertyDescriptor(p, 99).value)
+  assertEquals("99", key)
+}
+
+TestGetOwnProperty({
+  getOwnPropertyDescriptor: function(k) {
+    key = k
+    return {value: 42, configurable: true}
+  }
+})
+
+TestGetOwnProperty({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) {
+    key = k
+    return {value: 42, configurable: true}
+  }
+})
+
+TestGetOwnProperty({
+  getOwnPropertyDescriptor: function(k) {
+    key = k
+    return {get value() { return 42 }, get configurable() { return true }}
+  }
+})
+
+TestGetOwnProperty(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { key = k; return {value: 42, configurable: true} }
+  }
+}))
+
+
+function TestGetOwnPropertyThrow(handler) {
+  TestWithProxies(TestGetOwnPropertyThrow2, handler)
+}
+
+function TestGetOwnPropertyThrow2(create, handler) {
+  var p = create(handler)
+  assertThrows(function(){ Object.getOwnPropertyDescriptor(p, "a") }, "myexn")
+  assertThrows(function(){ Object.getOwnPropertyDescriptor(p, 77) }, "myexn")
+}
+
+TestGetOwnPropertyThrow({
+  getOwnPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestGetOwnPropertyThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) { throw "myexn" }
+})
+
+TestGetOwnPropertyThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return {get value() { throw "myexn" }}
+  }
+})
+
+TestGetOwnPropertyThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { throw "myexn" }
+  }
+}))
+
+
+
+// Getters (dot, brackets).
+
+var key
 
 function TestGet(handler) {
   TestWithProxies(TestGet2, handler)
 }
 
-function TestGet2(handler, create) {
+function TestGet2(create, handler) {
   var p = create(handler)
   assertEquals(42, p.a)
+  assertEquals("a", key)
   assertEquals(42, p["b"])
+  assertEquals("b", key)
+  assertEquals(42, p[99])
+  assertEquals("99", key)
+  assertEquals(42, (function(n) { return p[n] })("c"))
+  assertEquals("c", key)
+  assertEquals(42, (function(n) { return p[n] })(101))
+  assertEquals("101", key)
 
-  // TODO(rossberg): inheritance from proxies not yet implemented.
-  // var o = Object.create(p, {x: {value: 88}})
-  // assertEquals(42, o.a)
-  // assertEquals(42, o["b"])
-  // assertEquals(88, o.x)
-  // assertEquals(88, o["x"])
+  var o = Object.create(p, {x: {value: 88}})
+  assertEquals(42, o.a)
+  assertEquals("a", key)
+  assertEquals(42, o["b"])
+  assertEquals("b", key)
+  assertEquals(42, o[99])
+  assertEquals("99", key)
+  assertEquals(88, o.x)
+  assertEquals(88, o["x"])
+  assertEquals(42, (function(n) { return o[n] })("c"))
+  assertEquals("c", key)
+  assertEquals(42, (function(n) { return o[n] })(101))
+  assertEquals("101", key)
+  assertEquals(88, (function(n) { return o[n] })("x"))
 }
 
 TestGet({
-  get: function(r, k) { return 42 }
+  get: function(r, k) { key = k; return 42 }
 })
 
 TestGet({
   get: function(r, k) { return this.get2(r, k) },
-  get2: function(r, k) { return 42 }
+  get2: function(r, k) { key = k; return 42 }
 })
 
 TestGet({
-  getPropertyDescriptor: function(k) { return {value: 42} }
+  getPropertyDescriptor: function(k) { key = k; return {value: 42} }
 })
 
 TestGet({
   getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
-  getPropertyDescriptor2: function(k) { return {value: 42} }
+  getPropertyDescriptor2: function(k) { key = k; return {value: 42} }
 })
 
 TestGet({
   getPropertyDescriptor: function(k) {
+    key = k;
     return {get value() { return 42 }}
   }
 })
 
 TestGet({
   get: undefined,
-  getPropertyDescriptor: function(k) { return {value: 42} }
+  getPropertyDescriptor: function(k) { key = k; return {value: 42} }
 })
 
 TestGet(Proxy.create({
   get: function(pr, pk) {
-    return function(r, k) { return 42 }
+    return function(r, k) { key = k; return 42 }
   }
 }))
 
@@ -100,14 +197,43 @@
   TestWithProxies(TestGetCall2, handler)
 }
 
-function TestGetCall2(handler, create) {
+function TestGetCall2(create, handler) {
   var p = create(handler)
   assertEquals(55, p.f())
+  assertEquals(55, p["f"]())
   assertEquals(55, p.f("unused", "arguments"))
   assertEquals(55, p.f.call(p))
+  assertEquals(55, p["f"].call(p))
+  assertEquals(55, p[101].call(p))
   assertEquals(55, p.withargs(45, 5))
   assertEquals(55, p.withargs.call(p, 11, 22))
+  assertEquals(55, (function(n) { return p[n]() })("f"))
+  assertEquals(55, (function(n) { return p[n].call(p) })("f"))
+  assertEquals(55, (function(n) { return p[n](15, 20) })("withargs"))
+  assertEquals(55, (function(n) { return p[n].call(p, 13, 21) })("withargs"))
   assertEquals("6655", "66" + p)  // calls p.toString
+
+  var o = Object.create(p, {g: {value: function(x) { return x + 88 }}})
+  assertEquals(55, o.f())
+  assertEquals(55, o["f"]())
+  assertEquals(55, o.f("unused", "arguments"))
+  assertEquals(55, o.f.call(o))
+  assertEquals(55, o.f.call(p))
+  assertEquals(55, o["f"].call(p))
+  assertEquals(55, o[101].call(p))
+  assertEquals(55, o.withargs(45, 5))
+  assertEquals(55, o.withargs.call(p, 11, 22))
+  assertEquals(90, o.g(2))
+  assertEquals(91, o.g.call(o, 3))
+  assertEquals(92, o.g.call(p, 4))
+  assertEquals(55, (function(n) { return o[n]() })("f"))
+  assertEquals(55, (function(n) { return o[n].call(o) })("f"))
+  assertEquals(55, (function(n) { return o[n](15, 20) })("withargs"))
+  assertEquals(55, (function(n) { return o[n].call(o, 13, 21) })("withargs"))
+  assertEquals(93, (function(n) { return o[n](5) })("g"))
+  assertEquals(94, (function(n) { return o[n].call(o, 6) })("g"))
+  assertEquals(95, (function(n) { return o[n].call(p, 7) })("g"))
+  assertEquals("6655", "66" + o)  // calls o.toString
 }
 
 TestGetCall({
@@ -168,10 +294,20 @@
   TestWithProxies(TestGetThrow2, handler)
 }
 
-function TestGetThrow2(handler, create) {
+function TestGetThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ p.a }, "myexn")
   assertThrows(function(){ p["b"] }, "myexn")
+  assertThrows(function(){ p[3] }, "myexn")
+  assertThrows(function(){ (function(n) { p[n] })("c") }, "myexn")
+  assertThrows(function(){ (function(n) { p[n] })(99) }, "myexn")
+
+  var o = Object.create(p, {x: {value: 88}, '4': {value: 89}})
+  assertThrows(function(){ o.a }, "myexn")
+  assertThrows(function(){ o["b"] }, "myexn")
+  assertThrows(function(){ o[3] }, "myexn")
+  assertThrows(function(){ (function(n) { o[n] })("c") }, "myexn")
+  assertThrows(function(){ (function(n) { o[n] })(99) }, "myexn")
 }
 
 TestGetThrow({
@@ -220,11 +356,11 @@
 var key
 var val
 
-function TestSet(handler, create) {
+function TestSet(handler) {
   TestWithProxies(TestSet2, handler)
 }
 
-function TestSet2(handler, create) {
+function TestSet2(create, handler) {
   var p = create(handler)
   assertEquals(42, p.a = 42)
   assertEquals("a", key)
@@ -232,6 +368,16 @@
   assertEquals(43, p["b"] = 43)
   assertEquals("b", key)
   assertEquals(43, val)
+  assertEquals(44, p[77] = 44)
+  assertEquals("77", key)
+  assertEquals(44, val)
+
+  assertEquals(45, (function(n) { return p[n] = 45 })("c"))
+  assertEquals("c", key)
+  assertEquals(45, val)
+  assertEquals(46, (function(n) { return p[n] = 46 })(99))
+  assertEquals("99", key)
+  assertEquals(46, val)
 }
 
 TestSet({
@@ -304,15 +450,17 @@
 }))
 
 
-
-function TestSetThrow(handler, create) {
+function TestSetThrow(handler) {
   TestWithProxies(TestSetThrow2, handler)
 }
 
-function TestSetThrow2(handler, create) {
+function TestSetThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ p.a = 42 }, "myexn")
   assertThrows(function(){ p["b"] = 42 }, "myexn")
+  assertThrows(function(){ p[22] = 42 }, "myexn")
+  assertThrows(function(){ (function(n) { p[n] = 45 })("c") }, "myexn")
+  assertThrows(function(){ (function(n) { p[n] = 46 })(99) }, "myexn")
 }
 
 TestSetThrow({
@@ -424,6 +572,124 @@
 }))
 
 
+var key
+var val
+
+function TestSetForDerived(handler) {
+  TestWithProxies(TestSetForDerived2, handler)
+}
+
+function TestSetForDerived2(create, handler) {
+  var p = create(handler)
+  var o = Object.create(p, {x: {value: 88, writable: true},
+                            '1': {value: 89, writable: true}})
+
+  key = ""
+  assertEquals(48, o.x = 48)
+  assertEquals("", key)  // trap not invoked
+  assertEquals(48, o.x)
+
+  assertEquals(47, o[1] = 47)
+  assertEquals("", key)  // trap not invoked
+  assertEquals(47, o[1])
+
+  assertEquals(49, o.y = 49)
+  assertEquals("y", key)
+  assertEquals(49, o.y)
+
+  assertEquals(50, o[2] = 50)
+  assertEquals("2", key)
+  assertEquals(50, o[2])
+
+  assertEquals(44, o.p_writable = 44)
+  assertEquals("p_writable", key)
+  assertEquals(44, o.p_writable)
+
+  assertEquals(45, o.p_nonwritable = 45)
+  assertEquals("p_nonwritable", key)
+  assertEquals(45, o.p_nonwritable)
+
+  assertEquals(46, o.p_setter = 46)
+  assertEquals("p_setter", key)
+  assertEquals(46, val)  // written to parent
+  assertFalse(Object.prototype.hasOwnProperty.call(o, "p_setter"))
+
+  val = ""
+  assertEquals(47, o.p_nosetter = 47)
+  assertEquals("p_nosetter", key)
+  assertEquals("", val)  // not written at all
+  assertFalse(Object.prototype.hasOwnProperty.call(o, "p_nosetter"));
+
+  key = ""
+  assertThrows(function(){ "use strict"; o.p_nosetter = 50 }, TypeError)
+  assertEquals("p_nosetter", key)
+  assertEquals("", val)  // not written at all
+
+  assertThrows(function(){ o.p_nonconf = 53 }, TypeError)
+  assertEquals("p_nonconf", key)
+
+  assertThrows(function(){ o.p_throw = 51 }, "myexn")
+  assertEquals("p_throw", key)
+
+  assertThrows(function(){ o.p_setterthrow = 52 }, "myexn")
+  assertEquals("p_setterthrow", key)
+}
+
+TestSetForDerived({
+  getPropertyDescriptor: function(k) {
+    key = k;
+    switch (k) {
+      case "p_writable": return {writable: true, configurable: true}
+      case "p_nonwritable": return {writable: false, configurable: true}
+      case "p_setter":return {set: function(x) { val = x }, configurable: true}
+      case "p_nosetter": return {get: function() { return 1 }, configurable: true}
+      case "p_nonconf":return {}
+      case "p_throw": throw "myexn"
+      case "p_setterthrow": return {set: function(x) { throw "myexn" }}
+      default: return undefined
+    }
+  }
+})
+
+
+// Evil proxy-induced side-effects shouldn't crash.
+// TODO(rossberg): proper behaviour isn't really spec'ed yet, so ignore results.
+
+TestWithProxies(function(create) {
+  var calls = 0
+  var handler = {
+    getPropertyDescriptor: function() {
+      ++calls
+      return (calls % 2 == 1)
+        ? {get: function() { return 5 }, configurable: true}
+        : {set: function() { return false }, configurable: true}
+    }
+  }
+  var p = create(handler)
+  var o = Object.create(p)
+  // Make proxy prototype property read-only after CanPut check.
+  try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestWithProxies(function(create) {
+  var handler = {
+    getPropertyDescriptor: function() {
+      Object.defineProperty(o, "x", {get: function() { return 5 }});
+      return {set: function() {}}
+    }
+  }
+  var p = create(handler)
+  var o = Object.create(p)
+  // Make object property read-only after CanPut check.
+  try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+
+
+// TODO(rossberg): TestSetReject, returning false
+// TODO(rossberg): TestGetProperty, TestSetProperty
+
+
 
 // Property definition (Object.defineProperty and Object.defineProperties).
 
@@ -434,7 +700,7 @@
   TestWithProxies(TestDefine2, handler)
 }
 
-function TestDefine2(handler, create) {
+function TestDefine2(create, handler) {
   var p = create(handler)
   assertEquals(p, Object.defineProperty(p, "a", {value: 44}))
   assertEquals("a", key)
@@ -453,6 +719,12 @@
   assertEquals(46, desc.value)
   assertEquals(false, desc.enumerable)
 
+  assertEquals(p, Object.defineProperty(p, 101, {value: 47, enumerable: false}))
+  assertEquals("101", key)
+  assertEquals(2, Object.getOwnPropertyNames(desc).length)
+  assertEquals(47, desc.value)
+  assertEquals(false, desc.enumerable)
+
   var attributes = {configurable: true, mine: 66, minetoo: 23}
   assertEquals(p, Object.defineProperty(p, "d", attributes))
   assertEquals("d", key)
@@ -474,20 +746,20 @@
   assertEquals("zzz", key)
   assertEquals(0, Object.getOwnPropertyNames(desc).length)
 
-// TODO(rossberg): This test requires for-in on proxies.
-//  var d = create({
-//    get: function(r, k) { return (k === "value") ? 77 : void 0 },
-//    getOwnPropertyNames: function() { return ["value"] }
-//  })
-//  assertEquals(1, Object.getOwnPropertyNames(d).length)
-//  assertEquals(77, d.value)
-//  assertEquals(p, Object.defineProperty(p, "p", d))
-//  assertEquals("p", key)
-//  assertEquals(1, Object.getOwnPropertyNames(desc).length)
-//  assertEquals(77, desc.value)
+  var d = create({
+    get: function(r, k) { return (k === "value") ? 77 : void 0 },
+    getOwnPropertyNames: function() { return ["value"] },
+    enumerate: function() { return ["value"] }
+  })
+  assertEquals(1, Object.getOwnPropertyNames(d).length)
+  assertEquals(77, d.value)
+  assertEquals(p, Object.defineProperty(p, "p", d))
+  assertEquals("p", key)
+  assertEquals(1, Object.getOwnPropertyNames(desc).length)
+  assertEquals(77, desc.value)
 
   var props = {
-    'bla': {},
+    '11': {},
     blub: {get: function() { return true }},
     '': {get value() { return 20 }},
     last: {value: 21, configurable: true, mine: "eyes"}
@@ -524,21 +796,21 @@
   TestWithProxies(TestDefineThrow2, handler)
 }
 
-function TestDefineThrow2(handler, create) {
+function TestDefineThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ Object.defineProperty(p, "a", {value: 44})}, "myexn")
+  assertThrows(function(){ Object.defineProperty(p, 0, {value: 44})}, "myexn")
 
-// TODO(rossberg): These tests require for-in on proxies.
-//  var d1 = create({
-//    get: function(r, k) { throw "myexn" },
-//    getOwnPropertyNames: function() { return ["value"] }
-//  })
-//  assertThrows(function(){ Object.defineProperty(p, "p", d1) }, "myexn")
-//  var d2 = create({
-//    get: function(r, k) { return 77 },
-//    getOwnPropertyNames: function() { throw "myexn" }
-//  })
-//  assertThrows(function(){ Object.defineProperty(p, "p", d2) }, "myexn")
+  var d1 = create({
+    get: function(r, k) { throw "myexn" },
+    getOwnPropertyNames: function() { return ["value"] }
+  })
+  assertThrows(function(){ Object.defineProperty(p, "p", d1) }, "myexn")
+  var d2 = create({
+    get: function(r, k) { return 77 },
+    getOwnPropertyNames: function() { throw "myexn" }
+  })
+  assertThrows(function(){ Object.defineProperty(p, "p", d2) }, "myexn")
 
   var props = {bla: {get value() { throw "otherexn" }}}
   assertThrows(function(){ Object.defineProperties(p, props) }, "otherexn")
@@ -573,12 +845,14 @@
   TestWithProxies(TestDelete2, handler)
 }
 
-function TestDelete2(handler, create) {
+function TestDelete2(create, handler) {
   var p = create(handler)
   assertEquals(true, delete p.a)
   assertEquals("a", key)
   assertEquals(true, delete p["b"])
   assertEquals("b", key)
+  assertEquals(true, delete p[1])
+  assertEquals("1", key)
 
   assertEquals(false, delete p.z1)
   assertEquals("z1", key)
@@ -591,6 +865,8 @@
     assertEquals("c", key)
     assertEquals(true, delete p["d"])
     assertEquals("d", key)
+    assertEquals(true, delete p[2])
+    assertEquals("2", key)
 
     assertThrows(function(){ delete p.z3 }, TypeError)
     assertEquals("z3", key)
@@ -619,15 +895,17 @@
   TestWithProxies(TestDeleteThrow2, handler)
 }
 
-function TestDeleteThrow2(handler, create) {
+function TestDeleteThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ delete p.a }, "myexn")
   assertThrows(function(){ delete p["b"] }, "myexn");
+  assertThrows(function(){ delete p[3] }, "myexn");
 
   (function() {
     "use strict"
     assertThrows(function(){ delete p.c }, "myexn")
     assertThrows(function(){ delete p["d"] }, "myexn")
+    assertThrows(function(){ delete p[4] }, "myexn");
   })()
 }
 
@@ -658,7 +936,7 @@
   TestWithProxies(TestDescriptor2, handler)
 }
 
-function TestDescriptor2(handler, create) {
+function TestDescriptor2(create, handler) {
   var p = create(handler)
   var descs = [
     {configurable: true},
@@ -697,7 +975,7 @@
   TestWithProxies(TestDescriptorThrow2, handler)
 }
 
-function TestDescriptorThrow2(handler, create) {
+function TestDescriptorThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ Object.getOwnPropertyDescriptor(p, "a") }, "myexn")
 }
@@ -721,7 +999,7 @@
   TestWithProxies(TestComparison2, eq)
 }
 
-function TestComparison2(eq, create) {
+function TestComparison2(create, eq) {
   var p1 = create({})
   var p2 = create({})
 
@@ -764,7 +1042,7 @@
   TestWithProxies(TestIn2, handler)
 }
 
-function TestIn2(handler, create) {
+function TestIn2(create, handler) {
   var p = create(handler)
   assertTrue("a" in p)
   assertEquals("a", key)
@@ -778,6 +1056,7 @@
   assertEquals(0, ("zzz" in p) ? 2 : 0)
   assertEquals(2, !("zzz" in p) ? 2 : 0)
 
+  // Test compilation in conditionals.
   if ("b" in p) {
   } else {
     assertTrue(false)
@@ -830,7 +1109,7 @@
 })
 
 TestIn({
-  get: undefined,
+  has: undefined,
   getPropertyDescriptor: function(k) {
     key = k; return k < "z" ? {value: 42} : void 0
   }
@@ -847,9 +1126,10 @@
   TestWithProxies(TestInThrow2, handler)
 }
 
-function TestInThrow2(handler, create) {
+function TestInThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ return "a" in o }, "myexn")
+  assertThrows(function(){ return 99 in o }, "myexn")
   assertThrows(function(){ return !("a" in o) }, "myexn")
   assertThrows(function(){ return ("a" in o) ? 2 : 3 }, "myexn")
   assertThrows(function(){ if ("b" in o) {} }, "myexn")
@@ -876,7 +1156,7 @@
 })
 
 TestInThrow({
-  get: undefined,
+  has: undefined,
   getPropertyDescriptor: function(k) { throw "myexn" }
 })
 
@@ -891,6 +1171,158 @@
 }))
 
 
+function TestInForDerived(handler) {
+  TestWithProxies(TestInForDerived2, handler)
+}
+
+function TestInForDerived2(create, handler) {
+  var p = create(handler)
+  var o = Object.create(p)
+
+  assertTrue("a" in o)
+  assertEquals("a", key)
+  assertTrue(99 in o)
+  assertEquals("99", key)
+  assertFalse("z" in o)
+  assertEquals("z", key)
+
+  assertEquals(2, ("a" in o) ? 2 : 0)
+  assertEquals(0, !("a" in o) ? 2 : 0)
+  assertEquals(0, ("zzz" in o) ? 2 : 0)
+  assertEquals(2, !("zzz" in o) ? 2 : 0)
+
+  if ("b" in o) {
+  } else {
+    assertTrue(false)
+  }
+  assertEquals("b", key)
+
+  if ("zz" in o) {
+    assertTrue(false)
+  }
+  assertEquals("zz", key)
+
+  if (!("c" in o)) {
+    assertTrue(false)
+  }
+  assertEquals("c", key)
+
+  if (!("zzz" in o)) {
+  } else {
+    assertTrue(false)
+  }
+  assertEquals("zzz", key)
+}
+
+TestInForDerived({
+  getPropertyDescriptor: function(k) {
+    key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+  }
+})
+
+TestInForDerived({
+  getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+  getPropertyDescriptor2: function(k) {
+    key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+  }
+})
+
+TestInForDerived({
+  getPropertyDescriptor: function(k) {
+    key = k;
+    return k < "z" ? {get value() { return 42 }, configurable: true} : void 0
+  }
+})
+
+/* TODO(rossberg): this will work once we implement the newest proposal
+ * regarding default traps for getPropertyDescriptor.
+TestInForDerived({
+  getOwnPropertyDescriptor: function(k) {
+    key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+  }
+})
+
+TestInForDerived({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) {
+    key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+  }
+})
+
+TestInForDerived({
+  getOwnPropertyDescriptor: function(k) {
+    key = k;
+    return k < "z" ? {get value() { return 42 }, configurable: true} : void 0
+  }
+})
+*/
+
+TestInForDerived(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) {
+      key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+    }
+  }
+}))
+
+
+
+// Property descriptor conversion.
+
+var descget
+
+function TestDescriptorGetOrder(handler) {
+  var p = Proxy.create(handler)
+  var o = Object.create(p, {b: {value: 0}})
+  TestDescriptorGetOrder2(function(n) { return p[n] }, "vV")
+  TestDescriptorGetOrder2(function(n) { return n in p }, "")
+  TestDescriptorGetOrder2(function(n) { return o[n] }, "vV")
+  TestDescriptorGetOrder2(function(n) { return n in o }, "eEcCvVwWgs")
+}
+
+function TestDescriptorGetOrder2(f, access) {
+  descget = ""
+  assertTrue(f("a"))
+  assertEquals(access, descget)
+  descget = ""
+  assertTrue(f(99))
+  assertEquals(access, descget)
+  descget = ""
+  assertFalse(!!f("z"))
+  assertEquals("", descget)
+}
+
+TestDescriptorGetOrder({
+  getPropertyDescriptor: function(k) {
+    if (k >= "z") return void 0
+    // Return a proxy as property descriptor, so that we can log accesses.
+    return Proxy.create({
+      get: function(r, attr) {
+        descget += attr[0].toUpperCase()
+        return true
+      },
+      has: function(attr) {
+        descget += attr[0]
+        switch (attr) {
+          case "writable":
+          case "enumerable":
+          case "configurable":
+          case "value":
+            return true
+          case "get":
+          case "set":
+            return false
+          default:
+            assertUnreachable()
+        }
+      }
+    })
+  }
+})
+
+
 
 // Own Properties (Object.prototype.hasOwnProperty).
 
@@ -900,7 +1332,7 @@
   TestWithProxies(TestHasOwn2, handler)
 }
 
-function TestHasOwn2(handler, create) {
+function TestHasOwn2(create, handler) {
   var p = create(handler)
   assertTrue(Object.prototype.hasOwnProperty.call(p, "a"))
   assertEquals("a", key)
@@ -958,7 +1390,7 @@
   TestWithProxies(TestHasOwnThrow2, handler)
 }
 
-function TestHasOwnThrow2(handler, create) {
+function TestHasOwnThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ Object.prototype.hasOwnProperty.call(p, "a")},
     "myexn")
@@ -1005,84 +1437,173 @@
 
 // Instanceof (instanceof)
 
-function TestInstanceof() {
-  var o = {}
+function TestProxyInstanceof() {
+  var o1 = {}
   var p1 = Proxy.create({})
-  var p2 = Proxy.create({}, o)
+  var p2 = Proxy.create({}, o1)
   var p3 = Proxy.create({}, p2)
+  var o2 = Object.create(p2)
 
   var f0 = function() {}
-  f0.prototype = o
+  f0.prototype = o1
   var f1 = function() {}
   f1.prototype = p1
   var f2 = function() {}
   f2.prototype = p2
+  var f3 = function() {}
+  f3.prototype = o2
 
-  assertTrue(o instanceof Object)
-  assertFalse(o instanceof f0)
-  assertFalse(o instanceof f1)
-  assertFalse(o instanceof f2)
+  assertTrue(o1 instanceof Object)
+  assertFalse(o1 instanceof f0)
+  assertFalse(o1 instanceof f1)
+  assertFalse(o1 instanceof f2)
+  assertFalse(o1 instanceof f3)
   assertFalse(p1 instanceof Object)
   assertFalse(p1 instanceof f0)
   assertFalse(p1 instanceof f1)
   assertFalse(p1 instanceof f2)
+  assertFalse(p1 instanceof f3)
   assertTrue(p2 instanceof Object)
   assertTrue(p2 instanceof f0)
   assertFalse(p2 instanceof f1)
   assertFalse(p2 instanceof f2)
+  assertFalse(p2 instanceof f3)
   assertTrue(p3 instanceof Object)
   assertTrue(p3 instanceof f0)
   assertFalse(p3 instanceof f1)
   assertTrue(p3 instanceof f2)
+  assertFalse(p3 instanceof f3)
+  assertTrue(o2 instanceof Object)
+  assertTrue(o2 instanceof f0)
+  assertFalse(o2 instanceof f1)
+  assertTrue(o2 instanceof f2)
+  assertFalse(o2 instanceof f3)
 
   var f = Proxy.createFunction({}, function() {})
   assertTrue(f instanceof Function)
 }
 
-TestInstanceof()
+TestProxyInstanceof()
+
+
+function TestInstanceofProxy() {
+  var o0 = Object.create(null)
+  var o1 = {}
+  var o2 = Object.create(o0)
+  var o3 = Object.create(o1)
+  var o4 = Object.create(o2)
+  var o5 = Object.create(o3)
+
+  function handler(o) { return {get: function() { return o } } }
+  var f0 = Proxy.createFunction(handler(o0), function() {})
+  var f1 = Proxy.createFunction(handler(o1), function() {})
+  var f2 = Proxy.createFunction(handler(o2), function() {})
+  var f3 = Proxy.createFunction(handler(o3), function() {})
+  var f4 = Proxy.createFunction(handler(o4), function() {})
+  var f5 = Proxy.createFunction(handler(o4), function() {})
+
+  assertFalse(null instanceof f0)
+  assertFalse(o0 instanceof f0)
+  assertFalse(o0 instanceof f1)
+  assertFalse(o0 instanceof f2)
+  assertFalse(o0 instanceof f3)
+  assertFalse(o0 instanceof f4)
+  assertFalse(o0 instanceof f5)
+  assertFalse(o1 instanceof f0)
+  assertFalse(o1 instanceof f1)
+  assertFalse(o1 instanceof f2)
+  assertFalse(o1 instanceof f3)
+  assertFalse(o1 instanceof f4)
+  assertFalse(o1 instanceof f5)
+  assertTrue(o2 instanceof f0)
+  assertFalse(o2 instanceof f1)
+  assertFalse(o2 instanceof f2)
+  assertFalse(o2 instanceof f3)
+  assertFalse(o2 instanceof f4)
+  assertFalse(o2 instanceof f5)
+  assertFalse(o3 instanceof f0)
+  assertTrue(o3 instanceof f1)
+  assertFalse(o3 instanceof f2)
+  assertFalse(o3 instanceof f3)
+  assertFalse(o3 instanceof f4)
+  assertFalse(o3 instanceof f5)
+  assertTrue(o4 instanceof f0)
+  assertFalse(o4 instanceof f1)
+  assertTrue(o4 instanceof f2)
+  assertFalse(o4 instanceof f3)
+  assertFalse(o4 instanceof f4)
+  assertFalse(o4 instanceof f5)
+  assertFalse(o5 instanceof f0)
+  assertTrue(o5 instanceof f1)
+  assertFalse(o5 instanceof f2)
+  assertTrue(o5 instanceof f3)
+  assertFalse(o5 instanceof f4)
+  assertFalse(o5 instanceof f5)
+
+  var f = Proxy.createFunction({}, function() {})
+  var ff = Proxy.createFunction(handler(Function), function() {})
+  assertTrue(f instanceof Function)
+  assertFalse(f instanceof ff)
+}
+
+TestInstanceofProxy()
 
 
 
 // Prototype (Object.getPrototypeOf, Object.prototype.isPrototypeOf).
 
 function TestPrototype() {
-  var o = {}
+  var o1 = {}
   var p1 = Proxy.create({})
-  var p2 = Proxy.create({}, o)
+  var p2 = Proxy.create({}, o1)
   var p3 = Proxy.create({}, p2)
-  var p4 = Proxy.create({}, 666)
+  var p4 = Proxy.create({}, null)
+  var o2 = Object.create(p3)
 
-  assertSame(Object.getPrototypeOf(o), Object.prototype)
+  assertSame(Object.getPrototypeOf(o1), Object.prototype)
   assertSame(Object.getPrototypeOf(p1), null)
-  assertSame(Object.getPrototypeOf(p2), o)
+  assertSame(Object.getPrototypeOf(p2), o1)
   assertSame(Object.getPrototypeOf(p3), p2)
   assertSame(Object.getPrototypeOf(p4), null)
+  assertSame(Object.getPrototypeOf(o2), p3)
 
-  assertTrue(Object.prototype.isPrototypeOf(o))
+  assertTrue(Object.prototype.isPrototypeOf(o1))
   assertFalse(Object.prototype.isPrototypeOf(p1))
   assertTrue(Object.prototype.isPrototypeOf(p2))
   assertTrue(Object.prototype.isPrototypeOf(p3))
   assertFalse(Object.prototype.isPrototypeOf(p4))
-  assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o))
+  assertTrue(Object.prototype.isPrototypeOf(o2))
+  assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o1))
   assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p1))
   assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p2))
   assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p3))
   assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p4))
-  assertFalse(Object.prototype.isPrototypeOf.call(o, o))
-  assertFalse(Object.prototype.isPrototypeOf.call(o, p1))
-  assertTrue(Object.prototype.isPrototypeOf.call(o, p2))
-  assertTrue(Object.prototype.isPrototypeOf.call(o, p3))
-  assertFalse(Object.prototype.isPrototypeOf.call(o, p4))
+  assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o2))
+  assertFalse(Object.prototype.isPrototypeOf.call(o1, o1))
+  assertFalse(Object.prototype.isPrototypeOf.call(o1, p1))
+  assertTrue(Object.prototype.isPrototypeOf.call(o1, p2))
+  assertTrue(Object.prototype.isPrototypeOf.call(o1, p3))
+  assertFalse(Object.prototype.isPrototypeOf.call(o1, p4))
+  assertTrue(Object.prototype.isPrototypeOf.call(o1, o2))
   assertFalse(Object.prototype.isPrototypeOf.call(p1, p1))
-  assertFalse(Object.prototype.isPrototypeOf.call(p1, o))
+  assertFalse(Object.prototype.isPrototypeOf.call(p1, o1))
   assertFalse(Object.prototype.isPrototypeOf.call(p1, p2))
   assertFalse(Object.prototype.isPrototypeOf.call(p1, p3))
   assertFalse(Object.prototype.isPrototypeOf.call(p1, p4))
+  assertFalse(Object.prototype.isPrototypeOf.call(p1, o2))
   assertFalse(Object.prototype.isPrototypeOf.call(p2, p1))
   assertFalse(Object.prototype.isPrototypeOf.call(p2, p2))
   assertTrue(Object.prototype.isPrototypeOf.call(p2, p3))
   assertFalse(Object.prototype.isPrototypeOf.call(p2, p4))
+  assertTrue(Object.prototype.isPrototypeOf.call(p2, o2))
   assertFalse(Object.prototype.isPrototypeOf.call(p3, p2))
+  assertTrue(Object.prototype.isPrototypeOf.call(p3, o2))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, o1))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, p1))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, p2))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, p3))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, p4))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, o2))
 
   var f = Proxy.createFunction({}, function() {})
   assertSame(Object.getPrototypeOf(f), Function.prototype)
@@ -1097,12 +1618,12 @@
 // Property names (Object.getOwnPropertyNames, Object.keys).
 
 function TestPropertyNames(names, handler) {
-  TestWithProxies(TestPropertyNames2, [names, handler])
+  TestWithProxies(TestPropertyNames2, handler, names)
 }
 
-function TestPropertyNames2(names_handler, create) {
-  var p = create(names_handler[1])
-  assertArrayEquals(names_handler[0], Object.getOwnPropertyNames(p))
+function TestPropertyNames2(create, handler, names) {
+  var p = create(handler)
+  assertArrayEquals(names, Object.getOwnPropertyNames(p))
 }
 
 TestPropertyNames([], {
@@ -1129,7 +1650,7 @@
   TestWithProxies(TestPropertyNamesThrow2, handler)
 }
 
-function TestPropertyNamesThrow2(handler, create) {
+function TestPropertyNamesThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ Object.getOwnPropertyNames(p) }, "myexn")
 }
@@ -1145,12 +1666,12 @@
 
 
 function TestKeys(names, handler) {
-  TestWithProxies(TestKeys2, [names, handler])
+  TestWithProxies(TestKeys2, handler, names)
 }
 
-function TestKeys2(names_handler, create) {
-  var p = create(names_handler[1])
-  assertArrayEquals(names_handler[0], Object.keys(p))
+function TestKeys2(create, handler, names) {
+  var p = create(handler)
+  assertArrayEquals(names, Object.keys(p))
 }
 
 TestKeys([], {
@@ -1174,7 +1695,9 @@
 
 TestKeys(["a", "0"], {
   getOwnPropertyNames: function() { return ["a", 23, "zz", "", 0] },
-  getOwnPropertyDescriptor: function(k) { return {enumerable: k.length == 1} }
+  getOwnPropertyDescriptor: function(k) {
+    return k == "" ? undefined : {enumerable: k.length == 1}
+  }
 })
 
 TestKeys(["23", "zz", ""], {
@@ -1188,10 +1711,12 @@
 
 TestKeys(["a", "b", "c", "5"], {
   get getOwnPropertyNames() {
-    return function() { return ["0", 4, "a", "b", "c", 5] }
+    return function() { return ["0", 4, "a", "b", "c", 5, "ety"] }
   },
   get getOwnPropertyDescriptor() {
-    return function(k) { return {enumerable: k >= "44"} }
+    return function(k) {
+      return k == "ety" ? undefined : {enumerable: k >= "44"}
+    }
   }
 })
 
@@ -1207,7 +1732,7 @@
   TestWithProxies(TestKeysThrow2, handler)
 }
 
-function TestKeysThrow2(handler, create) {
+function TestKeysThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ Object.keys(p) }, "myexn")
 }
@@ -1267,7 +1792,6 @@
 // Fixing (Object.freeze, Object.seal, Object.preventExtensions,
 //         Object.isFrozen, Object.isSealed, Object.isExtensible)
 
-// TODO(rossberg): use TestWithProxies to include funciton proxies
 function TestFix(names, handler) {
   var proto = {p: 77}
   var assertFixing = function(o, s, f, e) {
@@ -1314,19 +1838,27 @@
                     Object.keys(p3).sort())
   assertEquals(proto, Object.getPrototypeOf(p3))
   assertEquals(77, p3.p)
+
+  var p = Proxy.create(handler, proto)
+  var o = Object.create(p)
+  assertFixing(p, false, false, true)
+  assertFixing(o, false, false, true)
+  Object.freeze(o)
+  assertFixing(p, false, false, true)
+  assertFixing(o, true, true, false)
 }
 
 TestFix([], {
   fix: function() { return {} }
 })
 
-TestFix(["a", "b", "c", "d", "zz"], {
+TestFix(["a", "b", "c", "3", "zz"], {
   fix: function() {
     return {
       a: {value: "a", writable: true, configurable: false, enumerable: true},
       b: {value: 33, writable: false, configurable: false, enumerable: true},
       c: {value: 0, writable: true, configurable: true, enumerable: true},
-      d: {value: true, writable: false, configurable: true, enumerable: true},
+      '3': {value: true, writable: false, configurable: true, enumerable: true},
       zz: {value: 0, enumerable: false}
     }
   }
@@ -1377,8 +1909,8 @@
   TestWithProxies(TestFixThrow2, handler)
 }
 
-function TestFixThrow2(handler) {
-  var p = Proxy.create(handler, {})
+function TestFixThrow2(create, handler) {
+  var p = create(handler, {})
   assertThrows(function(){ Object.seal(p) }, "myexn")
   assertThrows(function(){ Object.freeze(p) }, "myexn")
   assertThrows(function(){ Object.preventExtensions(p) }, "myexn")
@@ -1404,6 +1936,135 @@
 })
 
 
+// Freeze a proxy in the middle of operations on it.
+// TODO(rossberg): actual behaviour not specified consistently at the moment,
+// just make sure that we do not crash.
+function TestReentrantFix(f) {
+  TestWithProxies(f, Object.freeze)
+  TestWithProxies(f, Object.seal)
+  TestWithProxies(f, Object.preventExtensions)
+}
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    get get() { freeze(p); return undefined },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  // Freeze while getting get trap.
+  try { p.x } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    get: function() { freeze(p); return 3 },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  // Freeze while executing get trap.
+  try { p.x } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    getPropertyDescriptor: function() { freeze(p); return undefined },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  // Freeze while executing default get trap.
+  try { p.x } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    getPropertyDescriptor: function() { freeze(p); return {get: function(){}} },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  var o = Object.create(p)
+  // Freeze while getting a property from prototype.
+  try { o.x } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    get set() { freeze(p); return undefined },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  // Freeze while getting set trap.
+  try { p.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    set: function() { freeze(p); return true },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  // Freeze while executing set trap.
+  try { p.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    getOwnPropertyDescriptor: function() { freeze(p); return undefined },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  // Freeze while executing default set trap.
+  try { p.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    getPropertyDescriptor: function() { freeze(p); return {set: function(){}} },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  var o = Object.create(p)
+  // Freeze while setting a property in prototype, dropping the property!
+  try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    getPropertyDescriptor: function() { freeze(p); return {set: function(){}} },
+    fix: function() { return {x: {get: function(){}}} }
+  }
+  var p = create(handler)
+  var o = Object.create(p)
+  // Freeze while setting a property in prototype, making it read-only!
+  try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    get fix() { freeze(p); return function(){} }
+  }
+  var p = create(handler)
+  // Freeze while getting fix trap.
+  try { Object.freeze(p) } catch (e) { assertInstanceof(e, Error) }
+  p = create(handler)
+  try { Object.seal(p) } catch (e) { assertInstanceof(e, Error) }
+  p = create(handler)
+  try { Object.preventExtensions(p) } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    fix: function() { freeze(p); return {} }
+  }
+  var p = create(handler)
+  // Freeze while executing fix trap.
+  try { Object.freeze(p) } catch (e) { assertInstanceof(e, Error) }
+  p = create(handler)
+  try { Object.seal(p) } catch (e) { assertInstanceof(e, Error) }
+  p = create(handler)
+  try { Object.preventExtensions(p) } catch (e) { assertInstanceof(e, Error) }
+})
+
+
 
 // String conversion (Object.prototype.toString,
 //                    Object.prototype.toLocaleString,
@@ -1426,6 +2087,13 @@
   assertEquals("my_proxy", Object.prototype.toLocaleString.call(f))
   assertEquals("toString", key)
   assertDoesNotThrow(function(){ Function.prototype.toString.call(f) })
+
+  var o = Object.create(p)
+  key = ""
+  assertEquals("[object Object]", Object.prototype.toString.call(o))
+  assertEquals("", key)
+  assertEquals("my_proxy", Object.prototype.toLocaleString.call(o))
+  assertEquals("toString", key)
 }
 
 TestToString({
@@ -1452,6 +2120,10 @@
   var f = Proxy.createFunction(handler, function() {})
   assertEquals("[object Function]", Object.prototype.toString.call(f))
   assertThrows(function(){ Object.prototype.toLocaleString.call(f) }, "myexn")
+
+  var o = Object.create(p)
+  assertEquals("[object Object]", Object.prototype.toString.call(o))
+  assertThrows(function(){ Object.prototype.toLocaleString.call(o) }, "myexn")
 }
 
 TestToStringThrow({
@@ -1485,7 +2157,7 @@
   TestWithProxies(TestValueOf2, handler)
 }
 
-function TestValueOf2(handler, create) {
+function TestValueOf2(create, handler) {
   var p = create(handler)
   assertSame(p, Object.prototype.valueOf.call(p))
 }
@@ -1502,7 +2174,7 @@
   TestWithProxies(TestIsEnumerable2, handler)
 }
 
-function TestIsEnumerable2(handler, create) {
+function TestIsEnumerable2(create, handler) {
   var p = create(handler)
   assertTrue(Object.prototype.propertyIsEnumerable.call(p, "a"))
   assertEquals("a", key)
@@ -1510,6 +2182,11 @@
   assertEquals("2", key)
   assertFalse(Object.prototype.propertyIsEnumerable.call(p, "z"))
   assertEquals("z", key)
+
+  var o = Object.create(p)
+  key = ""
+  assertFalse(Object.prototype.propertyIsEnumerable.call(o, "a"))
+  assertEquals("", key)  // trap not invoked
 }
 
 TestIsEnumerable({
@@ -1546,7 +2223,7 @@
   TestWithProxies(TestIsEnumerableThrow2, handler)
 }
 
-function TestIsEnumerableThrow2(handler, create) {
+function TestIsEnumerableThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ Object.prototype.propertyIsEnumerable.call(p, "a") },
     "myexn")
@@ -1583,100 +2260,19 @@
 
 
 
-// Calling (call, Function.prototype.call, Function.prototype.apply,
-//          Function.prototype.bind).
+// Constructor functions with proxy prototypes.
 
-var global = this
-var receiver
-
-function TestCall(isStrict, callTrap) {
-  assertEquals(42, callTrap(5, 37))
-// TODO(rossberg): unrelated bug: this does not succeed for optimized code.
-// assertEquals(isStrict ? undefined : global, receiver)
-
-  var f = Proxy.createFunction({fix: function() { return {} }}, callTrap)
-  receiver = 333
-  assertEquals(42, f(11, 31))
-  assertEquals(isStrict ? undefined : global, receiver)
-  var o = {}
-  assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
-  assertEquals(o, receiver)
-  assertEquals(43, Function.prototype.call.call(f, null, 20, 23))
-  assertEquals(isStrict ? null : global, receiver)
-  assertEquals(44, Function.prototype.call.call(f, 2, 21, 23))
-  assertEquals(2, receiver.valueOf())
-  receiver = 333
-  assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
-  assertEquals(o, receiver)
-  var ff = Function.prototype.bind.call(f, o, 12)
-  receiver = 333
-  assertEquals(42, ff(30))
-  assertEquals(o, receiver)
-  receiver = 333
-  assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
-  assertEquals(o, receiver)
-
-  Object.freeze(f)
-  receiver = 333
-  assertEquals(42, f(11, 31))
-// TODO(rossberg): unrelated bug: this does not succeed for optimized code.
-// assertEquals(isStrict ? undefined : global, receiver)
-  receiver = 333
-  assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
-  assertEquals(o, receiver)
-  receiver = 333
-  assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
-  assertEquals(o, receiver)
-  receiver = 333
-  assertEquals(42, ff(30))
-  assertEquals(o, receiver)
-  receiver = 333
-  assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
-  assertEquals(o, receiver)
+function TestConstructorWithProxyPrototype() {
+  TestWithProxies(TestConstructorWithProxyPrototype2, {})
 }
 
-TestCall(false, function(x, y) {
-  receiver = this; return x + y
-})
+function TestConstructorWithProxyPrototype2(create, handler) {
+  function C() {};
+  C.prototype = create(handler);
 
-TestCall(true, function(x, y) {
-  "use strict";
-  receiver = this; return x + y
-})
-
-TestCall(false, Proxy.createFunction({}, function(x, y) {
-  receiver = this; return x + y
-}))
-
-TestCall(true, Proxy.createFunction({}, function(x, y) {
-  "use strict";
-  receiver = this; return x + y
-}))
-
-var p = Proxy.createFunction({fix: function() {return {}}}, function(x, y) {
-  receiver = this; return x + y
-})
-TestCall(false, p)
-Object.freeze(p)
-TestCall(false, p)
-
-
-function TestCallThrow(callTrap) {
-  var f = Proxy.createFunction({fix: function() {return {}}}, callTrap)
-  assertThrows(function(){ f(11) }, "myexn")
-  assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
-  assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
-
-  Object.freeze(f)
-  assertThrows(function(){ f(11) }, "myexn")
-  assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
-  assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
+  var o = new C;
+  assertSame(C.prototype, o.__proto__);
+  assertSame(C.prototype, Object.getPrototypeOf(o));
 }
 
-TestCallThrow(function() { throw "myexn" })
-TestCallThrow(Proxy.createFunction({}, function() { throw "myexn" }))
-
-var p = Proxy.createFunction(
-  {fix: function() {return {}}}, function() { throw "myexn" })
-Object.freeze(p)
-TestCallThrow(p)
+TestConstructorWithProxyPrototype();
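Much of the new coverage above targets derived objects: an ordinary object whose prototype is a proxy routes inherited operations (gets, sets, `in`, even `new`) through the proxy's derived traps, chiefly getPropertyDescriptor. A condensed sketch of that pattern, under the same Harmony semantics:

  var p = Proxy.create({
    getPropertyDescriptor: function(k) {
      return k < "z" ? {value: 42, configurable: true} : void 0
    }
  });
  var o = Object.create(p, {x: {value: 88}});
  assertEquals(42, o.a);    // inherited lookup answered by the trap
  assertEquals(88, o.x);    // own property shadows the proxy
  assertFalse("zzz" in o);  // trap returning undefined means absent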
diff --git a/test/mjsunit/harmony/weakmaps.js b/test/mjsunit/harmony/weakmaps.js
deleted file mode 100644
index 7b5dcaf..0000000
--- a/test/mjsunit/harmony/weakmaps.js
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-weakmaps --expose-gc
-
-
-// Test valid getter and setter calls
-var m = new WeakMap;
-assertDoesNotThrow(function () { m.get(new Object) });
-assertDoesNotThrow(function () { m.set(new Object) });
-assertDoesNotThrow(function () { m.has(new Object) });
-assertDoesNotThrow(function () { m.delete(new Object) });
-
-
-// Test invalid getter and setter calls
-var m = new WeakMap;
-assertThrows(function () { m.get(undefined) }, TypeError);
-assertThrows(function () { m.set(undefined, 0) }, TypeError);
-assertThrows(function () { m.get(0) }, TypeError);
-assertThrows(function () { m.set(0, 0) }, TypeError);
-assertThrows(function () { m.get('a-key') }, TypeError);
-assertThrows(function () { m.set('a-key', 0) }, TypeError);
-
-
-// Test expected mapping behavior
-var m = new WeakMap;
-function TestMapping(map, key, value) {
-  map.set(key, value);
-  assertSame(value, map.get(key));
-}
-TestMapping(m, new Object, 23);
-TestMapping(m, new Object, 'the-value');
-TestMapping(m, new Object, new Object);
-
-
-// Test expected querying behavior
-var m = new WeakMap;
-var key = new Object;
-TestMapping(m, key, 'to-be-present');
-assertTrue(m.has(key));
-assertFalse(m.has(new Object));
-TestMapping(m, key, undefined);
-assertFalse(m.has(key));
-assertFalse(m.has(new Object));
-
-
-// Test expected deletion behavior
-var m = new WeakMap;
-var key = new Object;
-TestMapping(m, key, 'to-be-deleted');
-assertTrue(m.delete(key));
-assertFalse(m.delete(key));
-assertFalse(m.delete(new Object));
-assertSame(m.get(key), undefined);
-
-
-// Test GC of map with entry
-var m = new WeakMap;
-var key = new Object;
-m.set(key, 'not-collected');
-gc();
-assertSame('not-collected', m.get(key));
-
-
-// Test GC of map with chained entries
-var m = new WeakMap;
-var head = new Object;
-for (key = head, i = 0; i < 10; i++, key = m.get(key)) {
-  m.set(key, new Object);
-}
-gc();
-var count = 0;
-for (key = head; key != undefined; key = m.get(key)) {
-  count++;
-}
-assertEquals(11, count);
-
-
-// Test property attribute [[Enumerable]]
-var m = new WeakMap;
-function props(x) {
-  var array = [];
-  for (var p in x) array.push(p);
-  return array.sort();
-}
-assertArrayEquals([], props(WeakMap));
-assertArrayEquals([], props(WeakMap.prototype));
-assertArrayEquals([], props(m));
-
-
-// Test arbitrary properties on weak maps
-var m = new WeakMap;
-function TestProperty(map, property, value) {
-  map[property] = value;
-  assertEquals(value, map[property]);
-}
-for (i = 0; i < 20; i++) {
-  TestProperty(m, i, 'val' + i);
-  TestProperty(m, 'foo' + i, 'bar' + i);
-}
-TestMapping(m, new Object, 'foobar');
-
-
-// Test direct constructor call
-var m = WeakMap();
-assertTrue(m instanceof WeakMap);
-
-
-// Test some common JavaScript idioms
-var m = new WeakMap;
-assertTrue(m instanceof WeakMap);
-assertTrue(WeakMap.prototype.set instanceof Function)
-assertTrue(WeakMap.prototype.get instanceof Function)
-assertTrue(WeakMap.prototype.has instanceof Function)
-assertTrue(WeakMap.prototype.delete instanceof Function)
-
-
-// Regression test for WeakMap prototype.
-assertTrue(WeakMap.prototype.constructor === WeakMap)
-assertTrue(Object.getPrototypeOf(WeakMap.prototype) === Object.prototype)
-
-
-// Regression test for issue 1617: The prototype of the WeakMap constructor
-// needs to be unique (i.e. different from the one of the Object constructor).
-assertFalse(WeakMap.prototype === Object.prototype);
-var o = Object.create({});
-assertFalse("get" in o);
-assertFalse("set" in o);
-assertEquals(undefined, o.get);
-assertEquals(undefined, o.set);
-var o = Object.create({}, { myValue: {
-  value: 10,
-  enumerable: false,
-  configurable: true,
-  writable: true
-}});
-assertEquals(10, o.myValue);
-
-
-// Stress Test
-// There is a proposed stress-test available at the es-discuss mailing list
-// which cannot be reasonably automated.  Check it out by hand if you like:
-// https://mail.mozilla.org/pipermail/es-discuss/2011-May/014096.html
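The deleted weakmaps.js reduces to a small contract, restated here for reference (under the old --harmony-weakmaps flag): keys must be objects, lookups are identity-based, and non-object keys are rejected.

  var m = new WeakMap;
  var key = {};
  m.set(key, 'value');
  assertSame('value', m.get(key));  // lookup is by object identity
  assertFalse(m.has({}));           // a fresh object is a different key
  assertThrows(function() { m.set('a-key', 0) }, TypeError);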
diff --git a/test/mjsunit/math-min-max.js b/test/mjsunit/math-min-max.js
index 0833c5c..e4fd313 100644
--- a/test/mjsunit/math-min-max.js
+++ b/test/mjsunit/math-min-max.js
@@ -115,3 +115,75 @@
 
 assertEquals(Infinity, 1/Math.max(ZERO, -0));
 assertEquals(Infinity, 1/Math.max(-0, ZERO));
+
+function run(crankshaft_test) {
+  crankshaft_test(1);
+  crankshaft_test(1);
+  %OptimizeFunctionOnNextCall(crankshaft_test);
+  crankshaft_test(-0);
+}
+
+function crankshaft_test_1(arg) {
+  var v1 = 1;
+  var v2 = 5;
+  var v3 = 1.5;
+  var v4 = 5.5;
+  var v5 = 2;
+  var v6 = 6;
+  var v7 = 0;
+  var v8 = -0;
+
+  var v9 = 9.9;
+  var v0 = 10.1;
+  // Integer32 representation.
+  assertEquals(v2, Math.max(v1++, v2++));
+  assertEquals(v1, Math.min(v1++, v2++));
+  // Tagged representation.
+  assertEquals(v4, Math.max(v3, v4));
+  assertEquals(v3, Math.min(v3, v4));
+  assertEquals(v6, Math.max(v5, v6));
+  assertEquals(v5, Math.min(v5, v6));
+  // Double representation.
+  assertEquals(v0, Math.max(v0++, v9++));
+  assertEquals(v9, Math.min(v0++, v9++));
+  // Mixed representation.
+  assertEquals(v1, Math.min(v1++, v9++));  // int32, double
+  assertEquals(v0, Math.max(v0++, v2++));  // double, int32
+  assertEquals(v1, Math.min(v1++, v6));    // int32, tagged
+  assertEquals(v2, Math.max(v5, v2++));    // tagged, int32
+  assertEquals(v6, Math.min(v6, v9++));    // tagged, double
+  assertEquals(v0, Math.max(v0++, v5));    // double, tagged
+
+  // Minus zero.
+  assertEquals(Infinity, 1/Math.max(v7, v8));
+  assertEquals(-Infinity, 1/Math.min(v7, v8));
+  // NaN.
+  assertEquals(NaN, Math.max(NaN, v8));
+  assertEquals(NaN, Math.min(NaN, v9));
+  assertEquals(NaN, Math.max(v8, NaN));
+  assertEquals(NaN, Math.min(v9, NaN));
+  // Minus zero as Integer32.
+  assertEquals((arg === -0) ? -Infinity : 1, 1/Math.min(arg, v2));
+}
+
+run(crankshaft_test_1);
+
+function crankshaft_test_2() {
+  var v9 = {};
+  v9.valueOf = function() { return 6; }
+  // Deopt expected due to non-heapnumber objects.
+  assertEquals(6, Math.min(v9, 12));
+}
+
+run(crankshaft_test_2);
+
+// Test overriding Math.min and Math.max
+Math.min = function(a, b) { return a + b; }
+Math.max = function(a, b) { return a - b; }
+
+function crankshaft_test_3() {
+  assertEquals(8, Math.min(3, 5));
+  assertEquals(3, Math.max(5, 2));
+}
+
+run(crankshaft_test_3);
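// A sketch, not part of the patch: the run() helper above encodes V8's
// standard warm-up idiom for Crankshaft tests. This assumes d8 is invoked
// with --allow-natives-syntax so the %OptimizeFunctionOnNextCall intrinsic
// is available.
function warmUpThenOptimize(f, arg) {
  f(arg);                          // first run gathers type feedback
  f(arg);                          // second run stabilizes the feedback
  %OptimizeFunctionOnNextCall(f);  // mark f for optimized compilation
  return f(arg);                   // first run of the optimized code
}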
diff --git a/test/mjsunit/math-pow.js b/test/mjsunit/math-pow.js
index 30d0cbd..fb5f8a1 100644
--- a/test/mjsunit/math-pow.js
+++ b/test/mjsunit/math-pow.js
@@ -25,118 +25,149 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
 // Tests the special cases specified by ES 15.8.2.13
 
-// Simple sanity check
-assertEquals(4, Math.pow(2, 2));
-assertEquals(2147483648, Math.pow(2, 31));
-assertEquals(0.25, Math.pow(2, -2));
-assertEquals(0.0625, Math.pow(2, -4));
-assertEquals(1, Math.pow(1, 100));
-assertEquals(0, Math.pow(0, 1000));
+function test() {
+  // Simple sanity check
+  assertEquals(4, Math.pow(2, 2));
+  assertEquals(2147483648, Math.pow(2, 31));
+  assertEquals(0.25, Math.pow(2, -2));
+  assertEquals(0.0625, Math.pow(2, -4));
+  assertEquals(1, Math.pow(1, 100));
+  assertEquals(0, Math.pow(0, 1000));
 
-// Spec tests
-assertEquals(NaN, Math.pow(2, NaN));
-assertEquals(NaN, Math.pow(+0, NaN));
-assertEquals(NaN, Math.pow(-0, NaN));
-assertEquals(NaN, Math.pow(Infinity, NaN));
-assertEquals(NaN, Math.pow(-Infinity, NaN));
+  // Spec tests
+  assertEquals(NaN, Math.pow(2, NaN));
+  assertEquals(NaN, Math.pow(+0, NaN));
+  assertEquals(NaN, Math.pow(-0, NaN));
+  assertEquals(NaN, Math.pow(Infinity, NaN));
+  assertEquals(NaN, Math.pow(-Infinity, NaN));
 
-assertEquals(1, Math.pow(NaN, +0));
-assertEquals(1, Math.pow(NaN, -0));
+  assertEquals(1, Math.pow(NaN, +0));
+  assertEquals(1, Math.pow(NaN, -0));
 
-assertEquals(NaN, Math.pow(NaN, NaN));
-assertEquals(NaN, Math.pow(NaN, 2.2));
-assertEquals(NaN, Math.pow(NaN, 1));
-assertEquals(NaN, Math.pow(NaN, -1));
-assertEquals(NaN, Math.pow(NaN, -2.2));
-assertEquals(NaN, Math.pow(NaN, Infinity));
-assertEquals(NaN, Math.pow(NaN, -Infinity));
+  assertEquals(NaN, Math.pow(NaN, NaN));
+  assertEquals(NaN, Math.pow(NaN, 2.2));
+  assertEquals(NaN, Math.pow(NaN, 1));
+  assertEquals(NaN, Math.pow(NaN, -1));
+  assertEquals(NaN, Math.pow(NaN, -2.2));
+  assertEquals(NaN, Math.pow(NaN, Infinity));
+  assertEquals(NaN, Math.pow(NaN, -Infinity));
 
-assertEquals(Infinity, Math.pow(1.1, Infinity));
-assertEquals(Infinity, Math.pow(-1.1, Infinity));
-assertEquals(Infinity, Math.pow(2, Infinity));
-assertEquals(Infinity, Math.pow(-2, Infinity));
+  assertEquals(Infinity, Math.pow(1.1, Infinity));
+  assertEquals(Infinity, Math.pow(-1.1, Infinity));
+  assertEquals(Infinity, Math.pow(2, Infinity));
+  assertEquals(Infinity, Math.pow(-2, Infinity));
 
-// Because +0 == -0, we need to compare 1/{+,-}0 to {+,-}Infinity
-assertEquals(+Infinity, 1/Math.pow(1.1, -Infinity));
-assertEquals(+Infinity, 1/Math.pow(-1.1, -Infinity));
-assertEquals(+Infinity, 1/Math.pow(2, -Infinity));
-assertEquals(+Infinity, 1/Math.pow(-2, -Infinity));
+  // Because +0 == -0, we need to compare 1/{+,-}0 to {+,-}Infinity
+  assertEquals(+Infinity, 1/Math.pow(1.1, -Infinity));
+  assertEquals(+Infinity, 1/Math.pow(-1.1, -Infinity));
+  assertEquals(+Infinity, 1/Math.pow(2, -Infinity));
+  assertEquals(+Infinity, 1/Math.pow(-2, -Infinity));
 
-assertEquals(NaN, Math.pow(1, Infinity));
-assertEquals(NaN, Math.pow(1, -Infinity));
-assertEquals(NaN, Math.pow(-1, Infinity));
-assertEquals(NaN, Math.pow(-1, -Infinity));
+  assertEquals(NaN, Math.pow(1, Infinity));
+  assertEquals(NaN, Math.pow(1, -Infinity));
+  assertEquals(NaN, Math.pow(-1, Infinity));
+  assertEquals(NaN, Math.pow(-1, -Infinity));
 
-assertEquals(+0, Math.pow(0.1, Infinity));
-assertEquals(+0, Math.pow(-0.1, Infinity));
-assertEquals(+0, Math.pow(0.999, Infinity));
-assertEquals(+0, Math.pow(-0.999, Infinity));
+  assertEquals(+0, Math.pow(0.1, Infinity));
+  assertEquals(+0, Math.pow(-0.1, Infinity));
+  assertEquals(+0, Math.pow(0.999, Infinity));
+  assertEquals(+0, Math.pow(-0.999, Infinity));
 
-assertEquals(Infinity, Math.pow(0.1, -Infinity));
-assertEquals(Infinity, Math.pow(-0.1, -Infinity));
-assertEquals(Infinity, Math.pow(0.999, -Infinity));
-assertEquals(Infinity, Math.pow(-0.999, -Infinity));
+  assertEquals(Infinity, Math.pow(0.1, -Infinity));
+  assertEquals(Infinity, Math.pow(-0.1, -Infinity));
+  assertEquals(Infinity, Math.pow(0.999, -Infinity));
+  assertEquals(Infinity, Math.pow(-0.999, -Infinity));
 
-assertEquals(Infinity, Math.pow(Infinity, 0.1));
-assertEquals(Infinity, Math.pow(Infinity, 2));
+  assertEquals(Infinity, Math.pow(Infinity, 0.1));
+  assertEquals(Infinity, Math.pow(Infinity, 2));
 
-assertEquals(+Infinity, 1/Math.pow(Infinity, -0.1));
-assertEquals(+Infinity, 1/Math.pow(Infinity, -2));
+  assertEquals(+Infinity, 1/Math.pow(Infinity, -0.1));
+  assertEquals(+Infinity, 1/Math.pow(Infinity, -2));
 
-assertEquals(-Infinity, Math.pow(-Infinity, 3));
-assertEquals(-Infinity, Math.pow(-Infinity, 13));
+  assertEquals(-Infinity, Math.pow(-Infinity, 3));
+  assertEquals(-Infinity, Math.pow(-Infinity, 13));
 
-assertEquals(Infinity, Math.pow(-Infinity, 3.1));
-assertEquals(Infinity, Math.pow(-Infinity, 2));
+  assertEquals(Infinity, Math.pow(-Infinity, 3.1));
+  assertEquals(Infinity, Math.pow(-Infinity, 2));
 
-assertEquals(-Infinity, 1/Math.pow(-Infinity, -3));
-assertEquals(-Infinity, 1/Math.pow(-Infinity, -13));
+  assertEquals(-Infinity, 1/Math.pow(-Infinity, -3));
+  assertEquals(-Infinity, 1/Math.pow(-Infinity, -13));
 
-assertEquals(+Infinity, 1/Math.pow(-Infinity, -3.1));
-assertEquals(+Infinity, 1/Math.pow(-Infinity, -2));
+  assertEquals(+Infinity, 1/Math.pow(-Infinity, -3.1));
+  assertEquals(+Infinity, 1/Math.pow(-Infinity, -2));
 
-assertEquals(+Infinity, 1/Math.pow(+0, 1.1));
-assertEquals(+Infinity, 1/Math.pow(+0, 2));
+  assertEquals(+Infinity, 1/Math.pow(+0, 1.1));
+  assertEquals(+Infinity, 1/Math.pow(+0, 2));
 
-assertEquals(Infinity, Math.pow(+0, -1.1));
-assertEquals(Infinity, Math.pow(+0, -2));
+  assertEquals(Infinity, Math.pow(+0, -1.1));
+  assertEquals(Infinity, Math.pow(+0, -2));
 
-assertEquals(-Infinity, 1/Math.pow(-0, 3));
-assertEquals(-Infinity, 1/Math.pow(-0, 13));
+  assertEquals(-Infinity, 1/Math.pow(-0, 3));
+  assertEquals(-Infinity, 1/Math.pow(-0, 13));
 
-assertEquals(+Infinity, 1/Math.pow(-0, 3.1));
-assertEquals(+Infinity, 1/Math.pow(-0, 2));
+  assertEquals(+Infinity, 1/Math.pow(-0, 3.1));
+  assertEquals(+Infinity, 1/Math.pow(-0, 2));
 
-assertEquals(-Infinity, Math.pow(-0, -3));
-assertEquals(-Infinity, Math.pow(-0, -13));
+  assertEquals(-Infinity, Math.pow(-0, -3));
+  assertEquals(-Infinity, Math.pow(-0, -13));
 
-assertEquals(Infinity, Math.pow(-0, -3.1));
-assertEquals(Infinity, Math.pow(-0, -2));
+  assertEquals(Infinity, Math.pow(-0, -3.1));
+  assertEquals(Infinity, Math.pow(-0, -2));
 
-assertEquals(NaN, Math.pow(-0.00001, 1.1));
-assertEquals(NaN, Math.pow(-0.00001, -1.1));
-assertEquals(NaN, Math.pow(-1.1, 1.1));
-assertEquals(NaN, Math.pow(-1.1, -1.1));
-assertEquals(NaN, Math.pow(-2, 1.1));
-assertEquals(NaN, Math.pow(-2, -1.1));
-assertEquals(NaN, Math.pow(-1000, 1.1));
-assertEquals(NaN, Math.pow(-1000, -1.1));
+  assertEquals(NaN, Math.pow(-0.00001, 1.1));
+  assertEquals(NaN, Math.pow(-0.00001, -1.1));
+  assertEquals(NaN, Math.pow(-1.1, 1.1));
+  assertEquals(NaN, Math.pow(-1.1, -1.1));
+  assertEquals(NaN, Math.pow(-2, 1.1));
+  assertEquals(NaN, Math.pow(-2, -1.1));
+  assertEquals(NaN, Math.pow(-1000, 1.1));
+  assertEquals(NaN, Math.pow(-1000, -1.1));
 
-assertEquals(+Infinity, 1/Math.pow(-0, 0.5));
-assertEquals(+Infinity, 1/Math.pow(-0, 0.6));
-assertEquals(-Infinity, 1/Math.pow(-0, 1));
-assertEquals(-Infinity, 1/Math.pow(-0, 10000000001));
+  assertEquals(+Infinity, 1/Math.pow(-0, 0.5));
+  assertEquals(+Infinity, 1/Math.pow(-0, 0.6));
+  assertEquals(-Infinity, 1/Math.pow(-0, 1));
+  assertEquals(-Infinity, 1/Math.pow(-0, 10000000001));
 
-assertEquals(+Infinity, Math.pow(-0, -0.5));
-assertEquals(+Infinity, Math.pow(-0, -0.6));
-assertEquals(-Infinity, Math.pow(-0, -1));
-assertEquals(-Infinity, Math.pow(-0, -10000000001));
+  assertEquals(+Infinity, Math.pow(-0, -0.5));
+  assertEquals(+Infinity, Math.pow(-0, -0.6));
+  assertEquals(-Infinity, Math.pow(-0, -1));
+  assertEquals(-Infinity, Math.pow(-0, -10000000001));
 
+  assertEquals(4, Math.pow(16, 0.5));
+  assertEquals(NaN, Math.pow(-16, 0.5));
+  assertEquals(0.25, Math.pow(16, -0.5));
+  assertEquals(NaN, Math.pow(-16, -0.5));
 
+  // Test detecting and converting an integer value stored as a double.
+  assertEquals(8, Math.pow(2, Math.sqrt(9)));
 
-// Tests from Sputnik S8.5_A13_T1.
-assertTrue((1*((Math.pow(2,53))-1)*(Math.pow(2,-1074))) === 4.4501477170144023e-308);
-assertTrue((1*(Math.pow(2,52))*(Math.pow(2,-1074))) === 2.2250738585072014e-308);
-assertTrue((-1*(Math.pow(2,52))*(Math.pow(2,-1074))) === -2.2250738585072014e-308);
+  // Tests from Mozilla 15.8.2.13.
+  assertEquals(2, Math.pow.length);
+  assertEquals(NaN, Math.pow());
+  assertEquals(1, Math.pow(null, null));
+  assertEquals(NaN, Math.pow(void 0, void 0));
+  assertEquals(1, Math.pow(true, false));
+  assertEquals(0, Math.pow(false, true));
+  assertEquals(Infinity, Math.pow(-Infinity, Infinity));
+  assertEquals(0, Math.pow(-Infinity, -Infinity));
+  assertEquals(1, Math.pow(0, 0));
+  assertEquals(0, Math.pow(0, Infinity));
+  assertEquals(NaN, Math.pow(NaN, 0.5));
+  assertEquals(NaN, Math.pow(NaN, -0.5));
+
+  // Tests from Sputnik S8.5_A13_T1.
+  assertTrue(
+      (1*((Math.pow(2,53))-1)*(Math.pow(2,-1074))) === 4.4501477170144023e-308);
+  assertTrue(
+      (1*(Math.pow(2,52))*(Math.pow(2,-1074))) === 2.2250738585072014e-308);
+  assertTrue(
+      (-1*(Math.pow(2,52))*(Math.pow(2,-1074))) === -2.2250738585072014e-308);
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
\ No newline at end of file
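// A sketch, not part of the patch: because +0 === -0, the tests above can
// only observe the sign of a zero produced by Math.pow by dividing into it;
// the sign of the resulting infinity reveals which zero came out.
function check(cond) { if (!cond) throw new Error('check failed'); }
check(Math.pow(-0, 3) === 0);              // === cannot see the sign
check(1 / Math.pow(-0, 3) === -Infinity);  // the division exposes -0
check(1 / Math.pow(+0, 3) === +Infinity);  // and +0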
diff --git a/test/mjsunit/mjsunit.js b/test/mjsunit/mjsunit.js
index faa5a43..033c78f 100644
--- a/test/mjsunit/mjsunit.js
+++ b/test/mjsunit/mjsunit.js
@@ -221,9 +221,11 @@
 
 
   assertSame = function assertSame(expected, found, name_opt) {
+    // TODO(mstarzinger): We should think about using Harmony's egal operator
+    // or the function equivalent Object.is() here.
     if (found === expected) {
       if (expected !== 0 || (1 / expected) == (1 / found)) return;
-    } else if (isNaN(expected) && isNaN(found)) {
+    } else if ((expected !== expected) && (found !== found)) {
       return;
     }
     fail(PrettyPrint(expected), found, name_opt);
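// A sketch, not part of the patch: the mjsunit change above swaps isNaN()
// for the self-inequality test because NaN is the only JavaScript value for
// which x !== x holds, while the global isNaN() coerces its argument and
// therefore misreports non-numbers.
function check(cond) { if (!cond) throw new Error('check failed'); }
check(NaN !== NaN);                        // only NaN is unequal to itself
check(isNaN('not a number'));              // coercion: the string becomes NaN
check('not a number' === 'not a number');  // yet the string equals itself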
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index bae09b4..a1b9270 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -34,9 +34,10 @@
 # Fails.
 regress/regress-1119: FAIL
 
-#############################################################################
-# Fails due to r10102 which reverts precise stepping on the 3.6 branch.
-debug-step-2: FAIL
+##############################################################################
+
+# NewGC: BUG(1719) slow to collect arrays over several contexts.
+regress/regress-524: SKIP
 
 ##############################################################################
 # Too slow in debug mode with --stress-opt
@@ -46,16 +47,16 @@
 
 ##############################################################################
 # This one uses a built-in that's only present in debug mode. It takes
-# too long to run in debug mode on ARM.
-fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm)
+# too long to run in debug mode on ARM and MIPS.
+fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm || $arch == mips)
 
 big-object-literal: PASS, SKIP if ($arch == arm)
 
 # Issue 488: this test sometimes times out.
 array-constructor: PASS || TIMEOUT
 
-# Very slow on ARM, contains no architecture dependent code.
-unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm)
+# Very slow on ARM and MIPS, contains no architecture dependent code.
+unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm || $arch == mips)
 
 # Skip long running test in debug and allow it to timeout in release mode.
 regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
@@ -64,6 +65,15 @@
 debug-liveedit-check-stack: SKIP
 debug-liveedit-patch-positions-replace: SKIP
 
+# Test Crankshaft compilation time.  Expected to take too long in debug mode.
+regress/regress-1969: PASS, SKIP if $mode == debug
+
+##############################################################################
+[ $isolates ]
+
+# This test sets the umask on a per-process basis and hence cannot be
+# used in multi-threaded runs.
+d8-os: SKIP
 
 ##############################################################################
 [ $arch == arm ]
@@ -119,11 +129,23 @@
 
 ##############################################################################
 [ $arch == mips ]
-# Run those tests, but expect them to time out.
-array-sort: PASS || TIMEOUT
-mirror-object: PASS || TIMEOUT
 
-# Skip long-running tests.
+# Slow tests which time out in debug mode.
+try: PASS, SKIP if $mode == debug
+debug-scripts-request: PASS, SKIP if $mode == debug
+array-constructor: PASS, SKIP if $mode == debug
+
+# Times out often in release mode on MIPS.
+compiler/regress-stacktrace-methods: PASS, PASS || TIMEOUT if $mode == release
+array-splice: PASS || TIMEOUT
+
+# Long running tests.
+mirror-object: PASS || TIMEOUT
+string-indexof-2: PASS || TIMEOUT
+
+# BUG(3251035): Timeouts in long looping crankshaft optimization
+# tests. Skipping because having them timeout takes too long on the
+# buildbot.
 compiler/alloc-number: SKIP
 compiler/array-length: SKIP
 compiler/assignment-deopt: SKIP
@@ -148,12 +170,8 @@
 regress/regress-create-exception: SKIP
 regress/regress-3218915: SKIP
 regress/regress-3247124: SKIP
-regress/regress-1132: SKIP
-regress/regress-1257: SKIP
-regress/regress-91008: SKIP
 
-##############################################################################
-[ $isolates ]
-# d8-os writes temporary files that might interfer with each other when running
-# in multible threads. Skip this if running with isolates testing.
-d8-os: SKIP
+# Requires a bigger stack size in the Genesis and, if the stack size is
+# increased, the test takes too much time to run.  However, the problem the
+# test covers should be platform-independent.
+regress/regress-1132: SKIP
diff --git a/test/mjsunit/number-is.js b/test/mjsunit/number-is.js
new file mode 100644
index 0000000..1589fc6
--- /dev/null
+++ b/test/mjsunit/number-is.js
@@ -0,0 +1,58 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test Harmony Number.isFinite() and Number.isNaN() functions.
+
+assertTrue(Number.isFinite(0));
+assertTrue(Number.isFinite(Number.MIN_VALUE));
+assertTrue(Number.isFinite(Number.MAX_VALUE));
+assertFalse(Number.isFinite(Number.NaN));
+assertFalse(Number.isFinite(Number.POSITIVE_INFINITY));
+assertFalse(Number.isFinite(Number.NEGATIVE_INFINITY));
+assertFalse(Number.isFinite(new Number(0)));
+assertFalse(Number.isFinite(1/0));
+assertFalse(Number.isFinite(-1/0));
+assertFalse(Number.isFinite({}));
+assertFalse(Number.isFinite([]));
+assertFalse(Number.isFinite("s"));
+assertFalse(Number.isFinite(null));
+assertFalse(Number.isFinite(undefined));
+
+assertFalse(Number.isNaN(0));
+assertFalse(Number.isNaN(Number.MIN_VALUE));
+assertFalse(Number.isNaN(Number.MAX_VALUE));
+assertTrue(Number.isNaN(Number.NaN));
+assertFalse(Number.isNaN(Number.POSITIVE_INFINITY));
+assertFalse(Number.isNaN(Number.NEGATIVE_INFINITY));
+assertFalse(Number.isNaN(new Number(0)));
+assertFalse(Number.isNaN(1/0));
+assertFalse(Number.isNaN(-1/0));
+assertFalse(Number.isNaN({}));
+assertFalse(Number.isNaN([]));
+assertFalse(Number.isNaN("s"));
+assertFalse(Number.isNaN(null));
+assertFalse(Number.isNaN(undefined));
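// A sketch, not part of the patch: unlike the global functions, the Harmony
// Number.isFinite/Number.isNaN tested above never coerce their argument,
// which is why new Number(0) and "s" are rejected.
function check(cond) { if (!cond) throw new Error('check failed'); }
check(isFinite('0'));          // global: '0' coerces to the number 0
check(!Number.isFinite('0'));  // Harmony: not a number primitive
check(isNaN('s'));             // global: 's' coerces to NaN
check(!Number.isNaN('s'));     // Harmony: 's' is simply not NaN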
diff --git a/test/mjsunit/object-define-properties.js b/test/mjsunit/object-define-properties.js
index 128df69..6d5032e 100644
--- a/test/mjsunit/object-define-properties.js
+++ b/test/mjsunit/object-define-properties.js
@@ -54,3 +54,19 @@
 
 assertEquals(x.foo, 10);
 assertEquals(x.bar, 42);
+
+
+// Make sure that all property descriptors are calculated before any
+// modifications are done.
+
+var object = {};
+
+assertThrows(function() {
+    Object.defineProperties(object, {
+      foo: { value: 1 },
+      bar: { value: 2, get: function() { return 3; } }
+    });
+  }, TypeError);
+
+assertEquals(undefined, object.foo);
+assertEquals(undefined, object.bar);
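// A sketch, not part of the patch, of why the bar descriptor above throws:
// a property descriptor may not combine data attributes (value/writable)
// with accessor attributes (get/set), and Object.defineProperties validates
// every descriptor before applying any of them.
function check(cond) { if (!cond) throw new Error('check failed'); }
var threw = false;
try {
  Object.defineProperty({}, 'p', { value: 1, get: function() {} });
} catch (e) {
  threw = e instanceof TypeError;
}
check(threw);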
diff --git a/test/mjsunit/object-define-property.js b/test/mjsunit/object-define-property.js
index ee6083a..fdaf82d 100644
--- a/test/mjsunit/object-define-property.js
+++ b/test/mjsunit/object-define-property.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -503,7 +503,7 @@
 // Defining properties null should fail even when we have
 // other allowed values
 try {
-  %DefineOrRedefineAccessorProperty(null, 'foo', 0, func, 0);
+  %DefineOrRedefineAccessorProperty(null, 'foo', func, null, 0);
 } catch (e) {
   assertTrue(/illegal access/.test(e));
 }
@@ -1053,4 +1053,35 @@
   // Non-enumerable property forces dictionary mode.
   Object.defineProperty(o, i, {value: i, enumerable: false});
 }
-assertEquals(999, o[999]);
\ No newline at end of file
+assertEquals(999, o[999]);
+
+
+// Regression test: Bizarre behavior on non-strict arguments object.
+(function test(arg0) {
+  // Here arguments[0] is a fast alias on arg0.
+  Object.defineProperty(arguments, "0", {
+    value:1,
+    enumerable:false
+  });
+  // Here arguments[0] is a slow alias on arg0.
+  Object.defineProperty(arguments, "0", {
+    value:2,
+    writable:false
+  });
+  // Here arguments[0] is no alias at all.
+  Object.defineProperty(arguments, "0", {
+    value:3
+  });
+  assertEquals(2, arg0);
+  assertEquals(3, arguments[0]);
+})(0);
+
+
+// Regression test: We should never observe the hole value.
+var objectWithGetter = {};
+objectWithGetter.__defineGetter__('foo', function() {});
+assertEquals(undefined, objectWithGetter.__lookupSetter__('foo'));
+
+var objectWithSetter = {};
+objectWithSetter.__defineSetter__('foo', function(x) {});
+assertEquals(undefined, objectWithSetter.__lookupGetter__('foo'));
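// A sketch, not part of the patch: in sloppy mode, arguments[i] and the
// i-th parameter alias each other, which is the mechanism the regression
// test above pokes at via defineProperty.
function check(cond) { if (!cond) throw new Error('check failed'); }
(function(a) {
  arguments[0] = 5;           // a write through the alias...
  check(a === 5);             // ...is observed by the parameter
  a = 7;
  check(arguments[0] === 7);  // and vice versa
})(1);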
diff --git a/src/extensions/experimental/i18n-extension.h b/test/mjsunit/object-is.js
similarity index 68%
copy from src/extensions/experimental/i18n-extension.h
copy to test/mjsunit/object-is.js
index 5401f25..b9fdc84 100644
--- a/src/extensions/experimental/i18n-extension.h
+++ b/test/mjsunit/object-is.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,30 +25,23 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+// Test both the Harmony egal operator and its function equivalent.
 
-#include "include/v8.h"
+function TestEgal(expected, x, y) {
+  // TODO(mstarzinger): Once we have the egal operator, we can test it here.
+  assertSame(expected, Object.is(x, y));
+}
 
-namespace v8 {
-namespace internal {
-
-
-class I18NExtension : public v8::Extension {
- public:
-  I18NExtension();
-
-  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
-      v8::Handle<v8::String> name);
-
-  // V8 code prefers Register, while Chrome and WebKit use get kind of methods.
-  static void Register();
-  static I18NExtension* get();
-
- private:
-  static I18NExtension* extension_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+var test_set = [ {}, [], 1/0, -1/0, "s", 0, 0/-1, null, undefined ];
+print(test_set);
+for (var i = 0; i < test_set.length; i++) {
+  for (var j = 0; j < test_set.length; j++) {
+    if (i == j) {
+      assertSame(test_set[i], test_set[j]);
+      TestEgal(true, test_set[i], test_set[j]);
+    } else {
+      TestEgal(false, test_set[i], test_set[j]);
+      TestEgal(false, test_set[j], test_set[i]);
+    }
+  }
+}
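// A sketch, not part of the patch (assuming Object.is is available, as the
// new test above requires): the only two places where Object.is (egal) and
// === disagree, which the pairwise loop relies on via the 0 vs 0/-1 entries.
function check(cond) { if (!cond) throw new Error('check failed'); }
check(NaN !== NaN && Object.is(NaN, NaN));  // NaN is egal to itself
check(0 === -0 && !Object.is(0, -0));       // the two zeros are not egal
check(Object.is(1, 1) === (1 === 1));       // otherwise the two agree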
diff --git a/test/mjsunit/object-prevent-extensions.js b/test/mjsunit/object-prevent-extensions.js
index 322a2cb..6b9184d 100644
--- a/test/mjsunit/object-prevent-extensions.js
+++ b/test/mjsunit/object-prevent-extensions.js
@@ -114,3 +114,15 @@
 
 foo.x = 29;
 assertEquals(undefined, foo.x);
+
+// when Object.isExtensible(o) === false
+// assignment should return right hand side value
+var o = Object.preventExtensions({});
+var v = o.v = 50;
+assertEquals(undefined, o.v);
+assertEquals(50, v);
+
+// test same behavior as above, but for integer properties
+var n = o[0] = 100;
+assertEquals(undefined, o[0]);
+assertEquals(100, n);
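// A sketch, not part of the patch: the silent-failure behavior tested above
// is sloppy-mode only; the same assignment throws under "use strict".
function check(cond) { if (!cond) throw new Error('check failed'); }
var sealed = Object.preventExtensions({});
var threw = false;
try {
  (function() { 'use strict'; sealed.v = 1; })();
} catch (e) {
  threw = e instanceof TypeError;
}
check(threw && sealed.v === undefined);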
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/optimized-typeof.js
similarity index 79%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/optimized-typeof.js
index 2502b53..b0c0725 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/optimized-typeof.js
@@ -25,22 +25,23 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Flags: --allow-natives-syntax
 
-var e = new Error();
-assertEquals('Error', e + '');
+function typeofDirectly() {
+  return typeof({}) === "undefined";
+}
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+typeofDirectly();
+typeofDirectly();
+%OptimizeFunctionOnNextCall(typeofDirectly);
+typeofDirectly();
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+function typeofViaVariable() {
+  var foo = typeof({})
+  return foo === "undefined";
+}
+
+typeofViaVariable();
+typeofViaVariable();
+%OptimizeFunctionOnNextCall(typeofViaVariable);
+typeofViaVariable();
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/pixel-array-rounding.js
similarity index 81%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/pixel-array-rounding.js
index d3f2e35..ef5a10b 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/pixel-array-rounding.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+var pixels = new PixelArray(8);
 
-assertThrows('eval(delete eval)');
+function f() {
+  for (var i = 0; i < 8; i++) {
+    pixels[i] = (i * 1.1);
+  }
+  return pixels[1] + pixels[6];
+}
 
+f();
+f();
+assertEquals(6, pixels[5]);
+%OptimizeFunctionOnNextCall(f);
+f();
+assertEquals(6, pixels[5]);
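// A sketch, not part of the patch: PixelArray is a V8-specific type of this
// era; its standardized successor, Uint8ClampedArray, shows the same
// clamp-and-round-half-to-even store behavior (availability on a given
// build is an assumption worth checking).
function check(cond) { if (!cond) throw new Error('check failed'); }
var px = new Uint8ClampedArray(3);
px[0] = 5.5;   // half-to-even: rounds up to 6
px[1] = 4.5;   // half-to-even: rounds down to 4
px[2] = 300;   // clamped to the maximum, 255
check(px[0] === 6 && px[1] === 4 && px[2] === 255);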
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regexp-capture-3.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regexp-capture-3.js
index d3f2e35..50e423f 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regexp-capture-3.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+"abcd".replace(/b/g, function() { });
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+assertEquals("cd", RegExp.rightContext);
diff --git a/test/mjsunit/regexp-static.js b/test/mjsunit/regexp-static.js
index 0f84968..8f283f6 100644
--- a/test/mjsunit/regexp-static.js
+++ b/test/mjsunit/regexp-static.js
@@ -25,18 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that we throw exceptions when calling test and exec with no
-// input.  This is not part of the spec, but we do it for
-// compatibility with JSC.
-assertThrows("/a/.test()");
-assertThrows("/a/.exec()");
-
-// Test that we do not throw exceptions once the static RegExp.input
-// field has been set.
-RegExp.input = "a";
-assertDoesNotThrow("/a/.test()");
-assertDoesNotThrow("/a/.exec()");
-
 // Test the (deprecated as of JS 1.5) properties of the RegExp function.
 var re = /((\d+)\.(\d+))/;
 var s = 'abc123.456def';
@@ -166,3 +154,8 @@
 var foo = "lsdfj sldkfj sdklfj læsdfjl sdkfjlsdk fjsdl fjsdljskdj flsj flsdkj flskd regexp: /foobar/\nldkfj sdlkfj sdkl";
 assertTrue(/^([a-z]+): (.*)/.test(foo.substring(foo.indexOf("regexp:"))), "regexp: setup");
 assertEquals("regexp", RegExp.$1, "RegExp.$1");
+
+
+// Check that calling with no argument is the same as calling with undefined.
+assertTrue(/^undefined$/.test());
+assertEquals(["undefined"], /^undefined$/.exec());
diff --git a/test/mjsunit/regexp.js b/test/mjsunit/regexp.js
index 3c4f883..ec82c96 100644
--- a/test/mjsunit/regexp.js
+++ b/test/mjsunit/regexp.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -127,6 +127,17 @@
 
 assertTrue(/^[Z-\c-e]*$/.test("Z[\\cde"));
 
+// Test that we handle \s and \S correctly on special Unicode characters.
+re = /\s/;
+assertTrue(re.test("\u2028"));
+assertTrue(re.test("\u2029"));
+assertTrue(re.test("\uFEFF"));
+
+re = /\S/;
+assertFalse(re.test("\u2028"));
+assertFalse(re.test("\u2029"));
+assertFalse(re.test("\uFEFF"));
+
 // Test that we handle \s and \S correctly inside some bizarre
 // character classes.
 re = /[\s-:]/;
@@ -690,3 +701,7 @@
 assertThrows("RegExp('(?:*)')");
 assertThrows("RegExp('(?=*)')");
 assertThrows("RegExp('(?!*)')");
+
+// Test trimmed regular expression for RegExp.test().
+assertTrue(/.*abc/.test("abc"));
+assertFalse(/.*\d+/.test("q"));
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-100702.js
similarity index 79%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-100702.js
index 2502b53..46494ab 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-100702.js
@@ -25,22 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Regression test for correct handling of non-object receiver values
+// passed to built-in array functions.
 
-var e = new Error();
-assertEquals('Error', e + '');
+String.prototype.isThatMe = function () {
+  assertFalse(this === str);
+};
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+var str = "abc";
+str.isThatMe();
+str.isThatMe.call(str);
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+var arr = [1];
+arr.forEach("".isThatMe, str);
+arr.filter("".isThatMe, str);
+arr.some("".isThatMe, str);
+arr.every("".isThatMe, str);
+arr.map("".isThatMe, str);
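// A sketch, not part of the patch: the regression above exists because in
// sloppy mode a primitive thisArg is boxed before the callback sees it, so
// inside the method `this` is a String object, never === the primitive.
function check(cond) { if (!cond) throw new Error('check failed'); }
[1].forEach(function() {
  check(typeof this === 'object');  // boxed String wrapper
  check(this !== 'abc');            // not the primitive itself
  check(String(this) === 'abc');    // but it unwraps to the same text
}, 'abc');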
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-102153.js
similarity index 69%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-102153.js
index d3f2e35..0f67656 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-102153.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,33 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --expose-debug-as debug
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+// Test that the break point is set before initializing the loop variable
+// so that we break before any iteration has been run.
 
-assertThrows('eval(delete eval)');
+Debug = debug.Debug;
 
+var break_hit = false;
+
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    break_hit = true;
+  }
+}
+
+Debug.setListener(listener);
+
+function test() {
+  for (var i = 0; i < 3; i++) {  // Break here.
+    if (i == 0) break;
+  }
+}
+
+Debug.setBreakPoint(test, 1, 0);
+
+assertTrue(Debug.showBreakPoints(test).indexOf("// Break here.") >= 0);
+
+test();
+
+assertTrue(break_hit);
diff --git a/src/extensions/experimental/i18n-extension.h b/test/mjsunit/regress/regress-108296.js
similarity index 68%
copy from src/extensions/experimental/i18n-extension.h
copy to test/mjsunit/regress/regress-108296.js
index 5401f25..38ecda7 100644
--- a/src/extensions/experimental/i18n-extension.h
+++ b/test/mjsunit/regress/regress-108296.js
@@ -25,30 +25,28 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+// Flags: --allow-natives-syntax
 
-#include "include/v8.h"
+// This test checks that young immediates embedded into code objects
+// are referenced through a cell.
 
-namespace v8 {
-namespace internal {
+function f (k, a, b) {
+  // Create control flow for a.foo.  Control flow resolution will
+  // be generated as a part of a gap move. Gap moves operate on immediates as
+  // a.foo is a CONSTANT_FUNCTION.
+  var x = k ? a.foo : a.foo;
+  return x.prototype;
+}
 
+var a = { };
 
-class I18NExtension : public v8::Extension {
- public:
-  I18NExtension();
+// Make sure that foo is a CONSTANT_FUNCTION but is not pretenured.
+a.foo = (function () { return function () {}; })();
 
-  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
-      v8::Handle<v8::String> name);
-
-  // V8 code prefers Register, while Chrome and WebKit use get kind of methods.
-  static void Register();
-  static I18NExtension* get();
-
- private:
-  static I18NExtension* extension_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+// Ensure that both branches of ternary operator have monomorphic type feedback.
+f(true, a, a);
+f(true, a, a);
+f(false, a, a);
+f(false, a, a);
+%OptimizeFunctionOnNextCall(f);
+f(true, a, a);
diff --git a/src/extensions/experimental/i18n-extension.h b/test/mjsunit/regress/regress-109195.js
similarity index 66%
copy from src/extensions/experimental/i18n-extension.h
copy to test/mjsunit/regress/regress-109195.js
index 5401f25..97538aa 100644
--- a/src/extensions/experimental/i18n-extension.h
+++ b/test/mjsunit/regress/regress-109195.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,30 +25,41 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+// Flags: --expose-debug-as debug
+var Debug = debug.Debug;
 
-#include "include/v8.h"
+function listener(event, exec_state, event_data, data) {
+  for (var i = 0, n = exec_state.frameCount(); i < n; i++) {
+    exec_state.frame().scopeCount(i);
+  }
+  exec_state.prepareStep(Debug.StepAction.Continue, 1);
+}
 
-namespace v8 {
-namespace internal {
+Debug.setListener(listener);
 
-
-class I18NExtension : public v8::Extension {
- public:
-  I18NExtension();
-
-  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
-      v8::Handle<v8::String> name);
-
-  // V8 code prefers Register, while Chrome and WebKit use get kind of methods.
-  static void Register();
-  static I18NExtension* get();
-
- private:
-  static I18NExtension* extension_;
+var F = function () {
+  1, function () {
+    var d = 0;
+    (function () { d; });
+    debugger;
+  }();
 };
 
-} }  // namespace v8::internal
+var src = "(" + F.toString() + ")()";
+eval(src);
 
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+Function.prototype.__defineGetter__("f", function () {
+  debugger;
+  return 0;
+});
+
+var G = function () {
+  1, function () {
+    var d = 0;
+    (function () { d; });
+    debugger;
+  }['f'];
+};
+
+var src = "(" + G.toString() + ")()";
+eval(src);
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-110509.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-110509.js
index d3f2e35..132bd23 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-110509.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+// Verify that LRandom preserves rsi correctly.
 
-assertThrows('eval(delete eval)');
+function foo() {
+  Math.random();
+  new Function("");
+}
 
+foo();
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/test/mjsunit/regress/regress-1110.js b/test/mjsunit/regress/regress-1110.js
index 43b8d77..124f520 100644
--- a/test/mjsunit/regress/regress-1110.js
+++ b/test/mjsunit/regress/regress-1110.js
@@ -28,10 +28,9 @@
 // Test that the illegal continue is thrown at parse time.
 
 try {
-  function Crash() { continue;if (Crash) {
-    } }
+  eval("function Crash() { assertUnreachable(); continue;if (Crash) {  } }");
   Crash();
-  assertTrue(false);
+  assertUnreachable();
 } catch (e) {
   assertTrue(e instanceof SyntaxError);
   assertTrue(/continue/.test(e.message));
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-113924.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-113924.js
index d3f2e35..3ecdec4 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-113924.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
-
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+var count=12000;
+while(count--) {
+  eval("var a = new Object(10); a[2] += 7;");
+}
diff --git a/src/extensions/experimental/i18n-extension.h b/test/mjsunit/regress/regress-115452.js
similarity index 67%
copy from src/extensions/experimental/i18n-extension.h
copy to test/mjsunit/regress/regress-115452.js
index 5401f25..7e424ed 100644
--- a/src/extensions/experimental/i18n-extension.h
+++ b/test/mjsunit/regress/regress-115452.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,30 +25,24 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+// Test that a function declaration cannot overwrite a read-only property.
 
-#include "include/v8.h"
+print(0)
+function foobl() {}
+assertTrue(typeof this.foobl == "function");
+assertTrue(Object.getOwnPropertyDescriptor(this, "foobl").writable);
 
-namespace v8 {
-namespace internal {
+print(1)
+Object.defineProperty(this, "foobl", {value: 1, writable: false});
+assertSame(1, this.foobl);
+assertFalse(Object.getOwnPropertyDescriptor(this, "foobl").writable);
 
+print(2)
+eval("function foobl() {}");
+assertSame(1, this.foobl);
+assertFalse(Object.getOwnPropertyDescriptor(this, "foobl").writable);
 
-class I18NExtension : public v8::Extension {
- public:
-  I18NExtension();
-
-  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
-      v8::Handle<v8::String> name);
-
-  // V8 code prefers Register, while Chrome and WebKit use get kind of methods.
-  static void Register();
-  static I18NExtension* get();
-
- private:
-  static I18NExtension* extension_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+print(3)
+eval("function foobl() {}");
+assertSame(1, this.foobl);
+assertFalse(Object.getOwnPropertyDescriptor(this, "foobl").writable);
diff --git a/test/mjsunit/regress/regress-1170.js b/test/mjsunit/regress/regress-1170.js
index 95684c5..66ed9f2 100644
--- a/test/mjsunit/regress/regress-1170.js
+++ b/test/mjsunit/regress/regress-1170.js
@@ -49,7 +49,7 @@
   exception = true;
   assertTrue(/TypeError/.test(e));
 }
-assertTrue(exception);
+assertFalse(exception);
 
 exception = false;
 try {
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-117794.js
similarity index 71%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-117794.js
index d3f2e35..5e11b40 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-117794.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,33 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Loads specialized to be from the global object should not omit the
+// smi check on the receiver.  The code below should not crash.
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+print = function() {}
 
-assertThrows('eval(delete eval)');
+function constructor() {};
 
+function assertHasOwnProperties(object, limit) {
+  for (var i = 0; i < limit; i++) {  }
+}
+
+try {
+  Object.keys();
+} catch(exc2) {
+  print(exc2.stack);
+}
+
+var x1 = new Object();
+
+try {
+  new Function("A Man Called Horse", x1.d);
+} catch(exc3) {
+  print(exc3.stack);
+}
+
+try {
+  (-(true)).toPrecision(0x30, 'lib1-f1');
+} catch(exc1) {
+  print(exc1.stack);
+}
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-119429.js
similarity index 84%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-119429.js
index d3f2e35..a876487 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-119429.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+var d = 0;
+function recurse() {
+  if (++d == 25135) { // A magic number just below stack overflow on ia32
+    %DebugBreak();
+  }
+  recurse();
+}
+assertThrows(function() { recurse();} );
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-119925.js
similarity index 84%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-119925.js
index d3f2e35..6712754 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-119925.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
-
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+// Test that the throw is not inlined if object literals cannot be
+// inlined.
+Array.prototype.__proto__ = { 77e4  : null };
+function continueWithinLoop() {
+    for (var key in [(1.2)]) {  }
+};
+continueWithinLoop();
diff --git a/test/mjsunit/regress/regress-1213575.js b/test/mjsunit/regress/regress-1213575.js
index 9d82064..f3a11db 100644
--- a/test/mjsunit/regress/regress-1213575.js
+++ b/test/mjsunit/regress/regress-1213575.js
@@ -25,17 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Make sure that a const definition always
-// conflicts with a defined setter. This avoid
-// trying to pass 'the hole' to the setter.
+// Make sure that a const definition does not try
+// to pass 'the hole' to a defined setter.
 
-this.__defineSetter__('x', function(value) { assertTrue(false); });
+this.__defineSetter__('x', function(value) { assertTrue(value === 1); });
 
 var caught = false;
 try {
-  eval('const x');
+  eval('const x = 1');
 } catch(e) {
   assertTrue(e instanceof TypeError);
   caught = true;
 }
-assertTrue(caught);
+assertFalse(caught);
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-121407.js
similarity index 83%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-121407.js
index d3f2e35..25033fb 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-121407.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+var a = [0,1,2,3];
+a[2000000] = 2000000;
+a.length=2000;
+for (var i = 0; i <= 256; i++) {
+    a[i] = new Object();
+}
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+a = [0.5,1.5,2.5,3.5,4.5,5.5];
+a[2000000] = 2000000;
+a.length=2000;
+for (var i = 0; i <= 256; i++) {
+    a[i] = new Object();
+}
\ No newline at end of file
diff --git a/src/extensions/experimental/i18n-extension.h b/test/mjsunit/regress/regress-1217.js
similarity index 68%
copy from src/extensions/experimental/i18n-extension.h
copy to test/mjsunit/regress/regress-1217.js
index 5401f25..6530549 100644
--- a/src/extensions/experimental/i18n-extension.h
+++ b/test/mjsunit/regress/regress-1217.js
@@ -25,30 +25,26 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+// Check that RegExp.prototype is itself a RegExp object.
 
-#include "include/v8.h"
+var proto = RegExp.prototype;
+assertEquals("[object RegExp]", Object.prototype.toString.call(proto));
 
-namespace v8 {
-namespace internal {
+assertEquals("", proto.source);
+assertEquals(false, proto.global);
+assertEquals(false, proto.multiline);
+assertEquals(false, proto.ignoreCase);
+assertEquals(0, proto.lastIndex);
 
+assertEquals("/(?:)/", proto.toString());
 
-class I18NExtension : public v8::Extension {
- public:
-  I18NExtension();
+var execResult = proto.exec("argle");
+assertEquals(1, execResult.length);
+assertEquals("", execResult[0]);
+assertEquals("argle", execResult.input);
+assertEquals(0, execResult.index);
 
-  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
-      v8::Handle<v8::String> name);
+assertTrue(proto.test("argle"));
 
-  // V8 code prefers Register, while Chrome and WebKit use get kind of methods.
-  static void Register();
-  static I18NExtension* get();
-
- private:
-  static I18NExtension* extension_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+// We disallow re-compiling the RegExp.prototype object.
+assertThrows(function(){ proto.compile("something"); }, TypeError);
diff --git a/test/mjsunit/regress/regress-1229.js b/test/mjsunit/regress/regress-1229.js
index e16d278..5447f3f 100644
--- a/test/mjsunit/regress/regress-1229.js
+++ b/test/mjsunit/regress/regress-1229.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,59 +29,118 @@
 
 // Check that %NewObjectFromBound works correctly when called from optimized
 // frame.
-function foo(x, y, z) {
+function foo1(x, y, z) {
   assertEquals(1, x);
   assertEquals(2, y);
   assertEquals(3, z);
 }
 
-var bound_arg = [1];
+function foo2(x, y, z) {
+  assertEquals(1, x);
+  assertEquals(2, y);
+  assertEquals(undefined, z);
+}
 
-function f(y, z) {
-  return %NewObjectFromBound(foo, bound_arg);
+function foo3(x, y, z) {
+  assertEquals(1, x);
+  assertEquals(2, y);
+  assertEquals(3, z);
+}
+
+
+var foob1 = foo1.bind({}, 1);
+var foob2 = foo2.bind({}, 1);
+var foob3 = foo3.bind({}, 1);
+
+
+function f1(y, z) {
+  return %NewObjectFromBound(foob1);
+}
+
+function f2(y, z) {
+  return %NewObjectFromBound(foob2);
+}
+
+function f3(y, z) {
+  return %NewObjectFromBound(foob3);
 }
 
 // Check that %NewObjectFromBound looks at correct frame for inlined function.
-function g(z, y) {
-  return f(y, z); /* f should be inlined into g, note rotated arguments */
+function g1(z, y) {
+  return f1(y, z); /* f should be inlined into g, note rotated arguments */
+}
+
+function g2(z, y, x) {
+  return f2(y); /* f should be inlined into g, note argument count mismatch */
+}
+
+function g3(z, y, x) {
+  return f3(x, y, z); /* f should be inlined into g, note argument count mismatch */
 }
 
 // Check that %NewObjectFromBound looks at correct frame for inlined function.
 function ff(x) { }
-function h(z2, y2) {
+function h1(z2, y2) {
   var local_z = z2 >> 1;
   ff(local_z);
   var local_y = y2 >> 1;
   ff(local_y);
-  return f(local_y, local_z); /* f should be inlined into h */
+  return f1(local_y, local_z); /* f should be inlined into h */
 }
 
-for (var i = 0; i < 5; i++) f(2, 3);
-%OptimizeFunctionOnNextCall(f);
-f(2, 3);
+function h2(z2, y2, x2) {
+  var local_z = z2 >> 1;
+  ff(local_z);
+  var local_y = y2 >> 1;
+  ff(local_y);
+  return f2(local_y); /* f should be inlined into h */
+}
 
-for (var i = 0; i < 5; i++) g(3, 2);
-%OptimizeFunctionOnNextCall(g);
-g(3, 2);
+function h3(z2, y2, x2) {
+  var local_z = z2 >> 1;
+  ff(local_z);
+  var local_y = y2 >> 1;
+  ff(local_y);
+  var local_x = x2 >> 1;
+  ff(local_x);
+  return f3(local_x, local_y, local_z); /* f should be inlined into h */
+}
 
-for (var i = 0; i < 5; i++) h(6, 4);
-%OptimizeFunctionOnNextCall(h);
-h(6, 4);
+
+function invoke(f, args) {
+  for (var i = 0; i < 5; i++) f.apply(this, args);
+  %OptimizeFunctionOnNextCall(f);
+  f.apply(this, args);
+}
+
+invoke(f1, [2, 3]);
+invoke(f2, [2]);
+invoke(f3, [2, 3, 4]);
+invoke(g1, [3, 2]);
+invoke(g2, [3, 2, 4]);
+invoke(g3, [4, 3, 2]);
+invoke(h1, [6, 4]);
+invoke(h2, [6, 4, 8]);
+invoke(h3, [8, 6, 4]);
 
 // Check that %_IsConstructCall returns correct value when inlined
 var NON_CONSTRUCT_MARKER = {};
 var CONSTRUCT_MARKER = {};
-function baz() {
+function baz(x) {
   return (!%_IsConstructCall()) ? NON_CONSTRUCT_MARKER : CONSTRUCT_MARKER;
 }
 
 function bar(x, y, z) {
+  var non_construct = baz(0); /* baz should be inlined */
+  assertSame(non_construct, NON_CONSTRUCT_MARKER);
   var non_construct = baz(); /* baz should be inlined */
-  assertEquals(non_construct, NON_CONSTRUCT_MARKER);
-  var construct = new baz();
-  assertEquals(construct, CONSTRUCT_MARKER);
+  assertSame(non_construct, NON_CONSTRUCT_MARKER);
+  var non_construct = baz(0, 0); /* baz should be inlined */
+  assertSame(non_construct, NON_CONSTRUCT_MARKER);
+  var construct = new baz(0);
+  assertSame(construct, CONSTRUCT_MARKER);
+  var construct = new baz(0, 0);
+  assertSame(construct, CONSTRUCT_MARKER);
 }
 
-for (var i = 0; i < 5; i++) new bar(1, 2, 3);
-%OptimizeFunctionOnNextCall(bar);
-bar(1, 2, 3);
+invoke(bar, [1, 2, 3]);
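// A sketch, not part of the patch, of the ES5 bind() behavior the rewritten
// test leans on: bound arguments are prepended for both calls and
// construction, while the bound this is ignored when the bound function is
// used with new.
function check(cond) { if (!cond) throw new Error('check failed'); }
function Pair(x, y) { this.x = x; this.y = y; }
var PairWith1 = Pair.bind({ ignored: true }, 1);
var p = new PairWith1(2);       // behaves like new Pair(1, 2)
check(p.x === 1 && p.y === 2);
check(p instanceof Pair);       // prototype chain comes from Pair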
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-1415.js
similarity index 73%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-1415.js
index 2502b53..f993e9b 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-1415.js
@@ -25,22 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Surrogate pair range.
+// U+D800
+assertThrows(function(){ decodeURIComponent("%ED%A0%80"); }, URIError);
+// U+DBFF
+assertThrows(function(){ decodeURIComponent("%ED%AF%BF"); }, URIError);
+// U+DC00
+assertThrows(function(){ decodeURIComponent("%ED%B0%80"); }, URIError);
+// U+DFFF
+assertThrows(function(){ decodeURIComponent("%ED%BF%BF"); }, URIError);
 
-var e = new Error();
-assertEquals('Error', e + '');
-
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
-
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+// Overlong encodings
+// U+007F in two bytes.
+assertThrows(function(){ decodeURIComponent("%C1%BF"); }, URIError);
+// U+07FF in three bytes.
+assertThrows(function(){ decodeURIComponent("%E0%9F%BF"); }, URIError);
diff --git a/test/mjsunit/regress/regress-1530.js b/test/mjsunit/regress/regress-1530.js
new file mode 100644
index 0000000..db21144
--- /dev/null
+++ b/test/mjsunit/regress/regress-1530.js
@@ -0,0 +1,69 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that redefining the 'prototype' property of a function object
+// does actually set the internal value and does not screw up any
+// shadowing between said property and the internal value.
+
+var f = function() {};
+
+// Verify that normal assignment of 'prototype' property works properly
+// and updates the internal value.
+var x = { foo: 'bar' };
+f.prototype = x;
+assertSame(f.prototype, x);
+assertSame(f.prototype.foo, 'bar');
+assertSame(new f().foo, 'bar');
+assertSame(Object.getPrototypeOf(new f()), x);
+assertSame(Object.getOwnPropertyDescriptor(f, 'prototype').value, x);
+
+// Verify that 'prototype' behaves like a data property when it comes to
+// redefining with Object.defineProperty() and the internal value gets
+// updated.
+var y = { foo: 'baz' };
+Object.defineProperty(f, 'prototype', { value: y, writable: true });
+assertSame(f.prototype, y);
+assertSame(f.prototype.foo, 'baz');
+assertSame(new f().foo, 'baz');
+assertSame(Object.getPrototypeOf(new f()), y);
+assertSame(Object.getOwnPropertyDescriptor(f, 'prototype').value, y);
+
+// Verify that the previous redefinition didn't screw up callbacks and
+// the internal value still gets updated.
+var z = { foo: 'other' };
+f.prototype = z;
+assertSame(f.prototype, z);
+assertSame(f.prototype.foo, 'other');
+assertSame(new f().foo, 'other');
+assertSame(Object.getPrototypeOf(new f()), z);
+assertSame(Object.getOwnPropertyDescriptor(f, 'prototype').value, z);
+
+// Verify that non-writability of other properties is respected.
+assertThrows("Object.defineProperty(f, 'name', { value: {} })");
+assertThrows("Object.defineProperty(f, 'length', { value: {} })");
+assertThrows("Object.defineProperty(f, 'caller', { value: {} })");
+assertThrows("Object.defineProperty(f, 'arguments', { value: {} })");
diff --git a/test/mjsunit/regress/regress-1624-strict.js b/test/mjsunit/regress/regress-1624-strict.js
new file mode 100644
index 0000000..8bc58d5
--- /dev/null
+++ b/test/mjsunit/regress/regress-1624-strict.js
@@ -0,0 +1,140 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that global eval calls of strict code (independent of whether they
+// are direct or indirect) have their own lexical and variable environment.
+
+"use strict";
+var evil = eval;
+
+// Test global direct strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+eval('"use strict"; var no_touch = 1;');
+assertSame(0, no_touch);
+
+// Test global indirect strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+evil('"use strict"; var no_touch = 2;');
+assertSame(0, no_touch);
+
+// Test global direct non-strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+eval('var no_touch = 3;');
+assertSame(0, no_touch);
+
+// Test global indirect non-strict eval in strict script.
+// Expects global environment.
+var no_touch = 0;
+evil('var no_touch = 4;');
+assertSame(4, no_touch);
+
+// Test non-global direct strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+  var no_touch = 0;
+  eval('"use strict"; var no_touch = 5;');
+  assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+  var no_touch = 0;
+  evil('"use strict"; var no_touch = 6;');
+  assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global direct non-strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+  var no_touch = 0;
+  eval('var no_touch = 7;');
+  assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect non-strict eval in strict script.
+// Expects global environment.
+var no_touch = 0;
+(function() {
+  var no_touch = 0;
+  evil('var no_touch = 8;');
+  assertSame(0, no_touch);
+})()
+assertSame(8, no_touch);
+
+// Test non-global direct strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+  "use strict";
+  var no_touch = 0;
+  eval('"use strict"; var no_touch = 9;');
+  assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+  "use strict";
+  var no_touch = 0;
+  evil('"use strict"; var no_touch = 10;');
+  assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global direct non-strict eval in strict script.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+  "use strict";
+  var no_touch = 0;
+  eval('var no_touch = 11;');
+  assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect non-strict eval in strict script.
+// Expects global environment.
+var no_touch = 0;
+(function() {
+  "use strict";
+  var no_touch = 0;
+  evil('var no_touch = 12;');
+  assertSame(0, no_touch);
+})()
+assertSame(12, no_touch);
diff --git a/test/mjsunit/regress/regress-1624.js b/test/mjsunit/regress/regress-1624.js
new file mode 100644
index 0000000..987e036
--- /dev/null
+++ b/test/mjsunit/regress/regress-1624.js
@@ -0,0 +1,139 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that global eval calls of strict code (independent of whether they
+// are direct or indirect) have their own lexical and variable environment.
+
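+// All calls through "evil" below are indirect evals.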
+var evil = eval;
+
+// Test global direct strict eval.
+// Expects new environment.
+var no_touch = 0;
+eval('"use strict"; var no_touch = 1;');
+assertSame(0, no_touch);
+
+// Test global indirect strict eval.
+// Expects new environment.
+var no_touch = 0;
+evil('"use strict"; var no_touch = 2;');
+assertSame(0, no_touch);
+
+// Test global direct non-strict eval.
+// Expects global environment.
+var no_touch = 0;
+eval('var no_touch = 3;');
+assertSame(3, no_touch);
+
+// Test global indirect non-strict eval.
+// Expects global environment.
+var no_touch = 0;
+evil('var no_touch = 4;');
+assertSame(4, no_touch);
+
+// Test non-global direct strict eval in non-strict function.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+  var no_touch = 0;
+  eval('"use strict"; var no_touch = 5;');
+  assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect strict eval in non-strict function.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+  var no_touch = 0;
+  evil('"use strict"; var no_touch = 6;');
+  assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global direct non-strict eval in non-strict function.
+// Expects function environment.
+var no_touch = 0;
+(function() {
+  var no_touch = 0;
+  eval('var no_touch = 7;');
+  assertSame(7, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect non-strict eval in non-strict function.
+// Expects global environment.
+var no_touch = 0;
+(function() {
+  var no_touch = 0;
+  evil('var no_touch = 8;');
+  assertSame(0, no_touch);
+})()
+assertSame(8, no_touch);
+
+// Test non-global direct strict eval in strict function.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+  "use strict";
+  var no_touch = 0;
+  eval('"use strict"; var no_touch = 9;');
+  assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect strict eval in strict function.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+  "use strict";
+  var no_touch = 0;
+  evil('"use strict"; var no_touch = 10;');
+  assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global direct non-strict eval in strict function.
+// Expects new environment.
+var no_touch = 0;
+(function() {
+  "use strict";
+  var no_touch = 0;
+  eval('var no_touch = 11;');
+  assertSame(0, no_touch);
+})()
+assertSame(0, no_touch);
+
+// Test non-global indirect non-strict eval in strict function.
+// Expects global environment.
+var no_touch = 0;
+(function() {
+  "use strict";
+  var no_touch = 0;
+  evil('var no_touch = 12;');
+  assertSame(0, no_touch);
+})()
+assertSame(12, no_touch);
diff --git a/test/mjsunit/regress/regress-1639-2.js b/test/mjsunit/regress/regress-1639-2.js
new file mode 100644
index 0000000..c439dd8
--- /dev/null
+++ b/test/mjsunit/regress/regress-1639-2.js
@@ -0,0 +1,93 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+function sendCommand(state, cmd) {
+  // Get the debug command processor in paused state.
+  var dcp = state.debugCommandProcessor(false);
+  var request = JSON.stringify(cmd);
+  var response = dcp.processDebugJSONRequest(request);
+}
+
+var state = 0;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      var line = event_data.sourceLineText();
+      print('break: ' + line);
+      print('event data: ' + event_data.toJSONProtocol());
+      print();
+      assertEquals('// BREAK', line.substr(-8),
+                   "should not break outside evaluate");
+
+      switch (state) {
+      case 0:
+        state = 1;
+        // While in the debugger and stepping through a set of instructions
+        // executed in the evaluate command, the stepping must stop at the end
+        // of the said set of instructions and not step further into native
+        // debugger code.
+        sendCommand(exec_state, {
+          seq : 0,
+          type : "request",
+          command : "evaluate",
+          arguments : {
+            'expression' : 'print("A"); debugger; print("B"); // BREAK',
+            'global' : true
+          }
+        });
+        break;
+      case 1:
+        sendCommand(exec_state, {
+          seq : 0,
+          type : "request",
+          command : "continue",
+          arguments : {
+            stepaction : "next"
+          }
+        });
+        break;
+      }
+    }
+  } catch (e) {
+    print(e);
+  }
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function a() {
+} // BREAK
+
+// Set a break point and call to invoke the debug event listener.
+Debug.setBreakPoint(a, 0, 0);
+a();
diff --git a/test/mjsunit/regress/regress-1692.js b/test/mjsunit/regress/regress-1692.js
new file mode 100644
index 0000000..06bd66c
--- /dev/null
+++ b/test/mjsunit/regress/regress-1692.js
@@ -0,0 +1,89 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that Object.prototype.propertyIsEnumerable handles array indices
+// correctly.
+
+var p = Object.create({}, {
+  a : { value : 42, enumerable : true },
+  b : { value : 42, enumerable : false },
+  1 : { value : 42, enumerable : true },
+  2 : { value : 42, enumerable : false },
+  f : { get: function(){}, enumerable: true },
+  g : { get: function(){}, enumerable: false },
+  11 : { get: function(){}, enumerable: true },
+  12 : { get: function(){}, enumerable: false }
+});
+var o = Object.create(p, {
+  c : { value : 42, enumerable : true },
+  d : { value : 42, enumerable : false },
+  3 : { value : 42, enumerable : true },
+  4 : { value : 42, enumerable : false },
+  h : { get: function(){}, enumerable: true },
+  k : { get: function(){}, enumerable: false },
+  13 : { get: function(){}, enumerable: true },
+  14 : { get: function(){}, enumerable: false }
+});
+
+// Inherited properties are ignored.
+assertFalse(o.propertyIsEnumerable("a"));
+assertFalse(o.propertyIsEnumerable("b"));
+assertFalse(o.propertyIsEnumerable("1"));
+assertFalse(o.propertyIsEnumerable("2"));
+
+// Own properties.
+assertTrue(o.propertyIsEnumerable("c"));
+assertFalse(o.propertyIsEnumerable("d"));
+assertTrue(o.propertyIsEnumerable("3"));
+assertFalse(o.propertyIsEnumerable("4"));
+
+// Inherited accessors.
+assertFalse(o.propertyIsEnumerable("f"));
+assertFalse(o.propertyIsEnumerable("g"));
+assertFalse(o.propertyIsEnumerable("11"));
+assertFalse(o.propertyIsEnumerable("12"));
+
+// Own accessors.
+assertTrue(o.propertyIsEnumerable("h"));
+assertFalse(o.propertyIsEnumerable("k"));
+assertTrue(o.propertyIsEnumerable("13"));
+assertFalse(o.propertyIsEnumerable("14"));
+
+// Nonexisting properties.
+assertFalse(o.propertyIsEnumerable("xxx"));
+assertFalse(o.propertyIsEnumerable("999"));
+
+// String object properties.
+var o = Object("string");
+// Non-string property on String object.
+o[10] = 42;
+assertTrue(o.propertyIsEnumerable(10));
+assertFalse(o.propertyIsEnumerable(0));
+
+// Fast elements.
+var o = [1,2,3,4,5];
+assertTrue(o.propertyIsEnumerable(3));
diff --git a/test/mjsunit/regress/regress-1708.js b/test/mjsunit/regress/regress-1708.js
new file mode 100644
index 0000000..ab50e07
--- /dev/null
+++ b/test/mjsunit/regress/regress-1708.js
@@ -0,0 +1,63 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test of a very rare corner case where left-trimming an
+// array caused invalid marking bit patterns on lazily swept pages.
+
+// Flags: --expose-gc --noincremental-marking --max-new-space-size 1000
+
+(function() {
+  var head = new Array(1);
+  var tail = head;
+
+  // Fill heap to increase old-space size and trigger lazy sweeping on
+  // some of the old-space pages.
+  for (var i = 0; i < 200; i++) {
+    tail[1] = new Array(1000);
+    tail = tail[1];
+  }
+  array = new Array(100);
+  gc(); gc();
+
+  // At this point "array" should have been promoted to old-space and be
+  // located in a lazily swept page with intact marking bits. Now shift
+  // the array to trigger left-trimming operations.
+  assertEquals(100, array.length);
+  for (var i = 0; i < 50; i++) {
+    array.shift();
+  }
+  assertEquals(50, array.length);
+
+  // At this point "array" should have been trimmed from the left with
+  // marking bits correctly transferred to the new object start. The
+  // scavenging operations below cause lazy sweeping to advance, verifying
+  // that marking bit patterns are still sane.
+  for (var i = 0; i < 200; i++) {
+    tail[1] = new Array(1000);
+    tail = tail[1];
+  }
+})();
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-1711.js
similarity index 80%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-1711.js
index d3f2e35..15591b1 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-1711.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,14 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// String.prototype.split must evaluate the separator's toString even if the
+// limit is 0, because toString may have side effects.
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+var side_effect = false;
+var separator = new Object();
+separator.toString = function() {
+  side_effect = true;
+  return undefined;
+};
+'subject'.split(separator, 0);
+assertTrue(side_effect);
diff --git a/test/mjsunit/regress/regress-1713.js b/test/mjsunit/regress/regress-1713.js
new file mode 100644
index 0000000..0af1144
--- /dev/null
+++ b/test/mjsunit/regress/regress-1713.js
@@ -0,0 +1,127 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --always-compact --expose-gc
+
+var O = { get f() { return 0; } };
+
+var CODE = [];
+
+var R = [];
+
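+// Descriptive note (added): allocates N chunks of roughly 4Kb each
+// (1024-element arrays, assuming ~4-byte slots) to create memory pressure.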
+function Allocate4Kb(N) {
+  var arr = [];
+  do {arr.push(new Array(1024));} while (--N > 0);
+  return arr;
+}
+
+function AllocateXMb(X) {
+  return Allocate4Kb((1024 * X) / 4);
+}
+
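+// Descriptive note (added): singly linked list node; execute() walks to the
+// tail node and invokes its payload as a function on O.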
+function Node(v, next) { this.v = v; this.next = next; }
+
+Node.prototype.execute = function (O) {
+  var n = this;
+  while (n.next !== null) n = n.next;
+  n.v(O);
+};
+
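+// Descriptive note (added): builds a list of N + 1 nodes padded with
+// 1024-element arrays; only the tail node carries the payload x.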
+function LongList(N, x) {
+  if (N == 0) return new Node(x, null);
+  return new Node(new Array(1024), LongList(N - 1, x));
+}
+
+var L = LongList(1024, function (O) {
+  for (var i = 0; i < 5; i++) O.f;
+});
+
+
+
+function Incremental(O, x) {
+  if (!x) {
+    return;
+  }
+  function CreateCode(i) {
+    var f = new Function("return O.f_" + i);
+    CODE.push(f);
+    f(); // compile
+    f(); // compile
+    f(); // compile
+  }
+
+  for (var i = 0; i < 1e4; i++) CreateCode(i);
+  gc();
+  gc();
+  gc();
+
+  print(">>> 1 <<<");
+
+  L.execute(O);
+
+  try {} catch (e) {}
+
+  L = null;
+  print(">>> 2 <<<");
+  AllocateXMb(8);
+  // print("1");
+  // AllocateXMb(8);
+  // print("1");
+  // AllocateXMb(8);
+
+}
+
+function foo(O, x) {
+  Incremental(O, x);
+
+  print('f');
+
+  for (var i = 0; i < 5; i++) O.f;
+
+
+  print('g');
+
+  bar(x);
+}
+
+function bar(x) {
+  if (!x) return;
+  %DeoptimizeFunction(foo);
+  AllocateXMb(8);
+  AllocateXMb(8);
+}
+
+var O1 = {};
+var O2 = {};
+var O3 = {};
+var O4 = {f:0};
+
+foo(O1, false);
+foo(O2, false);
+foo(O3, false);
+%OptimizeFunctionOnNextCall(foo);
+foo(O4, true);
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-1748.js
similarity index 80%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-1748.js
index 2502b53..e287e55 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-1748.js
@@ -25,22 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Test that /^/ only matches at the beginning of the string.
+// A bug on x64 caused it to match when executing the RegExp on a part
+// of a string that starts at a multiple of 256.
 
-var e = new Error();
-assertEquals('Error', e + '');
-
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
-
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+var str = Array(10000).join("X");
+str.replace(/^|X/g, function(m, i, s) {
+  if (i > 0) assertEquals("X", m, "at position 0x" + i.toString(16));
+});
\ No newline at end of file
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-1757.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-1757.js
index d3f2e35..f7a5516 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-1757.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --string-slices --expose-externalize-string
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+var a = "abcdefghijklmnopqrstuvqxy"+"z";
+externalizeString(a, true);
+assertEquals('b', a.substring(1).charAt(0));
\ No newline at end of file
diff --git a/test/mjsunit/regress/regress-1790.js b/test/mjsunit/regress/regress-1790.js
new file mode 100644
index 0000000..8848eea
--- /dev/null
+++ b/test/mjsunit/regress/regress-1790.js
@@ -0,0 +1,58 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test checking that the sequence of element accesses in built-in
+// array functions conforms to the specification (i.e. [[HasProperty]] might
+// return a bogus result after [[Get]] has been called).
+
+function CheckSequence(builtin, callback) {
+  var array = [1,2,3];
+  var callback_count = 0;
+  var callback_wrapper = function() {
+    callback_count++;
+    return callback();
+  };
+
+  // Define getter that will delete itself upon first invocation.
+  Object.defineProperty(array, '1', {
+    get: function () { delete array[1]; },
+    configurable: true
+  });
+
+  assertTrue(array.hasOwnProperty('1'));
+  builtin.apply(array, [callback_wrapper, 'argument']);
+  assertFalse(array.hasOwnProperty('1'));
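+  // Per spec, [[HasProperty]] is consulted before [[Get]], so the getter
+  // deleting itself must not cause the callback for element 1 to be
+  // skipped: all three elements are visited.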
+  assertEquals(3, callback_count);
+}
+
+CheckSequence(Array.prototype.every,       function() { return true; });
+CheckSequence(Array.prototype.filter,      function() { return true; });
+CheckSequence(Array.prototype.forEach,     function() { return 0; });
+CheckSequence(Array.prototype.map,         function() { return 0; });
+CheckSequence(Array.prototype.reduce,      function() { return 0; });
+CheckSequence(Array.prototype.reduceRight, function() { return 0; });
+CheckSequence(Array.prototype.some,        function() { return false; });
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-1849.js
similarity index 80%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-1849.js
index d3f2e35..176f918 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-1849.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// See: http://code.google.com/p/v8/issues/detail?id=1849
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+// Flags: --allow-natives-syntax
 
-assertThrows('eval(delete eval)');
-
+var count = 1e5;
+var arr = new Array(count);
+assertFalse(%HasFastDoubleElements(arr));
+for (var i = 0; i < count; i++) {
+  arr[i] = 0;
+}
+assertFalse(%HasFastDoubleElements(arr));
+assertTrue(%HasFastSmiOnlyElements(arr));
diff --git a/test/mjsunit/regress/regress-1853.js b/test/mjsunit/regress/regress-1853.js
new file mode 100644
index 0000000..f80bade
--- /dev/null
+++ b/test/mjsunit/regress/regress-1853.js
@@ -0,0 +1,116 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Test whether scripts compiled after setting the break point are
+// updated correctly.
+
+Debug = debug.Debug;
+
+var break_count = 0;
+var test_break_1 = false;
+var test_break_2 = false;
+
+function sendCommand(state, cmd) {
+  // Get the debug command processor in paused state.
+  var dcp = state.debugCommandProcessor(false);
+  var request = JSON.stringify(cmd);
+  var response = dcp.processDebugJSONRequest(request);
+  return JSON.parse(response);
+}
+
+function setBreakPointByName(state) {
+  sendCommand(state, {
+    seq: 0,
+    type: "request",
+    command: "setbreakpoint",
+    arguments: {
+      type: "script",
+      target: "testScriptOne",
+      line: 2
+    }
+  });
+}
+
+function setBreakPointByRegExp(state) {
+  sendCommand(state, {
+    seq: 0,
+    type: "request",
+    command: "setbreakpoint",
+    arguments: {
+      type: "scriptRegExp",
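+      // The '.' in the pattern matches the 't' of "testScriptTwo".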
+      target: "Scrip.Two",
+      line: 2
+    }
+  });
+}
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      switch (break_count) {
+        case 0:
+          // Set break points before the code has been compiled.
+          setBreakPointByName(exec_state);
+          setBreakPointByRegExp(exec_state);
+          break;
+        case 1:
+          // Set the flag to prove that we hit the first break point.
+          test_break_1 = true;
+          break;
+        case 2:
+          // Set the flag to prove that we hit the second break point.
+          test_break_2 = true;
+          break;
+      }
+      break_count++;
+    }
+  } catch (e) {
+    print(e);
+  }
+}
+
+Debug.setListener(listener);
+debugger;
+
+eval('function test1() {                \n' +
+     '  assertFalse(test_break_1);      \n' +
+     '  assertTrue(test_break_1);       \n' +
+     '}                                 \n' +
+     '//@ sourceURL=testScriptOne');
+
+eval('function test2() {                \n' +
+     '  assertFalse(test_break_2);      \n' +
+     '  assertTrue(test_break_2);       \n' +
+     '}                                 \n' +
+     '//@ sourceURL=testScriptTwo');
+
+test1();
+test2();
+assertEquals(3, break_count);
+
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-1878.js
similarity index 75%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-1878.js
index d3f2e35..a1648b1 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-1878.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// See: http://code.google.com/p/v8/issues/detail?id=1878
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+// Flags: --allow-natives-syntax --expose_natives_as=natives
 
-assertThrows('eval(delete eval)');
+var a = Array();
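+// InternalArray is used internally by V8 builtins; it must not share a map
+// with user-visible arrays and should keep fast elements even when large.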
 
+for (var i = 0; i < 1000; i++) {
+  var ai = natives.InternalArray(10000);
+  assertFalse(%HaveSameMap(ai, a));
+  assertTrue(%HasFastElements(ai));
+}
+
+for (var i = 0; i < 1000; i++) {
+  var ai = new natives.InternalArray(10000);
+  assertFalse(%HaveSameMap(ai, a));
+  assertTrue(%HasFastElements(ai));
+}
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-1898.js
similarity index 86%
rename from test/mjsunit/regress/regress-221.js
rename to test/mjsunit/regress/regress-1898.js
index d3f2e35..5440446 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-1898.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+function f(x) {
+  Math.log(Math.min(0.1, Math.abs(x)));
+}
 
-assertThrows('eval(delete eval)');
-
+f(0.1);
+f(0.1);
+%OptimizeFunctionOnNextCall(f);
+f(0.1);
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-1924.js
similarity index 75%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-1924.js
index d3f2e35..8039541 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-1924.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// For http://code.google.com/p/v8/issues/detail?id=1924
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+a: break a;
+a: b: break a;
+a: b: break b;
+assertThrows("a: break a a", SyntaxError)
+assertThrows("a: break a 1", SyntaxError)
+assertThrows("a: break a ''", SyntaxError)
+assertThrows("a: break a var b", SyntaxError)
+assertThrows("a: break a {}", SyntaxError)
 
-assertThrows('eval(delete eval)');
-
+a: if (0) break a;
+b: if (0) {break b;} else {}
+c: if (0) break c; else {}
+d: if (0) break d; else break d;
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-1945.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-1945.js
index d3f2e35..bffc775 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-1945.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+var _d = new Date();
+_d.setHours(0,0,0,0);
+_d.setHours(0,0,0,0);
+%OptimizeFunctionOnNextCall(_d.setHours);
+_d.setHours(0,0,0,0);
diff --git a/test/mjsunit/regress/regress-1969.js b/test/mjsunit/regress/regress-1969.js
new file mode 100644
index 0000000..2728c2c
--- /dev/null
+++ b/test/mjsunit/regress/regress-1969.js
@@ -0,0 +1,5045 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+var start = (new Date()).getTime();
+var array = f();
+var end = (new Date()).getTime();
+
+// Assert that recompiling and executing f() takes less than a second.
+assertTrue((end - start) < 1000);
+
+for (var i = 0; i < 5000; i++) assertEquals(0, array[i]);
+
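+// Descriptive note (added): f initializes a 5000-element array one index at
+// a time; optimizing a function with thousands of such stores must not blow
+// the time budget asserted above.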
+function f() {
+  var a = new Array(5000);
+  a[0]=0;
+  a[1]=0;
+  a[2]=0;
+  a[3]=0;
+  a[4]=0;
+  a[5]=0;
+  a[6]=0;
+  a[7]=0;
+  a[8]=0;
+  a[9]=0;
+  a[10]=0;
+  a[11]=0;
+  a[12]=0;
+  a[13]=0;
+  a[14]=0;
+  a[15]=0;
+  a[16]=0;
+  a[17]=0;
+  a[18]=0;
+  a[19]=0;
+  a[20]=0;
+  a[21]=0;
+  a[22]=0;
+  a[23]=0;
+  a[24]=0;
+  a[25]=0;
+  a[26]=0;
+  a[27]=0;
+  a[28]=0;
+  a[29]=0;
+  a[30]=0;
+  a[31]=0;
+  a[32]=0;
+  a[33]=0;
+  a[34]=0;
+  a[35]=0;
+  a[36]=0;
+  a[37]=0;
+  a[38]=0;
+  a[39]=0;
+  a[40]=0;
+  a[41]=0;
+  a[42]=0;
+  a[43]=0;
+  a[44]=0;
+  a[45]=0;
+  a[46]=0;
+  a[47]=0;
+  a[48]=0;
+  a[49]=0;
+  a[50]=0;
+  a[51]=0;
+  a[52]=0;
+  a[53]=0;
+  a[54]=0;
+  a[55]=0;
+  a[56]=0;
+  a[57]=0;
+  a[58]=0;
+  a[59]=0;
+  a[60]=0;
+  a[61]=0;
+  a[62]=0;
+  a[63]=0;
+  a[64]=0;
+  a[65]=0;
+  a[66]=0;
+  a[67]=0;
+  a[68]=0;
+  a[69]=0;
+  a[70]=0;
+  a[71]=0;
+  a[72]=0;
+  a[73]=0;
+  a[74]=0;
+  a[75]=0;
+  a[76]=0;
+  a[77]=0;
+  a[78]=0;
+  a[79]=0;
+  a[80]=0;
+  a[81]=0;
+  a[82]=0;
+  a[83]=0;
+  a[84]=0;
+  a[85]=0;
+  a[86]=0;
+  a[87]=0;
+  a[88]=0;
+  a[89]=0;
+  a[90]=0;
+  a[91]=0;
+  a[92]=0;
+  a[93]=0;
+  a[94]=0;
+  a[95]=0;
+  a[96]=0;
+  a[97]=0;
+  a[98]=0;
+  a[99]=0;
+  a[100]=0;
+  a[101]=0;
+  a[102]=0;
+  a[103]=0;
+  a[104]=0;
+  a[105]=0;
+  a[106]=0;
+  a[107]=0;
+  a[108]=0;
+  a[109]=0;
+  a[110]=0;
+  a[111]=0;
+  a[112]=0;
+  a[113]=0;
+  a[114]=0;
+  a[115]=0;
+  a[116]=0;
+  a[117]=0;
+  a[118]=0;
+  a[119]=0;
+  a[120]=0;
+  a[121]=0;
+  a[122]=0;
+  a[123]=0;
+  a[124]=0;
+  a[125]=0;
+  a[126]=0;
+  a[127]=0;
+  a[128]=0;
+  a[129]=0;
+  a[130]=0;
+  a[131]=0;
+  a[132]=0;
+  a[133]=0;
+  a[134]=0;
+  a[135]=0;
+  a[136]=0;
+  a[137]=0;
+  a[138]=0;
+  a[139]=0;
+  a[140]=0;
+  a[141]=0;
+  a[142]=0;
+  a[143]=0;
+  a[144]=0;
+  a[145]=0;
+  a[146]=0;
+  a[147]=0;
+  a[148]=0;
+  a[149]=0;
+  a[150]=0;
+  a[151]=0;
+  a[152]=0;
+  a[153]=0;
+  a[154]=0;
+  a[155]=0;
+  a[156]=0;
+  a[157]=0;
+  a[158]=0;
+  a[159]=0;
+  a[160]=0;
+  a[161]=0;
+  a[162]=0;
+  a[163]=0;
+  a[164]=0;
+  a[165]=0;
+  a[166]=0;
+  a[167]=0;
+  a[168]=0;
+  a[169]=0;
+  a[170]=0;
+  a[171]=0;
+  a[172]=0;
+  a[173]=0;
+  a[174]=0;
+  a[175]=0;
+  a[176]=0;
+  a[177]=0;
+  a[178]=0;
+  a[179]=0;
+  a[180]=0;
+  a[181]=0;
+  a[182]=0;
+  a[183]=0;
+  a[184]=0;
+  a[185]=0;
+  a[186]=0;
+  a[187]=0;
+  a[188]=0;
+  a[189]=0;
+  a[190]=0;
+  a[191]=0;
+  a[192]=0;
+  a[193]=0;
+  a[194]=0;
+  a[195]=0;
+  a[196]=0;
+  a[197]=0;
+  a[198]=0;
+  a[199]=0;
+  a[200]=0;
+  a[201]=0;
+  a[202]=0;
+  a[203]=0;
+  a[204]=0;
+  a[205]=0;
+  a[206]=0;
+  a[207]=0;
+  a[208]=0;
+  a[209]=0;
+  a[210]=0;
+  a[211]=0;
+  a[212]=0;
+  a[213]=0;
+  a[214]=0;
+  a[215]=0;
+  a[216]=0;
+  a[217]=0;
+  a[218]=0;
+  a[219]=0;
+  a[220]=0;
+  a[221]=0;
+  a[222]=0;
+  a[223]=0;
+  a[224]=0;
+  a[225]=0;
+  a[226]=0;
+  a[227]=0;
+  a[228]=0;
+  a[229]=0;
+  a[230]=0;
+  a[231]=0;
+  a[232]=0;
+  a[233]=0;
+  a[234]=0;
+  a[235]=0;
+  a[236]=0;
+  a[237]=0;
+  a[238]=0;
+  a[239]=0;
+  a[240]=0;
+  a[241]=0;
+  a[242]=0;
+  a[243]=0;
+  a[244]=0;
+  a[245]=0;
+  a[246]=0;
+  a[247]=0;
+  a[248]=0;
+  a[249]=0;
+  a[250]=0;
+  a[251]=0;
+  a[252]=0;
+  a[253]=0;
+  a[254]=0;
+  a[255]=0;
+  a[256]=0;
+  a[257]=0;
+  a[258]=0;
+  a[259]=0;
+  a[260]=0;
+  a[261]=0;
+  a[262]=0;
+  a[263]=0;
+  a[264]=0;
+  a[265]=0;
+  a[266]=0;
+  a[267]=0;
+  a[268]=0;
+  a[269]=0;
+  a[270]=0;
+  a[271]=0;
+  a[272]=0;
+  a[273]=0;
+  a[274]=0;
+  a[275]=0;
+  a[276]=0;
+  a[277]=0;
+  a[278]=0;
+  a[279]=0;
+  a[280]=0;
+  a[281]=0;
+  a[282]=0;
+  a[283]=0;
+  a[284]=0;
+  a[285]=0;
+  a[286]=0;
+  a[287]=0;
+  a[288]=0;
+  a[289]=0;
+  a[290]=0;
+  a[291]=0;
+  a[292]=0;
+  a[293]=0;
+  a[294]=0;
+  a[295]=0;
+  a[296]=0;
+  a[297]=0;
+  a[298]=0;
+  a[299]=0;
+  a[300]=0;
+  a[301]=0;
+  a[302]=0;
+  a[303]=0;
+  a[304]=0;
+  a[305]=0;
+  a[306]=0;
+  a[307]=0;
+  a[308]=0;
+  a[309]=0;
+  a[310]=0;
+  a[311]=0;
+  a[312]=0;
+  a[313]=0;
+  a[314]=0;
+  a[315]=0;
+  a[316]=0;
+  a[317]=0;
+  a[318]=0;
+  a[319]=0;
+  a[320]=0;
+  a[321]=0;
+  a[322]=0;
+  a[323]=0;
+  a[324]=0;
+  a[325]=0;
+  a[326]=0;
+  a[327]=0;
+  a[328]=0;
+  a[329]=0;
+  a[330]=0;
+  a[331]=0;
+  a[332]=0;
+  a[333]=0;
+  a[334]=0;
+  a[335]=0;
+  a[336]=0;
+  a[337]=0;
+  a[338]=0;
+  a[339]=0;
+  a[340]=0;
+  a[341]=0;
+  a[342]=0;
+  a[343]=0;
+  a[344]=0;
+  a[345]=0;
+  a[346]=0;
+  a[347]=0;
+  a[348]=0;
+  a[349]=0;
+  a[350]=0;
+  a[351]=0;
+  a[352]=0;
+  a[353]=0;
+  a[354]=0;
+  a[355]=0;
+  a[356]=0;
+  a[357]=0;
+  a[358]=0;
+  a[359]=0;
+  a[360]=0;
+  a[361]=0;
+  a[362]=0;
+  a[363]=0;
+  a[364]=0;
+  a[365]=0;
+  a[366]=0;
+  a[367]=0;
+  a[368]=0;
+  a[369]=0;
+  a[370]=0;
+  a[371]=0;
+  a[372]=0;
+  a[373]=0;
+  a[374]=0;
+  a[375]=0;
+  a[376]=0;
+  a[377]=0;
+  a[378]=0;
+  a[379]=0;
+  a[380]=0;
+  a[381]=0;
+  a[382]=0;
+  a[383]=0;
+  a[384]=0;
+  a[385]=0;
+  a[386]=0;
+  a[387]=0;
+  a[388]=0;
+  a[389]=0;
+  a[390]=0;
+  a[391]=0;
+  a[392]=0;
+  a[393]=0;
+  a[394]=0;
+  a[395]=0;
+  a[396]=0;
+  a[397]=0;
+  a[398]=0;
+  a[399]=0;
+  a[400]=0;
+  a[401]=0;
+  a[402]=0;
+  a[403]=0;
+  a[404]=0;
+  a[405]=0;
+  a[406]=0;
+  a[407]=0;
+  a[408]=0;
+  a[409]=0;
+  a[410]=0;
+  a[411]=0;
+  a[412]=0;
+  a[413]=0;
+  a[414]=0;
+  a[415]=0;
+  a[416]=0;
+  a[417]=0;
+  a[418]=0;
+  a[419]=0;
+  a[420]=0;
+  a[421]=0;
+  a[422]=0;
+  a[423]=0;
+  a[424]=0;
+  a[425]=0;
+  a[426]=0;
+  a[427]=0;
+  a[428]=0;
+  a[429]=0;
+  a[430]=0;
+  a[431]=0;
+  a[432]=0;
+  a[433]=0;
+  a[434]=0;
+  a[435]=0;
+  a[436]=0;
+  a[437]=0;
+  a[438]=0;
+  a[439]=0;
+  a[440]=0;
+  a[441]=0;
+  a[442]=0;
+  a[443]=0;
+  a[444]=0;
+  a[445]=0;
+  a[446]=0;
+  a[447]=0;
+  a[448]=0;
+  a[449]=0;
+  a[450]=0;
+  a[451]=0;
+  a[452]=0;
+  a[453]=0;
+  a[454]=0;
+  a[455]=0;
+  a[456]=0;
+  a[457]=0;
+  a[458]=0;
+  a[459]=0;
+  a[460]=0;
+  a[461]=0;
+  a[462]=0;
+  a[463]=0;
+  a[464]=0;
+  a[465]=0;
+  a[466]=0;
+  a[467]=0;
+  a[468]=0;
+  a[469]=0;
+  a[470]=0;
+  a[471]=0;
+  a[472]=0;
+  a[473]=0;
+  a[474]=0;
+  a[475]=0;
+  a[476]=0;
+  a[477]=0;
+  a[478]=0;
+  a[479]=0;
+  a[480]=0;
+  a[481]=0;
+  a[482]=0;
+  a[483]=0;
+  a[484]=0;
+  a[485]=0;
+  a[486]=0;
+  a[487]=0;
+  a[488]=0;
+  a[489]=0;
+  a[490]=0;
+  a[491]=0;
+  a[492]=0;
+  a[493]=0;
+  a[494]=0;
+  a[495]=0;
+  a[496]=0;
+  a[497]=0;
+  a[498]=0;
+  a[499]=0;
+  a[500]=0;
+  a[501]=0;
+  a[502]=0;
+  a[503]=0;
+  a[504]=0;
+  a[505]=0;
+  a[506]=0;
+  a[507]=0;
+  a[508]=0;
+  a[509]=0;
+  a[510]=0;
+  a[511]=0;
+  a[512]=0;
+  a[513]=0;
+  a[514]=0;
+  a[515]=0;
+  a[516]=0;
+  a[517]=0;
+  a[518]=0;
+  a[519]=0;
+  a[520]=0;
+  a[521]=0;
+  a[522]=0;
+  a[523]=0;
+  a[524]=0;
+  a[525]=0;
+  a[526]=0;
+  a[527]=0;
+  a[528]=0;
+  a[529]=0;
+  a[530]=0;
+  a[531]=0;
+  a[532]=0;
+  a[533]=0;
+  a[534]=0;
+  a[535]=0;
+  a[536]=0;
+  a[537]=0;
+  a[538]=0;
+  a[539]=0;
+  a[540]=0;
+  a[541]=0;
+  a[542]=0;
+  a[543]=0;
+  a[544]=0;
+  a[545]=0;
+  a[546]=0;
+  a[547]=0;
+  a[548]=0;
+  a[549]=0;
+  a[550]=0;
+  a[551]=0;
+  a[552]=0;
+  a[553]=0;
+  a[554]=0;
+  a[555]=0;
+  a[556]=0;
+  a[557]=0;
+  a[558]=0;
+  a[559]=0;
+  a[560]=0;
+  a[561]=0;
+  a[562]=0;
+  a[563]=0;
+  a[564]=0;
+  a[565]=0;
+  a[566]=0;
+  a[567]=0;
+  a[568]=0;
+  a[569]=0;
+  a[570]=0;
+  a[571]=0;
+  a[572]=0;
+  a[573]=0;
+  a[574]=0;
+  a[575]=0;
+  a[576]=0;
+  a[577]=0;
+  a[578]=0;
+  a[579]=0;
+  a[580]=0;
+  a[581]=0;
+  a[582]=0;
+  a[583]=0;
+  a[584]=0;
+  a[585]=0;
+  a[586]=0;
+  a[587]=0;
+  a[588]=0;
+  a[589]=0;
+  a[590]=0;
+  a[591]=0;
+  a[592]=0;
+  a[593]=0;
+  a[594]=0;
+  a[595]=0;
+  a[596]=0;
+  a[597]=0;
+  a[598]=0;
+  a[599]=0;
+  a[600]=0;
+  a[601]=0;
+  a[602]=0;
+  a[603]=0;
+  a[604]=0;
+  a[605]=0;
+  a[606]=0;
+  a[607]=0;
+  a[608]=0;
+  a[609]=0;
+  a[610]=0;
+  a[611]=0;
+  a[612]=0;
+  a[613]=0;
+  a[614]=0;
+  a[615]=0;
+  a[616]=0;
+  a[617]=0;
+  a[618]=0;
+  a[619]=0;
+  a[620]=0;
+  a[621]=0;
+  a[622]=0;
+  a[623]=0;
+  a[624]=0;
+  a[625]=0;
+  a[626]=0;
+  a[627]=0;
+  a[628]=0;
+  a[629]=0;
+  a[630]=0;
+  a[631]=0;
+  a[632]=0;
+  a[633]=0;
+  a[634]=0;
+  a[635]=0;
+  a[636]=0;
+  a[637]=0;
+  a[638]=0;
+  a[639]=0;
+  a[640]=0;
+  a[641]=0;
+  a[642]=0;
+  a[643]=0;
+  a[644]=0;
+  a[645]=0;
+  a[646]=0;
+  a[647]=0;
+  a[648]=0;
+  a[649]=0;
+  a[650]=0;
+  a[651]=0;
+  a[652]=0;
+  a[653]=0;
+  a[654]=0;
+  a[655]=0;
+  a[656]=0;
+  a[657]=0;
+  a[658]=0;
+  a[659]=0;
+  a[660]=0;
+  a[661]=0;
+  a[662]=0;
+  a[663]=0;
+  a[664]=0;
+  a[665]=0;
+  a[666]=0;
+  a[667]=0;
+  a[668]=0;
+  a[669]=0;
+  a[670]=0;
+  a[671]=0;
+  a[672]=0;
+  a[673]=0;
+  a[674]=0;
+  a[675]=0;
+  a[676]=0;
+  a[677]=0;
+  a[678]=0;
+  a[679]=0;
+  a[680]=0;
+  a[681]=0;
+  a[682]=0;
+  a[683]=0;
+  a[684]=0;
+  a[685]=0;
+  a[686]=0;
+  a[687]=0;
+  a[688]=0;
+  a[689]=0;
+  a[690]=0;
+  a[691]=0;
+  a[692]=0;
+  a[693]=0;
+  a[694]=0;
+  a[695]=0;
+  a[696]=0;
+  a[697]=0;
+  a[698]=0;
+  a[699]=0;
+  a[700]=0;
+  a[701]=0;
+  a[702]=0;
+  a[703]=0;
+  a[704]=0;
+  a[705]=0;
+  a[706]=0;
+  a[707]=0;
+  a[708]=0;
+  a[709]=0;
+  a[710]=0;
+  a[711]=0;
+  a[712]=0;
+  a[713]=0;
+  a[714]=0;
+  a[715]=0;
+  a[716]=0;
+  a[717]=0;
+  a[718]=0;
+  a[719]=0;
+  a[720]=0;
+  a[721]=0;
+  a[722]=0;
+  a[723]=0;
+  a[724]=0;
+  a[725]=0;
+  a[726]=0;
+  a[727]=0;
+  a[728]=0;
+  a[729]=0;
+  a[730]=0;
+  a[731]=0;
+  a[732]=0;
+  a[733]=0;
+  a[734]=0;
+  a[735]=0;
+  a[736]=0;
+  a[737]=0;
+  a[738]=0;
+  a[739]=0;
+  a[740]=0;
+  a[741]=0;
+  a[742]=0;
+  a[743]=0;
+  a[744]=0;
+  a[745]=0;
+  a[746]=0;
+  a[747]=0;
+  a[748]=0;
+  a[749]=0;
+  a[750]=0;
+  a[751]=0;
+  a[752]=0;
+  a[753]=0;
+  a[754]=0;
+  a[755]=0;
+  a[756]=0;
+  a[757]=0;
+  a[758]=0;
+  a[759]=0;
+  a[760]=0;
+  a[761]=0;
+  a[762]=0;
+  a[763]=0;
+  a[764]=0;
+  a[765]=0;
+  a[766]=0;
+  a[767]=0;
+  a[768]=0;
+  a[769]=0;
+  a[770]=0;
+  a[771]=0;
+  a[772]=0;
+  a[773]=0;
+  a[774]=0;
+  a[775]=0;
+  a[776]=0;
+  a[777]=0;
+  a[778]=0;
+  a[779]=0;
+  a[780]=0;
+  a[781]=0;
+  a[782]=0;
+  a[783]=0;
+  a[784]=0;
+  a[785]=0;
+  a[786]=0;
+  a[787]=0;
+  a[788]=0;
+  a[789]=0;
+  a[790]=0;
+  a[791]=0;
+  a[792]=0;
+  a[793]=0;
+  a[794]=0;
+  a[795]=0;
+  a[796]=0;
+  a[797]=0;
+  a[798]=0;
+  a[799]=0;
+  a[800]=0;
+  a[801]=0;
+  a[802]=0;
+  a[803]=0;
+  a[804]=0;
+  a[805]=0;
+  a[806]=0;
+  a[807]=0;
+  a[808]=0;
+  a[809]=0;
+  a[810]=0;
+  a[811]=0;
+  a[812]=0;
+  a[813]=0;
+  a[814]=0;
+  a[815]=0;
+  a[816]=0;
+  a[817]=0;
+  a[818]=0;
+  a[819]=0;
+  a[820]=0;
+  a[821]=0;
+  a[822]=0;
+  a[823]=0;
+  a[824]=0;
+  a[825]=0;
+  a[826]=0;
+  a[827]=0;
+  a[828]=0;
+  a[829]=0;
+  a[830]=0;
+  a[831]=0;
+  a[832]=0;
+  a[833]=0;
+  a[834]=0;
+  a[835]=0;
+  a[836]=0;
+  a[837]=0;
+  a[838]=0;
+  a[839]=0;
+  a[840]=0;
+  a[841]=0;
+  a[842]=0;
+  a[843]=0;
+  a[844]=0;
+  a[845]=0;
+  a[846]=0;
+  a[847]=0;
+  a[848]=0;
+  a[849]=0;
+  a[850]=0;
+  a[851]=0;
+  a[852]=0;
+  a[853]=0;
+  a[854]=0;
+  a[855]=0;
+  a[856]=0;
+  a[857]=0;
+  a[858]=0;
+  a[859]=0;
+  a[860]=0;
+  a[861]=0;
+  a[862]=0;
+  a[863]=0;
+  a[864]=0;
+  a[865]=0;
+  a[866]=0;
+  a[867]=0;
+  a[868]=0;
+  a[869]=0;
+  a[870]=0;
+  a[871]=0;
+  a[872]=0;
+  a[873]=0;
+  a[874]=0;
+  a[875]=0;
+  a[876]=0;
+  a[877]=0;
+  a[878]=0;
+  a[879]=0;
+  a[880]=0;
+  a[881]=0;
+  a[882]=0;
+  a[883]=0;
+  a[884]=0;
+  a[885]=0;
+  a[886]=0;
+  a[887]=0;
+  a[888]=0;
+  a[889]=0;
+  a[890]=0;
+  a[891]=0;
+  a[892]=0;
+  a[893]=0;
+  a[894]=0;
+  a[895]=0;
+  a[896]=0;
+  a[897]=0;
+  a[898]=0;
+  a[899]=0;
+  a[900]=0;
+  a[901]=0;
+  a[902]=0;
+  a[903]=0;
+  a[904]=0;
+  a[905]=0;
+  a[906]=0;
+  a[907]=0;
+  a[908]=0;
+  a[909]=0;
+  a[910]=0;
+  a[911]=0;
+  a[912]=0;
+  a[913]=0;
+  a[914]=0;
+  a[915]=0;
+  a[916]=0;
+  a[917]=0;
+  a[918]=0;
+  a[919]=0;
+  a[920]=0;
+  a[921]=0;
+  a[922]=0;
+  a[923]=0;
+  a[924]=0;
+  a[925]=0;
+  a[926]=0;
+  a[927]=0;
+  a[928]=0;
+  a[929]=0;
+  a[930]=0;
+  a[931]=0;
+  a[932]=0;
+  a[933]=0;
+  a[934]=0;
+  a[935]=0;
+  a[936]=0;
+  a[937]=0;
+  a[938]=0;
+  a[939]=0;
+  a[940]=0;
+  a[941]=0;
+  a[942]=0;
+  a[943]=0;
+  a[944]=0;
+  a[945]=0;
+  a[946]=0;
+  a[947]=0;
+  a[948]=0;
+  a[949]=0;
+  a[950]=0;
+  a[951]=0;
+  a[952]=0;
+  a[953]=0;
+  a[954]=0;
+  a[955]=0;
+  a[956]=0;
+  a[957]=0;
+  a[958]=0;
+  a[959]=0;
+  a[960]=0;
+  a[961]=0;
+  a[962]=0;
+  a[963]=0;
+  a[964]=0;
+  a[965]=0;
+  a[966]=0;
+  a[967]=0;
+  a[968]=0;
+  a[969]=0;
+  a[970]=0;
+  a[971]=0;
+  a[972]=0;
+  a[973]=0;
+  a[974]=0;
+  a[975]=0;
+  a[976]=0;
+  a[977]=0;
+  a[978]=0;
+  a[979]=0;
+  a[980]=0;
+  a[981]=0;
+  a[982]=0;
+  a[983]=0;
+  a[984]=0;
+  a[985]=0;
+  a[986]=0;
+  a[987]=0;
+  a[988]=0;
+  a[989]=0;
+  a[990]=0;
+  a[991]=0;
+  a[992]=0;
+  a[993]=0;
+  a[994]=0;
+  a[995]=0;
+  a[996]=0;
+  a[997]=0;
+  a[998]=0;
+  a[999]=0;
+  a[1000]=0;
+  a[1001]=0;
+  a[1002]=0;
+  a[1003]=0;
+  a[1004]=0;
+  a[1005]=0;
+  a[1006]=0;
+  a[1007]=0;
+  a[1008]=0;
+  a[1009]=0;
+  a[1010]=0;
+  a[1011]=0;
+  a[1012]=0;
+  a[1013]=0;
+  a[1014]=0;
+  a[1015]=0;
+  a[1016]=0;
+  a[1017]=0;
+  a[1018]=0;
+  a[1019]=0;
+  a[1020]=0;
+  a[1021]=0;
+  a[1022]=0;
+  a[1023]=0;
+  a[1024]=0;
+  a[1025]=0;
+  a[1026]=0;
+  a[1027]=0;
+  a[1028]=0;
+  a[1029]=0;
+  a[1030]=0;
+  a[1031]=0;
+  a[1032]=0;
+  a[1033]=0;
+  a[1034]=0;
+  a[1035]=0;
+  a[1036]=0;
+  a[1037]=0;
+  a[1038]=0;
+  a[1039]=0;
+  a[1040]=0;
+  a[1041]=0;
+  a[1042]=0;
+  a[1043]=0;
+  a[1044]=0;
+  a[1045]=0;
+  a[1046]=0;
+  a[1047]=0;
+  a[1048]=0;
+  a[1049]=0;
+  a[1050]=0;
+  a[1051]=0;
+  a[1052]=0;
+  a[1053]=0;
+  a[1054]=0;
+  a[1055]=0;
+  a[1056]=0;
+  a[1057]=0;
+  a[1058]=0;
+  a[1059]=0;
+  a[1060]=0;
+  a[1061]=0;
+  a[1062]=0;
+  a[1063]=0;
+  a[1064]=0;
+  a[1065]=0;
+  a[1066]=0;
+  a[1067]=0;
+  a[1068]=0;
+  a[1069]=0;
+  a[1070]=0;
+  a[1071]=0;
+  a[1072]=0;
+  a[1073]=0;
+  a[1074]=0;
+  a[1075]=0;
+  a[1076]=0;
+  a[1077]=0;
+  a[1078]=0;
+  a[1079]=0;
+  a[1080]=0;
+  a[1081]=0;
+  a[1082]=0;
+  a[1083]=0;
+  a[1084]=0;
+  a[1085]=0;
+  a[1086]=0;
+  a[1087]=0;
+  a[1088]=0;
+  a[1089]=0;
+  a[1090]=0;
+  a[1091]=0;
+  a[1092]=0;
+  a[1093]=0;
+  a[1094]=0;
+  a[1095]=0;
+  a[1096]=0;
+  a[1097]=0;
+  a[1098]=0;
+  a[1099]=0;
+  a[1100]=0;
+  a[1101]=0;
+  a[1102]=0;
+  a[1103]=0;
+  a[1104]=0;
+  a[1105]=0;
+  a[1106]=0;
+  a[1107]=0;
+  a[1108]=0;
+  a[1109]=0;
+  a[1110]=0;
+  a[1111]=0;
+  a[1112]=0;
+  a[1113]=0;
+  a[1114]=0;
+  a[1115]=0;
+  a[1116]=0;
+  a[1117]=0;
+  a[1118]=0;
+  a[1119]=0;
+  a[1120]=0;
+  a[1121]=0;
+  a[1122]=0;
+  a[1123]=0;
+  a[1124]=0;
+  a[1125]=0;
+  a[1126]=0;
+  a[1127]=0;
+  a[1128]=0;
+  a[1129]=0;
+  a[1130]=0;
+  a[1131]=0;
+  a[1132]=0;
+  a[1133]=0;
+  a[1134]=0;
+  a[1135]=0;
+  a[1136]=0;
+  a[1137]=0;
+  a[1138]=0;
+  a[1139]=0;
+  a[1140]=0;
+  a[1141]=0;
+  a[1142]=0;
+  a[1143]=0;
+  a[1144]=0;
+  a[1145]=0;
+  a[1146]=0;
+  a[1147]=0;
+  a[1148]=0;
+  a[1149]=0;
+  a[1150]=0;
+  a[1151]=0;
+  a[1152]=0;
+  a[1153]=0;
+  a[1154]=0;
+  a[1155]=0;
+  a[1156]=0;
+  a[1157]=0;
+  a[1158]=0;
+  a[1159]=0;
+  a[1160]=0;
+  a[1161]=0;
+  a[1162]=0;
+  a[1163]=0;
+  a[1164]=0;
+  a[1165]=0;
+  a[1166]=0;
+  a[1167]=0;
+  a[1168]=0;
+  a[1169]=0;
+  a[1170]=0;
+  a[1171]=0;
+  a[1172]=0;
+  a[1173]=0;
+  a[1174]=0;
+  a[1175]=0;
+  a[1176]=0;
+  a[1177]=0;
+  a[1178]=0;
+  a[1179]=0;
+  a[1180]=0;
+  a[1181]=0;
+  a[1182]=0;
+  a[1183]=0;
+  a[1184]=0;
+  a[1185]=0;
+  a[1186]=0;
+  a[1187]=0;
+  a[1188]=0;
+  a[1189]=0;
+  a[1190]=0;
+  a[1191]=0;
+  a[1192]=0;
+  a[1193]=0;
+  a[1194]=0;
+  a[1195]=0;
+  a[1196]=0;
+  a[1197]=0;
+  a[1198]=0;
+  a[1199]=0;
+  a[1200]=0;
+  a[1201]=0;
+  a[1202]=0;
+  a[1203]=0;
+  a[1204]=0;
+  a[1205]=0;
+  a[1206]=0;
+  a[1207]=0;
+  a[1208]=0;
+  a[1209]=0;
+  a[1210]=0;
+  a[1211]=0;
+  a[1212]=0;
+  a[1213]=0;
+  a[1214]=0;
+  a[1215]=0;
+  a[1216]=0;
+  a[1217]=0;
+  a[1218]=0;
+  a[1219]=0;
+  a[1220]=0;
+  a[1221]=0;
+  a[1222]=0;
+  a[1223]=0;
+  a[1224]=0;
+  a[1225]=0;
+  a[1226]=0;
+  a[1227]=0;
+  a[1228]=0;
+  a[1229]=0;
+  a[1230]=0;
+  a[1231]=0;
+  a[1232]=0;
+  a[1233]=0;
+  a[1234]=0;
+  a[1235]=0;
+  a[1236]=0;
+  a[1237]=0;
+  a[1238]=0;
+  a[1239]=0;
+  a[1240]=0;
+  a[1241]=0;
+  a[1242]=0;
+  a[1243]=0;
+  a[1244]=0;
+  a[1245]=0;
+  a[1246]=0;
+  a[1247]=0;
+  a[1248]=0;
+  a[1249]=0;
+  a[1250]=0;
+  a[1251]=0;
+  a[1252]=0;
+  a[1253]=0;
+  a[1254]=0;
+  a[1255]=0;
+  a[1256]=0;
+  a[1257]=0;
+  a[1258]=0;
+  a[1259]=0;
+  a[1260]=0;
+  a[1261]=0;
+  a[1262]=0;
+  a[1263]=0;
+  a[1264]=0;
+  a[1265]=0;
+  a[1266]=0;
+  a[1267]=0;
+  a[1268]=0;
+  a[1269]=0;
+  a[1270]=0;
+  a[1271]=0;
+  a[1272]=0;
+  a[1273]=0;
+  a[1274]=0;
+  a[1275]=0;
+  a[1276]=0;
+  a[1277]=0;
+  a[1278]=0;
+  a[1279]=0;
+  a[1280]=0;
+  a[1281]=0;
+  a[1282]=0;
+  a[1283]=0;
+  a[1284]=0;
+  a[1285]=0;
+  a[1286]=0;
+  a[1287]=0;
+  a[1288]=0;
+  a[1289]=0;
+  a[1290]=0;
+  a[1291]=0;
+  a[1292]=0;
+  a[1293]=0;
+  a[1294]=0;
+  a[1295]=0;
+  a[1296]=0;
+  a[1297]=0;
+  a[1298]=0;
+  a[1299]=0;
+  a[1300]=0;
+  a[1301]=0;
+  a[1302]=0;
+  a[1303]=0;
+  a[1304]=0;
+  a[1305]=0;
+  a[1306]=0;
+  a[1307]=0;
+  a[1308]=0;
+  a[1309]=0;
+  a[1310]=0;
+  a[1311]=0;
+  a[1312]=0;
+  a[1313]=0;
+  a[1314]=0;
+  a[1315]=0;
+  a[1316]=0;
+  a[1317]=0;
+  a[1318]=0;
+  a[1319]=0;
+  a[1320]=0;
+  a[1321]=0;
+  a[1322]=0;
+  a[1323]=0;
+  a[1324]=0;
+  a[1325]=0;
+  a[1326]=0;
+  a[1327]=0;
+  a[1328]=0;
+  a[1329]=0;
+  a[1330]=0;
+  a[1331]=0;
+  a[1332]=0;
+  a[1333]=0;
+  a[1334]=0;
+  a[1335]=0;
+  a[1336]=0;
+  a[1337]=0;
+  a[1338]=0;
+  a[1339]=0;
+  a[1340]=0;
+  a[1341]=0;
+  a[1342]=0;
+  a[1343]=0;
+  a[1344]=0;
+  a[1345]=0;
+  a[1346]=0;
+  a[1347]=0;
+  a[1348]=0;
+  a[1349]=0;
+  a[1350]=0;
+  a[1351]=0;
+  a[1352]=0;
+  a[1353]=0;
+  a[1354]=0;
+  a[1355]=0;
+  a[1356]=0;
+  a[1357]=0;
+  a[1358]=0;
+  a[1359]=0;
+  a[1360]=0;
+  a[1361]=0;
+  a[1362]=0;
+  a[1363]=0;
+  a[1364]=0;
+  a[1365]=0;
+  a[1366]=0;
+  a[1367]=0;
+  a[1368]=0;
+  a[1369]=0;
+  a[1370]=0;
+  a[1371]=0;
+  a[1372]=0;
+  a[1373]=0;
+  a[1374]=0;
+  a[1375]=0;
+  a[1376]=0;
+  a[1377]=0;
+  a[1378]=0;
+  a[1379]=0;
+  a[1380]=0;
+  a[1381]=0;
+  a[1382]=0;
+  a[1383]=0;
+  a[1384]=0;
+  a[1385]=0;
+  a[1386]=0;
+  a[1387]=0;
+  a[1388]=0;
+  a[1389]=0;
+  a[1390]=0;
+  a[1391]=0;
+  a[1392]=0;
+  a[1393]=0;
+  a[1394]=0;
+  a[1395]=0;
+  a[1396]=0;
+  a[1397]=0;
+  a[1398]=0;
+  a[1399]=0;
+  a[1400]=0;
+  a[1401]=0;
+  a[1402]=0;
+  a[1403]=0;
+  a[1404]=0;
+  a[1405]=0;
+  a[1406]=0;
+  a[1407]=0;
+  a[1408]=0;
+  a[1409]=0;
+  a[1410]=0;
+  a[1411]=0;
+  a[1412]=0;
+  a[1413]=0;
+  a[1414]=0;
+  a[1415]=0;
+  a[1416]=0;
+  a[1417]=0;
+  a[1418]=0;
+  a[1419]=0;
+  a[1420]=0;
+  a[1421]=0;
+  a[1422]=0;
+  a[1423]=0;
+  a[1424]=0;
+  a[1425]=0;
+  a[1426]=0;
+  a[1427]=0;
+  a[1428]=0;
+  a[1429]=0;
+  a[1430]=0;
+  a[1431]=0;
+  a[1432]=0;
+  a[1433]=0;
+  a[1434]=0;
+  a[1435]=0;
+  a[1436]=0;
+  a[1437]=0;
+  a[1438]=0;
+  a[1439]=0;
+  a[1440]=0;
+  a[1441]=0;
+  a[1442]=0;
+  a[1443]=0;
+  a[1444]=0;
+  a[1445]=0;
+  a[1446]=0;
+  a[1447]=0;
+  a[1448]=0;
+  a[1449]=0;
+  a[1450]=0;
+  a[1451]=0;
+  a[1452]=0;
+  a[1453]=0;
+  a[1454]=0;
+  a[1455]=0;
+  a[1456]=0;
+  a[1457]=0;
+  a[1458]=0;
+  a[1459]=0;
+  a[1460]=0;
+  a[1461]=0;
+  a[1462]=0;
+  a[1463]=0;
+  a[1464]=0;
+  a[1465]=0;
+  a[1466]=0;
+  a[1467]=0;
+  a[1468]=0;
+  a[1469]=0;
+  a[1470]=0;
+  a[1471]=0;
+  a[1472]=0;
+  a[1473]=0;
+  a[1474]=0;
+  a[1475]=0;
+  a[1476]=0;
+  a[1477]=0;
+  a[1478]=0;
+  a[1479]=0;
+  a[1480]=0;
+  a[1481]=0;
+  a[1482]=0;
+  a[1483]=0;
+  a[1484]=0;
+  a[1485]=0;
+  a[1486]=0;
+  a[1487]=0;
+  a[1488]=0;
+  a[1489]=0;
+  a[1490]=0;
+  a[1491]=0;
+  a[1492]=0;
+  a[1493]=0;
+  a[1494]=0;
+  a[1495]=0;
+  a[1496]=0;
+  a[1497]=0;
+  a[1498]=0;
+  a[1499]=0;
+  a[1500]=0;
+  a[1501]=0;
+  a[1502]=0;
+  a[1503]=0;
+  a[1504]=0;
+  a[1505]=0;
+  a[1506]=0;
+  a[1507]=0;
+  a[1508]=0;
+  a[1509]=0;
+  a[1510]=0;
+  a[1511]=0;
+  a[1512]=0;
+  a[1513]=0;
+  a[1514]=0;
+  a[1515]=0;
+  a[1516]=0;
+  a[1517]=0;
+  a[1518]=0;
+  a[1519]=0;
+  a[1520]=0;
+  a[1521]=0;
+  a[1522]=0;
+  a[1523]=0;
+  a[1524]=0;
+  a[1525]=0;
+  a[1526]=0;
+  a[1527]=0;
+  a[1528]=0;
+  a[1529]=0;
+  a[1530]=0;
+  a[1531]=0;
+  a[1532]=0;
+  a[1533]=0;
+  a[1534]=0;
+  a[1535]=0;
+  a[1536]=0;
+  a[1537]=0;
+  a[1538]=0;
+  a[1539]=0;
+  a[1540]=0;
+  a[1541]=0;
+  a[1542]=0;
+  a[1543]=0;
+  a[1544]=0;
+  a[1545]=0;
+  a[1546]=0;
+  a[1547]=0;
+  a[1548]=0;
+  a[1549]=0;
+  a[1550]=0;
+  a[1551]=0;
+  a[1552]=0;
+  a[1553]=0;
+  a[1554]=0;
+  a[1555]=0;
+  a[1556]=0;
+  a[1557]=0;
+  a[1558]=0;
+  a[1559]=0;
+  a[1560]=0;
+  a[1561]=0;
+  a[1562]=0;
+  a[1563]=0;
+  a[1564]=0;
+  a[1565]=0;
+  a[1566]=0;
+  a[1567]=0;
+  a[1568]=0;
+  a[1569]=0;
+  a[1570]=0;
+  a[1571]=0;
+  a[1572]=0;
+  a[1573]=0;
+  a[1574]=0;
+  a[1575]=0;
+  a[1576]=0;
+  a[1577]=0;
+  a[1578]=0;
+  a[1579]=0;
+  a[1580]=0;
+  a[1581]=0;
+  a[1582]=0;
+  a[1583]=0;
+  a[1584]=0;
+  a[1585]=0;
+  a[1586]=0;
+  a[1587]=0;
+  a[1588]=0;
+  a[1589]=0;
+  a[1590]=0;
+  a[1591]=0;
+  a[1592]=0;
+  a[1593]=0;
+  a[1594]=0;
+  a[1595]=0;
+  a[1596]=0;
+  a[1597]=0;
+  a[1598]=0;
+  a[1599]=0;
+  a[1600]=0;
+  a[1601]=0;
+  a[1602]=0;
+  a[1603]=0;
+  a[1604]=0;
+  a[1605]=0;
+  a[1606]=0;
+  a[1607]=0;
+  a[1608]=0;
+  a[1609]=0;
+  a[1610]=0;
+  a[1611]=0;
+  a[1612]=0;
+  a[1613]=0;
+  a[1614]=0;
+  a[1615]=0;
+  a[1616]=0;
+  a[1617]=0;
+  a[1618]=0;
+  a[1619]=0;
+  a[1620]=0;
+  a[1621]=0;
+  a[1622]=0;
+  a[1623]=0;
+  a[1624]=0;
+  a[1625]=0;
+  a[1626]=0;
+  a[1627]=0;
+  a[1628]=0;
+  a[1629]=0;
+  a[1630]=0;
+  a[1631]=0;
+  a[1632]=0;
+  a[1633]=0;
+  a[1634]=0;
+  a[1635]=0;
+  a[1636]=0;
+  a[1637]=0;
+  a[1638]=0;
+  a[1639]=0;
+  a[1640]=0;
+  a[1641]=0;
+  a[1642]=0;
+  a[1643]=0;
+  a[1644]=0;
+  a[1645]=0;
+  a[1646]=0;
+  a[1647]=0;
+  a[1648]=0;
+  a[1649]=0;
+  a[1650]=0;
+  a[1651]=0;
+  a[1652]=0;
+  a[1653]=0;
+  a[1654]=0;
+  a[1655]=0;
+  a[1656]=0;
+  a[1657]=0;
+  a[1658]=0;
+  a[1659]=0;
+  a[1660]=0;
+  a[1661]=0;
+  a[1662]=0;
+  a[1663]=0;
+  a[1664]=0;
+  a[1665]=0;
+  a[1666]=0;
+  a[1667]=0;
+  a[1668]=0;
+  a[1669]=0;
+  a[1670]=0;
+  a[1671]=0;
+  a[1672]=0;
+  a[1673]=0;
+  a[1674]=0;
+  a[1675]=0;
+  a[1676]=0;
+  a[1677]=0;
+  a[1678]=0;
+  a[1679]=0;
+  a[1680]=0;
+  a[1681]=0;
+  a[1682]=0;
+  a[1683]=0;
+  a[1684]=0;
+  a[1685]=0;
+  a[1686]=0;
+  a[1687]=0;
+  a[1688]=0;
+  a[1689]=0;
+  a[1690]=0;
+  a[1691]=0;
+  a[1692]=0;
+  a[1693]=0;
+  a[1694]=0;
+  a[1695]=0;
+  a[1696]=0;
+  a[1697]=0;
+  a[1698]=0;
+  a[1699]=0;
+  a[1700]=0;
+  a[1701]=0;
+  a[1702]=0;
+  a[1703]=0;
+  a[1704]=0;
+  a[1705]=0;
+  a[1706]=0;
+  a[1707]=0;
+  a[1708]=0;
+  a[1709]=0;
+  a[1710]=0;
+  a[1711]=0;
+  a[1712]=0;
+  a[1713]=0;
+  a[1714]=0;
+  a[1715]=0;
+  a[1716]=0;
+  a[1717]=0;
+  a[1718]=0;
+  a[1719]=0;
+  a[1720]=0;
+  a[1721]=0;
+  a[1722]=0;
+  a[1723]=0;
+  a[1724]=0;
+  a[1725]=0;
+  a[1726]=0;
+  a[1727]=0;
+  a[1728]=0;
+  a[1729]=0;
+  a[1730]=0;
+  a[1731]=0;
+  a[1732]=0;
+  a[1733]=0;
+  a[1734]=0;
+  a[1735]=0;
+  a[1736]=0;
+  a[1737]=0;
+  a[1738]=0;
+  a[1739]=0;
+  a[1740]=0;
+  a[1741]=0;
+  a[1742]=0;
+  a[1743]=0;
+  a[1744]=0;
+  a[1745]=0;
+  a[1746]=0;
+  a[1747]=0;
+  a[1748]=0;
+  a[1749]=0;
+  a[1750]=0;
+  a[1751]=0;
+  a[1752]=0;
+  a[1753]=0;
+  a[1754]=0;
+  a[1755]=0;
+  a[1756]=0;
+  a[1757]=0;
+  a[1758]=0;
+  a[1759]=0;
+  a[1760]=0;
+  a[1761]=0;
+  a[1762]=0;
+  a[1763]=0;
+  a[1764]=0;
+  a[1765]=0;
+  a[1766]=0;
+  a[1767]=0;
+  a[1768]=0;
+  a[1769]=0;
+  a[1770]=0;
+  a[1771]=0;
+  a[1772]=0;
+  a[1773]=0;
+  a[1774]=0;
+  a[1775]=0;
+  a[1776]=0;
+  a[1777]=0;
+  a[1778]=0;
+  a[1779]=0;
+  a[1780]=0;
+  a[1781]=0;
+  a[1782]=0;
+  a[1783]=0;
+  a[1784]=0;
+  a[1785]=0;
+  a[1786]=0;
+  a[1787]=0;
+  a[1788]=0;
+  a[1789]=0;
+  a[1790]=0;
+  a[1791]=0;
+  a[1792]=0;
+  a[1793]=0;
+  a[1794]=0;
+  a[1795]=0;
+  a[1796]=0;
+  a[1797]=0;
+  a[1798]=0;
+  a[1799]=0;
+  a[1800]=0;
+  a[1801]=0;
+  a[1802]=0;
+  a[1803]=0;
+  a[1804]=0;
+  a[1805]=0;
+  a[1806]=0;
+  a[1807]=0;
+  a[1808]=0;
+  a[1809]=0;
+  a[1810]=0;
+  a[1811]=0;
+  a[1812]=0;
+  a[1813]=0;
+  a[1814]=0;
+  a[1815]=0;
+  a[1816]=0;
+  a[1817]=0;
+  a[1818]=0;
+  a[1819]=0;
+  a[1820]=0;
+  a[1821]=0;
+  a[1822]=0;
+  a[1823]=0;
+  a[1824]=0;
+  a[1825]=0;
+  a[1826]=0;
+  a[1827]=0;
+  a[1828]=0;
+  a[1829]=0;
+  a[1830]=0;
+  a[1831]=0;
+  a[1832]=0;
+  a[1833]=0;
+  a[1834]=0;
+  a[1835]=0;
+  a[1836]=0;
+  a[1837]=0;
+  a[1838]=0;
+  a[1839]=0;
+  a[1840]=0;
+  a[1841]=0;
+  a[1842]=0;
+  a[1843]=0;
+  a[1844]=0;
+  a[1845]=0;
+  a[1846]=0;
+  a[1847]=0;
+  a[1848]=0;
+  a[1849]=0;
+  a[1850]=0;
+  a[1851]=0;
+  a[1852]=0;
+  a[1853]=0;
+  a[1854]=0;
+  a[1855]=0;
+  a[1856]=0;
+  a[1857]=0;
+  a[1858]=0;
+  a[1859]=0;
+  a[1860]=0;
+  a[1861]=0;
+  a[1862]=0;
+  a[1863]=0;
+  a[1864]=0;
+  a[1865]=0;
+  a[1866]=0;
+  a[1867]=0;
+  a[1868]=0;
+  a[1869]=0;
+  a[1870]=0;
+  a[1871]=0;
+  a[1872]=0;
+  a[1873]=0;
+  a[1874]=0;
+  a[1875]=0;
+  a[1876]=0;
+  a[1877]=0;
+  a[1878]=0;
+  a[1879]=0;
+  a[1880]=0;
+  a[1881]=0;
+  a[1882]=0;
+  a[1883]=0;
+  a[1884]=0;
+  a[1885]=0;
+  a[1886]=0;
+  a[1887]=0;
+  a[1888]=0;
+  a[1889]=0;
+  a[1890]=0;
+  a[1891]=0;
+  a[1892]=0;
+  a[1893]=0;
+  a[1894]=0;
+  a[1895]=0;
+  a[1896]=0;
+  a[1897]=0;
+  a[1898]=0;
+  a[1899]=0;
+  a[1900]=0;
+  a[1901]=0;
+  a[1902]=0;
+  a[1903]=0;
+  a[1904]=0;
+  a[1905]=0;
+  a[1906]=0;
+  a[1907]=0;
+  a[1908]=0;
+  a[1909]=0;
+  a[1910]=0;
+  a[1911]=0;
+  a[1912]=0;
+  a[1913]=0;
+  a[1914]=0;
+  a[1915]=0;
+  a[1916]=0;
+  a[1917]=0;
+  a[1918]=0;
+  a[1919]=0;
+  a[1920]=0;
+  a[1921]=0;
+  a[1922]=0;
+  a[1923]=0;
+  a[1924]=0;
+  a[1925]=0;
+  a[1926]=0;
+  a[1927]=0;
+  a[1928]=0;
+  a[1929]=0;
+  a[1930]=0;
+  a[1931]=0;
+  a[1932]=0;
+  a[1933]=0;
+  a[1934]=0;
+  a[1935]=0;
+  a[1936]=0;
+  a[1937]=0;
+  a[1938]=0;
+  a[1939]=0;
+  a[1940]=0;
+  a[1941]=0;
+  a[1942]=0;
+  a[1943]=0;
+  a[1944]=0;
+  a[1945]=0;
+  a[1946]=0;
+  a[1947]=0;
+  a[1948]=0;
+  a[1949]=0;
+  a[1950]=0;
+  a[1951]=0;
+  a[1952]=0;
+  a[1953]=0;
+  a[1954]=0;
+  a[1955]=0;
+  a[1956]=0;
+  a[1957]=0;
+  a[1958]=0;
+  a[1959]=0;
+  a[1960]=0;
+  a[1961]=0;
+  a[1962]=0;
+  a[1963]=0;
+  a[1964]=0;
+  a[1965]=0;
+  a[1966]=0;
+  a[1967]=0;
+  a[1968]=0;
+  a[1969]=0;
+  a[1970]=0;
+  a[1971]=0;
+  a[1972]=0;
+  a[1973]=0;
+  a[1974]=0;
+  a[1975]=0;
+  a[1976]=0;
+  a[1977]=0;
+  a[1978]=0;
+  a[1979]=0;
+  a[1980]=0;
+  a[1981]=0;
+  a[1982]=0;
+  a[1983]=0;
+  a[1984]=0;
+  a[1985]=0;
+  a[1986]=0;
+  a[1987]=0;
+  a[1988]=0;
+  a[1989]=0;
+  a[1990]=0;
+  a[1991]=0;
+  a[1992]=0;
+  a[1993]=0;
+  a[1994]=0;
+  a[1995]=0;
+  a[1996]=0;
+  a[1997]=0;
+  a[1998]=0;
+  a[1999]=0;
+  a[2000]=0;
+  a[2001]=0;
+  a[2002]=0;
+  a[2003]=0;
+  a[2004]=0;
+  a[2005]=0;
+  a[2006]=0;
+  a[2007]=0;
+  a[2008]=0;
+  a[2009]=0;
+  a[2010]=0;
+  a[2011]=0;
+  a[2012]=0;
+  a[2013]=0;
+  a[2014]=0;
+  a[2015]=0;
+  a[2016]=0;
+  a[2017]=0;
+  a[2018]=0;
+  a[2019]=0;
+  a[2020]=0;
+  a[2021]=0;
+  a[2022]=0;
+  a[2023]=0;
+  a[2024]=0;
+  a[2025]=0;
+  a[2026]=0;
+  a[2027]=0;
+  a[2028]=0;
+  a[2029]=0;
+  a[2030]=0;
+  a[2031]=0;
+  a[2032]=0;
+  a[2033]=0;
+  a[2034]=0;
+  a[2035]=0;
+  a[2036]=0;
+  a[2037]=0;
+  a[2038]=0;
+  a[2039]=0;
+  a[2040]=0;
+  a[2041]=0;
+  a[2042]=0;
+  a[2043]=0;
+  a[2044]=0;
+  a[2045]=0;
+  a[2046]=0;
+  a[2047]=0;
+  a[2048]=0;
+  a[2049]=0;
+  a[2050]=0;
+  a[2051]=0;
+  a[2052]=0;
+  a[2053]=0;
+  a[2054]=0;
+  a[2055]=0;
+  a[2056]=0;
+  a[2057]=0;
+  a[2058]=0;
+  a[2059]=0;
+  a[2060]=0;
+  a[2061]=0;
+  a[2062]=0;
+  a[2063]=0;
+  a[2064]=0;
+  a[2065]=0;
+  a[2066]=0;
+  a[2067]=0;
+  a[2068]=0;
+  a[2069]=0;
+  a[2070]=0;
+  a[2071]=0;
+  a[2072]=0;
+  a[2073]=0;
+  a[2074]=0;
+  a[2075]=0;
+  a[2076]=0;
+  a[2077]=0;
+  a[2078]=0;
+  a[2079]=0;
+  a[2080]=0;
+  a[2081]=0;
+  a[2082]=0;
+  a[2083]=0;
+  a[2084]=0;
+  a[2085]=0;
+  a[2086]=0;
+  a[2087]=0;
+  a[2088]=0;
+  a[2089]=0;
+  a[2090]=0;
+  a[2091]=0;
+  a[2092]=0;
+  a[2093]=0;
+  a[2094]=0;
+  a[2095]=0;
+  a[2096]=0;
+  a[2097]=0;
+  a[2098]=0;
+  a[2099]=0;
+  a[2100]=0;
+  a[2101]=0;
+  a[2102]=0;
+  a[2103]=0;
+  a[2104]=0;
+  a[2105]=0;
+  a[2106]=0;
+  a[2107]=0;
+  a[2108]=0;
+  a[2109]=0;
+  a[2110]=0;
+  a[2111]=0;
+  a[2112]=0;
+  a[2113]=0;
+  a[2114]=0;
+  a[2115]=0;
+  a[2116]=0;
+  a[2117]=0;
+  a[2118]=0;
+  a[2119]=0;
+  a[2120]=0;
+  a[2121]=0;
+  a[2122]=0;
+  a[2123]=0;
+  a[2124]=0;
+  a[2125]=0;
+  a[2126]=0;
+  a[2127]=0;
+  a[2128]=0;
+  a[2129]=0;
+  a[2130]=0;
+  a[2131]=0;
+  a[2132]=0;
+  a[2133]=0;
+  a[2134]=0;
+  a[2135]=0;
+  a[2136]=0;
+  a[2137]=0;
+  a[2138]=0;
+  a[2139]=0;
+  a[2140]=0;
+  a[2141]=0;
+  a[2142]=0;
+  a[2143]=0;
+  a[2144]=0;
+  a[2145]=0;
+  a[2146]=0;
+  a[2147]=0;
+  a[2148]=0;
+  a[2149]=0;
+  a[2150]=0;
+  a[2151]=0;
+  a[2152]=0;
+  a[2153]=0;
+  a[2154]=0;
+  a[2155]=0;
+  a[2156]=0;
+  a[2157]=0;
+  a[2158]=0;
+  a[2159]=0;
+  a[2160]=0;
+  a[2161]=0;
+  a[2162]=0;
+  a[2163]=0;
+  a[2164]=0;
+  a[2165]=0;
+  a[2166]=0;
+  a[2167]=0;
+  a[2168]=0;
+  a[2169]=0;
+  a[2170]=0;
+  a[2171]=0;
+  a[2172]=0;
+  a[2173]=0;
+  a[2174]=0;
+  a[2175]=0;
+  a[2176]=0;
+  a[2177]=0;
+  a[2178]=0;
+  a[2179]=0;
+  a[2180]=0;
+  a[2181]=0;
+  a[2182]=0;
+  a[2183]=0;
+  a[2184]=0;
+  a[2185]=0;
+  a[2186]=0;
+  a[2187]=0;
+  a[2188]=0;
+  a[2189]=0;
+  a[2190]=0;
+  a[2191]=0;
+  a[2192]=0;
+  a[2193]=0;
+  a[2194]=0;
+  a[2195]=0;
+  a[2196]=0;
+  a[2197]=0;
+  a[2198]=0;
+  a[2199]=0;
+  a[2200]=0;
+  a[2201]=0;
+  a[2202]=0;
+  a[2203]=0;
+  a[2204]=0;
+  a[2205]=0;
+  a[2206]=0;
+  a[2207]=0;
+  a[2208]=0;
+  a[2209]=0;
+  a[2210]=0;
+  a[2211]=0;
+  a[2212]=0;
+  a[2213]=0;
+  a[2214]=0;
+  a[2215]=0;
+  a[2216]=0;
+  a[2217]=0;
+  a[2218]=0;
+  a[2219]=0;
+  a[2220]=0;
+  a[2221]=0;
+  a[2222]=0;
+  a[2223]=0;
+  a[2224]=0;
+  a[2225]=0;
+  a[2226]=0;
+  a[2227]=0;
+  a[2228]=0;
+  a[2229]=0;
+  a[2230]=0;
+  a[2231]=0;
+  a[2232]=0;
+  a[2233]=0;
+  a[2234]=0;
+  a[2235]=0;
+  a[2236]=0;
+  a[2237]=0;
+  a[2238]=0;
+  a[2239]=0;
+  a[2240]=0;
+  a[2241]=0;
+  a[2242]=0;
+  a[2243]=0;
+  a[2244]=0;
+  a[2245]=0;
+  a[2246]=0;
+  a[2247]=0;
+  a[2248]=0;
+  a[2249]=0;
+  a[2250]=0;
+  a[2251]=0;
+  a[2252]=0;
+  a[2253]=0;
+  a[2254]=0;
+  a[2255]=0;
+  a[2256]=0;
+  a[2257]=0;
+  a[2258]=0;
+  a[2259]=0;
+  a[2260]=0;
+  a[2261]=0;
+  a[2262]=0;
+  a[2263]=0;
+  a[2264]=0;
+  a[2265]=0;
+  a[2266]=0;
+  a[2267]=0;
+  a[2268]=0;
+  a[2269]=0;
+  a[2270]=0;
+  a[2271]=0;
+  a[2272]=0;
+  a[2273]=0;
+  a[2274]=0;
+  a[2275]=0;
+  a[2276]=0;
+  a[2277]=0;
+  a[2278]=0;
+  a[2279]=0;
+  a[2280]=0;
+  a[2281]=0;
+  a[2282]=0;
+  a[2283]=0;
+  a[2284]=0;
+  a[2285]=0;
+  a[2286]=0;
+  a[2287]=0;
+  a[2288]=0;
+  a[2289]=0;
+  a[2290]=0;
+  a[2291]=0;
+  a[2292]=0;
+  a[2293]=0;
+  a[2294]=0;
+  a[2295]=0;
+  a[2296]=0;
+  a[2297]=0;
+  a[2298]=0;
+  a[2299]=0;
+  a[2300]=0;
+  a[2301]=0;
+  a[2302]=0;
+  a[2303]=0;
+  a[2304]=0;
+  a[2305]=0;
+  a[2306]=0;
+  a[2307]=0;
+  a[2308]=0;
+  a[2309]=0;
+  a[2310]=0;
+  a[2311]=0;
+  a[2312]=0;
+  a[2313]=0;
+  a[2314]=0;
+  a[2315]=0;
+  a[2316]=0;
+  a[2317]=0;
+  a[2318]=0;
+  a[2319]=0;
+  a[2320]=0;
+  a[2321]=0;
+  a[2322]=0;
+  a[2323]=0;
+  a[2324]=0;
+  a[2325]=0;
+  a[2326]=0;
+  a[2327]=0;
+  a[2328]=0;
+  a[2329]=0;
+  a[2330]=0;
+  a[2331]=0;
+  a[2332]=0;
+  a[2333]=0;
+  a[2334]=0;
+  a[2335]=0;
+  a[2336]=0;
+  a[2337]=0;
+  a[2338]=0;
+  a[2339]=0;
+  a[2340]=0;
+  a[2341]=0;
+  a[2342]=0;
+  a[2343]=0;
+  a[2344]=0;
+  a[2345]=0;
+  a[2346]=0;
+  a[2347]=0;
+  a[2348]=0;
+  a[2349]=0;
+  a[2350]=0;
+  a[2351]=0;
+  a[2352]=0;
+  a[2353]=0;
+  a[2354]=0;
+  a[2355]=0;
+  a[2356]=0;
+  a[2357]=0;
+  a[2358]=0;
+  a[2359]=0;
+  a[2360]=0;
+  a[2361]=0;
+  a[2362]=0;
+  a[2363]=0;
+  a[2364]=0;
+  a[2365]=0;
+  a[2366]=0;
+  a[2367]=0;
+  a[2368]=0;
+  a[2369]=0;
+  a[2370]=0;
+  a[2371]=0;
+  a[2372]=0;
+  a[2373]=0;
+  a[2374]=0;
+  a[2375]=0;
+  a[2376]=0;
+  a[2377]=0;
+  a[2378]=0;
+  a[2379]=0;
+  a[2380]=0;
+  a[2381]=0;
+  a[2382]=0;
+  a[2383]=0;
+  a[2384]=0;
+  a[2385]=0;
+  a[2386]=0;
+  a[2387]=0;
+  a[2388]=0;
+  a[2389]=0;
+  a[2390]=0;
+  a[2391]=0;
+  a[2392]=0;
+  a[2393]=0;
+  a[2394]=0;
+  a[2395]=0;
+  a[2396]=0;
+  a[2397]=0;
+  a[2398]=0;
+  a[2399]=0;
+  a[2400]=0;
+  a[2401]=0;
+  a[2402]=0;
+  a[2403]=0;
+  a[2404]=0;
+  a[2405]=0;
+  a[2406]=0;
+  a[2407]=0;
+  a[2408]=0;
+  a[2409]=0;
+  a[2410]=0;
+  a[2411]=0;
+  a[2412]=0;
+  a[2413]=0;
+  a[2414]=0;
+  a[2415]=0;
+  a[2416]=0;
+  a[2417]=0;
+  a[2418]=0;
+  a[2419]=0;
+  a[2420]=0;
+  a[2421]=0;
+  a[2422]=0;
+  a[2423]=0;
+  a[2424]=0;
+  a[2425]=0;
+  a[2426]=0;
+  a[2427]=0;
+  a[2428]=0;
+  a[2429]=0;
+  a[2430]=0;
+  a[2431]=0;
+  a[2432]=0;
+  a[2433]=0;
+  a[2434]=0;
+  a[2435]=0;
+  a[2436]=0;
+  a[2437]=0;
+  a[2438]=0;
+  a[2439]=0;
+  a[2440]=0;
+  a[2441]=0;
+  a[2442]=0;
+  a[2443]=0;
+  a[2444]=0;
+  a[2445]=0;
+  a[2446]=0;
+  a[2447]=0;
+  a[2448]=0;
+  a[2449]=0;
+  a[2450]=0;
+  a[2451]=0;
+  a[2452]=0;
+  a[2453]=0;
+  a[2454]=0;
+  a[2455]=0;
+  a[2456]=0;
+  a[2457]=0;
+  a[2458]=0;
+  a[2459]=0;
+  a[2460]=0;
+  a[2461]=0;
+  a[2462]=0;
+  a[2463]=0;
+  a[2464]=0;
+  a[2465]=0;
+  a[2466]=0;
+  a[2467]=0;
+  a[2468]=0;
+  a[2469]=0;
+  a[2470]=0;
+  a[2471]=0;
+  a[2472]=0;
+  a[2473]=0;
+  a[2474]=0;
+  a[2475]=0;
+  a[2476]=0;
+  a[2477]=0;
+  a[2478]=0;
+  a[2479]=0;
+  a[2480]=0;
+  a[2481]=0;
+  a[2482]=0;
+  a[2483]=0;
+  a[2484]=0;
+  a[2485]=0;
+  a[2486]=0;
+  a[2487]=0;
+  a[2488]=0;
+  a[2489]=0;
+  a[2490]=0;
+  a[2491]=0;
+  a[2492]=0;
+  a[2493]=0;
+  a[2494]=0;
+  a[2495]=0;
+  a[2496]=0;
+  a[2497]=0;
+  a[2498]=0;
+  a[2499]=0;
+  a[2500]=0;
+  a[2501]=0;
+  a[2502]=0;
+  a[2503]=0;
+  a[2504]=0;
+  a[2505]=0;
+  a[2506]=0;
+  a[2507]=0;
+  a[2508]=0;
+  a[2509]=0;
+  a[2510]=0;
+  a[2511]=0;
+  a[2512]=0;
+  a[2513]=0;
+  a[2514]=0;
+  a[2515]=0;
+  a[2516]=0;
+  a[2517]=0;
+  a[2518]=0;
+  a[2519]=0;
+  a[2520]=0;
+  a[2521]=0;
+  a[2522]=0;
+  a[2523]=0;
+  a[2524]=0;
+  a[2525]=0;
+  a[2526]=0;
+  a[2527]=0;
+  a[2528]=0;
+  a[2529]=0;
+  a[2530]=0;
+  a[2531]=0;
+  a[2532]=0;
+  a[2533]=0;
+  a[2534]=0;
+  a[2535]=0;
+  a[2536]=0;
+  a[2537]=0;
+  a[2538]=0;
+  a[2539]=0;
+  a[2540]=0;
+  a[2541]=0;
+  a[2542]=0;
+  a[2543]=0;
+  a[2544]=0;
+  a[2545]=0;
+  a[2546]=0;
+  a[2547]=0;
+  a[2548]=0;
+  a[2549]=0;
+  a[2550]=0;
+  a[2551]=0;
+  a[2552]=0;
+  a[2553]=0;
+  a[2554]=0;
+  a[2555]=0;
+  a[2556]=0;
+  a[2557]=0;
+  a[2558]=0;
+  a[2559]=0;
+  a[2560]=0;
+  a[2561]=0;
+  a[2562]=0;
+  a[2563]=0;
+  a[2564]=0;
+  a[2565]=0;
+  a[2566]=0;
+  a[2567]=0;
+  a[2568]=0;
+  a[2569]=0;
+  a[2570]=0;
+  a[2571]=0;
+  a[2572]=0;
+  a[2573]=0;
+  a[2574]=0;
+  a[2575]=0;
+  a[2576]=0;
+  a[2577]=0;
+  a[2578]=0;
+  a[2579]=0;
+  a[2580]=0;
+  a[2581]=0;
+  a[2582]=0;
+  a[2583]=0;
+  a[2584]=0;
+  a[2585]=0;
+  a[2586]=0;
+  a[2587]=0;
+  a[2588]=0;
+  a[2589]=0;
+  a[2590]=0;
+  a[2591]=0;
+  a[2592]=0;
+  a[2593]=0;
+  a[2594]=0;
+  a[2595]=0;
+  a[2596]=0;
+  a[2597]=0;
+  a[2598]=0;
+  a[2599]=0;
+  a[2600]=0;
+  a[2601]=0;
+  a[2602]=0;
+  a[2603]=0;
+  a[2604]=0;
+  a[2605]=0;
+  a[2606]=0;
+  a[2607]=0;
+  a[2608]=0;
+  a[2609]=0;
+  a[2610]=0;
+  a[2611]=0;
+  a[2612]=0;
+  a[2613]=0;
+  a[2614]=0;
+  a[2615]=0;
+  a[2616]=0;
+  a[2617]=0;
+  a[2618]=0;
+  a[2619]=0;
+  a[2620]=0;
+  a[2621]=0;
+  a[2622]=0;
+  a[2623]=0;
+  a[2624]=0;
+  a[2625]=0;
+  a[2626]=0;
+  a[2627]=0;
+  a[2628]=0;
+  a[2629]=0;
+  a[2630]=0;
+  a[2631]=0;
+  a[2632]=0;
+  a[2633]=0;
+  a[2634]=0;
+  a[2635]=0;
+  a[2636]=0;
+  a[2637]=0;
+  a[2638]=0;
+  a[2639]=0;
+  a[2640]=0;
+  a[2641]=0;
+  a[2642]=0;
+  a[2643]=0;
+  a[2644]=0;
+  a[2645]=0;
+  a[2646]=0;
+  a[2647]=0;
+  a[2648]=0;
+  a[2649]=0;
+  a[2650]=0;
+  a[2651]=0;
+  a[2652]=0;
+  a[2653]=0;
+  a[2654]=0;
+  a[2655]=0;
+  a[2656]=0;
+  a[2657]=0;
+  a[2658]=0;
+  a[2659]=0;
+  a[2660]=0;
+  a[2661]=0;
+  a[2662]=0;
+  a[2663]=0;
+  a[2664]=0;
+  a[2665]=0;
+  a[2666]=0;
+  a[2667]=0;
+  a[2668]=0;
+  a[2669]=0;
+  a[2670]=0;
+  a[2671]=0;
+  a[2672]=0;
+  a[2673]=0;
+  a[2674]=0;
+  a[2675]=0;
+  a[2676]=0;
+  a[2677]=0;
+  a[2678]=0;
+  a[2679]=0;
+  a[2680]=0;
+  a[2681]=0;
+  a[2682]=0;
+  a[2683]=0;
+  a[2684]=0;
+  a[2685]=0;
+  a[2686]=0;
+  a[2687]=0;
+  a[2688]=0;
+  a[2689]=0;
+  a[2690]=0;
+  a[2691]=0;
+  a[2692]=0;
+  a[2693]=0;
+  a[2694]=0;
+  a[2695]=0;
+  a[2696]=0;
+  a[2697]=0;
+  a[2698]=0;
+  a[2699]=0;
+  a[2700]=0;
+  a[2701]=0;
+  a[2702]=0;
+  a[2703]=0;
+  a[2704]=0;
+  a[2705]=0;
+  a[2706]=0;
+  a[2707]=0;
+  a[2708]=0;
+  a[2709]=0;
+  a[2710]=0;
+  a[2711]=0;
+  a[2712]=0;
+  a[2713]=0;
+  a[2714]=0;
+  a[2715]=0;
+  a[2716]=0;
+  a[2717]=0;
+  a[2718]=0;
+  a[2719]=0;
+  a[2720]=0;
+  a[2721]=0;
+  a[2722]=0;
+  a[2723]=0;
+  a[2724]=0;
+  a[2725]=0;
+  a[2726]=0;
+  a[2727]=0;
+  a[2728]=0;
+  a[2729]=0;
+  a[2730]=0;
+  a[2731]=0;
+  a[2732]=0;
+  a[2733]=0;
+  a[2734]=0;
+  a[2735]=0;
+  a[2736]=0;
+  a[2737]=0;
+  a[2738]=0;
+  a[2739]=0;
+  a[2740]=0;
+  a[2741]=0;
+  a[2742]=0;
+  a[2743]=0;
+  a[2744]=0;
+  a[2745]=0;
+  a[2746]=0;
+  a[2747]=0;
+  a[2748]=0;
+  a[2749]=0;
+  a[2750]=0;
+  a[2751]=0;
+  a[2752]=0;
+  a[2753]=0;
+  a[2754]=0;
+  a[2755]=0;
+  a[2756]=0;
+  a[2757]=0;
+  a[2758]=0;
+  a[2759]=0;
+  a[2760]=0;
+  a[2761]=0;
+  a[2762]=0;
+  a[2763]=0;
+  a[2764]=0;
+  a[2765]=0;
+  a[2766]=0;
+  a[2767]=0;
+  a[2768]=0;
+  a[2769]=0;
+  a[2770]=0;
+  a[2771]=0;
+  a[2772]=0;
+  a[2773]=0;
+  a[2774]=0;
+  a[2775]=0;
+  a[2776]=0;
+  a[2777]=0;
+  a[2778]=0;
+  a[2779]=0;
+  a[2780]=0;
+  a[2781]=0;
+  a[2782]=0;
+  a[2783]=0;
+  a[2784]=0;
+  a[2785]=0;
+  a[2786]=0;
+  a[2787]=0;
+  a[2788]=0;
+  a[2789]=0;
+  a[2790]=0;
+  a[2791]=0;
+  a[2792]=0;
+  a[2793]=0;
+  a[2794]=0;
+  a[2795]=0;
+  a[2796]=0;
+  a[2797]=0;
+  a[2798]=0;
+  a[2799]=0;
+  a[2800]=0;
+  a[2801]=0;
+  a[2802]=0;
+  a[2803]=0;
+  a[2804]=0;
+  a[2805]=0;
+  a[2806]=0;
+  a[2807]=0;
+  a[2808]=0;
+  a[2809]=0;
+  a[2810]=0;
+  a[2811]=0;
+  a[2812]=0;
+  a[2813]=0;
+  a[2814]=0;
+  a[2815]=0;
+  a[2816]=0;
+  a[2817]=0;
+  a[2818]=0;
+  a[2819]=0;
+  a[2820]=0;
+  a[2821]=0;
+  a[2822]=0;
+  a[2823]=0;
+  a[2824]=0;
+  a[2825]=0;
+  a[2826]=0;
+  a[2827]=0;
+  a[2828]=0;
+  a[2829]=0;
+  a[2830]=0;
+  a[2831]=0;
+  a[2832]=0;
+  a[2833]=0;
+  a[2834]=0;
+  a[2835]=0;
+  a[2836]=0;
+  a[2837]=0;
+  a[2838]=0;
+  a[2839]=0;
+  a[2840]=0;
+  a[2841]=0;
+  a[2842]=0;
+  a[2843]=0;
+  a[2844]=0;
+  a[2845]=0;
+  a[2846]=0;
+  a[2847]=0;
+  a[2848]=0;
+  a[2849]=0;
+  a[2850]=0;
+  a[2851]=0;
+  a[2852]=0;
+  a[2853]=0;
+  a[2854]=0;
+  a[2855]=0;
+  a[2856]=0;
+  a[2857]=0;
+  a[2858]=0;
+  a[2859]=0;
+  a[2860]=0;
+  a[2861]=0;
+  a[2862]=0;
+  a[2863]=0;
+  a[2864]=0;
+  a[2865]=0;
+  a[2866]=0;
+  a[2867]=0;
+  a[2868]=0;
+  a[2869]=0;
+  a[2870]=0;
+  a[2871]=0;
+  a[2872]=0;
+  a[2873]=0;
+  a[2874]=0;
+  a[2875]=0;
+  a[2876]=0;
+  a[2877]=0;
+  a[2878]=0;
+  a[2879]=0;
+  a[2880]=0;
+  a[2881]=0;
+  a[2882]=0;
+  a[2883]=0;
+  a[2884]=0;
+  a[2885]=0;
+  a[2886]=0;
+  a[2887]=0;
+  a[2888]=0;
+  a[2889]=0;
+  a[2890]=0;
+  a[2891]=0;
+  a[2892]=0;
+  a[2893]=0;
+  a[2894]=0;
+  a[2895]=0;
+  a[2896]=0;
+  a[2897]=0;
+  a[2898]=0;
+  a[2899]=0;
+  a[2900]=0;
+  a[2901]=0;
+  a[2902]=0;
+  a[2903]=0;
+  a[2904]=0;
+  a[2905]=0;
+  a[2906]=0;
+  a[2907]=0;
+  a[2908]=0;
+  a[2909]=0;
+  a[2910]=0;
+  a[2911]=0;
+  a[2912]=0;
+  a[2913]=0;
+  a[2914]=0;
+  a[2915]=0;
+  a[2916]=0;
+  a[2917]=0;
+  a[2918]=0;
+  a[2919]=0;
+  a[2920]=0;
+  a[2921]=0;
+  a[2922]=0;
+  a[2923]=0;
+  a[2924]=0;
+  a[2925]=0;
+  a[2926]=0;
+  a[2927]=0;
+  a[2928]=0;
+  a[2929]=0;
+  a[2930]=0;
+  a[2931]=0;
+  a[2932]=0;
+  a[2933]=0;
+  a[2934]=0;
+  a[2935]=0;
+  a[2936]=0;
+  a[2937]=0;
+  a[2938]=0;
+  a[2939]=0;
+  a[2940]=0;
+  a[2941]=0;
+  a[2942]=0;
+  a[2943]=0;
+  a[2944]=0;
+  a[2945]=0;
+  a[2946]=0;
+  a[2947]=0;
+  a[2948]=0;
+  a[2949]=0;
+  a[2950]=0;
+  a[2951]=0;
+  a[2952]=0;
+  a[2953]=0;
+  a[2954]=0;
+  a[2955]=0;
+  a[2956]=0;
+  a[2957]=0;
+  a[2958]=0;
+  a[2959]=0;
+  a[2960]=0;
+  a[2961]=0;
+  a[2962]=0;
+  a[2963]=0;
+  a[2964]=0;
+  a[2965]=0;
+  a[2966]=0;
+  a[2967]=0;
+  a[2968]=0;
+  a[2969]=0;
+  a[2970]=0;
+  a[2971]=0;
+  a[2972]=0;
+  a[2973]=0;
+  a[2974]=0;
+  a[2975]=0;
+  a[2976]=0;
+  a[2977]=0;
+  a[2978]=0;
+  a[2979]=0;
+  a[2980]=0;
+  a[2981]=0;
+  a[2982]=0;
+  a[2983]=0;
+  a[2984]=0;
+  a[2985]=0;
+  a[2986]=0;
+  a[2987]=0;
+  a[2988]=0;
+  a[2989]=0;
+  a[2990]=0;
+  a[2991]=0;
+  a[2992]=0;
+  a[2993]=0;
+  a[2994]=0;
+  a[2995]=0;
+  a[2996]=0;
+  a[2997]=0;
+  a[2998]=0;
+  a[2999]=0;
+  a[3000]=0;
+  a[3001]=0;
+  a[3002]=0;
+  a[3003]=0;
+  a[3004]=0;
+  a[3005]=0;
+  a[3006]=0;
+  a[3007]=0;
+  a[3008]=0;
+  a[3009]=0;
+  a[3010]=0;
+  a[3011]=0;
+  a[3012]=0;
+  a[3013]=0;
+  a[3014]=0;
+  a[3015]=0;
+  a[3016]=0;
+  a[3017]=0;
+  a[3018]=0;
+  a[3019]=0;
+  a[3020]=0;
+  a[3021]=0;
+  a[3022]=0;
+  a[3023]=0;
+  a[3024]=0;
+  a[3025]=0;
+  a[3026]=0;
+  a[3027]=0;
+  a[3028]=0;
+  a[3029]=0;
+  a[3030]=0;
+  a[3031]=0;
+  a[3032]=0;
+  a[3033]=0;
+  a[3034]=0;
+  a[3035]=0;
+  a[3036]=0;
+  a[3037]=0;
+  a[3038]=0;
+  a[3039]=0;
+  a[3040]=0;
+  a[3041]=0;
+  a[3042]=0;
+  a[3043]=0;
+  a[3044]=0;
+  a[3045]=0;
+  a[3046]=0;
+  a[3047]=0;
+  a[3048]=0;
+  a[3049]=0;
+  a[3050]=0;
+  a[3051]=0;
+  a[3052]=0;
+  a[3053]=0;
+  a[3054]=0;
+  a[3055]=0;
+  a[3056]=0;
+  a[3057]=0;
+  a[3058]=0;
+  a[3059]=0;
+  a[3060]=0;
+  a[3061]=0;
+  a[3062]=0;
+  a[3063]=0;
+  a[3064]=0;
+  a[3065]=0;
+  a[3066]=0;
+  a[3067]=0;
+  a[3068]=0;
+  a[3069]=0;
+  a[3070]=0;
+  a[3071]=0;
+  a[3072]=0;
+  a[3073]=0;
+  a[3074]=0;
+  a[3075]=0;
+  a[3076]=0;
+  a[3077]=0;
+  a[3078]=0;
+  a[3079]=0;
+  a[3080]=0;
+  a[3081]=0;
+  a[3082]=0;
+  a[3083]=0;
+  a[3084]=0;
+  a[3085]=0;
+  a[3086]=0;
+  a[3087]=0;
+  a[3088]=0;
+  a[3089]=0;
+  a[3090]=0;
+  a[3091]=0;
+  a[3092]=0;
+  a[3093]=0;
+  a[3094]=0;
+  a[3095]=0;
+  a[3096]=0;
+  a[3097]=0;
+  a[3098]=0;
+  a[3099]=0;
+  a[3100]=0;
+  a[3101]=0;
+  a[3102]=0;
+  a[3103]=0;
+  a[3104]=0;
+  a[3105]=0;
+  a[3106]=0;
+  a[3107]=0;
+  a[3108]=0;
+  a[3109]=0;
+  a[3110]=0;
+  a[3111]=0;
+  a[3112]=0;
+  a[3113]=0;
+  a[3114]=0;
+  a[3115]=0;
+  a[3116]=0;
+  a[3117]=0;
+  a[3118]=0;
+  a[3119]=0;
+  a[3120]=0;
+  a[3121]=0;
+  a[3122]=0;
+  a[3123]=0;
+  a[3124]=0;
+  a[3125]=0;
+  a[3126]=0;
+  a[3127]=0;
+  a[3128]=0;
+  a[3129]=0;
+  a[3130]=0;
+  a[3131]=0;
+  a[3132]=0;
+  a[3133]=0;
+  a[3134]=0;
+  a[3135]=0;
+  a[3136]=0;
+  a[3137]=0;
+  a[3138]=0;
+  a[3139]=0;
+  a[3140]=0;
+  a[3141]=0;
+  a[3142]=0;
+  a[3143]=0;
+  a[3144]=0;
+  a[3145]=0;
+  a[3146]=0;
+  a[3147]=0;
+  a[3148]=0;
+  a[3149]=0;
+  a[3150]=0;
+  a[3151]=0;
+  a[3152]=0;
+  a[3153]=0;
+  a[3154]=0;
+  a[3155]=0;
+  a[3156]=0;
+  a[3157]=0;
+  a[3158]=0;
+  a[3159]=0;
+  a[3160]=0;
+  a[3161]=0;
+  a[3162]=0;
+  a[3163]=0;
+  a[3164]=0;
+  a[3165]=0;
+  a[3166]=0;
+  a[3167]=0;
+  a[3168]=0;
+  a[3169]=0;
+  a[3170]=0;
+  a[3171]=0;
+  a[3172]=0;
+  a[3173]=0;
+  a[3174]=0;
+  a[3175]=0;
+  a[3176]=0;
+  a[3177]=0;
+  a[3178]=0;
+  a[3179]=0;
+  a[3180]=0;
+  a[3181]=0;
+  a[3182]=0;
+  a[3183]=0;
+  a[3184]=0;
+  a[3185]=0;
+  a[3186]=0;
+  a[3187]=0;
+  a[3188]=0;
+  a[3189]=0;
+  a[3190]=0;
+  a[3191]=0;
+  a[3192]=0;
+  a[3193]=0;
+  a[3194]=0;
+  a[3195]=0;
+  a[3196]=0;
+  a[3197]=0;
+  a[3198]=0;
+  a[3199]=0;
+  a[3200]=0;
+  a[3201]=0;
+  a[3202]=0;
+  a[3203]=0;
+  a[3204]=0;
+  a[3205]=0;
+  a[3206]=0;
+  a[3207]=0;
+  a[3208]=0;
+  a[3209]=0;
+  a[3210]=0;
+  a[3211]=0;
+  a[3212]=0;
+  a[3213]=0;
+  a[3214]=0;
+  a[3215]=0;
+  a[3216]=0;
+  a[3217]=0;
+  a[3218]=0;
+  a[3219]=0;
+  a[3220]=0;
+  a[3221]=0;
+  a[3222]=0;
+  a[3223]=0;
+  a[3224]=0;
+  a[3225]=0;
+  a[3226]=0;
+  a[3227]=0;
+  a[3228]=0;
+  a[3229]=0;
+  a[3230]=0;
+  a[3231]=0;
+  a[3232]=0;
+  a[3233]=0;
+  a[3234]=0;
+  a[3235]=0;
+  a[3236]=0;
+  a[3237]=0;
+  a[3238]=0;
+  a[3239]=0;
+  a[3240]=0;
+  a[3241]=0;
+  a[3242]=0;
+  a[3243]=0;
+  a[3244]=0;
+  a[3245]=0;
+  a[3246]=0;
+  a[3247]=0;
+  a[3248]=0;
+  a[3249]=0;
+  a[3250]=0;
+  a[3251]=0;
+  a[3252]=0;
+  a[3253]=0;
+  a[3254]=0;
+  a[3255]=0;
+  a[3256]=0;
+  a[3257]=0;
+  a[3258]=0;
+  a[3259]=0;
+  a[3260]=0;
+  a[3261]=0;
+  a[3262]=0;
+  a[3263]=0;
+  a[3264]=0;
+  a[3265]=0;
+  a[3266]=0;
+  a[3267]=0;
+  a[3268]=0;
+  a[3269]=0;
+  a[3270]=0;
+  a[3271]=0;
+  a[3272]=0;
+  a[3273]=0;
+  a[3274]=0;
+  a[3275]=0;
+  a[3276]=0;
+  a[3277]=0;
+  a[3278]=0;
+  a[3279]=0;
+  a[3280]=0;
+  a[3281]=0;
+  a[3282]=0;
+  a[3283]=0;
+  a[3284]=0;
+  a[3285]=0;
+  a[3286]=0;
+  a[3287]=0;
+  a[3288]=0;
+  a[3289]=0;
+  a[3290]=0;
+  a[3291]=0;
+  a[3292]=0;
+  a[3293]=0;
+  a[3294]=0;
+  a[3295]=0;
+  a[3296]=0;
+  a[3297]=0;
+  a[3298]=0;
+  a[3299]=0;
+  a[3300]=0;
+  a[3301]=0;
+  a[3302]=0;
+  a[3303]=0;
+  a[3304]=0;
+  a[3305]=0;
+  a[3306]=0;
+  a[3307]=0;
+  a[3308]=0;
+  a[3309]=0;
+  a[3310]=0;
+  a[3311]=0;
+  a[3312]=0;
+  a[3313]=0;
+  a[3314]=0;
+  a[3315]=0;
+  a[3316]=0;
+  a[3317]=0;
+  a[3318]=0;
+  a[3319]=0;
+  a[3320]=0;
+  a[3321]=0;
+  a[3322]=0;
+  a[3323]=0;
+  a[3324]=0;
+  a[3325]=0;
+  a[3326]=0;
+  a[3327]=0;
+  a[3328]=0;
+  a[3329]=0;
+  a[3330]=0;
+  a[3331]=0;
+  a[3332]=0;
+  a[3333]=0;
+  a[3334]=0;
+  a[3335]=0;
+  a[3336]=0;
+  a[3337]=0;
+  a[3338]=0;
+  a[3339]=0;
+  a[3340]=0;
+  a[3341]=0;
+  a[3342]=0;
+  a[3343]=0;
+  a[3344]=0;
+  a[3345]=0;
+  a[3346]=0;
+  a[3347]=0;
+  a[3348]=0;
+  a[3349]=0;
+  a[3350]=0;
+  a[3351]=0;
+  a[3352]=0;
+  a[3353]=0;
+  a[3354]=0;
+  a[3355]=0;
+  a[3356]=0;
+  a[3357]=0;
+  a[3358]=0;
+  a[3359]=0;
+  a[3360]=0;
+  a[3361]=0;
+  a[3362]=0;
+  a[3363]=0;
+  a[3364]=0;
+  a[3365]=0;
+  a[3366]=0;
+  a[3367]=0;
+  a[3368]=0;
+  a[3369]=0;
+  a[3370]=0;
+  a[3371]=0;
+  a[3372]=0;
+  a[3373]=0;
+  a[3374]=0;
+  a[3375]=0;
+  a[3376]=0;
+  a[3377]=0;
+  a[3378]=0;
+  a[3379]=0;
+  a[3380]=0;
+  a[3381]=0;
+  a[3382]=0;
+  a[3383]=0;
+  a[3384]=0;
+  a[3385]=0;
+  a[3386]=0;
+  a[3387]=0;
+  a[3388]=0;
+  a[3389]=0;
+  a[3390]=0;
+  a[3391]=0;
+  a[3392]=0;
+  a[3393]=0;
+  a[3394]=0;
+  a[3395]=0;
+  a[3396]=0;
+  a[3397]=0;
+  a[3398]=0;
+  a[3399]=0;
+  a[3400]=0;
+  a[3401]=0;
+  a[3402]=0;
+  a[3403]=0;
+  a[3404]=0;
+  a[3405]=0;
+  a[3406]=0;
+  a[3407]=0;
+  a[3408]=0;
+  a[3409]=0;
+  a[3410]=0;
+  a[3411]=0;
+  a[3412]=0;
+  a[3413]=0;
+  a[3414]=0;
+  a[3415]=0;
+  a[3416]=0;
+  a[3417]=0;
+  a[3418]=0;
+  a[3419]=0;
+  a[3420]=0;
+  a[3421]=0;
+  a[3422]=0;
+  a[3423]=0;
+  a[3424]=0;
+  a[3425]=0;
+  a[3426]=0;
+  a[3427]=0;
+  a[3428]=0;
+  a[3429]=0;
+  a[3430]=0;
+  a[3431]=0;
+  a[3432]=0;
+  a[3433]=0;
+  a[3434]=0;
+  a[3435]=0;
+  a[3436]=0;
+  a[3437]=0;
+  a[3438]=0;
+  a[3439]=0;
+  a[3440]=0;
+  a[3441]=0;
+  a[3442]=0;
+  a[3443]=0;
+  a[3444]=0;
+  a[3445]=0;
+  a[3446]=0;
+  a[3447]=0;
+  a[3448]=0;
+  a[3449]=0;
+  a[3450]=0;
+  a[3451]=0;
+  a[3452]=0;
+  a[3453]=0;
+  a[3454]=0;
+  a[3455]=0;
+  a[3456]=0;
+  a[3457]=0;
+  a[3458]=0;
+  a[3459]=0;
+  a[3460]=0;
+  a[3461]=0;
+  a[3462]=0;
+  a[3463]=0;
+  a[3464]=0;
+  a[3465]=0;
+  a[3466]=0;
+  a[3467]=0;
+  a[3468]=0;
+  a[3469]=0;
+  a[3470]=0;
+  a[3471]=0;
+  a[3472]=0;
+  a[3473]=0;
+  a[3474]=0;
+  a[3475]=0;
+  a[3476]=0;
+  a[3477]=0;
+  a[3478]=0;
+  a[3479]=0;
+  a[3480]=0;
+  a[3481]=0;
+  a[3482]=0;
+  a[3483]=0;
+  a[3484]=0;
+  a[3485]=0;
+  a[3486]=0;
+  a[3487]=0;
+  a[3488]=0;
+  a[3489]=0;
+  a[3490]=0;
+  a[3491]=0;
+  a[3492]=0;
+  a[3493]=0;
+  a[3494]=0;
+  a[3495]=0;
+  a[3496]=0;
+  a[3497]=0;
+  a[3498]=0;
+  a[3499]=0;
+  a[3500]=0;
+  a[3501]=0;
+  a[3502]=0;
+  a[3503]=0;
+  a[3504]=0;
+  a[3505]=0;
+  a[3506]=0;
+  a[3507]=0;
+  a[3508]=0;
+  a[3509]=0;
+  a[3510]=0;
+  a[3511]=0;
+  a[3512]=0;
+  a[3513]=0;
+  a[3514]=0;
+  a[3515]=0;
+  a[3516]=0;
+  a[3517]=0;
+  a[3518]=0;
+  a[3519]=0;
+  a[3520]=0;
+  a[3521]=0;
+  a[3522]=0;
+  a[3523]=0;
+  a[3524]=0;
+  a[3525]=0;
+  a[3526]=0;
+  a[3527]=0;
+  a[3528]=0;
+  a[3529]=0;
+  a[3530]=0;
+  a[3531]=0;
+  a[3532]=0;
+  a[3533]=0;
+  a[3534]=0;
+  a[3535]=0;
+  a[3536]=0;
+  a[3537]=0;
+  a[3538]=0;
+  a[3539]=0;
+  a[3540]=0;
+  a[3541]=0;
+  a[3542]=0;
+  a[3543]=0;
+  a[3544]=0;
+  a[3545]=0;
+  a[3546]=0;
+  a[3547]=0;
+  a[3548]=0;
+  a[3549]=0;
+  a[3550]=0;
+  a[3551]=0;
+  a[3552]=0;
+  a[3553]=0;
+  a[3554]=0;
+  a[3555]=0;
+  a[3556]=0;
+  a[3557]=0;
+  a[3558]=0;
+  a[3559]=0;
+  a[3560]=0;
+  a[3561]=0;
+  a[3562]=0;
+  a[3563]=0;
+  a[3564]=0;
+  a[3565]=0;
+  a[3566]=0;
+  a[3567]=0;
+  a[3568]=0;
+  a[3569]=0;
+  a[3570]=0;
+  a[3571]=0;
+  a[3572]=0;
+  a[3573]=0;
+  a[3574]=0;
+  a[3575]=0;
+  a[3576]=0;
+  a[3577]=0;
+  a[3578]=0;
+  a[3579]=0;
+  a[3580]=0;
+  a[3581]=0;
+  a[3582]=0;
+  a[3583]=0;
+  a[3584]=0;
+  a[3585]=0;
+  a[3586]=0;
+  a[3587]=0;
+  a[3588]=0;
+  a[3589]=0;
+  a[3590]=0;
+  a[3591]=0;
+  a[3592]=0;
+  a[3593]=0;
+  a[3594]=0;
+  a[3595]=0;
+  a[3596]=0;
+  a[3597]=0;
+  a[3598]=0;
+  a[3599]=0;
+  a[3600]=0;
+  a[3601]=0;
+  a[3602]=0;
+  a[3603]=0;
+  a[3604]=0;
+  a[3605]=0;
+  a[3606]=0;
+  a[3607]=0;
+  a[3608]=0;
+  a[3609]=0;
+  a[3610]=0;
+  a[3611]=0;
+  a[3612]=0;
+  a[3613]=0;
+  a[3614]=0;
+  a[3615]=0;
+  a[3616]=0;
+  a[3617]=0;
+  a[3618]=0;
+  a[3619]=0;
+  a[3620]=0;
+  a[3621]=0;
+  a[3622]=0;
+  a[3623]=0;
+  a[3624]=0;
+  a[3625]=0;
+  a[3626]=0;
+  a[3627]=0;
+  a[3628]=0;
+  a[3629]=0;
+  a[3630]=0;
+  a[3631]=0;
+  a[3632]=0;
+  a[3633]=0;
+  a[3634]=0;
+  a[3635]=0;
+  a[3636]=0;
+  a[3637]=0;
+  a[3638]=0;
+  a[3639]=0;
+  a[3640]=0;
+  a[3641]=0;
+  a[3642]=0;
+  a[3643]=0;
+  a[3644]=0;
+  a[3645]=0;
+  a[3646]=0;
+  a[3647]=0;
+  a[3648]=0;
+  a[3649]=0;
+  a[3650]=0;
+  a[3651]=0;
+  a[3652]=0;
+  a[3653]=0;
+  a[3654]=0;
+  a[3655]=0;
+  a[3656]=0;
+  a[3657]=0;
+  a[3658]=0;
+  a[3659]=0;
+  a[3660]=0;
+  a[3661]=0;
+  a[3662]=0;
+  a[3663]=0;
+  a[3664]=0;
+  a[3665]=0;
+  a[3666]=0;
+  a[3667]=0;
+  a[3668]=0;
+  a[3669]=0;
+  a[3670]=0;
+  a[3671]=0;
+  a[3672]=0;
+  a[3673]=0;
+  a[3674]=0;
+  a[3675]=0;
+  a[3676]=0;
+  a[3677]=0;
+  a[3678]=0;
+  a[3679]=0;
+  a[3680]=0;
+  a[3681]=0;
+  a[3682]=0;
+  a[3683]=0;
+  a[3684]=0;
+  a[3685]=0;
+  a[3686]=0;
+  a[3687]=0;
+  a[3688]=0;
+  a[3689]=0;
+  a[3690]=0;
+  a[3691]=0;
+  a[3692]=0;
+  a[3693]=0;
+  a[3694]=0;
+  a[3695]=0;
+  a[3696]=0;
+  a[3697]=0;
+  a[3698]=0;
+  a[3699]=0;
+  a[3700]=0;
+  a[3701]=0;
+  a[3702]=0;
+  a[3703]=0;
+  a[3704]=0;
+  a[3705]=0;
+  a[3706]=0;
+  a[3707]=0;
+  a[3708]=0;
+  a[3709]=0;
+  a[3710]=0;
+  a[3711]=0;
+  a[3712]=0;
+  a[3713]=0;
+  a[3714]=0;
+  a[3715]=0;
+  a[3716]=0;
+  a[3717]=0;
+  a[3718]=0;
+  a[3719]=0;
+  a[3720]=0;
+  a[3721]=0;
+  a[3722]=0;
+  a[3723]=0;
+  a[3724]=0;
+  a[3725]=0;
+  a[3726]=0;
+  a[3727]=0;
+  a[3728]=0;
+  a[3729]=0;
+  a[3730]=0;
+  a[3731]=0;
+  a[3732]=0;
+  a[3733]=0;
+  a[3734]=0;
+  a[3735]=0;
+  a[3736]=0;
+  a[3737]=0;
+  a[3738]=0;
+  a[3739]=0;
+  a[3740]=0;
+  a[3741]=0;
+  a[3742]=0;
+  a[3743]=0;
+  a[3744]=0;
+  a[3745]=0;
+  a[3746]=0;
+  a[3747]=0;
+  a[3748]=0;
+  a[3749]=0;
+  a[3750]=0;
+  a[3751]=0;
+  a[3752]=0;
+  a[3753]=0;
+  a[3754]=0;
+  a[3755]=0;
+  a[3756]=0;
+  a[3757]=0;
+  a[3758]=0;
+  a[3759]=0;
+  a[3760]=0;
+  a[3761]=0;
+  a[3762]=0;
+  a[3763]=0;
+  a[3764]=0;
+  a[3765]=0;
+  a[3766]=0;
+  a[3767]=0;
+  a[3768]=0;
+  a[3769]=0;
+  a[3770]=0;
+  a[3771]=0;
+  a[3772]=0;
+  a[3773]=0;
+  a[3774]=0;
+  a[3775]=0;
+  a[3776]=0;
+  a[3777]=0;
+  a[3778]=0;
+  a[3779]=0;
+  a[3780]=0;
+  a[3781]=0;
+  a[3782]=0;
+  a[3783]=0;
+  a[3784]=0;
+  a[3785]=0;
+  a[3786]=0;
+  a[3787]=0;
+  a[3788]=0;
+  a[3789]=0;
+  a[3790]=0;
+  a[3791]=0;
+  a[3792]=0;
+  a[3793]=0;
+  a[3794]=0;
+  a[3795]=0;
+  a[3796]=0;
+  a[3797]=0;
+  a[3798]=0;
+  a[3799]=0;
+  a[3800]=0;
+  a[3801]=0;
+  a[3802]=0;
+  a[3803]=0;
+  a[3804]=0;
+  a[3805]=0;
+  a[3806]=0;
+  a[3807]=0;
+  a[3808]=0;
+  a[3809]=0;
+  a[3810]=0;
+  a[3811]=0;
+  a[3812]=0;
+  a[3813]=0;
+  a[3814]=0;
+  a[3815]=0;
+  a[3816]=0;
+  a[3817]=0;
+  a[3818]=0;
+  a[3819]=0;
+  a[3820]=0;
+  a[3821]=0;
+  a[3822]=0;
+  a[3823]=0;
+  a[3824]=0;
+  a[3825]=0;
+  a[3826]=0;
+  a[3827]=0;
+  a[3828]=0;
+  a[3829]=0;
+  a[3830]=0;
+  a[3831]=0;
+  a[3832]=0;
+  a[3833]=0;
+  a[3834]=0;
+  a[3835]=0;
+  a[3836]=0;
+  a[3837]=0;
+  a[3838]=0;
+  a[3839]=0;
+  a[3840]=0;
+  a[3841]=0;
+  a[3842]=0;
+  a[3843]=0;
+  a[3844]=0;
+  a[3845]=0;
+  a[3846]=0;
+  a[3847]=0;
+  a[3848]=0;
+  a[3849]=0;
+  a[3850]=0;
+  a[3851]=0;
+  a[3852]=0;
+  a[3853]=0;
+  a[3854]=0;
+  a[3855]=0;
+  a[3856]=0;
+  a[3857]=0;
+  a[3858]=0;
+  a[3859]=0;
+  a[3860]=0;
+  a[3861]=0;
+  a[3862]=0;
+  a[3863]=0;
+  a[3864]=0;
+  a[3865]=0;
+  a[3866]=0;
+  a[3867]=0;
+  a[3868]=0;
+  a[3869]=0;
+  a[3870]=0;
+  a[3871]=0;
+  a[3872]=0;
+  a[3873]=0;
+  a[3874]=0;
+  a[3875]=0;
+  a[3876]=0;
+  a[3877]=0;
+  a[3878]=0;
+  a[3879]=0;
+  a[3880]=0;
+  a[3881]=0;
+  a[3882]=0;
+  a[3883]=0;
+  a[3884]=0;
+  a[3885]=0;
+  a[3886]=0;
+  a[3887]=0;
+  a[3888]=0;
+  a[3889]=0;
+  a[3890]=0;
+  a[3891]=0;
+  a[3892]=0;
+  a[3893]=0;
+  a[3894]=0;
+  a[3895]=0;
+  a[3896]=0;
+  a[3897]=0;
+  a[3898]=0;
+  a[3899]=0;
+  a[3900]=0;
+  a[3901]=0;
+  a[3902]=0;
+  a[3903]=0;
+  a[3904]=0;
+  a[3905]=0;
+  a[3906]=0;
+  a[3907]=0;
+  a[3908]=0;
+  a[3909]=0;
+  a[3910]=0;
+  a[3911]=0;
+  a[3912]=0;
+  a[3913]=0;
+  a[3914]=0;
+  a[3915]=0;
+  a[3916]=0;
+  a[3917]=0;
+  a[3918]=0;
+  a[3919]=0;
+  a[3920]=0;
+  a[3921]=0;
+  a[3922]=0;
+  a[3923]=0;
+  a[3924]=0;
+  a[3925]=0;
+  a[3926]=0;
+  a[3927]=0;
+  a[3928]=0;
+  a[3929]=0;
+  a[3930]=0;
+  a[3931]=0;
+  a[3932]=0;
+  a[3933]=0;
+  a[3934]=0;
+  a[3935]=0;
+  a[3936]=0;
+  a[3937]=0;
+  a[3938]=0;
+  a[3939]=0;
+  a[3940]=0;
+  a[3941]=0;
+  a[3942]=0;
+  a[3943]=0;
+  a[3944]=0;
+  a[3945]=0;
+  a[3946]=0;
+  a[3947]=0;
+  a[3948]=0;
+  a[3949]=0;
+  a[3950]=0;
+  a[3951]=0;
+  a[3952]=0;
+  a[3953]=0;
+  a[3954]=0;
+  a[3955]=0;
+  a[3956]=0;
+  a[3957]=0;
+  a[3958]=0;
+  a[3959]=0;
+  a[3960]=0;
+  a[3961]=0;
+  a[3962]=0;
+  a[3963]=0;
+  a[3964]=0;
+  a[3965]=0;
+  a[3966]=0;
+  a[3967]=0;
+  a[3968]=0;
+  a[3969]=0;
+  a[3970]=0;
+  a[3971]=0;
+  a[3972]=0;
+  a[3973]=0;
+  a[3974]=0;
+  a[3975]=0;
+  a[3976]=0;
+  a[3977]=0;
+  a[3978]=0;
+  a[3979]=0;
+  a[3980]=0;
+  a[3981]=0;
+  a[3982]=0;
+  a[3983]=0;
+  a[3984]=0;
+  a[3985]=0;
+  a[3986]=0;
+  a[3987]=0;
+  a[3988]=0;
+  a[3989]=0;
+  a[3990]=0;
+  a[3991]=0;
+  a[3992]=0;
+  a[3993]=0;
+  a[3994]=0;
+  a[3995]=0;
+  a[3996]=0;
+  a[3997]=0;
+  a[3998]=0;
+  a[3999]=0;
+  a[4000]=0;
+  a[4001]=0;
+  a[4002]=0;
+  a[4003]=0;
+  a[4004]=0;
+  a[4005]=0;
+  a[4006]=0;
+  a[4007]=0;
+  a[4008]=0;
+  a[4009]=0;
+  a[4010]=0;
+  a[4011]=0;
+  a[4012]=0;
+  a[4013]=0;
+  a[4014]=0;
+  a[4015]=0;
+  a[4016]=0;
+  a[4017]=0;
+  a[4018]=0;
+  a[4019]=0;
+  a[4020]=0;
+  a[4021]=0;
+  a[4022]=0;
+  a[4023]=0;
+  a[4024]=0;
+  a[4025]=0;
+  a[4026]=0;
+  a[4027]=0;
+  a[4028]=0;
+  a[4029]=0;
+  a[4030]=0;
+  a[4031]=0;
+  a[4032]=0;
+  a[4033]=0;
+  a[4034]=0;
+  a[4035]=0;
+  a[4036]=0;
+  a[4037]=0;
+  a[4038]=0;
+  a[4039]=0;
+  a[4040]=0;
+  a[4041]=0;
+  a[4042]=0;
+  a[4043]=0;
+  a[4044]=0;
+  a[4045]=0;
+  a[4046]=0;
+  a[4047]=0;
+  a[4048]=0;
+  a[4049]=0;
+  a[4050]=0;
+  a[4051]=0;
+  a[4052]=0;
+  a[4053]=0;
+  a[4054]=0;
+  a[4055]=0;
+  a[4056]=0;
+  a[4057]=0;
+  a[4058]=0;
+  a[4059]=0;
+  a[4060]=0;
+  a[4061]=0;
+  a[4062]=0;
+  a[4063]=0;
+  a[4064]=0;
+  a[4065]=0;
+  a[4066]=0;
+  a[4067]=0;
+  a[4068]=0;
+  a[4069]=0;
+  a[4070]=0;
+  a[4071]=0;
+  a[4072]=0;
+  a[4073]=0;
+  a[4074]=0;
+  a[4075]=0;
+  a[4076]=0;
+  a[4077]=0;
+  a[4078]=0;
+  a[4079]=0;
+  a[4080]=0;
+  a[4081]=0;
+  a[4082]=0;
+  a[4083]=0;
+  a[4084]=0;
+  a[4085]=0;
+  a[4086]=0;
+  a[4087]=0;
+  a[4088]=0;
+  a[4089]=0;
+  a[4090]=0;
+  a[4091]=0;
+  a[4092]=0;
+  a[4093]=0;
+  a[4094]=0;
+  a[4095]=0;
+  a[4096]=0;
+  a[4097]=0;
+  a[4098]=0;
+  a[4099]=0;
+  a[4100]=0;
+  a[4101]=0;
+  a[4102]=0;
+  a[4103]=0;
+  a[4104]=0;
+  a[4105]=0;
+  a[4106]=0;
+  a[4107]=0;
+  a[4108]=0;
+  a[4109]=0;
+  a[4110]=0;
+  a[4111]=0;
+  a[4112]=0;
+  a[4113]=0;
+  a[4114]=0;
+  a[4115]=0;
+  a[4116]=0;
+  a[4117]=0;
+  a[4118]=0;
+  a[4119]=0;
+  a[4120]=0;
+  a[4121]=0;
+  a[4122]=0;
+  a[4123]=0;
+  a[4124]=0;
+  a[4125]=0;
+  a[4126]=0;
+  a[4127]=0;
+  a[4128]=0;
+  a[4129]=0;
+  a[4130]=0;
+  a[4131]=0;
+  a[4132]=0;
+  a[4133]=0;
+  a[4134]=0;
+  a[4135]=0;
+  a[4136]=0;
+  a[4137]=0;
+  a[4138]=0;
+  a[4139]=0;
+  a[4140]=0;
+  a[4141]=0;
+  a[4142]=0;
+  a[4143]=0;
+  a[4144]=0;
+  a[4145]=0;
+  a[4146]=0;
+  a[4147]=0;
+  a[4148]=0;
+  a[4149]=0;
+  a[4150]=0;
+  a[4151]=0;
+  a[4152]=0;
+  a[4153]=0;
+  a[4154]=0;
+  a[4155]=0;
+  a[4156]=0;
+  a[4157]=0;
+  a[4158]=0;
+  a[4159]=0;
+  a[4160]=0;
+  a[4161]=0;
+  a[4162]=0;
+  a[4163]=0;
+  a[4164]=0;
+  a[4165]=0;
+  a[4166]=0;
+  a[4167]=0;
+  a[4168]=0;
+  a[4169]=0;
+  a[4170]=0;
+  a[4171]=0;
+  a[4172]=0;
+  a[4173]=0;
+  a[4174]=0;
+  a[4175]=0;
+  a[4176]=0;
+  a[4177]=0;
+  a[4178]=0;
+  a[4179]=0;
+  a[4180]=0;
+  a[4181]=0;
+  a[4182]=0;
+  a[4183]=0;
+  a[4184]=0;
+  a[4185]=0;
+  a[4186]=0;
+  a[4187]=0;
+  a[4188]=0;
+  a[4189]=0;
+  a[4190]=0;
+  a[4191]=0;
+  a[4192]=0;
+  a[4193]=0;
+  a[4194]=0;
+  a[4195]=0;
+  a[4196]=0;
+  a[4197]=0;
+  a[4198]=0;
+  a[4199]=0;
+  a[4200]=0;
+  a[4201]=0;
+  a[4202]=0;
+  a[4203]=0;
+  a[4204]=0;
+  a[4205]=0;
+  a[4206]=0;
+  a[4207]=0;
+  a[4208]=0;
+  a[4209]=0;
+  a[4210]=0;
+  a[4211]=0;
+  a[4212]=0;
+  a[4213]=0;
+  a[4214]=0;
+  a[4215]=0;
+  a[4216]=0;
+  a[4217]=0;
+  a[4218]=0;
+  a[4219]=0;
+  a[4220]=0;
+  a[4221]=0;
+  a[4222]=0;
+  a[4223]=0;
+  a[4224]=0;
+  a[4225]=0;
+  a[4226]=0;
+  a[4227]=0;
+  a[4228]=0;
+  a[4229]=0;
+  a[4230]=0;
+  a[4231]=0;
+  a[4232]=0;
+  a[4233]=0;
+  a[4234]=0;
+  a[4235]=0;
+  a[4236]=0;
+  a[4237]=0;
+  a[4238]=0;
+  a[4239]=0;
+  a[4240]=0;
+  a[4241]=0;
+  a[4242]=0;
+  a[4243]=0;
+  a[4244]=0;
+  a[4245]=0;
+  a[4246]=0;
+  a[4247]=0;
+  a[4248]=0;
+  a[4249]=0;
+  a[4250]=0;
+  a[4251]=0;
+  a[4252]=0;
+  a[4253]=0;
+  a[4254]=0;
+  a[4255]=0;
+  a[4256]=0;
+  a[4257]=0;
+  a[4258]=0;
+  a[4259]=0;
+  a[4260]=0;
+  a[4261]=0;
+  a[4262]=0;
+  a[4263]=0;
+  a[4264]=0;
+  a[4265]=0;
+  a[4266]=0;
+  a[4267]=0;
+  a[4268]=0;
+  a[4269]=0;
+  a[4270]=0;
+  a[4271]=0;
+  a[4272]=0;
+  a[4273]=0;
+  a[4274]=0;
+  a[4275]=0;
+  a[4276]=0;
+  a[4277]=0;
+  a[4278]=0;
+  a[4279]=0;
+  a[4280]=0;
+  a[4281]=0;
+  a[4282]=0;
+  a[4283]=0;
+  a[4284]=0;
+  a[4285]=0;
+  a[4286]=0;
+  a[4287]=0;
+  a[4288]=0;
+  a[4289]=0;
+  a[4290]=0;
+  a[4291]=0;
+  a[4292]=0;
+  a[4293]=0;
+  a[4294]=0;
+  a[4295]=0;
+  a[4296]=0;
+  a[4297]=0;
+  a[4298]=0;
+  a[4299]=0;
+  a[4300]=0;
+  a[4301]=0;
+  a[4302]=0;
+  a[4303]=0;
+  a[4304]=0;
+  a[4305]=0;
+  a[4306]=0;
+  a[4307]=0;
+  a[4308]=0;
+  a[4309]=0;
+  a[4310]=0;
+  a[4311]=0;
+  a[4312]=0;
+  a[4313]=0;
+  a[4314]=0;
+  a[4315]=0;
+  a[4316]=0;
+  a[4317]=0;
+  a[4318]=0;
+  a[4319]=0;
+  a[4320]=0;
+  a[4321]=0;
+  a[4322]=0;
+  a[4323]=0;
+  a[4324]=0;
+  a[4325]=0;
+  a[4326]=0;
+  a[4327]=0;
+  a[4328]=0;
+  a[4329]=0;
+  a[4330]=0;
+  a[4331]=0;
+  a[4332]=0;
+  a[4333]=0;
+  a[4334]=0;
+  a[4335]=0;
+  a[4336]=0;
+  a[4337]=0;
+  a[4338]=0;
+  a[4339]=0;
+  a[4340]=0;
+  a[4341]=0;
+  a[4342]=0;
+  a[4343]=0;
+  a[4344]=0;
+  a[4345]=0;
+  a[4346]=0;
+  a[4347]=0;
+  a[4348]=0;
+  a[4349]=0;
+  a[4350]=0;
+  a[4351]=0;
+  a[4352]=0;
+  a[4353]=0;
+  a[4354]=0;
+  a[4355]=0;
+  a[4356]=0;
+  a[4357]=0;
+  a[4358]=0;
+  a[4359]=0;
+  a[4360]=0;
+  a[4361]=0;
+  a[4362]=0;
+  a[4363]=0;
+  a[4364]=0;
+  a[4365]=0;
+  a[4366]=0;
+  a[4367]=0;
+  a[4368]=0;
+  a[4369]=0;
+  a[4370]=0;
+  a[4371]=0;
+  a[4372]=0;
+  a[4373]=0;
+  a[4374]=0;
+  a[4375]=0;
+  a[4376]=0;
+  a[4377]=0;
+  a[4378]=0;
+  a[4379]=0;
+  a[4380]=0;
+  a[4381]=0;
+  a[4382]=0;
+  a[4383]=0;
+  a[4384]=0;
+  a[4385]=0;
+  a[4386]=0;
+  a[4387]=0;
+  a[4388]=0;
+  a[4389]=0;
+  a[4390]=0;
+  a[4391]=0;
+  a[4392]=0;
+  a[4393]=0;
+  a[4394]=0;
+  a[4395]=0;
+  a[4396]=0;
+  a[4397]=0;
+  a[4398]=0;
+  a[4399]=0;
+  a[4400]=0;
+  a[4401]=0;
+  a[4402]=0;
+  a[4403]=0;
+  a[4404]=0;
+  a[4405]=0;
+  a[4406]=0;
+  a[4407]=0;
+  a[4408]=0;
+  a[4409]=0;
+  a[4410]=0;
+  a[4411]=0;
+  a[4412]=0;
+  a[4413]=0;
+  a[4414]=0;
+  a[4415]=0;
+  a[4416]=0;
+  a[4417]=0;
+  a[4418]=0;
+  a[4419]=0;
+  a[4420]=0;
+  a[4421]=0;
+  a[4422]=0;
+  a[4423]=0;
+  a[4424]=0;
+  a[4425]=0;
+  a[4426]=0;
+  a[4427]=0;
+  a[4428]=0;
+  a[4429]=0;
+  a[4430]=0;
+  a[4431]=0;
+  a[4432]=0;
+  a[4433]=0;
+  a[4434]=0;
+  a[4435]=0;
+  a[4436]=0;
+  a[4437]=0;
+  a[4438]=0;
+  a[4439]=0;
+  a[4440]=0;
+  a[4441]=0;
+  a[4442]=0;
+  a[4443]=0;
+  a[4444]=0;
+  a[4445]=0;
+  a[4446]=0;
+  a[4447]=0;
+  a[4448]=0;
+  a[4449]=0;
+  a[4450]=0;
+  a[4451]=0;
+  a[4452]=0;
+  a[4453]=0;
+  a[4454]=0;
+  a[4455]=0;
+  a[4456]=0;
+  a[4457]=0;
+  a[4458]=0;
+  a[4459]=0;
+  a[4460]=0;
+  a[4461]=0;
+  a[4462]=0;
+  a[4463]=0;
+  a[4464]=0;
+  a[4465]=0;
+  a[4466]=0;
+  a[4467]=0;
+  a[4468]=0;
+  a[4469]=0;
+  a[4470]=0;
+  a[4471]=0;
+  a[4472]=0;
+  a[4473]=0;
+  a[4474]=0;
+  a[4475]=0;
+  a[4476]=0;
+  a[4477]=0;
+  a[4478]=0;
+  a[4479]=0;
+  a[4480]=0;
+  a[4481]=0;
+  a[4482]=0;
+  a[4483]=0;
+  a[4484]=0;
+  a[4485]=0;
+  a[4486]=0;
+  a[4487]=0;
+  a[4488]=0;
+  a[4489]=0;
+  a[4490]=0;
+  a[4491]=0;
+  a[4492]=0;
+  a[4493]=0;
+  a[4494]=0;
+  a[4495]=0;
+  a[4496]=0;
+  a[4497]=0;
+  a[4498]=0;
+  a[4499]=0;
+  a[4500]=0;
+  a[4501]=0;
+  a[4502]=0;
+  a[4503]=0;
+  a[4504]=0;
+  a[4505]=0;
+  a[4506]=0;
+  a[4507]=0;
+  a[4508]=0;
+  a[4509]=0;
+  a[4510]=0;
+  a[4511]=0;
+  a[4512]=0;
+  a[4513]=0;
+  a[4514]=0;
+  a[4515]=0;
+  a[4516]=0;
+  a[4517]=0;
+  a[4518]=0;
+  a[4519]=0;
+  a[4520]=0;
+  a[4521]=0;
+  a[4522]=0;
+  a[4523]=0;
+  a[4524]=0;
+  a[4525]=0;
+  a[4526]=0;
+  a[4527]=0;
+  a[4528]=0;
+  a[4529]=0;
+  a[4530]=0;
+  a[4531]=0;
+  a[4532]=0;
+  a[4533]=0;
+  a[4534]=0;
+  a[4535]=0;
+  a[4536]=0;
+  a[4537]=0;
+  a[4538]=0;
+  a[4539]=0;
+  a[4540]=0;
+  a[4541]=0;
+  a[4542]=0;
+  a[4543]=0;
+  a[4544]=0;
+  a[4545]=0;
+  a[4546]=0;
+  a[4547]=0;
+  a[4548]=0;
+  a[4549]=0;
+  a[4550]=0;
+  a[4551]=0;
+  a[4552]=0;
+  a[4553]=0;
+  a[4554]=0;
+  a[4555]=0;
+  a[4556]=0;
+  a[4557]=0;
+  a[4558]=0;
+  a[4559]=0;
+  a[4560]=0;
+  a[4561]=0;
+  a[4562]=0;
+  a[4563]=0;
+  a[4564]=0;
+  a[4565]=0;
+  a[4566]=0;
+  a[4567]=0;
+  a[4568]=0;
+  a[4569]=0;
+  a[4570]=0;
+  a[4571]=0;
+  a[4572]=0;
+  a[4573]=0;
+  a[4574]=0;
+  a[4575]=0;
+  a[4576]=0;
+  a[4577]=0;
+  a[4578]=0;
+  a[4579]=0;
+  a[4580]=0;
+  a[4581]=0;
+  a[4582]=0;
+  a[4583]=0;
+  a[4584]=0;
+  a[4585]=0;
+  a[4586]=0;
+  a[4587]=0;
+  a[4588]=0;
+  a[4589]=0;
+  a[4590]=0;
+  a[4591]=0;
+  a[4592]=0;
+  a[4593]=0;
+  a[4594]=0;
+  a[4595]=0;
+  a[4596]=0;
+  a[4597]=0;
+  a[4598]=0;
+  a[4599]=0;
+  a[4600]=0;
+  a[4601]=0;
+  a[4602]=0;
+  a[4603]=0;
+  a[4604]=0;
+  a[4605]=0;
+  a[4606]=0;
+  a[4607]=0;
+  a[4608]=0;
+  a[4609]=0;
+  a[4610]=0;
+  a[4611]=0;
+  a[4612]=0;
+  a[4613]=0;
+  a[4614]=0;
+  a[4615]=0;
+  a[4616]=0;
+  a[4617]=0;
+  a[4618]=0;
+  a[4619]=0;
+  a[4620]=0;
+  a[4621]=0;
+  a[4622]=0;
+  a[4623]=0;
+  a[4624]=0;
+  a[4625]=0;
+  a[4626]=0;
+  a[4627]=0;
+  a[4628]=0;
+  a[4629]=0;
+  a[4630]=0;
+  a[4631]=0;
+  a[4632]=0;
+  a[4633]=0;
+  a[4634]=0;
+  a[4635]=0;
+  a[4636]=0;
+  a[4637]=0;
+  a[4638]=0;
+  a[4639]=0;
+  a[4640]=0;
+  a[4641]=0;
+  a[4642]=0;
+  a[4643]=0;
+  a[4644]=0;
+  a[4645]=0;
+  a[4646]=0;
+  a[4647]=0;
+  a[4648]=0;
+  a[4649]=0;
+  a[4650]=0;
+  a[4651]=0;
+  a[4652]=0;
+  a[4653]=0;
+  a[4654]=0;
+  a[4655]=0;
+  a[4656]=0;
+  a[4657]=0;
+  a[4658]=0;
+  a[4659]=0;
+  a[4660]=0;
+  a[4661]=0;
+  a[4662]=0;
+  a[4663]=0;
+  a[4664]=0;
+  a[4665]=0;
+  a[4666]=0;
+  a[4667]=0;
+  a[4668]=0;
+  a[4669]=0;
+  a[4670]=0;
+  a[4671]=0;
+  a[4672]=0;
+  a[4673]=0;
+  a[4674]=0;
+  a[4675]=0;
+  a[4676]=0;
+  a[4677]=0;
+  a[4678]=0;
+  a[4679]=0;
+  a[4680]=0;
+  a[4681]=0;
+  a[4682]=0;
+  a[4683]=0;
+  a[4684]=0;
+  a[4685]=0;
+  a[4686]=0;
+  a[4687]=0;
+  a[4688]=0;
+  a[4689]=0;
+  a[4690]=0;
+  a[4691]=0;
+  a[4692]=0;
+  a[4693]=0;
+  a[4694]=0;
+  a[4695]=0;
+  a[4696]=0;
+  a[4697]=0;
+  a[4698]=0;
+  a[4699]=0;
+  a[4700]=0;
+  a[4701]=0;
+  a[4702]=0;
+  a[4703]=0;
+  a[4704]=0;
+  a[4705]=0;
+  a[4706]=0;
+  a[4707]=0;
+  a[4708]=0;
+  a[4709]=0;
+  a[4710]=0;
+  a[4711]=0;
+  a[4712]=0;
+  a[4713]=0;
+  a[4714]=0;
+  a[4715]=0;
+  a[4716]=0;
+  a[4717]=0;
+  a[4718]=0;
+  a[4719]=0;
+  a[4720]=0;
+  a[4721]=0;
+  a[4722]=0;
+  a[4723]=0;
+  a[4724]=0;
+  a[4725]=0;
+  a[4726]=0;
+  a[4727]=0;
+  a[4728]=0;
+  a[4729]=0;
+  a[4730]=0;
+  a[4731]=0;
+  a[4732]=0;
+  a[4733]=0;
+  a[4734]=0;
+  a[4735]=0;
+  a[4736]=0;
+  a[4737]=0;
+  a[4738]=0;
+  a[4739]=0;
+  a[4740]=0;
+  a[4741]=0;
+  a[4742]=0;
+  a[4743]=0;
+  a[4744]=0;
+  a[4745]=0;
+  a[4746]=0;
+  a[4747]=0;
+  a[4748]=0;
+  a[4749]=0;
+  a[4750]=0;
+  a[4751]=0;
+  a[4752]=0;
+  a[4753]=0;
+  a[4754]=0;
+  a[4755]=0;
+  a[4756]=0;
+  a[4757]=0;
+  a[4758]=0;
+  a[4759]=0;
+  a[4760]=0;
+  a[4761]=0;
+  a[4762]=0;
+  a[4763]=0;
+  a[4764]=0;
+  a[4765]=0;
+  a[4766]=0;
+  a[4767]=0;
+  a[4768]=0;
+  a[4769]=0;
+  a[4770]=0;
+  a[4771]=0;
+  a[4772]=0;
+  a[4773]=0;
+  a[4774]=0;
+  a[4775]=0;
+  a[4776]=0;
+  a[4777]=0;
+  a[4778]=0;
+  a[4779]=0;
+  a[4780]=0;
+  a[4781]=0;
+  a[4782]=0;
+  a[4783]=0;
+  a[4784]=0;
+  a[4785]=0;
+  a[4786]=0;
+  a[4787]=0;
+  a[4788]=0;
+  a[4789]=0;
+  a[4790]=0;
+  a[4791]=0;
+  a[4792]=0;
+  a[4793]=0;
+  a[4794]=0;
+  a[4795]=0;
+  a[4796]=0;
+  a[4797]=0;
+  a[4798]=0;
+  a[4799]=0;
+  a[4800]=0;
+  a[4801]=0;
+  a[4802]=0;
+  a[4803]=0;
+  a[4804]=0;
+  a[4805]=0;
+  a[4806]=0;
+  a[4807]=0;
+  a[4808]=0;
+  a[4809]=0;
+  a[4810]=0;
+  a[4811]=0;
+  a[4812]=0;
+  a[4813]=0;
+  a[4814]=0;
+  a[4815]=0;
+  a[4816]=0;
+  a[4817]=0;
+  a[4818]=0;
+  a[4819]=0;
+  a[4820]=0;
+  a[4821]=0;
+  a[4822]=0;
+  a[4823]=0;
+  a[4824]=0;
+  a[4825]=0;
+  a[4826]=0;
+  a[4827]=0;
+  a[4828]=0;
+  a[4829]=0;
+  a[4830]=0;
+  a[4831]=0;
+  a[4832]=0;
+  a[4833]=0;
+  a[4834]=0;
+  a[4835]=0;
+  a[4836]=0;
+  a[4837]=0;
+  a[4838]=0;
+  a[4839]=0;
+  a[4840]=0;
+  a[4841]=0;
+  a[4842]=0;
+  a[4843]=0;
+  a[4844]=0;
+  a[4845]=0;
+  a[4846]=0;
+  a[4847]=0;
+  a[4848]=0;
+  a[4849]=0;
+  a[4850]=0;
+  a[4851]=0;
+  a[4852]=0;
+  a[4853]=0;
+  a[4854]=0;
+  a[4855]=0;
+  a[4856]=0;
+  a[4857]=0;
+  a[4858]=0;
+  a[4859]=0;
+  a[4860]=0;
+  a[4861]=0;
+  a[4862]=0;
+  a[4863]=0;
+  a[4864]=0;
+  a[4865]=0;
+  a[4866]=0;
+  a[4867]=0;
+  a[4868]=0;
+  a[4869]=0;
+  a[4870]=0;
+  a[4871]=0;
+  a[4872]=0;
+  a[4873]=0;
+  a[4874]=0;
+  a[4875]=0;
+  a[4876]=0;
+  a[4877]=0;
+  a[4878]=0;
+  a[4879]=0;
+  a[4880]=0;
+  a[4881]=0;
+  a[4882]=0;
+  a[4883]=0;
+  a[4884]=0;
+  a[4885]=0;
+  a[4886]=0;
+  a[4887]=0;
+  a[4888]=0;
+  a[4889]=0;
+  a[4890]=0;
+  a[4891]=0;
+  a[4892]=0;
+  a[4893]=0;
+  a[4894]=0;
+  a[4895]=0;
+  a[4896]=0;
+  a[4897]=0;
+  a[4898]=0;
+  a[4899]=0;
+  a[4900]=0;
+  a[4901]=0;
+  a[4902]=0;
+  a[4903]=0;
+  a[4904]=0;
+  a[4905]=0;
+  a[4906]=0;
+  a[4907]=0;
+  a[4908]=0;
+  a[4909]=0;
+  a[4910]=0;
+  a[4911]=0;
+  a[4912]=0;
+  a[4913]=0;
+  a[4914]=0;
+  a[4915]=0;
+  a[4916]=0;
+  a[4917]=0;
+  a[4918]=0;
+  a[4919]=0;
+  a[4920]=0;
+  a[4921]=0;
+  a[4922]=0;
+  a[4923]=0;
+  a[4924]=0;
+  a[4925]=0;
+  a[4926]=0;
+  a[4927]=0;
+  a[4928]=0;
+  a[4929]=0;
+  a[4930]=0;
+  a[4931]=0;
+  a[4932]=0;
+  a[4933]=0;
+  a[4934]=0;
+  a[4935]=0;
+  a[4936]=0;
+  a[4937]=0;
+  a[4938]=0;
+  a[4939]=0;
+  a[4940]=0;
+  a[4941]=0;
+  a[4942]=0;
+  a[4943]=0;
+  a[4944]=0;
+  a[4945]=0;
+  a[4946]=0;
+  a[4947]=0;
+  a[4948]=0;
+  a[4949]=0;
+  a[4950]=0;
+  a[4951]=0;
+  a[4952]=0;
+  a[4953]=0;
+  a[4954]=0;
+  a[4955]=0;
+  a[4956]=0;
+  a[4957]=0;
+  a[4958]=0;
+  a[4959]=0;
+  a[4960]=0;
+  a[4961]=0;
+  a[4962]=0;
+  a[4963]=0;
+  a[4964]=0;
+  a[4965]=0;
+  a[4966]=0;
+  a[4967]=0;
+  a[4968]=0;
+  a[4969]=0;
+  a[4970]=0;
+  a[4971]=0;
+  a[4972]=0;
+  a[4973]=0;
+  a[4974]=0;
+  a[4975]=0;
+  a[4976]=0;
+  a[4977]=0;
+  a[4978]=0;
+  a[4979]=0;
+  a[4980]=0;
+  a[4981]=0;
+  a[4982]=0;
+  a[4983]=0;
+  a[4984]=0;
+  a[4985]=0;
+  a[4986]=0;
+  a[4987]=0;
+  a[4988]=0;
+  a[4989]=0;
+  a[4990]=0;
+  a[4991]=0;
+  a[4992]=0;
+  a[4993]=0;
+  a[4994]=0;
+  a[4995]=0;
+  a[4996]=0;
+  a[4997]=0;
+  a[4998]=0;
+  a[4999]=0;
+  return a;
+}
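
The zero-fill body above is machine-generated bulk: the test needs a function whose literal source is very large, and writing five thousand assignments by hand is not the point. A hypothetical generator for such a fixture (the helper and its output format are illustrative, not part of the suite) could look like:

    // Emit the body of a function that zero-fills a 5000-element array.
    var lines = ["function gen() {", "  var a = new Array(5000);"];
    for (var i = 0; i < 5000; i++) lines.push("  a[" + i + "]=0;");
    lines.push("  return a;", "}");
    print(lines.join("\n"));  // d8's print(); use console.log elsewhere
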
diff --git a/test/mjsunit/regress/regress-1973.js b/test/mjsunit/regress/regress-1973.js
new file mode 100644
index 0000000..8708bf1
--- /dev/null
+++ b/test/mjsunit/regress/regress-1973.js
@@ -0,0 +1,52 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that getters and setters receive an unwrapped this value in strict
+// mode and a wrapped this value in non-strict mode.
+
+function TestAccessorWrapping(primitive) {
+  var prototype = Object.getPrototypeOf(Object(primitive));
+  // Check that strict mode passes unwrapped this value.
+  var strict_type = typeof primitive;
+  Object.defineProperty(prototype, "strict", {
+    get: function() { "use strict"; assertSame(strict_type, typeof this); },
+    set: function() { "use strict"; assertSame(strict_type, typeof this); }
+  });
+  primitive.strict = primitive.strict;
+  // Check that non-strict mode passes wrapped this value.
+  var sloppy_type = typeof Object(primitive);
+  Object.defineProperty(prototype, "sloppy", {
+    get: function() { assertSame(sloppy_type, typeof this); },
+    set: function() { assertSame(sloppy_type, typeof this); }
+  });
+  primitive.sloppy = primitive.sloppy;
+}
+
+TestAccessorWrapping(true);
+TestAccessorWrapping(0);
+TestAccessorWrapping({});
+TestAccessorWrapping("");
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-1980.js
similarity index 80%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-1980.js
index 2502b53..49dfd06 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-1980.js
@@ -25,22 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// See: http://code.google.com/p/v8/issues/detail?id=1980
 
-var e = new Error();
-assertEquals('Error', e + '');
-
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
-
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+var invalid_this = [ "invalid", 23, undefined, null ];
+for (var i = 0; i < invalid_this.length; i++) {
+  var exception = false;
+  try {
+    Error.prototype.toString.call(invalid_this[i]);
+  } catch (e) {
+    exception = true;
+    assertTrue("called_on_non_object" == e.type);
+  }
+  assertTrue(exception);
+}
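
For context: the fix under test makes Error.prototype.toString reject non-object receivers with a thrown error rather than coercing them, and e.type is the internal tag this V8 snapshot attaches to such errors. The shape of the check, in isolation:

    try {
      Error.prototype.toString.call(23);  // primitive receiver
    } catch (e) {
      // In this build, e.type == "called_on_non_object" identifies the
      // "method called on incompatible receiver" failure.
    }
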
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-2045.js
similarity index 81%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-2045.js
index d3f2e35..822ee1f 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-2045.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,25 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+function foo() {
+  assertEquals(2, arguments.length);
+}
 
-assertThrows('eval(delete eval)');
+function bar() {
+  G.x;
+  return foo.apply(this, arguments);
+}
 
+function baz() {
+  return bar(1, 2);
+}
+
+G = {x: 0};
+baz();
+baz();
+%OptimizeFunctionOnNextCall(baz);
+baz();
+delete G.x;
+baz();
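
regress-2045.js uses the standard mjsunit optimize-and-invalidate pattern: run baz twice to gather type feedback, force optimization with %OptimizeFunctionOnNextCall (hence the natives-syntax flag), then delete G.x so the optimized code deoptimizes while foo.apply(this, arguments) is live; the assertion checks that both arguments survive the deopt. Reduced to its skeleton:

    function callee() { return arguments.length; }
    function caller() { return callee.apply(this, arguments); }
    caller(1, 2); caller(1, 2);           // warm up, collect feedback
    %OptimizeFunctionOnNextCall(caller);  // requires --allow-natives-syntax
    caller(1, 2);                         // optimized; must still see 2 args
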
diff --git a/test/mjsunit/regress/regress-2056.js b/test/mjsunit/regress/regress-2056.js
new file mode 100644
index 0000000..d34a750
--- /dev/null
+++ b/test/mjsunit/regress/regress-2056.js
@@ -0,0 +1,66 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var cases = [
+    [0.0, 0.0, 0.0, 0.0],
+    [undefined, 0.0, NaN, NaN],
+    [0.0, undefined, NaN, NaN],
+    [NaN, 0.0, NaN, NaN],
+    [0.0, NaN, NaN, NaN],
+    [-NaN, 0.0, NaN, NaN],
+    [0.0, -NaN, NaN, NaN],
+    [Infinity, 0.0, Infinity, 0.0],
+    [0.0, Infinity, Infinity, 0.0],
+    [-Infinity, 0.0, 0.0, -Infinity],
+    [0.0, -Infinity, 0.0, -Infinity]
+];
+
+function do_min(a, b) {
+    return Math.min(a, b);
+}
+
+function do_max(a, b) {
+    return Math.max(a, b);
+}
+
+// Make sure that non-crankshaft results match expectations.
+for (i = 0; i < cases.length; ++i) {
+    var c = cases[i];
+    assertEquals(c[3], do_min(c[0], c[1]));
+    assertEquals(c[2], do_max(c[0], c[1]));
+}
+
+// Make sure that crankshaft results match expectations.
+for (i = 0; i < cases.length; ++i) {
+    var c = cases[i];
+    %OptimizeFunctionOnNextCall(do_min);
+    %OptimizeFunctionOnNextCall(do_max);
+    assertEquals(c[3], do_min(c[0], c[1]));
+    assertEquals(c[2], do_max(c[0], c[1]));
+}
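
Each row of cases reads [a, b, expected max, expected min], and the expectations follow IEEE 754: any NaN operand (including undefined after ToNumber) makes both results NaN, while infinities dominate in the obvious direction. For example:

    Math.max(NaN, 0);        // NaN
    Math.max(undefined, 0);  // NaN -- ToNumber(undefined) is NaN
    Math.max(Infinity, 0);   // Infinity
    Math.min(-Infinity, 0);  // -Infinity
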
diff --git a/test/mjsunit/regress/regress-397.js b/test/mjsunit/regress/regress-397.js
index 111f4a6..0e4143d 100644
--- a/test/mjsunit/regress/regress-397.js
+++ b/test/mjsunit/regress/regress-397.js
@@ -25,10 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
 // See http://code.google.com/p/v8/issues/detail?id=397
 
-assertEquals("Infinity", String(Math.pow(Infinity, 0.5)));
-assertEquals(0, Math.pow(Infinity, -0.5));
 
-assertEquals("Infinity", String(Math.pow(-Infinity, 0.5)));
-assertEquals(0, Math.pow(-Infinity, -0.5));
+function test() {
+  assertEquals("Infinity", String(Math.pow(Infinity, 0.5)));
+  assertEquals(0, Math.pow(Infinity, -0.5));
+
+  assertEquals("Infinity", String(Math.pow(-Infinity, 0.5)));
+  assertEquals(0, Math.pow(-Infinity, -0.5));
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
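
The spec corner regress-397.js guards: when the base is infinite, Math.pow depends only on the sign of the exponent, so a negative infinite base does not yield NaN the way finite negative bases with fractional exponents do:

    Math.pow(-Infinity, 0.5);   // Infinity, not NaN
    Math.pow(-Infinity, -0.5);  // 0
    Math.pow(-2, 0.5);          // NaN -- finite negative base, fractional exponent
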
diff --git a/test/mjsunit/regress/regress-877615.js b/test/mjsunit/regress/regress-877615.js
index d35aba6..bec5a4d 100644
--- a/test/mjsunit/regress/regress-877615.js
+++ b/test/mjsunit/regress/regress-877615.js
@@ -25,13 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-Number.prototype.toLocaleString = function() { return 'invalid'};
-assertEquals([1].toLocaleString(), 'invalid');  // invalid
+Number.prototype.toLocaleString = function() { return 'invalid'; };
+assertEquals('invalid', [1].toLocaleString());  // invalid
 
 Number.prototype.toLocaleString = 'invalid';
-assertEquals([1].toLocaleString(), '1');  // 1
+assertThrows(function() { [1].toLocaleString(); });  // Not callable.
 
+delete Number.prototype.toLocaleString;
 Number.prototype.toString = function() { return 'invalid' };
-assertEquals([1].toLocaleString(), '1');  // 1
-assertEquals([1].toString(), '1');        // 1
-
+assertEquals('invalid', [1].toLocaleString());  // Uses ToObject on elements.
+assertEquals('1', [1].toString());        // Uses ToString directly on elements.
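
The rewritten expectations follow the two different element conversions the spec prescribes: Array.prototype.toLocaleString wraps each element with ToObject and invokes its toLocaleString (so a monkey-patched Number.prototype.toLocaleString is observed, and a non-callable one throws), whereas Array.prototype.toString goes through join, whose internal ToString ignores prototype overrides on primitives:

    Number.prototype.toLocaleString = function() { return 'X'; };
    [1, 2].toLocaleString();  // "X,X" (separator is implementation-defined)
    delete Number.prototype.toLocaleString;
    Number.prototype.toString = function() { return 'Y'; };
    [1].toString();           // "1" -- join uses internal ToString, not the override
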
diff --git a/test/mjsunit/regress/regress-91517.js b/test/mjsunit/regress/regress-91517.js
deleted file mode 100644
index 68a768c..0000000
--- a/test/mjsunit/regress/regress-91517.js
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Getting property names of an object with a prototype chain that
-// triggers dictionary elements in GetLocalPropertyNames() shouldn't
-// crash the runtime
-
-// Flags: --allow-natives-syntax
-
-function Object1() {
-  this.foo = 1;
-}
-
-function Object2() {
-  this.fuz = 2;
-  this.objects = new Object();
-  this.fuz1 = 2;
-  this.fuz2 = 2;
-  this.fuz3 = 2;
-  this.fuz4 = 2;
-  this.fuz5 = 2;
-  this.fuz6 = 2;
-  this.fuz7 = 2;
-  this.fuz8 = 2;
-  this.fuz9 = 2;
-  this.fuz10 = 2;
-  this.fuz11 = 2;
-  this.fuz12 = 2;
-  this.fuz13 = 2;
-  this.fuz14 = 2;
-  this.fuz15 = 2;
-  this.fuz16 = 2;
-  this.fuz17 = 2;
-  // Force dictionary-based properties
-  for (x=1;x<1000;x++) {
-    this["sdf" + x] = 2;
-  }
-}
-
-function Object3() {
-  this.boo = 3;
-}
-
-function Object4() {
-  this.baz = 4;
-}
-
-obj1 = new Object1();
-obj2 = new Object2();
-obj3 = new Object3();
-obj4 = new Object4();
-
-%SetHiddenPrototype(obj4, obj3);
-%SetHiddenPrototype(obj3, obj2);
-%SetHiddenPrototype(obj2, obj1);
-
-function contains(a, obj) {
-  for(var i = 0; i < a.length; i++) {
-    if(a[i] === obj){
-      return true;
-    }
-  }
-  return false;
-}
-names = %GetLocalPropertyNames(obj4);
-assertEquals(1021, names.length);
-assertTrue(contains(names, "baz"));
-assertTrue(contains(names, "boo"));
-assertTrue(contains(names, "foo"));
-assertTrue(contains(names, "fuz"));
-assertTrue(contains(names, "fuz1"));
-assertTrue(contains(names, "fuz2"));
-assertTrue(contains(names, "fuz3"));
-assertTrue(contains(names, "fuz4"));
-assertTrue(contains(names, "fuz5"));
-assertTrue(contains(names, "fuz6"));
-assertTrue(contains(names, "fuz7"));
-assertTrue(contains(names, "fuz8"));
-assertTrue(contains(names, "fuz9"));
-assertTrue(contains(names, "fuz10"));
-assertTrue(contains(names, "fuz11"));
-assertTrue(contains(names, "fuz12"));
-assertTrue(contains(names, "fuz13"));
-assertTrue(contains(names, "fuz14"));
-assertTrue(contains(names, "fuz15"));
-assertTrue(contains(names, "fuz16"));
-assertTrue(contains(names, "fuz17"));
-assertFalse(names[1020] == undefined);
diff --git a/test/mjsunit/regress/regress-94873.js b/test/mjsunit/regress/regress-94873.js
new file mode 100644
index 0000000..41ca992
--- /dev/null
+++ b/test/mjsunit/regress/regress-94873.js
@@ -0,0 +1,78 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+function sendCommand(state, cmd) {
+  // Get the debug command processor in paused state.
+  var dcp = state.debugCommandProcessor(false);
+  var request = JSON.stringify(cmd);
+  var response = dcp.processDebugJSONRequest(request);
+  return JSON.parse(response);
+}
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      var line = event_data.sourceLineText();
+      print('break: ' + line);
+
+      var frame = sendCommand(exec_state, {
+        seq: 0,
+        type: "request",
+        command: "frame"
+      });
+
+      sendCommand(exec_state, {
+        seq: 0,
+        type: "request",
+        command: "evaluate",
+        arguments: {
+          expression: "obj.x.toString()",
+          additional_context: [{
+            name: "obj",
+            handle: frame.body.receiver.ref
+          }]
+        }
+      });
+    }
+  } catch (e) {
+    print(e);
+  }
+}
+
+Debug.setListener(listener);
+
+function a(x, y) {
+  this.x = x;
+  this.y = y;
+}
+
+Debug.setBreakPoint(a, 0, 0);
+new a(1, 2);
\ No newline at end of file
diff --git a/test/mjsunit/regress/regress-95113.js b/test/mjsunit/regress/regress-95113.js
index f01b270..468bff8 100644
--- a/test/mjsunit/regress/regress-95113.js
+++ b/test/mjsunit/regress/regress-95113.js
@@ -32,7 +32,7 @@
   var i = 0;
   while (!%HasFastDoubleElements(a)) {
     a[i] = i;
-    i++;
+    i += 0.5;
   }
   assertTrue(%HasFastDoubleElements(a));
   a.length = 1;
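
The change from i++ to i += 0.5 matters because, in this era of V8, it is the act of storing a non-integer value that moves an array onto its double-elements backing store, which is what the %HasFastDoubleElements loop waits for. In isolation:

    var a = [];
    a[0] = 1;    // still smi-only elements
    a[1] = 0.5;  // fractional value: elements transition to fast doubles
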
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-98773.js
similarity index 80%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-98773.js
index 2502b53..eb24eb5 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-98773.js
@@ -25,22 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Calling Array.sort on an external array is not supposed to crash.
 
-var e = new Error();
-assertEquals('Error', e + '');
+var array = new Int16Array(23);
+array[7] = 7; array[9] = 9;
+assertEquals(23, array.length);
+assertEquals(7, array[7]);
+assertEquals(9, array[9]);
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
-
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+Array.prototype.sort.call(array);
+assertEquals(23, array.length);
+assertEquals(7, array[21]);
+assertEquals(9, array[22]);
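
The final positions asserted here fall out of Array.prototype.sort's default comparator, which compares elements as strings: the twenty-one zeros sort first, then "7", then "9", leaving 7 at index 21 and 9 at index 22. The same string-wise ordering in miniature:

    [10, 9, 0].sort();  // [0, 10, 9] -- "10" < "9" lexicographically
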
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-99167.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-99167.js
index d3f2e35..5053ae5 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-99167.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --expose-gc --max-new-space-size=1024
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+eval("function Node() { this.a = 1; this.a = 3; }");
+new Node;
+for (var i = 0; i < 4; ++i) gc();
+for (var i = 0; i < 100000; ++i) new Node;
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-crbug-100859.js
similarity index 81%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-crbug-100859.js
index d3f2e35..6824426 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-crbug-100859.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
-
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+// This used to trigger a crash because of an unhandled stack overflow.
+function setx() {
+  setx(typeof new Uint16Array('x') === 'object');
+}
+var exception = false;
+try {
+  setx();
+} catch (ex) {
+  assertTrue(ex instanceof RangeError);
+  exception = true;
+}
+assertTrue(exception);
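
The construct is deliberate unbounded recursion: setx re-enters itself before new Uint16Array('x') can matter, so the engine must turn stack exhaustion into a catchable RangeError instead of crashing. Minimal form:

    function boom() { boom(); }
    try { boom(); } catch (e) {
      // e instanceof RangeError: stack overflow surfaces as a normal error
    }
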
diff --git a/src/extensions/experimental/i18n-extension.h b/test/mjsunit/regress/regress-crbug-107996.js
similarity index 64%
copy from src/extensions/experimental/i18n-extension.h
copy to test/mjsunit/regress/regress-crbug-107996.js
index 5401f25..dfe07e5 100644
--- a/src/extensions/experimental/i18n-extension.h
+++ b/test/mjsunit/regress/regress-crbug-107996.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,30 +25,40 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+// Flags: --expose-debug-as debug
 
-#include "include/v8.h"
+Debug = debug.Debug;
 
-namespace v8 {
-namespace internal {
+Debug.setListener(listener);
 
+var fourteen;
+var four_in_debugger = [];
 
-class I18NExtension : public v8::Extension {
- public:
-  I18NExtension();
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    for (var i = 0; i < exec_state.frameCount(); i++) {
+      var frame = exec_state.frame(i);
+      four_in_debugger[i] = frame.evaluate("four", false).value();
+    }
+  }
+}
 
-  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
-      v8::Handle<v8::String> name);
+function f1() {
+  var three = 3;
+  var four = 4;
+  (function f2() {
+     var seven = 7;
+     (function f3() {
+        debugger;
+        fourteen = three + four + seven;
+     })();
+  })();
+}
 
-  // V8 code prefers Register, while Chrome and WebKit use get kind of methods.
-  static void Register();
-  static I18NExtension* get();
+f1();
+assertEquals(14, fourteen);
+assertEquals(4, four_in_debugger[0]);
+assertEquals(4, four_in_debugger[1]);
+assertEquals(4, four_in_debugger[2]);
 
- private:
-  static I18NExtension* extension_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+Debug.setListener(null);
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-crbug-119926.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-crbug-119926.js
index d3f2e35..26b84fa 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-crbug-119926.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Test that array elements don't break upon garbage collection.
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+var a = new Array(500);
+for (var i = 0; i < 500000; i++) {
+  a[i] = new Object();
+}
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-debug-code-recompilation.js
similarity index 72%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-debug-code-recompilation.js
index d3f2e35..1a608b1 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-debug-code-recompilation.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,23 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax --hydrogen-filter=Debug.setBreakPoint --expose-debug-as debug
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+Debug = debug.Debug;
 
-assertThrows('eval(delete eval)');
+function f() {a=1;b=2};
+function g() {
+  a=1;
+  b=2;
+}
 
+bp = Debug.setBreakPoint(f, 0, 0);
+Debug.clearBreakPoint(bp);
+%OptimizeFunctionOnNextCall(Debug.setBreakPoint);
+bp = Debug.setBreakPoint(f, 0, 0);
+Debug.clearBreakPoint(bp);
+bp = Debug.setBreakPoint(f, 0, 0);
+Debug.clearBreakPoint(bp);
+%OptimizeFunctionOnNextCall(Debug.setBreakPoint);
+bp = Debug.setBreakPoint(f, 0, 0);
+Debug.clearBreakPoint(bp);
diff --git a/test/mjsunit/regress/regress-deopt-gc.js b/test/mjsunit/regress/regress-deopt-gc.js
index 7b7c29a..a74e2c5 100644
--- a/test/mjsunit/regress/regress-deopt-gc.js
+++ b/test/mjsunit/regress/regress-deopt-gc.js
@@ -42,7 +42,7 @@
   // Make sure we don't inline this function
   try { var a = 42; } catch(o) {};
   %DeoptimizeFunction(opt_me);
-  gc(true);
+  gc();
 }
 
 
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-inlining-function-literal-context.js
similarity index 77%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-inlining-function-literal-context.js
index d3f2e35..9b7f7ac 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-inlining-function-literal-context.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,29 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax --expose-gc
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+function mkbaz(x) {
+  function baz() {
+    return function () {
+      return [x];
+    }
+  }
+  return baz;
+}
 
-assertThrows('eval(delete eval)');
+var baz = mkbaz(1);
 
+function foo() {
+  var f = baz();
+  return f();
+}
+
+// Tenure.
+gc();
+gc();
+
+assertArrayEquals([1], foo());
+assertArrayEquals([1], foo());
+%OptimizeFunctionOnNextCall(foo);
+assertArrayEquals([1], foo());
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-smi-only-concat.js
similarity index 80%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-smi-only-concat.js
index d3f2e35..a9a6d89 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-smi-only-concat.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+// This tests that concatenating a fast smi-only array and a fast object array
+// results in a fast object array.
 
-assertThrows('eval(delete eval)');
+var fast_array = ['a', 'b'];
+var array = fast_array.concat(fast_array);
 
+assertTrue(%HasFastElements(fast_array));
+assertTrue(%HasFastElements(array));
\ No newline at end of file
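
Background for the concat test: V8 tracks an elements kind per array (smi-only, double, or generic object), and concat has to pick the more general kind so the result can hold every input element without an immediate transition. The intended shape of the guarantee:

    var smis = [1, 2];         // smi-only elements
    var strings = ['a', 'b'];  // generic object elements
    var merged = smis.concat(strings);
    // merged must come back with object elements, since it now
    // stores both smis and strings.
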
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-sqrt.js
similarity index 80%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-sqrt.js
index d3f2e35..f2a7e55 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-sqrt.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,23 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+// Check that Math.sqrt returns the same value regardless of being
+// optimized or not.
 
-assertThrows('eval(delete eval)');
+function f(x) {
+  return Math.sqrt(x);
+}
 
+var x = 7.0506280066499245e-233;
+
+var a = f(x);
+
+f(0.1);
+f(0.2);
+%OptimizeFunctionOnNextCall(f);
+
+var b = f(x);
+
+assertEquals(a, b);
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/short-circuit.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/short-circuit.js
index d3f2e35..25363d6 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/short-circuit.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+var arr = [];
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+for (var i = 0; i < 28000; i++) {
+  arr.push(new RegExp("prefix" + i.toString() + i.toString() + i.toString()));
+}
diff --git a/test/mjsunit/stack-traces-2.js b/test/mjsunit/stack-traces-2.js
new file mode 100644
index 0000000..165c4df
--- /dev/null
+++ b/test/mjsunit/stack-traces-2.js
@@ -0,0 +1,87 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --builtins-in-stack-traces
+
+
+// Poisonous object that throws a reference error when an attempt is made
+// to convert it to a primitive value.
+var thrower = { valueOf: function() { FAIL; },
+                toString: function() { FAIL; } };
+
+// Tests that a native constructor function is included in the
+// stack trace.
+function testTraceNativeConstructor(nativeFunc) {
+  var nativeFuncName = nativeFunc.name;
+  try {
+    new nativeFunc(thrower);
+    assertUnreachable(nativeFuncName);
+  } catch (e) {
+    assertTrue(e.stack.indexOf(nativeFuncName) >= 0, nativeFuncName);
+  }
+}
+
+// Tests that a native conversion function is included in the
+// stack trace.
+function testTraceNativeConversion(nativeFunc) {
+  var nativeFuncName = nativeFunc.name;
+  try {
+    nativeFunc(thrower);
+    assertUnreachable(nativeFuncName);
+  } catch (e) {
+    assertTrue(e.stack.indexOf(nativeFuncName) >= 0, nativeFuncName);
+  }
+}
+
+
+function testNotOmittedBuiltin(throwing, included) {
+  try {
+    throwing();
+    assertUnreachable(included);
+  } catch (e) {
+    assertTrue(e.stack.indexOf(included) >= 0, included);
+  }
+}
+
+
+testTraceNativeConversion(String);  // Does ToString on argument.
+testTraceNativeConversion(Number);  // Does ToNumber on argument.
+testTraceNativeConversion(RegExp);  // Does ToString on argument.
+
+testTraceNativeConstructor(String);  // Does ToString on argument.
+testTraceNativeConstructor(Number);  // Does ToNumber on argument.
+testTraceNativeConstructor(RegExp);  // Does ToString on argument.
+testTraceNativeConstructor(Date);    // Does ToNumber on argument.
+
+// QuickSort has the builtins object as its receiver and is a non-native
+// builtin. It should not be omitted when --builtins-in-stack-traces is set.
+testNotOmittedBuiltin(function(){ [thrower, 2].sort(function (a,b) {
+                                                     (b < a) - (a < b); });
+                      }, "QuickSort");
+
+// Not omitted even though ADD from runtime.js is a non-native builtin.
+testNotOmittedBuiltin(function(){ thrower + 2; }, "ADD");
\ No newline at end of file
diff --git a/test/mjsunit/stack-traces.js b/test/mjsunit/stack-traces.js
index 47a5cc5..536e71b 100644
--- a/test/mjsunit/stack-traces.js
+++ b/test/mjsunit/stack-traces.js
@@ -194,6 +194,46 @@
 }
 
 
+// Poisonous object that throws a reference error when an attempt is made
+// to convert it to a primitive value.
+var thrower = { valueOf: function() { FAIL; },
+                toString: function() { FAIL; } };
+
+// Tests that a native constructor function is included in the
+// stack trace.
+function testTraceNativeConstructor(nativeFunc) {
+  var nativeFuncName = nativeFunc.name;
+  try {
+    new nativeFunc(thrower);
+    assertUnreachable(nativeFuncName);
+  } catch (e) {
+    assertTrue(e.stack.indexOf(nativeFuncName) >= 0, nativeFuncName);
+  }
+}
+
+// Tests that a native conversion function is included in the
+// stack trace.
+function testTraceNativeConversion(nativeFunc) {
+  var nativeFuncName = nativeFunc.name;
+  try {
+    nativeFunc(thrower);
+    assertUnreachable(nativeFuncName);
+  } catch (e) {
+    assertTrue(e.stack.indexOf(nativeFuncName) >= 0, nativeFuncName);
+  }
+}
+
+
+function testOmittedBuiltin(throwing, omitted) {
+  try {
+    throwing();
+    assertUnreachable(omitted);
+  } catch (e) {
+    assertTrue(e.stack.indexOf(omitted) < 0, omitted);
+  }
+}
+
+
 testTrace("testArrayNative", testArrayNative, ["Array.map (native)"]);
 testTrace("testNested", testNested, ["at one", "at two", "at three"]);
 testTrace("testMethodNameInference", testMethodNameInference, ["at Foo.bar"]);
@@ -217,3 +257,21 @@
 testCallerCensorship();
 testUnintendedCallerCensorship();
 testErrorsDuringFormatting();
+
+testTraceNativeConversion(String);  // Does ToString on argument.
+testTraceNativeConversion(Number);  // Does ToNumber on argument.
+testTraceNativeConversion(RegExp);  // Does ToString on argument.
+
+testTraceNativeConstructor(String);  // Does ToString on argument.
+testTraceNativeConstructor(Number);  // Does ToNumber on argument.
+testTraceNativeConstructor(RegExp);  // Does ToString on argument.
+testTraceNativeConstructor(Date);    // Does ToNumber on argument.
+
+// Omitted because QuickSort has the builtins object as its receiver and is
+// a non-native builtin.
+testOmittedBuiltin(function(){ [thrower, 2].sort(function (a,b) {
+                                                     (b < a) - (a < b); });
+                   }, "QuickSort");
+
+// Omitted because ADD from runtime.js is a non-native builtin.
+testOmittedBuiltin(function(){ thrower + 2; }, "ADD");
\ No newline at end of file
diff --git a/test/mjsunit/strict-mode-implicit-receiver.js b/test/mjsunit/strict-mode-implicit-receiver.js
index 338f6d1..8284edd 100644
--- a/test/mjsunit/strict-mode-implicit-receiver.js
+++ b/test/mjsunit/strict-mode-implicit-receiver.js
@@ -168,12 +168,7 @@
 outer_eval_conversion3(strict_eval, 'undefined');
 outer_eval_conversion3(non_strict_eval, 'object');
 
-// TODO(ager): I'm not sure this is in accordance with the spec. At
-// the moment, any call to eval where eval is not bound in the global
-// context is treated as an indirect call to eval which means that the
-// global context is used and the global object is passed as the
-// receiver.
-outer_eval_conversion3(eval, 'object');
+outer_eval_conversion3(eval, 'undefined');
 
 function test_constant_function() {
   var o = { f: function() { "use strict"; return this; } };
diff --git a/test/mjsunit/strict-mode.js b/test/mjsunit/strict-mode.js
index 30234ba..9c9bdfd 100644
--- a/test/mjsunit/strict-mode.js
+++ b/test/mjsunit/strict-mode.js
@@ -1051,14 +1051,20 @@
   }
   assertThrows(function() { strict.caller; }, TypeError);
   assertThrows(function() { strict.arguments; }, TypeError);
+  assertThrows(function() { strict.caller = 42; }, TypeError);
+  assertThrows(function() { strict.arguments = 42; }, TypeError);
 
   var another = new Function("'use strict'");
   assertThrows(function() { another.caller; }, TypeError);
   assertThrows(function() { another.arguments; }, TypeError);
+  assertThrows(function() { another.caller = 42; }, TypeError);
+  assertThrows(function() { another.arguments = 42; }, TypeError);
 
   var third = (function() { "use strict"; return function() {}; })();
   assertThrows(function() { third.caller; }, TypeError);
   assertThrows(function() { third.arguments; }, TypeError);
+  assertThrows(function() { third.caller = 42; }, TypeError);
+  assertThrows(function() { third.arguments = 42; }, TypeError);
 
   CheckPillDescriptor(strict, "caller");
   CheckPillDescriptor(strict, "arguments");
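
The new write assertions complement the CheckPillDescriptor calls: in ES5, a strict function's caller and arguments are "poison pill" accessor properties whose getter and setter are the same [[ThrowTypeError]] function, so reads and writes alike must raise TypeError. Conceptually:

    function strict() { "use strict"; }
    var d = Object.getOwnPropertyDescriptor(strict, "caller");
    // d.get === d.set, and invoking either (a read or a write of
    // strict.caller) throws a TypeError.
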
diff --git a/test/mjsunit/string-external-cached.js b/test/mjsunit/string-external-cached.js
new file mode 100644
index 0000000..6e24285
--- /dev/null
+++ b/test/mjsunit/string-external-cached.js
@@ -0,0 +1,121 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-externalize-string --expose-gc
+// Test data pointer caching of external strings.
+
+function test() {
+  // Test string.charAt.
+  var charat_str = new Array(5);
+  charat_str[0] = "0123456789ABCDEF0123456789ABCDEF\
+0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF\
+0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF\
+0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF\
+0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF";
+  charat_str[1] = "0123456789ABCDEF";
+  for (var i = 0; i < 6; i++) charat_str[1] += charat_str[1];
+  try {  // String can only be externalized once
+    externalizeString(charat_str[0], false);
+    externalizeString(charat_str[1], true);
+  } catch (ex) { }
+  charat_str[2] = charat_str[0].slice(0, -1);
+  charat_str[3] = charat_str[1].slice(0, -1);
+  charat_str[4] = charat_str[0] + charat_str[0];
+
+  for (var i = 0; i < 5; i++) {
+    assertEquals('B', charat_str[i].charAt(6*16 + 11));
+    assertEquals('C', charat_str[i].charAt(6*16 + 12));
+    assertEquals('A', charat_str[i].charAt(3*16 + 10));
+    assertEquals('B', charat_str[i].charAt(3*16 + 11));
+  }
+
+  charat_short = "012";
+  try {  // String can only be externalized once
+    externalizeString(charat_short, true);
+  } catch (ex) { }
+  assertEquals("1", charat_short.charAt(1));
+
+  // Test regexp and short substring.
+  var re = /(A|B)/;
+  var rere = /(T.{1,2}B)/;
+  var ascii = "ABCDEFGHIJKLMNOPQRST";
+  var twobyte = "_ABCDEFGHIJKLMNOPQRST";
+  try {
+    externalizeString(ascii, false);
+    externalizeString(twobyte, true);
+  } catch (ex) { }
+  assertTrue(isAsciiString(ascii));
+  assertFalse(isAsciiString(twobyte));
+  var ascii_slice = ascii.slice(1,-1);
+  var twobyte_slice = twobyte.slice(2,-1);
+  var ascii_cons = ascii + ascii;
+  var twobyte_cons = twobyte + twobyte;
+  for (var i = 0; i < 2; i++) {
+    assertEquals(["A", "A"], re.exec(ascii));
+    assertEquals(["B", "B"], re.exec(ascii_slice));
+    assertEquals(["TAB", "TAB"], rere.exec(ascii_cons));
+    assertEquals(["A", "A"], re.exec(twobyte));
+    assertEquals(["B", "B"], re.exec(twobyte_slice));
+    assertEquals(["T_AB", "T_AB"], rere.exec(twobyte_cons));
+    assertEquals("DEFG", ascii_slice.substr(2, 4));
+    assertEquals("DEFG", twobyte_slice.substr(2, 4));
+    assertEquals("DEFG", ascii_cons.substr(3, 4));
+    assertEquals("DEFG", twobyte_cons.substr(4, 4));
+  }
+
+  // Test adding external strings
+  var short_ascii = "E=";
+  var long_ascii = "MCsquared";
+  var short_twobyte = "E\u1234";
+  var long_twobyte = "MCsquare\u1234";
+  try {  // String can only be externalized once
+    externalizeString(short_ascii, false);
+    externalizeString(long_ascii, false);
+    externalizeString(short_twobyte, true);
+    externalizeString(long_twobyte, true);
+    assertTrue(isAsciiString(short_ascii) && isAsciiString(long_ascii));
+    assertFalse(isAsciiString(short_twobyte) || isAsciiString(long_twobyte));
+  } catch (ex) { }
+  assertEquals("E=MCsquared", short_ascii + long_ascii);
+  assertTrue(isAsciiString(short_ascii + long_ascii));
+  assertEquals("MCsquaredE=", long_ascii + short_ascii);
+  assertEquals("E\u1234MCsquare\u1234", short_twobyte + long_twobyte);
+  assertFalse(isAsciiString(short_twobyte + long_twobyte));
+  assertEquals("E=MCsquared", "E=" + long_ascii);
+  assertEquals("E\u1234MCsquared", short_twobyte + "MCsquared");
+  assertEquals("E\u1234MCsquared", short_twobyte + long_ascii);
+  assertFalse(isAsciiString(short_twobyte + long_ascii));
+}
+
+// Run the test many times to ensure ICs don't break things.
+for (var i = 0; i < 10; i++) {
+  test();
+}
+
+// Clean up the externalized strings to make Valgrind happy.
+gc();
+gc();
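
A note on the harness pieces this file leans on: externalizeString and isAsciiString exist only under --expose-externalize-string, gc() only under --expose-gc, and a string can be externalized just once, which is why every call site is wrapped in try/catch. Typical usage, as the tests above exercise it:

    // d8 --expose-externalize-string --expose-gc
    var s = "a one-byte string long enough to qualify for externalization";
    externalizeString(s, false);  // the tests pass true to force two-byte
    // calling externalizeString(s, ...) again throws, hence the guards.
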
diff --git a/test/mjsunit/string-externalize.js b/test/mjsunit/string-externalize.js
index da89786..d52a7e2 100644
--- a/test/mjsunit/string-externalize.js
+++ b/test/mjsunit/string-externalize.js
@@ -44,7 +44,7 @@
   assertFalse(isAsciiString(twoByteExternalWithAsciiData));
 
   var realTwoByteExternalString =
-      "\u1234\u1234" + (function() { return "\u1234"; })();
+      "\u1234\u1234\u1234\u1234" + (function() { return "\u1234"; })();
   externalizeString(realTwoByteExternalString);
   assertFalse(isAsciiString(realTwoByteExternalString));
 
diff --git a/test/mjsunit/string-replace-one-char.js b/test/mjsunit/string-replace-one-char.js
new file mode 100644
index 0000000..cb4167b
--- /dev/null
+++ b/test/mjsunit/string-replace-one-char.js
@@ -0,0 +1,92 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Make sure the strings are long enough to trigger the one-char string replace.
+var prefix1024 = "0123456789ABCDEF";
+for (var i = 0; i < 6; i++) prefix1024 += prefix1024;
+
+function test_replace(result, expected, search, replace) {
+  assertEquals(expected, result.replace(search, replace));
+}
+
+// '$' in the replace string.
+test_replace(prefix1024 + "abcdefghijklmnopqrstuvwxyz",
+             prefix1024 + "abcdefghijk#l#mnopqrstuvwxyz",
+             "l", "#$&#");
+
+test_replace(prefix1024 + "abcdefghijklmnopqrstuvwxyz\u1234",
+             prefix1024 + "abcdefghijk\u2012l\u2012mnopqrstuvwxyz\u1234",
+             "l", "\u2012$&\u2012");
+
+test_replace(prefix1024 + "abcdefghijklmnopqrstuvwxyz",
+             prefix1024 + "abcdefghijk$mnopqrstuvwxyz",
+             "l", "$$");
+
+test_replace(prefix1024 + "abcdefghijklmnopqrstuvwxyz\u1234",
+             prefix1024 + "abcdefghijk$mnopqrstuvwxyz\u1234",
+             "l", "$$");
+
+// Zero length replace string.
+test_replace(prefix1024 + "abcdefghijklmnopqrstuvwxyz",
+             prefix1024 + "abcdefghijklmnopqstuvwxyz",
+             "r", "");
+
+test_replace(prefix1024 + "abcdefghijklmnopq\u1234stuvwxyz",
+             prefix1024 + "abcdefghijklmnopqstuvwxyz",
+             "\u1234", "");
+
+// Search char not found.
+var not_found_1 = prefix1024 + "abcdefghijklmnopqrstuvwxyz";
+test_replace(not_found_1, not_found_1, "+", "-");
+
+var not_found_2 = prefix1024 + "abcdefghijklm\u1234nopqrstuvwxyz";
+test_replace(not_found_2, not_found_2, "+", "---");
+
+var not_found_3 = prefix1024 + "abcdefghijklmnopqrstuvwxyz";
+test_replace(not_found_3, not_found_3, "\u1234", "ZZZ");
+
+// Deep cons tree.
+var nested_1 = "";
+for (var i = 0; i < 100000; i++) nested_1 += "y";
+var nested_1_result = prefix1024 + nested_1 + "aa";
+nested_1 = prefix1024 + nested_1 + "z";
+test_replace(nested_1, nested_1_result, "z", "aa");
+
+var nested_2 = "\u2244";
+for (var i = 0; i < 100000; i++) nested_2 += "y";
+var nested_2_result = prefix1024 + nested_2 + "aa";
+nested_2 = prefix1024 + nested_2 + "\u2012";
+test_replace(nested_2, nested_2_result, "\u2012", "aa");
+
+// Sliced string as input.  A cons string is always flattened before being sliced.
+var slice_1 = ("ab" + prefix1024 + "cdefghijklmnopqrstuvwxyz").slice(1, -1);
+var slice_1_result = "b" + prefix1024 + "cdefghijklmnopqrstuvwxQ";
+test_replace(slice_1, slice_1_result, "y", "Q");
+
+var slice_2 = (prefix1024 + "abcdefghijklmno\u1234\u1234p").slice(1, -1);
+var slice_2_result = prefix1024.substr(1) + "abcdefghijklmnoQ\u1234";
+test_replace(slice_2, slice_2_result, "\u1234", "Q");
diff --git a/test/mjsunit/string-slices-regexp.js b/test/mjsunit/string-slices-regexp.js
index a8cadae..98b8ef9 100644
--- a/test/mjsunit/string-slices-regexp.js
+++ b/test/mjsunit/string-slices-regexp.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -24,11 +24,6 @@
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Flags: --string-slices
-
-//assertEquals('345"12345 6"1234567"123',
-//             '12345""12345 6""1234567""1234'.slice(2,-1).replace(/""/g, '"'));
 
 var foo = "lsdfj sldkfj sdklfj læsdfjl sdkfjlsdk fjsdl fjsdljskdj flsj flsdkj flskd regexp: /foobar/\nldkfj sdlkfj sdkl";
 for(var i = 0; i < 1000; i++) {
diff --git a/test/mjsunit/string-slices.js b/test/mjsunit/string-slices.js
index 8cc1f81..5b1dc36 100755
--- a/test/mjsunit/string-slices.js
+++ b/test/mjsunit/string-slices.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --string-slices --expose-externalize-string
+// Flags: --expose-externalize-string --allow-natives-syntax
 
 var s = 'abcdefghijklmn';
 assertEquals(s, s.substr());
@@ -100,14 +100,7 @@
 
 // Keep creating strings to force allocation failure on substring creation.
 var x = "0123456789ABCDEF";
-x += x;  // 2^5
-x += x;
-x += x;
-x += x;
-x += x;
-x += x;  // 2^10
-x += x;
-x += x;
+for (var i = 0; i < 8; i++) x += x;
 var xl = x.length;
 var cache = [];
 for (var i = 0; i < 1000; i++) {
@@ -119,14 +112,7 @@
 
 // Same with two-byte strings
 var x = "\u2028123456789ABCDEF";
-x += x;  // 2^5
-x += x;
-x += x;
-x += x;
-x += x;
-x += x;  // 2^10
-x += x;
-x += x;
+for (var i = 0; i < 8; i++) x += x;
 var xl = x.length;
 var cache = [];
 for (var i = 0; i < 1000; i++) {
@@ -174,6 +160,23 @@
   f(flat, cons, slice, i);
 }
 
+// Short substrings.
+flat = "abcdefghijkl12345";
+cons = flat + flat.toUpperCase();
+/x/.exec(cons);  // Flatten cons
+slice = "abcdefghijklmn12345".slice(1, -1);
+assertEquals("cdefg", flat.substr(2, 5));
+assertEquals("cdefg", cons.substr(2, 5));
+assertEquals("cdefg", slice.substr(1, 5));
+
+flat = "abc\u1234defghijkl12345";
+cons = flat + flat.toUpperCase();
+/x/.exec(cons);  // Flatten cons
+slice = "abc\u1234defghijklmn12345".slice(1, -1);
+assertEquals("c\u1234def", flat.substr(2, 5));
+assertEquals("c\u1234def", cons.substr(2, 5));
+assertEquals("c\u1234def", slice.substr(1, 5));
+
 // Concatenate substrings.
 var ascii = 'abcdefghijklmnop';
 var utf = '\u03B1\u03B2\u03B3\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9\u03BA\u03BB';
@@ -189,11 +192,34 @@
 assertEquals("\u03B2\u03B3\u03B4\u03B5\u03B4\u03B5\u03B6\u03B7",
     utf.substring(5,1) + utf.substring(3,7));
 
-/*
 // Externalizing strings.
-var a = "123456789qwertyuiopasdfghjklzxcvbnm";
-var b = a.slice(1,-1);
+var a = "123456789" + "qwertyuiopasdfghjklzxcvbnm";
+var b = "23456789qwertyuiopasdfghjklzxcvbn"
 assertEquals(a.slice(1,-1), b);
-externalizeString(a);
+
+assertTrue(isAsciiString(a));
+externalizeString(a, true);
+assertFalse(isAsciiString(a));
+
 assertEquals(a.slice(1,-1), b);
-*/
+assertTrue(/3456789qwe/.test(a));
+assertEquals(5, a.indexOf("678"));
+assertEquals("12345", a.split("6")[0]);
+
+// Create a slice with an external string as parent string.
+var c = a.slice(1,-1);
+
+function test_crankshaft() {
+  for (var i = 0; i < 20; i++) {
+    assertEquals(b.charAt(i), a.charAt(i + 1));
+    assertEquals(b.charAt(i), c.charAt(i));
+    assertEquals(b.charAt(4), c.charAt(4));
+    assertTrue(/3456789qwe/.test(c));
+    assertEquals(4, c.indexOf("678"));
+    assertEquals("2345", c.split("6")[0]);
+  }
+}
+
+test_crankshaft();
+%OptimizeFunctionOnNextCall(test_crankshaft);
+test_crankshaft();
\ No newline at end of file
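
A minimal standalone sketch (assuming d8 with --expose-externalize-string; the
variable names are illustrative, not from the test) of the pattern the hunk
above exercises: a sliced string keeps a pointer into its parent, and must
still read correctly after the parent is externalized. The two-argument form
externalizeString(str, true), forcing two-byte storage, is taken from the test
itself:

    var parent = "0123456789qwertyuiopasdfghjklzxcvbnm";
    var slice = parent.slice(1, -1);   // a sliced string pointing into parent
    externalizeString(parent, true);   // re-back the parent with two-byte external storage
    // The slice must still read the same characters through the new backing store:
    if (parent.slice(1, -1) !== slice) throw new Error("slice content changed");
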
diff --git a/test/mjsunit/switch.js b/test/mjsunit/switch.js
index 180f994..6a61fe5 100644
--- a/test/mjsunit/switch.js
+++ b/test/mjsunit/switch.js
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 function f0() {
   switch (0) {
     // switch deliberately left empty
@@ -126,6 +128,42 @@
 assertEquals(3, f4(2), "fallthrough-switch.2");
 assertEquals(5, f4(3), "fallthrough-switch.3");
 
+function f4_string(tag, x) {
+  switch(tag) {
+    case 'zero':
+      x++;
+    case 'two':
+      x++;
+  }
+  return x;
+}
+
+// Symbols
+assertEquals(2, f4_string('zero', 0), "fallthrough-string-switch.0");
+assertEquals(1, f4_string('one', 1), "fallthrough-string-switch.1");
+assertEquals(3, f4_string('two', 2), "fallthrough-string-switch.2");
+
+// Strings
+assertEquals(2, f4_string('_zero'.slice(1), 0), "fallthrough-string-switch.3");
+assertEquals(1, f4_string('_one'.slice(1), 1), "fallthrough-string-switch.4");
+assertEquals(3, f4_string('_two'.slice(1), 2), "fallthrough-string-switch.5");
+
+// Oddball
+assertEquals(3, f4_string(null, 3), "fallthrough-string-switch.6");
+
+// Test for regression
+function regress_string(value) {
+  var json = 1;
+  switch (typeof value) {
+    case 'object':
+      break;
+
+    default:
+
+  }
+  return json;
+};
+assertEquals(1, regress_string('object'), 'regression-string');
 
 function f5(x) {
   switch(x) {
@@ -287,3 +325,138 @@
 var verylong = makeVeryLong(verylong_size);
 
 assertEquals(verylong_size * 2 + 1, verylong());
+
+//
+// Test suite below aims to cover all possible combinations of following:
+//
+//  clauses  |   tags   |   type feedback   |  optimization
+// =========================================================
+//  strings  |  symbol  |     all           |      on
+//  smis     |  string  |     target        |      off
+//  mixed    |  oddball |     non-target    |
+//           |  smis    |     none          |
+//           |  heapnum |                   |
+// =========================================================
+
+// Function-with-switch generator
+var test_id = 0,
+    clause_values = {
+      string: ['abc', 'def', 'ghi', 'jkl'],
+      smi: [1, 2, 3, 4],
+      mixed: ['abc', 1, 'def', 2, 'ghi', 3, 'jkl', 4]
+    };
+
+function switch_gen(clause_type, feedback, optimize) {
+  var values = clause_values[clause_type];
+
+  function opt(fn) {
+    if (feedback === 'all') {
+      values.forEach(fn);
+    } else if (Array.isArray(feedback)) {
+      // Non-target
+      values.filter(function(v) {
+        return feedback.indexOf(v) === -1;
+      }).forEach(fn);
+    } else if (feedback !== undefined) {
+      // Target
+      fn(feedback);
+    } else {
+      // None
+    }
+
+    if (optimize) %OptimizeFunctionOnNextCall(fn);
+
+    return fn;
+  };
+
+  return opt(new Function(
+      'tag',
+      '"' + (test_id++) + '";' +
+      'switch(tag) {' +
+      values.map(function(value) {
+        return 'case ' + JSON.stringify(value) + ': return' +
+               JSON.stringify('ok-' + value);
+      }).join(';') +
+      '}'
+  ));
+};
+
+function test_switch(clause_type, test_type, feedback, optimize) {
+  var pairs = [],
+      fn = switch_gen(clause_type, feedback, optimize);
+
+  if (Array.isArray(test_type)) {
+    pairs = test_type.map(function(v) {
+      return {
+        value: v,
+        expected: 'ok-' + v
+      };
+    });
+  } else if (test_type === 'symbols') {
+    pairs = clause_values.string.map(function(v) {
+      return {
+        value: v,
+        expected: clause_type !== 'smi' ? 'ok-' + v : undefined
+      };
+    });
+  } else if (test_type === 'strings') {
+    pairs = clause_values.string.map(function(v) {
+      return {
+        value: ('%%' + v).slice(2),
+        expected: clause_type !== 'smi' ? 'ok-' + v : undefined
+      };
+    });
+  } else if (test_type === 'oddball') {
+    pairs = [
+      { value: null, expected: undefined },
+      { value: NaN, expected: undefined },
+      { value: undefined, expected: undefined }
+    ];
+  } else if (test_type === 'smi') {
+    pairs = clause_values.smi.map(function(v) {
+      return {
+        value: v,
+        expected: clause_type !== 'string' ? 'ok-' + v : undefined
+      };
+    });
+  } else if (test_type === 'heapnum') {
+    pairs = clause_values.smi.map(function(v) {
+      return {
+        value: ((v * 17)/16) - ((v*17)%16/16),
+        expected: clause_type !== 'string' ? 'ok-' + v : undefined
+      };
+    });
+  }
+
+  pairs.forEach(function(pair) {
+    assertEquals(fn(pair.value), pair.expected);
+  });
+};
+
+// test_switch(clause_type, test_type, feedback, optimize);
+
+function test_switches(opt) {
+  var test_types = ['symbols', 'strings', 'oddball', 'smi', 'heapnum'];
+
+  function test(clause_type) {
+    var values = clause_values[clause_type];
+
+    test_types.forEach(function(test_type) {
+      test_switch(clause_type, test_type, 'all', opt);
+      test_switch(clause_type, test_type, 'none', opt);
+
+      // Targeting specific clause feedback
+      values.forEach(function(value) {
+        test_switch(clause_type, test_type, [value], opt);
+        test_switch(clause_type, test_type, value, opt);
+      });
+    });
+  };
+
+  test('string');
+  test('smi');
+  test('mixed');
+};
+
+test_switches(false);
+test_switches(true);
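
A note on the heapnum test type above: ((v * 17) / 16) - ((v * 17) % 16 / 16)
is numerically equal to v (for v = 2, that is 2.125 - 0.125 = 2), but the
non-integral intermediates send the computation down a double path, so the
switch receives values that still strictly equal the smi clauses while
arriving as heap numbers. A standalone check of the identity (plain
JavaScript, no harness needed):

    for (var v = 1; v <= 4; v++) {
      var h = ((v * 17) / 16) - ((v * 17) % 16 / 16);
      if (h !== v) throw new Error("heapnum value differs for " + v);
    }
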
diff --git a/test/mjsunit/to_number_order.js b/test/mjsunit/to_number_order.js
index d17e600..50e4bc7 100644
--- a/test/mjsunit/to_number_order.js
+++ b/test/mjsunit/to_number_order.js
@@ -161,7 +161,7 @@
 
 x = "";
 assertFalse(a > b, "Compare objects a > b");
-assertEquals("fiskhest", x, "Compare objects a > b valueOf order");
+assertEquals("hestfisk", x, "Compare objects a > b valueOf order");
 
 x = "";
 assertFalse(a > void(0), "Compare objects a > undefined");
@@ -195,7 +195,7 @@
 
   x = "";
   assertFalse(a > b, "Compare objects a > b");
-  assertEquals("fiskhest", x, "Compare objects a > b valueOf order");
+  assertEquals("hestfisk", x, "Compare objects a > b valueOf order");
 
   x = "";
   assertFalse(a > void(0), "Compare objects a > undefined");
diff --git a/test/mjsunit/tools/tickprocessor.js b/test/mjsunit/tools/tickprocessor.js
index 30b0ec2..c48d9f3 100644
--- a/test/mjsunit/tools/tickprocessor.js
+++ b/test/mjsunit/tools/tickprocessor.js
@@ -376,8 +376,11 @@
   }
   assertTrue(pathLen != -1);
   var testsPath = TEST_FILE_NAME.substr(0, pathLen + 1);
-  var tp = new TickProcessor(
-      new CppEntriesProviderMock(), separateIc, ignoreUnknown, stateFilter);
+  var tp = new TickProcessor(new CppEntriesProviderMock(),
+                             separateIc,
+                             TickProcessor.CALL_GRAPH_SIZE,
+                             ignoreUnknown,
+                             stateFilter);
   var pm = new PrintMonitor(testsPath + refOutput);
   tp.processLogFileInTest(testsPath + logInput);
   tp.printStatistics();
diff --git a/test/mjsunit/unbox-double-arrays.js b/test/mjsunit/unbox-double-arrays.js
index feecaec..fd7db28 100644
--- a/test/mjsunit/unbox-double-arrays.js
+++ b/test/mjsunit/unbox-double-arrays.js
@@ -77,8 +77,6 @@
     assertEquals(value_6, a[6]);
     assertEquals(value_6, a[computed_6()]); // Test non-constant key
     assertEquals(value_7, a[7]);
-    assertEquals(undefined, a[large_array_size-1]);
-    assertEquals(undefined, a[-1]);
     assertEquals(large_array_size, a.length);
     assertTrue(%HasFastDoubleElements(a));
   }
@@ -89,8 +87,6 @@
     assertEquals(value_6, a[6]);
     assertEquals(value_6, a[computed_6()]); // Test non-constant key
     assertEquals(value_7, a[7]);
-    assertEquals(undefined, a[large_array_size-1]);
-    assertEquals(undefined, a[-1]);
     assertEquals(large_array_size, a.length);
     assertTrue(%HasFastDoubleElements(a));
   }
@@ -101,8 +97,6 @@
     assertEquals(value_6, a[6]);
     assertEquals(value_6, a[computed_6()]); // Test non-constant key
     assertEquals(value_7, a[7]);
-    assertEquals(undefined, a[large_array_size-1]);
-    assertEquals(undefined, a[-1]);
     assertEquals(large_array_size, a.length);
     assertTrue(%HasFastDoubleElements(a));
   }
@@ -113,20 +107,20 @@
     assertEquals(value_6, a[6]);
     assertEquals(value_6, a[computed_6()]); // Test non-constant key
     assertEquals(value_7, a[7]);
-    assertEquals(undefined, a[large_array_size-1]);
-    assertEquals(undefined, a[-1]);
     assertEquals(large_array_size, a.length);
     assertTrue(%HasFastDoubleElements(a));
   }
 
   function test_various_loads5(a, value_5, value_6, value_7) {
     assertTrue(%HasFastDoubleElements(a));
-    assertEquals(value_5, a[5]);
-    assertEquals(value_6, a[6]);
-    assertEquals(value_6, a[computed_6()]); // Test non-constant key
+    if (value_5 != undefined) {
+      assertEquals(value_5, a[5]);
+    };
+    if (value_6 != undefined) {
+      assertEquals(value_6, a[6]);
+      assertEquals(value_6, a[computed_6()]); // Test non-constant key
+    }
     assertEquals(value_7, a[7]);
-    assertEquals(undefined, a[large_array_size-1]);
-    assertEquals(undefined, a[-1]);
     assertEquals(large_array_size, a.length);
     assertTrue(%HasFastDoubleElements(a));
   }
@@ -137,8 +131,16 @@
     assertEquals(value_6, a[6]);
     assertEquals(value_6, a[computed_6()]); // Test non-constant key
     assertEquals(value_7, a[7]);
-    assertEquals(undefined, a[large_array_size-1]);
-    assertEquals(undefined, a[-1]);
+    assertEquals(large_array_size, a.length);
+    assertTrue(%HasFastDoubleElements(a));
+  }
+
+  function test_various_loads7(a, value_5, value_6, value_7) {
+    assertTrue(%HasFastDoubleElements(a));
+    assertEquals(value_5, a[5]);
+    assertEquals(value_6, a[6]);
+    assertEquals(value_6, a[computed_6()]); // Test non-constant key
+    assertEquals(value_7, a[7]);
     assertEquals(large_array_size, a.length);
     assertTrue(%HasFastDoubleElements(a));
   }
@@ -248,6 +250,8 @@
                       expected_array_value(7));
 
   // Make sure Crankshaft code handles the hole correctly (bailout)
+  var large_array = new allocator(large_array_size);
+  force_to_fast_double_array(large_array);
   test_various_stores(large_array,
                       expected_array_value(5),
                       expected_array_value(6),
@@ -273,7 +277,12 @@
                       undefined,
                       expected_array_value(7));
 
+  %DeoptimizeFunction(test_various_loads6);
+  gc();
+
   // Test stores for non-NaN.
+  var large_array = new allocator(large_array_size);
+  force_to_fast_double_array(large_array);
   %OptimizeFunctionOnNextCall(test_various_stores);
   test_various_stores(large_array,
                       expected_array_value(5),
@@ -285,7 +294,19 @@
                       expected_array_value(6),
                       expected_array_value(7));
 
-  test_various_loads6(large_array,
+  test_various_loads7(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+
+  test_various_loads7(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+
+  %OptimizeFunctionOnNextCall(test_various_loads7);
+
+  test_various_loads7(large_array,
                       expected_array_value(5),
                       expected_array_value(6),
                       expected_array_value(7));
@@ -301,7 +322,7 @@
                       -NaN,
                       expected_array_value(7));
 
-  test_various_loads6(large_array,
+  test_various_loads7(large_array,
                       NaN,
                       -NaN,
                       expected_array_value(7));
@@ -317,7 +338,7 @@
                       -Infinity,
                       expected_array_value(7));
 
-  test_various_loads6(large_array,
+  test_various_loads7(large_array,
                       Infinity,
                       -Infinity,
                       expected_array_value(7));
@@ -434,7 +455,6 @@
 large_array3[4] = -Infinity;
 
 function call_apply() {
-  assertTrue(%HasFastDoubleElements(large_array3));
   called_by_apply.apply({}, large_array3);
 }
 
@@ -449,7 +469,6 @@
 function test_for_in() {
   // Due to previous tests, keys 0..25 and 95 should be present.
   next_expected = 0;
-  assertTrue(%HasFastDoubleElements(large_array3));
   for (x in large_array3) {
     assertTrue(next_expected++ == x);
     if (next_expected == 25) {
diff --git a/test/mjsunit/undeletable-functions.js b/test/mjsunit/undeletable-functions.js
index 04fd060..635ea6f 100644
--- a/test/mjsunit/undeletable-functions.js
+++ b/test/mjsunit/undeletable-functions.js
@@ -25,11 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that we match JSC in making some functions undeletable.
-// See http://code.google.com/p/chromium/issues/detail?id=1717
-// The functions on these prototypes are not just undeletable.  It is
-// possible to override them with new definitions, then get the old
-// version back by deleting the new definition.
+// Test that we match ECMAScript in making most builtin functions
+// deletable and only specific ones undeletable or read-only.
 
 var array;
 
@@ -37,7 +34,7 @@
   "toString", "toLocaleString", "join", "pop", "push", "concat", "reverse",
   "shift", "unshift", "slice", "splice", "sort", "filter", "forEach", "some",
   "every", "map", "indexOf", "lastIndexOf", "reduce", "reduceRight"];
-CheckJSCSemantics(Array.prototype, array, "Array prototype");
+CheckEcmaSemantics(Array.prototype, array, "Array prototype");
 
 var old_Array_prototype = Array.prototype;
 var new_Array_prototype = {};
@@ -57,12 +54,12 @@
   "setUTCMinutes", "setHours", "setUTCHours", "setDate", "setUTCDate",
   "setMonth", "setUTCMonth", "setFullYear", "setUTCFullYear", "toGMTString",
   "toUTCString", "getYear", "setYear", "toISOString", "toJSON"];
-CheckJSCSemantics(Date.prototype, array, "Date prototype");
+CheckEcmaSemantics(Date.prototype, array, "Date prototype");
 
 array = [
   "random", "abs", "acos", "asin", "atan", "ceil", "cos", "exp", "floor", "log",
   "round", "sin", "sqrt", "tan", "atan2", "pow", "max", "min"];
-CheckJSCSemantics(Math, array, "Math1");
+CheckEcmaSemantics(Math, array, "Math1");
 
 CheckEcmaSemantics(Date, ["UTC", "parse", "now"], "Date");
 
@@ -76,6 +73,8 @@
   "execScript"];
 CheckEcmaSemantics(this, array, "Global");
 CheckReadOnlyAttr(this, "Infinity");
+CheckReadOnlyAttr(this, "NaN");
+CheckReadOnlyAttr(this, "undefined");
 
 array = ["exec", "test", "toString", "compile"];
 CheckEcmaSemantics(RegExp.prototype, array, "RegExp prototype");
@@ -112,7 +111,7 @@
   "toUpperCase", "toLocaleUpperCase", "link", "anchor", "fontcolor", "fontsize",
   "big", "blink", "bold", "fixed", "italics", "small", "strike", "sub", "sup",
   "toJSON", "toString", "valueOf"];
-CheckJSCSemantics(String.prototype, array, "String prototype");
+CheckEcmaSemantics(String.prototype, array, "String prototype");
 CheckEcmaSemantics(String, ["fromCharCode"], "String");
 
 
@@ -124,14 +123,6 @@
 }
 
 
-function CheckJSCSemantics(type, props, name) {
-  print(name);
-  for (var i = 0; i < props.length; i++) {
-    CheckNotDeletable(type, props[i]);
-  }
-}
-
-
 function CheckDontDelete(type, props, name) {
   print(name);
   for (var i = 0; i < props.length; i++) {
@@ -154,21 +145,6 @@
 }
 
 
-function CheckNotDeletable(type, prop) {
-  var old = type[prop];
-  if (!type[prop]) return;
-  assertTrue(type.hasOwnProperty(prop), "inherited: " + prop);
-  var deleted = delete type[prop];
-  assertTrue(deleted, "delete operator returned false: " + prop);
-  assertTrue(type.hasOwnProperty(prop), "not there after delete: " + prop);
-  type[prop] = "foo";
-  assertEquals("foo", type[prop], "not overwritable: " + prop);
-  deleted = delete type[prop];
-  assertTrue(deleted, "delete operator returned false 2nd time: " + prop);
-  assertEquals(old.toString(), type[prop].toString(), "delete didn't restore the old value: " + prop);
-}
-
-
 function CheckDontDeleteAttr(type, prop) {
   var old = type[prop];
   if (!type[prop]) return;
@@ -189,7 +165,7 @@
   assertFalse(deleted, "delete operator returned true: " + prop);
   assertTrue(type.hasOwnProperty(prop), "not there after delete: " + prop);
   type[prop] = "foo";
-  assertEquals("foo", type[prop], "overwritable: " + prop);
+  assertEquals(old, type[prop], "overwritable: " + prop);
 }
 
 print("OK");
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index 3a27130..e64959a 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -1,4 +1,4 @@
-# Copyright 2009 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -69,6 +69,8 @@
 ecma_3/Date/15.9.3.2-1: SKIP
 js1_2/function/Number: SKIP
 
+# TODO(2018): Temporarily allow timeout in debug mode.
+js1_5/GC/regress-203278-2: PASS || TIMEOUT if $mode == debug
 
 ##################### SLOW TESTS #####################
 
@@ -123,14 +125,14 @@
 ecma/Date/15.9.2.2-5: PASS || FAIL
 ecma/Date/15.9.2.2-6: PASS || FAIL
 
-# 1026139: These date tests fail on arm
-ecma/Date/15.9.5.29-1: PASS || ($ARM && FAIL)
-ecma/Date/15.9.5.34-1: PASS || ($ARM && FAIL)
-ecma/Date/15.9.5.28-1: PASS || ($ARM && FAIL)
+# 1026139: These date tests fail on arm and mips
+ecma/Date/15.9.5.29-1: PASS || (($ARM || $MIPS) && FAIL)
+ecma/Date/15.9.5.34-1: PASS || (($ARM || $MIPS) && FAIL)
+ecma/Date/15.9.5.28-1: PASS || (($ARM || $MIPS) && FAIL)
 
-# 1050186: Arm vm is broken; probably unrelated to dates
-ecma/Array/15.4.4.5-3: PASS || ($ARM && FAIL)
-ecma/Date/15.9.5.22-2: PASS || ($ARM && FAIL)
+# 1050186: Arm/MIPS vm is broken; probably unrelated to dates
+ecma/Array/15.4.4.5-3: PASS || (($ARM || $MIPS) && FAIL)
+ecma/Date/15.9.5.22-2: PASS || (($ARM || $MIPS) && FAIL)
 
 # Flaky test that fails due to what appears to be a bug in the test.
 # Occurs depending on current time
@@ -214,6 +216,10 @@
 # builtin to have undefined as the receiver.
 ecma/String/15.5.4.6-2: FAIL_OK
 
+# Fail because it expects String.prototype.split to distinguish whether
+# separator was undefined or not passed at all.
+ecma/String/15.5.4.8-2: FAIL_OK
+
 # Fail because of toLowerCase and toUpperCase conversion.
 ecma/String/15.5.4.11-2: FAIL_OK
 ecma/String/15.5.4.11-5: FAIL_OK
@@ -227,7 +233,7 @@
 ecma/String/15.5.4.12-5: FAIL_OK
 
 # Creates a linked list of arrays until we run out of memory or timeout.
-js1_5/Regress/regress-312588: FAIL || TIMEOUT
+js1_5/Regress/regress-312588: SKIP
 
 
 # Runs out of memory because it compiles huge functions.
@@ -301,6 +307,11 @@
 js1_2/regexp/beginLine: FAIL_OK
 js1_2/regexp/endLine: FAIL_OK
 
+# We no longer let calls to test and exec with no argument implicitly
+# use the previous input.
+js1_2/regexp/RegExp_input: FAIL_OK
+js1_2/regexp/RegExp_input_as_array: FAIL_OK
+
 
 # To be compatible with safari typeof a regexp yields 'function';
 # in firefox it yields 'object'.
@@ -411,12 +422,6 @@
 js1_5/extensions/regress-455413: FAIL_OK
 
 
-# The spec specifies reverse evaluation order for < and >=.
-# See section 11.8.2 and 11.8.5.
-# We implement the spec here but the test tests the more straigtforward order.
-ecma_3/Operators/order-01: FAIL_OK
-
-
 # Uses Mozilla-specific QName, XML, XMLList and Iterator.
 js1_5/Regress/regress-407323: FAIL_OK
 js1_5/Regress/regress-407957: FAIL_OK
@@ -619,6 +624,10 @@
 # We do not correctly handle assignments within "with"
 /ecma_3/Statements/12.10-01: FAIL
 
+# We do not throw an exception when a const is redeclared.
+# (We only fail section 1 of the test.)
+js1_5/Regress/regress-103602: FAIL
+
 ##################### MOZILLA EXTENSION TESTS #####################
 
 ecma/extensions/15.1.2.1-1: FAIL_OK
@@ -861,3 +870,59 @@
 ecma/Expressions/11.10-3: SKIP
 ecma/Expressions/11.7.1: SKIP
 ecma_3/RegExp/regress-209067: SKIP
+
+[ $arch == mips ]
+
+# Times out and prints so much output that we need to skip it to avoid
+# hanging the builder.
+js1_5/extensions/regress-342960: SKIP
+
+# BUG(3251229): Times out when running new crankshaft test script.
+ecma_3/RegExp/regress-311414: SKIP
+ecma/Date/15.9.5.8: SKIP
+ecma/Date/15.9.5.10-2: SKIP
+ecma/Date/15.9.5.11-2: SKIP
+ecma/Date/15.9.5.12-2: SKIP
+js1_5/Array/regress-99120-02: SKIP
+js1_5/extensions/regress-371636: SKIP
+js1_5/Regress/regress-203278-1: SKIP
+js1_5/Regress/regress-404755: SKIP
+js1_5/Regress/regress-451322: SKIP
+
+
+# BUG(1040): Allow this test to timeout.
+js1_5/GC/regress-203278-2: PASS || TIMEOUT
+
+
+[ $fast == yes && $arch == mips ]
+
+# In fast mode on mips we try to skip all tests that would time out,
+# since running the tests takes so long in the first place.
+
+js1_5/Regress/regress-280769-2: SKIP
+js1_5/Regress/regress-280769-3: SKIP
+js1_5/Regress/regress-244470: SKIP
+js1_5/Regress/regress-203278-1: SKIP
+js1_5/Regress/regress-290575: SKIP
+js1_5/Regress/regress-159334: SKIP
+js1_5/Regress/regress-321971: SKIP
+js1_5/Regress/regress-347306-01: SKIP
+js1_5/Regress/regress-280769-1: SKIP
+js1_5/Regress/regress-280769-5: SKIP
+js1_5/GC/regress-306788: SKIP
+js1_5/GC/regress-278725: SKIP
+js1_5/GC/regress-203278-3: SKIP
+js1_5/GC/regress-311497: SKIP
+js1_5/Array/regress-99120-02: SKIP
+ecma/Date/15.9.5.22-1: SKIP
+ecma/Date/15.9.5.20: SKIP
+ecma/Date/15.9.5.12-2: SKIP
+ecma/Date/15.9.5.8: SKIP
+ecma/Date/15.9.5.9: SKIP
+ecma/Date/15.9.5.11-2: SKIP
+ecma/Expressions/11.7.2: SKIP
+ecma/Expressions/11.10-2: SKIP
+ecma/Expressions/11.7.3: SKIP
+ecma/Expressions/11.10-3: SKIP
+ecma/Expressions/11.7.1: SKIP
+ecma_3/RegExp/regress-209067: SKIP
diff --git a/test/preparser/preparser.status b/test/preparser/preparser.status
index db17778..6f15fed 100644
--- a/test/preparser/preparser.status
+++ b/test/preparser/preparser.status
@@ -31,9 +31,3 @@
 # escapes (we need to parse to distinguish octal escapes from valid
 # back-references).
 strict-octal-regexp: FAIL
-
-##############################################################################
-[ $arch == mips ]
-
-# Skip all tests on MIPS.
-*: SKIP
diff --git a/test/preparser/strict-identifiers.pyt b/test/preparser/strict-identifiers.pyt
index 72808e2..aa3d521 100644
--- a/test/preparser/strict-identifiers.pyt
+++ b/test/preparser/strict-identifiers.pyt
@@ -138,6 +138,38 @@
   var x = {set foo($id) { }};
 """)
 
+label_normal = Template("label-normal-$id", """
+  $id: '';
+""")
+
+label_strict = StrictTemplate("label-strict-$id", """
+  $id: '';
+""")
+
+break_normal = Template("break-normal-$id", """
+  for (;;) {
+    break $id;
+  }
+""")
+
+break_strict = StrictTemplate("break-strict-$id", """
+  for (;;) {
+    break $id;
+  }
+""")
+
+continue_normal = Template("continue-normal-$id", """
+  for (;;) {
+    continue $id;
+  }
+""")
+
+continue_strict = StrictTemplate("continue-strict-$id", """
+  for (;;) {
+    continue $id;
+  }
+""")
+
 non_strict_use = Template("nonstrict-$id", """
   var $id = 42;
   $id++;
@@ -162,6 +194,7 @@
   function $id($id) { }
   x = {$id: 42};
   x = {get $id() {}, set $id(value) {}};
+  $id: '';
 """)
 
 identifier_name_source = """
@@ -197,6 +230,12 @@
   prefix_var({"id": id, "op":"--", "opname":"dec"}, "strict_lhs_prefix")
   postfix_var({"id": id, "op":"++", "opname":"inc"}, "strict_lhs_postfix")
   postfix_var({"id": id, "op":"--", "opname":"dec"}, "strict_lhs_postfix")
+  label_normal({"id": id}, None)
+  label_strict({"id": id}, None)
+  break_normal({"id": id}, None)
+  break_strict({"id": id}, None)
+  continue_normal({"id": id}, None)
+  continue_strict({"id": id}, None)
   non_strict_use({"id": id}, None)
 
 
@@ -205,10 +244,13 @@
 for reserved_word in reserved_words + strict_reserved_words:
   if (reserved_word in strict_reserved_words):
     message = "strict_reserved_word"
+    label_message = None
   elif (reserved_word == "const"):
     message = "unexpected_token"
+    label_message = message
   else:
     message = "reserved_word"
+    label_message = message
   arg_name_own({"id":reserved_word}, message)
   arg_name_nested({"id":reserved_word}, message)
   setter_arg({"id": reserved_word}, message)
@@ -225,6 +267,19 @@
   read_var({"id": reserved_word}, message)
   identifier_name({"id": reserved_word}, None);
   identifier_name_strict({"id": reserved_word}, None);
+  label_normal({"id": reserved_word}, label_message)
+  break_normal({"id": reserved_word}, label_message)
+  continue_normal({"id": reserved_word}, label_message)
+  if (reserved_word == "const"):
+    # The error message for this case is different because
+    # ParseLabelledStatementOrExpression will try to parse this as an expression
+    # first, effectively disallowing the use in ParseVariableDeclarations, i.e.
+    # the preparser never sees that 'const' was intended to be a label.
+    label_strict({"id": reserved_word}, "strict_const")
+  else:
+    label_strict({"id": reserved_word}, message)
+  break_strict({"id": reserved_word}, message)
+  continue_strict({"id": reserved_word}, message)
 
 
 # Future reserved words in strict mode behave like normal identifiers
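
The new label templates above hinge on one rule: a future reserved word is a
valid label in sloppy mode but a SyntaxError in strict mode. A standalone
sketch (plain JavaScript; the strict variant goes through eval so the
SyntaxError is observable at runtime):

    implements: '';   // sloppy mode: "implements" is an ordinary identifier
    try {
      eval('"use strict"; implements: "";');  // strict: reserved word as label
      throw new Error("expected SyntaxError");
    } catch (e) {
      if (!(e instanceof SyntaxError)) throw e;
    }
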
diff --git a/test/sputnik/sputnik.status b/test/sputnik/sputnik.status
index 868509d..a4c7d57 100644
--- a/test/sputnik/sputnik.status
+++ b/test/sputnik/sputnik.status
@@ -30,10 +30,6 @@
 
 ############################### BUGS ###################################
 
-# A bound function should fail on access to 'caller' and 'arguments'.
-S15.3.4.5_A1: FAIL
-S15.3.4.5_A2: FAIL
-
 # '__proto__' should be treated as a normal property in JSON.
 S15.12.2_A1: FAIL
 
@@ -46,11 +42,8 @@
 S15.8.2.18_A7: PASS || FAIL_OK
 S15.8.2.13_A23: PASS || FAIL_OK
 
-# We allow calls to regexp exec() with no arguments to fail for
-# compatibility reasons.
-S15.10.6.2_A1_T16: FAIL_OK
-S15.10.6.2_A12: FAIL_OK
-S15.10.6.3_A1_T16: FAIL_OK
+# Sputnik tests (r97) assume RegExp.prototype is an Object, not a RegExp.
+S15.10.6_A2: FAIL_OK
 
 # We are silent in some regexp cases where the spec wants us to give
 # errors, for compatibility.
@@ -132,6 +125,13 @@
 S15.5.4.11_D1.1_T1: PASS || FAIL_OK
 S15.5.4.11_D1.1_T3: PASS || FAIL_OK
 S12.6.4_D1: PASS || FAIL_OK
+S15.5.4.14_A1_T6: FAIL_OK
+S15.5.4.14_A1_T7: FAIL_OK
+S15.5.4.14_A1_T8: FAIL_OK
+S15.5.4.14_A1_T9: FAIL_OK
+S15.5.4.14_A2_T7: FAIL_OK
+S15.10.2.12_A1_T1: FAIL_OK
+S15.10.2.12_A2_T1: FAIL_OK
 
 # We allow function declarations within statements
 S12.6.2_A13_T1: FAIL_OK
@@ -159,6 +159,10 @@
 S9.9_A1: FAIL_OK
 S9.9_A2: FAIL_OK
 
+# The expected evaluation order of comparison operations changed.
+S11.8.2_A2.3_T1: FAIL_OK
+S11.8.3_A2.3_T1: FAIL_OK
+
 # Calls builtins without an explicit receiver which means that
 # undefined is passed to the builtin. The tests expect the global
 # object to be passed which was true in ES3 but not in ES5.
@@ -176,6 +180,22 @@
 S15.5.4.14_A1_T3: FAIL_OK
 S15.5.4.15_A1_T3: FAIL_OK
 
+# NaN, Infinity and undefined are read-only according to ES5.
+S15.1.1.1_A2_T1: FAIL_OK  # NaN
+S15.1.1.1_A2_T2: FAIL_OK  # NaN
+S15.1.1.2_A2_T1: FAIL_OK  # Infinity
+# S15.1.1.2_A2_T2 would fail if it weren't bogus in r97. sputnik bug #45.
+S15.1.1.3_A2_T1: FAIL_OK  # undefined
+S15.1.1.3_A2_T2: FAIL_OK  # undefined
+
+# Function.prototype.apply can handle arbitrary object as argument list.
+S15.3.4.3_A6_T1: FAIL_OK
+S15.3.4.3_A6_T4: FAIL_OK
+
+# Array.prototype.to[Locale]String is generic in ES5.
+S15.4.4.2_A2_T1: FAIL_OK
+S15.4.4.3_A2_T1: FAIL_OK
+
 ##################### SKIPPED TESTS #####################
 
 # These tests take a looong time to run in debug mode.
@@ -194,53 +214,6 @@
 # Invalid test case (recent change adding var changes semantics)
 S15.3_A3_T3: FAIL
 
-# These tests fail because we had to add bugs to be compatible with JSC.  See
-# http://code.google.com/p/chromium/issues/detail?id=1717
-S15.5.4.1_A1_T2: FAIL_OK
-S15.5.4_A1: FAIL_OK
-S15.5.4_A3: FAIL_OK
-S15.9.5.10_A1_T2: FAIL_OK
-S15.9.5.11_A1_T2: FAIL_OK
-S15.9.5.12_A1_T2: FAIL_OK
-S15.9.5.13_A1_T2: FAIL_OK
-S15.9.5.14_A1_T2: FAIL_OK
-S15.9.5.15_A1_T2: FAIL_OK
-S15.9.5.16_A1_T2: FAIL_OK
-S15.9.5.17_A1_T2: FAIL_OK
-S15.9.5.18_A1_T2: FAIL_OK
-S15.9.5.19_A1_T2: FAIL_OK
-S15.9.5.20_A1_T2: FAIL_OK
-S15.9.5.21_A1_T2: FAIL_OK
-S15.9.5.22_A1_T2: FAIL_OK
-S15.9.5.23_A1_T2: FAIL_OK
-S15.9.5.24_A1_T2: FAIL_OK
-S15.9.5.25_A1_T2: FAIL_OK
-S15.9.5.26_A1_T2: FAIL_OK
-S15.9.5.27_A1_T2: FAIL_OK
-S15.9.5.28_A1_T2: FAIL_OK
-S15.9.5.29_A1_T2: FAIL_OK
-S15.9.5.2_A1_T2: FAIL_OK
-S15.9.5.30_A1_T2: FAIL_OK
-S15.9.5.31_A1_T2: FAIL_OK
-S15.9.5.32_A1_T2: FAIL_OK
-S15.9.5.33_A1_T2: FAIL_OK
-S15.9.5.34_A1_T2: FAIL_OK
-S15.9.5.35_A1_T2: FAIL_OK
-S15.9.5.36_A1_T2: FAIL_OK
-S15.9.5.37_A1_T2: FAIL_OK
-S15.9.5.38_A1_T2: FAIL_OK
-S15.9.5.39_A1_T2: FAIL_OK
-S15.9.5.3_A1_T2: FAIL_OK
-S15.9.5.40_A1_T2: FAIL_OK
-S15.9.5.41_A1_T2: FAIL_OK
-S15.9.5.42_A1_T2: FAIL_OK
-S15.9.5.4_A1_T2: FAIL_OK
-S15.9.5.5_A1_T2: FAIL_OK
-S15.9.5.6_A1_T2: FAIL_OK
-S15.9.5.7_A1_T2: FAIL_OK
-S15.9.5.8_A1_T2: FAIL_OK
-S15.9.5.9_A1_T2: FAIL_OK
-
 [ $arch == arm ]
 
 # BUG(3251225): Tests that timeout with --nocrankshaft.
@@ -257,5 +230,14 @@
 
 [ $arch == mips ]
 
-# Skip all tests on MIPS.
-*: SKIP
+# BUG(3251225): Tests that timeout with --nocrankshaft.
+S15.1.3.1_A2.4_T1: SKIP
+S15.1.3.1_A2.5_T1: SKIP
+S15.1.3.2_A2.4_T1: SKIP
+S15.1.3.2_A2.5_T1: SKIP
+S15.1.3.3_A2.3_T1: SKIP
+S15.1.3.4_A2.3_T1: SKIP
diff --git a/test/test262/README b/test/test262/README
index ea6b4a7..dae1843 100644
--- a/test/test262/README
+++ b/test/test262/README
@@ -4,11 +4,11 @@
 
   http://hg.ecmascript.org/tests/test262
 
-at revision 128 as 'data' in this directory.  Using later version
+at revision 309 as 'data' in this directory.  Using a later version
 may be possible but the tests are only known to pass (and indeed run)
 with that revision.
 
-hg clone -r 128 http://hg.ecmascript.org/tests/test262 data
+hg clone -r 309 http://hg.ecmascript.org/tests/test262 data
 
 If you do update to a newer revision you may have to change the test
 harness adapter code since it uses internal functionality from the
diff --git a/test/test262/test262.status b/test/test262/test262.status
index 8cee210..67607ff 100644
--- a/test/test262/test262.status
+++ b/test/test262/test262.status
@@ -25,1330 +25,94 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+prefix test262
+def FAIL_OK = FAIL, OKAY
 
-#
-# ietestcenter tests.
-#
+############################### BUGS ###################################
 
-prefix ietestcenter
+# '__proto__' should be treated as a normal property in JSON.
+S15.12.2_A1: FAIL
 
-
-#
-# Deliberate differences for compatibility with other browsers
-#
-# 15.9.5.43-0-9 and 15.9.5.43-0-10. V8 doesn't throw RangeError
-# from Date.prototype.toISOString when string is not a finite number.
-# This is compatible with Firefox and Safari.
-15.9.5.43-0-9: PASS || FAIL
-15.9.5.43-0-10: PASS || FAIL
-
-#
-# Unanalyzed failures which may be bugs or deliberate differences
-#
-
-# Bug? Strict Mode - TypeError is thrown when changing the value of a Value
-#      Property of the Global Object under strict mode (NaN)
-10.2.1.1.3-4-16-s: FAIL
-# Bug? Strict Mode - TypeError is thrown when changing the value of a Value
-#      Property of the Global Object under strict mode (undefined)
-10.2.1.1.3-4-18-s: FAIL
-# Invalid test: https://bugs.ecmascript.org/show_bug.cgi?id=76
-10.4.2-2-c-1: FAIL
-# BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
-#      when using Greater-than operator: valueOf > valueOf
-11.8.2-1: FAIL
-# BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
-#      when using Greater-than operator: valueOf > toString
-11.8.2-2: FAIL
-# BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
-#      when using Greater-than operator: toString > valueOf
-11.8.2-3: FAIL
-# BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
-#       when using Greater-than operator: toString > toString
-11.8.2-4: FAIL
-# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
-#      enforced when using Less-than-or-equal operator: valueOf <= valueOf
-11.8.3-1: FAIL
-# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
-#      enforced when using Less-than-or-equal operator: valueOf <= toString
-11.8.3-2: FAIL
-# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
-#      enforced when using Less-than-or-equal operator: toString <= valueOf
-11.8.3-3: FAIL
-# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
-#      enforced when using Less-than-or-equal operator: toString <= toString
-11.8.3-4: FAIL
-# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
-#      enforced when using Less-than-or-equal operator: valueOf <= valueOf
-11.8.3-5: FAIL
-# Bug? simple assignment throws TypeError if LeftHandSide is a readonly property
-#      in strict mode (Global.undefined)
-11.13.1-4-27-s: FAIL
-# Bug? simple assignment throws TypeError if LeftHandSide is a readonly property
-#      in strict mode (Global.Infinity)
-11.13.1-4-3-s: FAIL
-# BUG: Global.NaN is a data property with default attribute values
-15.1.1.1-0: FAIL
-# BUG: Global.Infinity is a data property with default attribute values
-15.1.1.2-0: FAIL
-# BUG: Global.undefined is a data property with default attribute values
-15.1.1.3-0: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (Global.NaN)
-15.2.3.3-4-178: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (Global.Infinity)
-15.2.3.3-4-179: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (Global.undefined)
-15.2.3.3-4-180: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (RegExp.prototype.source)
-# There is no RegExp.prototype.source
-15.2.3.3-4-212: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (RegExp.prototype.global)
-# There is no RegExp.prototype.global
-15.2.3.3-4-213: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (RegExp.prototype.ignoreCase)
-# There is no RegExp.prototype.ignoreCase
-15.2.3.3-4-214: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (RegExp.prototype.multiline)
-15.2.3.3-4-215: FAIL
-# Bug? Object.defineProperty - Update [[Enumerable]] attribute of 'name'
-#      property to true successfully when [[Enumerable]] attribute of 'name'
-#      is false and [[Configurable]] attribute of 'name' is true,  the 'desc'
-#      is a generic descriptor which only contains [[Enumerable]] attribute
-#      as true, 'name' property is an index data property (8.12.9 step 8)
-15.2.3.6-4-82-18: FAIL
-# Bug? Object.defineProperty - Update [[Enumerable]] attribute of 'name'
-#      property to false successfully when [[Enumerable]] and [[Configurable]]
-#      attributes of 'name' property are true,  the 'desc' is a generic
-#      descriptor which only contains [Enumerable]] attribute as false and
-#      'name' property is an index accessor property (8.12.9 step 8)
-15.2.3.6-4-82-19: FAIL
-# Bug? Object.defineProperty - Update [[Enumerable]] attribute of 'name'
-#      property to false successfully when [[Enumerable]] and [[Configurable]]
-#      attributes of 'name' property are true,  the 'desc' is a generic
-#      descriptor which contains [Enumerable]] attribute as false and
-#      [[Configurable]] property is true, 'name' property is an index accessor
-#      property (8.12.9 step 8)
-15.2.3.6-4-82-20: FAIL
-# Bug? Object.defineProperty - Update [[Configurable]] attribute of 'name'
-#      property to false successfully when [[Enumerable]] and [[Configurable]]
-#      attributes of 'name' property are true, the 'desc' is a generic
-#      descriptor which only contains [[Configurable]] attribute as false,
-#      'name' property is an index accessor property (8.12.9 step 8)
-15.2.3.6-4-82-21: FAIL
-# Bug? Object.defineProperty - Update [[Configurable]] attribute of 'name'
-#      property to false successfully when [[Enumerable]] and [[Configurable]]
-#      attributes of 'name' property are true, the 'desc' is a generic
-#      descriptor which contains [[Enumerable]] attribute as true and
-#      [[Configurable]] attribute is false, 'name' property is an index accessor
-#      property (8.12.9 step 8)
-15.2.3.6-4-82-22: FAIL
-# Bug? Object.defineProperty - Update [[Enumerable]] and [[Configurable]]
-#      attributes of 'name' property to false successfully when [[Enumerable]]
-#      and [[Configurable]] attributes of 'name' property are true, the 'desc'
-#      is a generic descriptor which contains [[Enumerable]] and
-#      [[Configurable]] attributes as false, 'name' property is an index
-#      accessor property (8.12.9 step 8)
-15.2.3.6-4-82-23: FAIL
-# Bug? Object.defineProperty - Update [[Enumerable]] attributes of 'name'
-#      property to true successfully when [[Enumerable]] attribute of 'name' is
-#      false and [[Configurable]] attribute of 'name' is true, the 'desc' is a
-#      generic descriptor which only contains [[Enumerable]] attribute as true,
-#      'name' property is an index accessor property (8.12.9 step 8)
-15.2.3.6-4-82-24: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, test the length property of 'O'
-#      is own data property (15.4.5.1 step 1)
-15.2.3.6-4-116: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, test the length property of 'O'
-#      is own data property that overrides an inherited data property (15.4.5.1
-#      step 1)
-15.2.3.6-4-117: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test that RangeError exception is thrown when [[Value]] field of
-#      'desc' is undefined (15.4.5.1 step 3.c)
-15.2.3.6-4-125: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is null (15.4.5.1 step 3.c)
-15.2.3.6-4-126: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is a boolean with value false
-#      (15.4.5.1 step 3.c)
-15.2.3.6-4-127: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is a boolean with value true
-#      (15.4.5.1 step 3.c)
-15.2.3.6-4-128: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is not thrown when the [[Value]] field of
-#      'desc' is 0 (15.4.5.1 step 3.c)
-15.2.3.6-4-129: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is not thrown when the [[Value]] field of
-#      'desc' is +0 (15.4.5.1 step 3.c)
-15.2.3.6-4-130: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is not thrown when the [[Value]] field of
-#      'desc' is -0 (15.4.5.1 step 3.c)
-15.2.3.6-4-131: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is not thrown when the [[Value]] field of
-#      'desc' is a positive number (15.4.5.1 step 3.c)
-15.2.3.6-4-132: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is a negative number (15.4.5.1 step 3.c)
-15.2.3.6-4-133: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is +Infinity (15.4.5.1 step 3.c)
-15.2.3.6-4-134: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is -Infinity (15.4.5.1 step 3.c)
-15.2.3.6-4-135: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is NaN (15.4.5.1 step 3.c)
-15.2.3.6-4-136: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is not thrown when the [[Value]] field of
-#      'desc' is a string containing a positive number (15.4.5.1 step 3.c)
-15.2.3.6-4-137: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is a string containing a negative number (15.4.5.1 step 3.c)
-15.2.3.6-4-138: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is a string containing a decimal number (15.4.5.1 step 3.c)
-15.2.3.6-4-139: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is a string containing +Infinity (15.4.5.1 step 3.c)
-15.2.3.6-4-140: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is a string containing -Infinity (15.4.5.1 step 3.c)
-15.2.3.6-4-141: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is a string containing an
-#      exponential number (15.4.5.1 step 3.c)
-15.2.3.6-4-142: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is a string containing a hex
-#      number (15.4.5.1 step 3.c)
-15.2.3.6-4-143: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is a string containing a number
-#      with leading zeros (15.4.5.1 step 3.c)
-15.2.3.6-4-144: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is a string which doesn't convert to a number (15.4.5.1 step 3.c)
-15.2.3.6-4-145: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is an object which has an own
-#      toString method (15.4.5.1 step 3.c)
-15.2.3.6-4-146: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      valueOf method (15.4.5.1 step 3.c)
-15.2.3.6-4-147: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      valueOf method that returns an object and toString method that returns a
-#      string (15.4.5.1 step 3.c)
-15.2.3.6-4-148: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      toString and valueOf method (15.4.5.1 step 3.c)
-15.2.3.6-4-149: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test TypeError is thrown when the [[Value]] field of 'desc' is an
-#      Object that both toString and valueOf wouldn't return primitive value
-#      (15.4.5.1 step 3.c)
-15.2.3.6-4-150: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', and the [[Value]] field of 'desc' is an Object with an own toString
-#      method and an inherited valueOf method (15.4.5.1 step 3.c), test that the
-#      inherited valueOf method is used
-15.2.3.6-4-151: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError is thrown when the [[Value]] field of 'desc' is a
-#      positive non-integer values (15.4.5.1 step 3.c)
-15.2.3.6-4-152: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length prosperty
-#      of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is a
-#      negative non-integer values (15.4.5.1 step 3.c)
-15.2.3.6-4-153: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is boundary value 2^32 - 2
-#      (15.4.5.1 step 3.c)
-15.2.3.6-4-154: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is boundary value 2^32 - 1
-#      (15.4.5.1 step 3.c)
-15.2.3.6-4-155: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError is thrown when the [[Value]] field of 'desc' is
-#      boundary value 2^32 (15.4.5.1 step 3.c)
-15.2.3.6-4-156: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError is thrown when the [[Value]] field of 'desc' is
-#      boundary value 2^32 + 1 (15.4.5.1 step 3.c)
-15.2.3.6-4-157: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', set the [[Value]] field of 'desc' to a value greater than the
-#      existing value of length (15.4.5.1 step 3.f)
-15.2.3.6-4-159: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', set the [[Value]] field of 'desc' to a value lesser than the
-#      existing value of length and test that indexes beyond the new length are
-#      deleted(15.4.5.1 step 3.f)
-15.2.3.6-4-161: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to true after deleting properties with large index named if the
-#      [[Writable]] field of 'desc' is absent (15.4.5.1 step 3.h)
-15.2.3.6-4-165: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to true after deleting properties with large index named if the
-#      [[Writable]] field of 'desc' is true (15.4.5.1 step 3.h)
-15.2.3.6-4-166: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to false after deleting properties with large index named if the
-#      [[Writable]] field of 'desc' is false (15.4.5.1 step 3.i.ii)
-15.2.3.6-4-167: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', whose writable attribute is being changed to false and the [[Value]]
-#      field of 'desc' is less than value of the length property and also lesser
-#      than an index of the array which is set to configurable:false, test that
-#      new length is set to a value greater than the non-deletable index by 1,
-#      writable attribute of length is set to false and TypeError exception is
-#      thrown (15.4.5.1 step 3.i.iii)
-15.2.3.6-4-168: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property and also lesser than an index of the array which is set to
-#      configurable: false, test that new length is set to a value greater than
-#      the non-deletable index by 1, and TypeError is thrown (15.4.5.1 step
-#      3.l.i)
-15.2.3.6-4-169: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property and also lesser than an index of the array which is set to
-#      configurable: false, test that new length is set to a value greater than
-#      the non-deletable index by 1, writable attribute of length is set to
-#      false and TypeError exception is thrown (15.4.5.1 step 3.l.ii)
-15.2.3.6-4-170: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of an inherited data
-#      property with large index named in 'O' can't stop deleting index named
-#      properties (15.4.5.1 step 3.l.ii)
-15.2.3.6-4-171: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own data property with
-#      large index named in 'O' that overrides an inherited data property can
-#      stop deleting index named properties (15.4.5.1 step 3.l.ii)
-15.2.3.6-4-172: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own data property with
-#      large index named in 'O' that overrides an inherited accessor property
-#      can stop deleting index named properties (15.4.5.1 step 3.l.ii)
-15.2.3.6-4-173: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own accessor property
-#      with large index named in 'O' can stop deleting index named properties
-#      (15.4.5.1 step 3.l.ii)
-15.2.3.6-4-174: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of an inherited accessor
-#      property with large index named in 'O' can't stop deleting index named
-#      properties (15.4.5.1 step 3.l.ii)
-15.2.3.6-4-175: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own accessor property
-#      with large index named in 'O' that overrides an inherited data property
-#      can stop deleting index named properties (15.4.5.1 step 3.l.ii)
-15.2.3.6-4-176: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own accessor property
-#      with large index named in 'O' that overrides an inherited accessor
-#      property can stop deleting index named properties (15.4.5.1 step 3.l.ii)
-15.2.3.6-4-177: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the configurable large index named property of 'O' is
-#      deleted (15.4.5.1 step 3.l.ii)
-15.2.3.6-4-178: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is greater than value of the length
-#      property, test value of the length property is same as [[Value]]
-#      (15.4.5.1 step 3.l.iii.1)
-15.2.3.6-4-179-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to false at last when the [[Writable]] field of 'desc' is false and 'O'
-#      doesn't contain non-configurable large index named property (15.4.5.1
-#      step 3.m)
-15.2.3.6-4-181: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, 'name' is boundary value 2^32 - 2 (15.4.5.1 step 4.a)
-15.2.3.6-4-183: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, test TypeError is thrown if the [[Writable]] attribute of the
-#      length property in 'O' is false and value of 'name' equals to value of
-#      the length property (15.4.5.1 step 4.b)
-15.2.3.6-4-188: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, test TypeError is thrown if the [[Writable]] attribute of the
-#      length property in 'O' is false and value of 'name' is greater than value
-#      of the length property (15.4.5.1 step 4.b)
-15.2.3.6-4-189: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, 'desc' is accessor descriptor, test updating all attribute
-#      values of 'name' (15.4.5.1 step 4.c)
-15.2.3.6-4-209: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, name is accessor property and 'desc' is accessor descriptor,
-#      test updating the [[Enumerable]] attribute value of 'name' (15.4.5.1 step
-#      4.c)
-15.2.3.6-4-271: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, name is accessor property and 'desc' is accessor descriptor,
-#      test updating the [[Configurable]] attribute value of 'name' (15.4.5.1
-#      step 4.c)
-15.2.3.6-4-272: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, name is accessor property and 'desc' is accessor descriptor,
-#      test updating multiple attribute values of 'name' (15.4.5.1 step 4.c)
-15.2.3.6-4-273: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, test the length property of 'O' is set as ToUint32('name') + 1
-#      if ToUint32('name') equals to value of the length property in 'O'
-#      (15.4.5.1 step 4.e.ii)
-15.2.3.6-4-275: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, test the length property of 'O' is set as ToUint32('name') + 1
-#      if ToUint32('name') is greater than value of the length property in 'O'
-#      (15.4.5.1 step 4.e.ii)
-15.2.3.6-4-276: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own accessor property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', and 'desc' is accessor descriptor,
-#      test updating multiple attribute values of 'name' (10.6
-#      [[DefineOwnProperty]] step 3 and 5.a.i)
-15.2.3.6-4-291-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object, 'name' is own
-#      accessor property of 'O', and 'desc' is accessor descriptor, test
-#      updating multiple attribute values of 'name' (10.6 [[DefineOwnProperty]]
-#      step 3)
-15.2.3.6-4-291: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own property of 'O' which is also defined in
-#      [[ParameterMap]] of 'O', and 'desc' is data descriptor, test updating
-#      multiple attribute values of 'name' (10.6 [[DefineOwnProperty]] step 3
-#      and 5.b)
-15.2.3.6-4-292-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own data property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
-#      updating the [[Value]] attribute value of 'name' which is defined as
-#      unwritable and non-configurable (10.6 [[DefineOwnProperty]] step 4 and
-#      step 5b)
-15.2.3.6-4-293-2: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own data property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', test TypeError is not thrown when
-#      updating the [[Value]] attribute value of 'name' which is defined as
-#      non-writable and configurable (10.6 [[DefineOwnProperty]] step 3 and step
-#      5.b)
-15.2.3.6-4-293-3: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own data property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
-#      updating the [[Writable]] attribute value of 'name' which is defined as
-#      non-configurable (10.6 [[DefineOwnProperty]] step 4 and 5b)
-15.2.3.6-4-294-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own data property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
-#      updating the [[Enumerable]] attribute value of 'name' which is defined as
-#      non-configurable (10.6 [[DefineOwnProperty]] step 4 and step 5b)
-15.2.3.6-4-295-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own data property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
-#      updating the [[Configurable]] attribute value of 'name' which is defined
-#      as non-configurable (10.6 [[DefineOwnProperty]] step 4 and step 5b)
-15.2.3.6-4-296-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object, 'name' is an index
-#      named accessor property of 'O' but not defined in [[ParameterMap]] of
-#      'O', and 'desc' is accessor descriptor, test updating multiple attribute
-#      values of 'name' (10.6 [[DefineOwnProperty]] step 3)
-15.2.3.6-4-303: FAIL
-# Bug? ES5 Attributes - indexed property 'P' with attributes [[Writable]]: true,
-#      [[Enumerable]]: true, [[Configurable]]: false is writable using simple
-#      assignment, 'O' is an Arguments object
-15.2.3.6-4-333-11: FAIL
-# Bug? ES5 Attributes - Updating indexed data property 'P' whose attributes are
-#      [[Writable]]: false, [[Enumerable]]: true, [[Configurable]]: true to an
-#      accessor property, 'A' is an Array object (8.12.9 - step 9.b.i)
-15.2.3.6-4-360-1: FAIL
-# Bug? ES5 Attributes - Updating indexed data property 'P' whose attributes are
-#      [[Writable]]: false, [[Enumerable]]: true, [[Configurable]]: true to an
-#      accessor property, 'O' is an Arguments object (8.12.9 - step 9.b.i)
-15.2.3.6-4-360-6: FAIL
-# Bug? ES5 Attributes - Updating indexed data property 'P' whose attributes are
-#      [[Writable]]: false, [[Enumerable]]: true, [[Configurable]]: true to an
-#      accessor property, 'O' is the global object (8.12.9 - step 9.b.i)
-15.2.3.6-4-360-7: FAIL
-# Bug? ES5 Attributes - [[Value]] attribute of data property is the activex host
-#      object
-15.2.3.6-4-401: FAIL
-# Bug? ES5 Attributes - Failed to add a property to an object when the object's
-#      object has a property with same name and [[Writable]] attribute is set to
-#      false (Number instance)
+# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1475
 15.2.3.6-4-405: FAIL
-# Bug? ES5 Attributes - Failed to add a property to an object when the object's
-#      prototype has a property with the same name and [[Writable]] set to false
-#      (JSON)
 15.2.3.6-4-410: FAIL
-# Bug? ES5 Attributes - Failed to add properties to an object when the object's
-#      prototype has properties with the same name and [[Writable]] set to false
-#      (Object.create)
 15.2.3.6-4-415: FAIL
-# Bug? ES5 Attributes - Failed to add a property to an object when the object's
-#      prototype has a property with the same name and [[Writable]] set to
-#      false(Function.prototype.bind)
 15.2.3.6-4-420: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.indexOf are correct
-15.2.3.6-4-612: FAIL
-# Bug? ES5 Attributes - all attributes in Object.lastIndexOf are correct
-15.2.3.6-4-613: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.every are correct
-15.2.3.6-4-614: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.some are correct
-15.2.3.6-4-615: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.forEach are correct
-15.2.3.6-4-616: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.map are correct
-15.2.3.6-4-617: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.filter are correct
-15.2.3.6-4-618: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.reduce are correct
-15.2.3.6-4-619: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.reduceRight are
-#      correct
-15.2.3.6-4-620: FAIL
-# Bug? ES5 Attributes - all attributes in String.prototype.trim are correct
-15.2.3.6-4-621: FAIL
-# Bug? ES5 Attributes - all attributes in Date.prototype.toISOString are correct
-15.2.3.6-4-623: FAIL
-# Bug? ES5 Attributes - all attributes in Date.prototype.toJSON are correct
-15.2.3.6-4-624: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, test the length property of
-#      'O' is own data property (15.4.5.1 step 1)
-15.2.3.7-6-a-112: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, test the length property of
-#      'O' is own data property that overrides an inherited data property
-#      (15.4.5.1 step 1)
-15.2.3.7-6-a-113: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', test RangeError is thrown when setting the [[Value]] field of 'desc'
-#      to undefined (15.4.5.1 step 3.c)
-15.2.3.7-6-a-121: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', test setting the [[Value]] field of 'desc' to null actuall is set to
-#      0 (15.4.5.1 step 3.c)
-15.2.3.7-6-a-122: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a boolean with value false
-#      (15.4.5.1 step 3.c)
-15.2.3.7-6-a-123: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a boolean with value true
-#      (15.4.5.1 step 3.c)
-15.2.3.7-6-a-124: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is 0 (15.4.5.1 step 3.c)
-15.2.3.7-6-a-125: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is +0 (15.4.5.1 step 3.c)
-15.2.3.7-6-a-126: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is -0 (15.4.5.1 step 3.c)
-15.2.3.7-6-a-127: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is positive number (15.4.5.1
-#      step 3.c)
-15.2.3.7-6-a-128: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is negative number (15.4.5.1
-#      step 3.c)
-15.2.3.7-6-a-129: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is +Infinity (15.4.5.1 step
-#      3.c)
-15.2.3.7-6-a-130: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is -Infinity (15.4.5.1 step
-#      3.c)
-15.2.3.7-6-a-131: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is NaN (15.4.5.1 step 3.c)
-15.2.3.7-6-a-132: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing a
-#      positive number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-133: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing a
-#      negative number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-134: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing a
-#      decimal number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-135: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing
-#      +Infinity (15.4.5.1 step 3.c)
-15.2.3.7-6-a-136: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing
-#      -Infinity (15.4.5.1 step 3.c)
-15.2.3.7-6-a-137: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing an
-#      exponential number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-138: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing an hex
-#      number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-139: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing an
-#      leading zero number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-140: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', test the [[Value]] field of 'desc' is a string which doesn't convert
-#      to a number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-141: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      toString method (15.4.5.1 step 3.c)
-15.2.3.7-6-a-142: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      valueOf method (15.4.5.1 step 3.c)
-15.2.3.7-6-a-143: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      valueOf method that returns an object and toString method that returns a
-#      string (15.4.5.1 step 3.c)
-15.2.3.7-6-a-144: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      toString and valueOf method (15.4.5.1 step 3.c)
-15.2.3.7-6-a-145: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test TypeError is thrown when the [[Value]] field of 'desc' is an
-#      Object that both toString and valueOf wouldn't return primitive value
-#      (15.4.5.1 step 3.c)
-15.2.3.7-6-a-146: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test using inherited valueOf method when the [[Value]] field of
-#      'desc' is an Objec with an own toString and inherited valueOf methods
-#      (15.4.5.1 step 3.c)
-15.2.3.7-6-a-147: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
-#      positive non-integer values (15.4.5.1 step 3.c)
-15.2.3.7-6-a-148: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
-#      negative non-integer values (15.4.5.1 step 3.c)
-15.2.3.7-6-a-149: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is boundary value 2^32 - 2
-#      (15.4.5.1 step 3.c)
-15.2.3.7-6-a-150: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is boundary value 2^32 - 1
-#      (15.4.5.1 step 3.c)
-15.2.3.7-6-a-151: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
-#      boundary value 2^32 (15.4.5.1 step 3.c)
-15.2.3.7-6-a-152: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
-#      boundary value 2^32 + 1 (15.4.5.1 step 3.c)
-15.2.3.7-6-a-153: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', test the [[Value]] field of 'desc' which is greater than value of
-#      the length property is defined into 'O' without deleting any property
-#      with large index named (15.4.5.1 step 3.f)
-15.2.3.7-6-a-155: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', test the [[Value]] field of 'desc' which is less than value of the
-#      length property is defined into 'O' with deleting properties with large
-#      index named (15.4.5.1 step 3.f)
-15.2.3.7-6-a-157: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to true at last after deleting properties with large index named if the
-#      [[Writable]] field of 'desc' is absent (15.4.5.1 step 3.h)
-15.2.3.7-6-a-161: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to true at last after deleting properties with large index named if the
-#      [[Writable]] field of 'desc' is true (15.4.5.1 step 3.h)
-15.2.3.7-6-a-162: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to false at last after deleting properties with large index named if the
-#      [[Writable]] field of 'desc' is false (15.4.5.1 step 3.i.ii)
-15.2.3.7-6-a-163: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property in 'O'
-#      is set as true before deleting properties with large index named
-#      (15.4.5.1 step 3.i.iii)
-15.2.3.7-6-a-164: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the length property is decreased by 1 (15.4.5.1 step
-#      3.l.i)
-15.2.3.7-6-a-165: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own data property with
-#      large index named in 'O' can stop deleting index named properties
-#      (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-166: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of inherited data property
-#      with large index named in 'O' can't stop deleting index named properties
-#      (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-167: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own data property with
-#      large index named in 'O' that overrides inherited data property can stop
-#      deleting index named properties (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-168: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own data property with
-#      large index named in 'O' that overrides inherited accessor property can
-#      stop deleting index named properties (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-169: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own accessor property
-#      with large index named in 'O' can stop deleting index named properties
-#      (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-170: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of inherited accessor
-#      property with large index named in 'O' can't stop deleting index named
-#      properties (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-171: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own accessor property
-#      with large index named in 'O' that overrides inherited data property can
-#      stop deleting index named properties (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-172: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own accessor property
-#      with large index named in 'O' that overrides inherited accessor property
-#      can stop deleting index named properties (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-173: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the configurable large index named property of 'O' can be
-#      deleted (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-174: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test value of the length property is set to the last
-#      non-configurable index named property of 'O' plus 1 (15.4.5.1 step
-#      3.l.iii.1)
-15.2.3.7-6-a-175: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to false at last when the [[Writable]] field of 'desc' is false and 'O'
-#      contains non-configurable large index named property (15.4.5.1 step
-#      3.l.iii.2)
-15.2.3.7-6-a-176: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to false at last when the [[Writable]] field of 'desc' is false and 'O'
-#      doesn't contain non-configurable large index named property (15.4.5.1
-#      step 3.m)
-15.2.3.7-6-a-177: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property, 'P' is boundary value 2^32 - 2 (15.4.5.1 step 4.a)
-15.2.3.7-6-a-179: FAIL
-# Bug? Object.defineProperties - TypeError is thrown if 'O' is an Array, 'P' is
-#      an array index named property,[[Writable]] attribute of the length
-#      property in 'O' is false, value of 'P' is equal to value of the length
-#      property in 'O' (15.4.5.1 step 4.b)
-15.2.3.7-6-a-184: FAIL
-# Bug? Object.defineProperties - TypeError is thrown if 'O' is an Array, 'P' is
-#      an array index named property,[[Writable]] attribute of the length
-#      property in 'O' is false, value of 'P' is bigger than value of the length
-#      property in 'O' (15.4.5.1 step 4.b)
-15.2.3.7-6-a-185: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property, 'desc' is accessor descriptor, test updating all attribute
-#      values of 'P' (15.4.5.1 step 4.c)
-15.2.3.7-6-a-205: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property that already exists on 'O' is accessor property and 'desc' is
-#      accessor descriptor, test updating the [[Enumerable]] attribute value of
-#      'P' (15.4.5.1 step 4.c)
-15.2.3.7-6-a-260: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property that already exists on 'O' is accessor property and 'desc' is
-#      accessor descriptor, test updating the [[Configurable]] attribute value
-#      of 'P' (15.4.5.1 step 4.c)
-15.2.3.7-6-a-261: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property that already exists on 'O' is accessor property and 'desc' is
-#      accessor descriptor, test updating multiple attribute values of 'P'
-#      (15.4.5.1 step 4.c)
-15.2.3.7-6-a-262: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property, test the length property of 'O' is set as ToUint32('P') + 1 if
-#      ToUint32('P') equals to value of the length property in 'O' (15.4.5.1
-#      step 4.e.ii)
-15.2.3.7-6-a-264: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property, test the length property of 'O' is set as ToUint32('P') + 1 if
-#      ToUint32('P') is greater than value of the length property in 'O'
-#      (15.4.5.1 step 4.e.ii)
-15.2.3.7-6-a-265: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own accessor
-#      property of 'O' which is also defined in [[ParameterMap]] of 'O', and
-#      'desc' is accessor descriptor, test updating multiple attribute values of
-#      'P' (10.6 [[DefineOwnProperty]] step 3)
-15.2.3.7-6-a-280: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
-#      property of 'O' which is also defined in [[ParameterMap]] of 'O', and
-#      'desc' is data descriptor, test updating multiple attribute values of 'P'
-#      (10.6 [[DefineOwnProperty]] step 3)
-15.2.3.7-6-a-281: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
-#      property of 'O' which is also defined in [[ParameterMap]] of 'O', test
-#      TypeError is thrown when updating the [[Value]] attribute value of 'P'
-#      whose writable and configurable attributes are false (10.6
-#      [[DefineOwnProperty]] step 4)
-15.2.3.7-6-a-282: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
-#      property of 'O' which is also defined in [[ParameterMap]] of 'O', test
-#      TypeError is thrown when updating the [[Writable]] attribute value of 'P'
-#      which is defined as non-configurable (10.6 [[DefineOwnProperty]] step 4)
-15.2.3.7-6-a-283: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
-#      property of 'O' which is also defined in [[ParameterMap]] of 'O', test
-#      TypeError is thrown when updating the [[Enumerable]] attribute value of
-#      'P' which is defined as non-configurable (10.6 [[DefineOwnProperty]] step
-#      4)
-15.2.3.7-6-a-284: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
-#      property of 'O' which is also defined in [[ParameterMap]] of 'O', test
-#      TypeError is thrown when updating the [[Configurable]] attribute value of
-#      'P' which is defined as non-configurable (10.6 [[DefineOwnProperty]] step
-#      4)
-15.2.3.7-6-a-285: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is an array
-#      index named accessor property of 'O' but not defined in [[ParameterMap]]
-#      of 'O', and 'desc' is accessor descriptor, test updating multiple
-#      attribute values of 'P' (10.6 [[DefineOwnProperty]] step 3)
-15.2.3.7-6-a-292: FAIL
-# Bug? Strict Mode - 'this' value is a string which cannot be converted to
-#      wrapper objects when the function is called with an array of arguments
-15.3.4.3-1-s: FAIL
-# Bug? Strict Mode - 'this' value is a number which cannot be converted to
-#      wrapper objects when the function is called with an array of arguments
-15.3.4.3-2-s: FAIL
-# Bug? Strict Mode - 'this' value is a boolean which cannot be converted to
-#      wrapper objects when the function is called with an array of arguments
-15.3.4.3-3-s: FAIL
-# Bug? Function.prototype.bind - [[Get]] attribute of 'caller' property in 'F'
-#      is thrower
-15.3.4.5-20-2: FAIL
-# Bug? Function.prototype.bind - [[Set]] attribute of 'caller' property in 'F'
-#      is thrower
-15.3.4.5-20-3: FAIL
-# Bug? Function.prototype.bind - [[Get]] attribute of 'arguments' property in
-#      'F' is thrower
-15.3.4.5-21-2: FAIL
-# Bug? Function.prototype.bind - [[Set]] attribute of 'arguments' property in
-#      'F' is thrower
-15.3.4.5-21-3: FAIL
-# Bug? Array.prototype.indexOf - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.14-9-a-19: FAIL
-# Bug? Array.prototype.indexOf - element to be retrieved is own accessor
-#      property that overrides an inherited data property on an Array
-15.4.4.14-9-b-i-11: FAIL
-# Bug? Array.prototype.indexOf - element to be retrieved is own accessor
-#      property that overrides an inherited accessor property on an Array
-15.4.4.14-9-b-i-13: FAIL
-# Bug? Array.prototype.indexOf - element to be retrieved is own accessor
-#      property without a get function on an Array
-15.4.4.14-9-b-i-17: FAIL
-# Bug? Array.prototype.indexOf - element to be retrieved is own accessor
-#      property without a get function that overrides an inherited accessor
-#      property on an Array
-15.4.4.14-9-b-i-19: FAIL
-# Bug? Array.prototype.indexOf - side-effects are visible in subsequent
-#      iterations on an Array
-15.4.4.14-9-b-i-28: FAIL
-# Bug? Array.prototype.indexOf - terminates iteration on unhandled exception on
-#      an Array
-15.4.4.14-9-b-i-30: FAIL
-# Bug? Array.prototype.lastIndexOf - deleting property of prototype causes
-#      prototype index property not to be visited on an Array
-15.4.4.15-8-a-14: FAIL
-# Bug? Array.prototype.lastIndexOf - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.15-8-a-19: FAIL
-# Bug? Array.prototype.lastIndexOf - element to be retrieved is own accessor
-#      property that overrides an inherited data property on an Array
-15.4.4.15-8-b-i-11: FAIL
-# Bug? Array.prototype.lastIndexOf - element to be retrieved is own accessor
-#      property that overrides an inherited accessor property on an Array
-15.4.4.15-8-b-i-13: FAIL
-# Bug? Array.prototype.lastIndexOf - element to be retrieved is own accessor
-#      property without a get function on an Array
-15.4.4.15-8-b-i-17: FAIL
-# Bug? Array.prototype.lastIndexOf - side-effects are visible in subsequent
-#      iterations on an Array
-15.4.4.15-8-b-i-28: FAIL
-# Bug? Array.prototype.lastIndexOf terminates iteration on unhandled exception
-#      on an Array
-15.4.4.15-8-b-i-30: FAIL
-# Bug? Array.prototype.every applied to boolean primitive
-15.4.4.16-1-3: FAIL
-# Bug? Array.prototype.every applied to number primitive
-15.4.4.16-1-5: FAIL
-# Bug? Array.prototype.every applied to string primitive
-15.4.4.16-1-7: FAIL
-# Bug? Array.prototype.every - side effects produced by step 2 are visible when
-#      an exception occurs
-15.4.4.16-4-8: FAIL
-# Bug? Array.prototype.every - side effects produced by step 3 are visible when
-#      an exception occurs
-15.4.4.16-4-9: FAIL
-# Bug? Array.prototype.every - the exception is not thrown if exception was
-#      thrown by step 2
-15.4.4.16-4-10: FAIL
-# Bug? Array.prototype.every - the exception is not thrown if exception was
-#      thrown by step 3
-15.4.4.16-4-11: FAIL
-# Bug? Array.prototype.every - calling with no callbackfn is the same as passing
-#      undefined for callbackfn
-15.4.4.16-4-15: FAIL
-# Bug? Array.prototype.every - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.16-7-b-16: FAIL
-# Bug? Array.prototype.every - element to be retrieved is own accessor property
-#      on an Array
-15.4.4.16-7-c-i-10: FAIL
-# Bug? Array.prototype.every - element to be retrieved is own accessor property
-#      that overrides an inherited data property on an Array
-15.4.4.16-7-c-i-12: FAIL
-# Bug? Array.prototype.every - element to be retrieved is own accessor property
-#      that overrides an inherited accessor property on an Array
-15.4.4.16-7-c-i-14: FAIL
-# Bug? Array.prototype.every - element to be retrieved is own accessor property
-#      without a get function on an Array
-15.4.4.16-7-c-i-18: FAIL
-# Bug? Array.prototype.every - element to be retrieved is own accessor property
-#      without a get function that overrides an inherited accessor property on
-#      an Array
-15.4.4.16-7-c-i-20: FAIL
-# Bug? Array.prototype.every - element changed by getter on previous iterations
-#      is observed on an Array
-15.4.4.16-7-c-i-28: FAIL
-# Bug? Array.prototype.some applied to boolean primitive
-15.4.4.17-1-3: FAIL
-# Bug? Array.prototype.some applied to number primitive
-15.4.4.17-1-5: FAIL
-# Bug? Array.prototype.some applied to applied to string primitive
-15.4.4.17-1-7: FAIL
-# Bug? Array.prototype.some - side effects produced by step 2 are visible when
-#      an exception occurs
-15.4.4.17-4-8: FAIL
-# Bug? Array.prototype.some - side effects produced by step 3 are visible when
-#      an exception occurs
-15.4.4.17-4-9: FAIL
-# Bug? Array.prototype.some - the exception is not thrown if exception was
-#      thrown by step 2
-15.4.4.17-4-10: FAIL
-# Bug? Array.prototype.some - the exception is not thrown if exception was
-#      thrown by step 3
-15.4.4.17-4-11: FAIL
-# Bug? Array.prototype.some - calling with no callbackfn is the same as passing
-#      undefined for callbackfn
-15.4.4.17-4-15: FAIL
-# Bug? Array.prototype.some - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.17-7-b-16: FAIL
-# Bug? Array.prototype.some - element to be retrieved is own accessor property
-#      on an Array
-15.4.4.17-7-c-i-10: FAIL
-# Bug? Array.prototype.some - element to be retrieved is own accessor property
-#      that overrides an inherited data property on an Array
-15.4.4.17-7-c-i-12: FAIL
-# Bug? Array.prototype.some - element to be retrieved is own accessor property
-#      that overrides an inherited accessor property on an Array
-15.4.4.17-7-c-i-14: FAIL
-# Bug? Array.prototype.some - element to be retrieved is own accessor property
-#      without a get function on an Array
-15.4.4.17-7-c-i-18: FAIL
-# Bug? Array.prototype.some - element to be retrieved is own accessor property
-#      without a get function that overrides an inherited accessor property on
-#      an Array
-15.4.4.17-7-c-i-20: FAIL
-# Bug? Array.prototype.some - element changed by getter on previous iterations
-#      is observed on an Array
-15.4.4.17-7-c-i-28: FAIL
-# Bug? Array.prototype.forEach applied to boolean primitive
-15.4.4.18-1-3: FAIL
-# Bug? Array.prototype.forEach applied to number primitive
-15.4.4.18-1-5: FAIL
-# Bug? Array.prototype.forEach applied to string primitive
-15.4.4.18-1-7: FAIL
-# Bug? Array.prototype.forEach - side effects produced by step 2 are visible
-#      when an exception occurs
-15.4.4.18-4-8: FAIL
-# Bug? Array.prototype.forEach - side effects produced by step 3 are visible
-#      when an exception occurs
-15.4.4.18-4-9: FAIL
-# Bug? Array.prototype.forEach - the exception is not thrown if exception was
-#      thrown by step 2
-15.4.4.18-4-10: FAIL
-# Bug? Array.prototype.forEach - the exception is not thrown if exception was
-#      thrown by step 3
-15.4.4.18-4-11: FAIL
-# Bug? Array.prototype.forEach - calling with no callbackfn is the same as
-#      passing undefined for callbackfn
-15.4.4.18-4-15: FAIL
-# Bug? Array.prototype.forEach - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.18-7-b-16: FAIL
-# Bug? Array.prototype.forEach - element to be retrieved is own accessor
-#      property on an Array
-15.4.4.18-7-c-i-10: FAIL
-# Bug? Array.prototype.forEach - element to be retrieved is own accessor
-#      property that overrides an inherited data property on an Array
-15.4.4.18-7-c-i-12: FAIL
-# Bug? Array.prototype.forEach - element to be retrieved is own accessor
-#      property that overrides an inherited accessor property on an Array
-15.4.4.18-7-c-i-14: FAIL
-# Bug? Array.prototype.forEach - element to be retrieved is own accessor
-#      property without a get function on an Array
-15.4.4.18-7-c-i-18: FAIL
-# Bug? Array.prototype.forEach - element to be retrieved is own accessor
-#      property without a get function that overrides an inherited accessor
-#      property on an Array
-15.4.4.18-7-c-i-20: FAIL
-# Bug? Array.prototype.forEach - element changed by getter on previous
-#      iterations is observed on an Array
-15.4.4.18-7-c-i-28: FAIL
-# Bug? Array.prototype.map - applied to boolean primitive
-15.4.4.19-1-3: FAIL
-# Bug? Array.prototype.map - applied to number primitive
-15.4.4.19-1-5: FAIL
-# Bug? Array.prototype.map - applied to string primitive
-15.4.4.19-1-7: FAIL
-# Bug? Array.prototype.map - Side effects produced by step 2 are visible when an
-#      exception occurs
-15.4.4.19-4-8: FAIL
-# Bug? Array.prototype.map - Side effects produced by step 3 are visible when an
-#      exception occurs
-15.4.4.19-4-9: FAIL
-# Bug? Array.prototype.map - the exception is not thrown if exception was thrown
-#      by step 2
-15.4.4.19-4-10: FAIL
-# Bug? Array.prototype.map - the exception is not thrown if exception was thrown
-#      by step 3
-15.4.4.19-4-11: FAIL
-# Bug? Array.prototype.map - calling with no callbackfn is the same as passing
-#      undefined for callbackfn
-15.4.4.19-4-15: FAIL
-# Bug? Array.prototype.map - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.19-8-b-16: FAIL
-# Bug? Array.prototype.map - element to be retrieved is own accessor property on
-#      an Array
-15.4.4.19-8-c-i-10: FAIL
-# Bug? Array.prototype.map - element to be retrieved is own accessor property
-#      that overrides an inherited data property on an Array
-15.4.4.19-8-c-i-12: FAIL
-# Bug? Array.prototype.map - element to be retrieved is own accessor property
-#      that overrides an inherited accessor property on an Array
-15.4.4.19-8-c-i-14: FAIL
-# Bug? Array.prototype.map - element to be retrieved is own accessor property
-#      without a get function on an Array
-15.4.4.19-8-c-i-18: FAIL
-# Bug? Array.prototype.map - element to be retrieved is own accessor property
-#      without a get function that overrides an inherited accessor property on
-#      an Array
-15.4.4.19-8-c-i-19: FAIL
-# Bug? Array.prototype.map - element changed by getter on previous iterations is
-#      observed on an Array
-15.4.4.19-8-c-i-28: FAIL
-# Bug? Array.prototype.filter applied to boolean primitive
-15.4.4.20-1-3: FAIL
-# Bug? Array.prototype.filter applied to number primitive
-15.4.4.20-1-5: FAIL
-# Bug? Array.prototype.filter applied to string primitive
-15.4.4.20-1-7: FAIL
-# Bug? Array.prototype.filter - side effects produced by step 2 are visible when
-#      an exception occurs
-15.4.4.20-4-8: FAIL
-# Bug? Array.prototype.filter - side effects produced by step 3 are visible when
-#      an exception occurs
-15.4.4.20-4-9: FAIL
-# Bug? Array.prototype.filter - the exception is not thrown if exception was
-#      thrown by step 2
-15.4.4.20-4-10: FAIL
-# Bug? Array.prototype.filter - the exception is not thrown if exception was
-#      thrown by step 3
-15.4.4.20-4-11: FAIL
-# Bug? Array.prototype.filter - calling with no callbackfn is the same as
-#      passing undefined for callbackfn
-15.4.4.20-4-15: FAIL
-# Bug? Array.prototype.filter - properties can be added to prototype after
-#      current position are visited on an Array-like object
-15.4.4.20-9-b-6: FAIL
-# Bug? Array.prototype.filter - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.20-9-b-16: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is own data property
-#      that overrides an inherited accessor property on an Array
-15.4.4.20-9-c-i-6: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is own accessor property
-#      on an Array
-15.4.4.20-9-c-i-10: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is own accessor property
-#      that overrides an inherited data property on an Array
-15.4.4.20-9-c-i-12: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is own accessor property
-#      that overrides an inherited accessor property on an Array
-15.4.4.20-9-c-i-14: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is inherited accessor
-#      property on an Array
-15.4.4.20-9-c-i-16: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is own accessor property
-#      without a get function on an Array
-15.4.4.20-9-c-i-18: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is own accessor property
-#      without a get function that overrides an inherited accessor property on
-#      an Array
-15.4.4.20-9-c-i-20: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is inherited accessor
-#      property without a get function on an Array
-15.4.4.20-9-c-i-22: FAIL
-# Bug? Array.prototype.filter - element changed by getter on previous iterations
-#      is observed on an Array
-15.4.4.20-9-c-i-28: FAIL
-# Bug? Array.prototype.reduce applied to boolean primitive
-15.4.4.21-1-3: FAIL
-# Bug? Array.prototype.reduce applied to number primitive
-15.4.4.21-1-5: FAIL
-# Bug? Array.prototype.reduce applied to string primitive
-15.4.4.21-1-7: FAIL
-# Bug? Array.prototype.reduce - side effects produced by step 2 are visible when
-#      an exception occurs
-15.4.4.21-4-8: FAIL
-# Bug? Array.prototype.reduce - side effects produced by step 3 are visible when
-#      an exception occurs
-15.4.4.21-4-9: FAIL
-# Bug? Array.prototype.reduce - the exception is not thrown if exception was
-#      thrown by step 2
-15.4.4.21-4-10: FAIL
-# Bug? Array.prototype.reduce - the exception is not thrown if exception was
-#      thrown by step 3
-15.4.4.21-4-11: FAIL
-# Bug? Array.prototype.reduce - calling with no callbackfn is the same as
-#      passing undefined for callbackfn
-15.4.4.21-4-15: FAIL
-# Bug? Array.prototype.reduce - decreasing length of array in step 8 does not
-#      delete non-configurable properties
-15.4.4.21-9-b-16: FAIL
-# Bug? Array.prototype.reduce - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.21-9-b-29: FAIL
-# Bug? Array.prototype.reduceRight applied to boolean primitive
-15.4.4.22-1-3: FAIL
-# Bug? Array.prototype.reduceRight applied to number primitive
-15.4.4.22-1-5: FAIL
-# Bug? Array.prototype.reduceRight applied to string primitive
-15.4.4.22-1-7: FAIL
-# Bug? Array.prototype.reduceRight - side effects produced by step 2 are visible
-#      when an exception occurs
-15.4.4.22-4-8: FAIL
-# Bug? Array.prototype.reduceRight - side effects produced by step 3 are visible
-#      when an exception occurs
-15.4.4.22-4-9: FAIL
-# Bug? Array.prototype.reduceRight - the exception is not thrown if exception
-#      was thrown by step 2
-15.4.4.22-4-10: FAIL
-# Bug? Array.prototype.reduceRight - the exception is not thrown if exception
-#      was thrown by step 3
-15.4.4.22-4-11: FAIL
-# Bug? Array.prototype.reduceRight - calling with no callbackfn is the same as
-#      passing undefined for callbackfn
-15.4.4.22-4-15: FAIL
-# Bug? Array.prototype.reduceRight - element to be retrieved is own accessor
-#      property that overrides an inherited data property on an Array
-15.4.4.22-8-b-iii-1-12: FAIL
-# Bug? Array.prototype.reduceRight - element to be retrieved is own accessor
-#      property without a get function on an Array
-15.4.4.22-8-b-iii-1-18: FAIL
-# Bug? Array.prototype.reduceRight - element to be retrieved is own accessor
-#      property without a get function that overrides an inherited accessor
-#      property on an Array
-15.4.4.22-8-b-iii-1-20: FAIL
-# Bug? Array.prototype.reduceRight - element changed by getter on current
-#      iteration is observed in subsequent iterations on an Array
-15.4.4.22-8-b-iii-1-30: FAIL
-# Bug? Array.prototype.reduceRight - Exception in getter terminate iteration on
-#      an Array
-15.4.4.22-8-b-iii-1-33: FAIL
-# Bug? Array.prototype.reduceRight - modifications to length don't change number
-#      of iterations in step 9
-15.4.4.22-8-b-2: FAIL
-# Bug? Array.prototype.reduceRight - deleting own property in step 8 causes
-#      deleted index property not to be visited on an Array
-15.4.4.22-9-b-9: FAIL
-# Bug? Array.prototype.reduceRight - deleting own property with prototype
-#      property in step 8 causes prototype index property to be visited on an
-#      Array
-15.4.4.22-9-b-13: FAIL
-# Bug? Array.prototype.reduceRight - decreasing length of array in step 8 does
-#      not delete non-configurable properties
-15.4.4.22-9-b-16: FAIL
-# Bug? Array.prototype.reduceRight - deleting property of prototype causes
-#      deleted index property not to be visited on an Array
-15.4.4.22-9-b-24: FAIL
-# Bug? Array.prototype.reduceRight - deleting own property with prototype
-#      property causes prototype index property to be visited on an Array
-15.4.4.22-9-b-26: FAIL
-# Bug? Array.prototype.reduceRight - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.22-9-b-29: FAIL
-# Bug? Array.prototype.reduceRight - element changed by getter on previous
-#      iterations is observed on an Array
-15.4.4.22-9-c-i-30: FAIL
-# Bug? Array.prototype.reduceRight - modifications to length will change number
-#      of iterations
-15.4.4.22-9-9: FAIL
-# Bug? String.prototype.trim - 'S' is a string with all WhiteSpace
-15.5.4.20-3-2: FAIL
-# Bug? String.prototype.trim - 'S' is a string with all union of WhiteSpace and
-#      LineTerminator
-15.5.4.20-3-3: FAIL
-# Bug? String.prototype.trim - 'S' is a string start with union of all
-#      LineTerminator and all WhiteSpace
-15.5.4.20-3-4: FAIL
-# Bug? String.prototype.trim - 'S' is a string end with union of all
-#      LineTerminator and all WhiteSpace
-15.5.4.20-3-5: FAIL
-# Bug? String.prototype.trim - 'S' is a string start with union of all
-#      LineTerminator and all WhiteSpace and end with union of all
-#      LineTerminator and all WhiteSpace
-15.5.4.20-3-6: FAIL
-# Bug? String.prototype.trim handles whitepace and lineterminators (\\uFEFFabc)
-15.5.4.20-4-10: FAIL
-# Bug? String.prototype.trim handles whitepace and lineterminators (abc\\uFEFF)
-15.5.4.20-4-18: FAIL
-# Bug? String.prototype.trim handles whitepace and lineterminators
-#      (\\uFEFF\\uFEFF)
-15.5.4.20-4-34: FAIL
-# Bug? Date.prototype.toISOString - RangeError is thrown when value of date is
-#      Date(1970, 0, -99999999, 0, 0, 0, -1), the time zone is UTC(0)
-15.9.5.43-0-8: FAIL
-# Bug? Date.prototype.toISOString - RangeError is not thrown when value of date
-#      is Date(1970, 0, 100000001, 0, 0, 0, -1), the time zone is UTC(0)
-15.9.5.43-0-11: FAIL
-# Bug? Date.prototype.toISOString - RangeError is not thrown when value of date
-#      is Date(1970, 0, 100000001, 0, 0, 0, 0), the time zone is UTC(0)
-15.9.5.43-0-12: FAIL
-# Bug? Date.prototype.toISOString - RangeError is thrown when value of date is
-#      Date(1970, 0, 100000001, 0, 0, 0, 1), the time zone is UTC(0)
-15.9.5.43-0-13: FAIL
-# Bug? Date.prototype.toISOString - when value of year is -Infinity
-#      Date.prototype.toISOString throw the RangeError
-15.9.5.43-0-14: FAIL
-# Bug? Date.prototype.toISOString - value of year is Infinity
-#      Date.prototype.toISOString throw the RangeError
-15.9.5.43-0-15: FAIL
-# Bug? RegExp - the thrown error is SyntaxError instead of RegExpError when 'F'
-#      contains any character other than 'g', 'i', or 'm'
-15.10.4.1-3: FAIL
-# Bug? RegExp.prototype is itself a RegExp
-15.10.6: FAIL
-# Bug? RegExp.prototype.source is of type String
-15.10.7.1-1: FAIL
-# Bug? RegExp.prototype.source is a data property with default attribute values
-#      (false)
-15.10.7.1-2: FAIL
-# Bug? RegExp.prototype.global is of type Boolean
-15.10.7.2-1: FAIL
-# Bug? RegExp.prototype.global is a data property with default attribute values
-#      (false)
-15.10.7.2-2: FAIL
-# Bug? RegExp.prototype.ignoreCase is of type Boolean
-15.10.7.3-1: FAIL
-# Bug? RegExp.prototype.ignoreCase is a data property with default attribute
-#      values (false)
-15.10.7.3-2: FAIL
-# Bug? RegExp.prototype.multiline is of type Boolean
-15.10.7.4-1: FAIL
-# Bug? RegExp.prototype.multiline is a data property with default attribute
-#      values (false)
-15.10.7.4-2: FAIL
-# Bug? RegExp.prototype.lastIndex is of type Number
-15.10.7.5-1: FAIL
-# Bug? RegExp.prototype.lastIndex is a data property with specified attribute
-#      values
-15.10.7.5-2: FAIL
-# Bug? Error.prototype.toString return the value of 'msg' when 'name' is empty
-#      string and 'msg' isn't undefined
-15.11.4.4-8-1: FAIL
+
+##################### DELIBERATE INCOMPATIBILITIES #####################
+
+# We deliberately treat arguments to parseInt() with a leading zero as
+# octal numbers in order to not break the web.
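+# For example, parseInt("010") yields 8 under this behavior, whereas a
+# strictly ES5-conformant implementation returns 10.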
+S15.1.2.2_A5.1_T1: FAIL_OK
+
+# These tests check the precision of Math.tan and Math.sin.  The
+# implementations of those trigonometric functions are platform- and
+# compiler-dependent.  Furthermore, the expectation values deviate
+# considerably from the actual results given by an arbitrary-precision
+# calculator, making those tests partly bogus.
+S15.8.2.16_A7: PASS || FAIL_OK
+S15.8.2.18_A7: PASS || FAIL_OK
+
+# We are more lenient in which string character escapes we allow than
+# the spec (7.8.4 p. 19) wants us to be.  This is for compatibility.
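+# For example, malformed \x and \u escape sequences are accepted rather
+# than rejected with a SyntaxError.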
+S7.8.4_A6.1_T4: FAIL_OK
+S7.8.4_A6.2_T1: FAIL_OK
+S7.8.4_A6.2_T2: FAIL_OK
+S7.8.4_A7.1_T4: FAIL_OK
+S7.8.4_A7.2_T1: FAIL_OK
+S7.8.4_A7.2_T2: FAIL_OK
+S7.8.4_A7.2_T3: FAIL_OK
+S7.8.4_A7.2_T4: FAIL_OK
+S7.8.4_A7.2_T5: FAIL_OK
+S7.8.4_A7.2_T6: FAIL_OK
+
+# Linux and Mac default to the extended 80 bit floating point format in the
+# FPU.
+# We follow the other major JS engines by keeping this default.
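+# (The 80 bit format carries a 64 bit significand, so intermediate results
+# can round differently than strict 64 bit doubles.)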
+S8.5_A2.2: PASS if ($system != linux || $arch == x64), FAIL_OK if ($system == linux && $arch != x64)
+S8.5_A2.1: PASS if ($system != linux || $arch == x64), FAIL_OK if ($system == linux && $arch != x64)
+
+############################ INVALID TESTS #############################
+
+# The reference value calculated by Test262 is incorrect if you run these
+# tests in PST/PDT between the first Sunday in March and the first Sunday in
+# April. The DST switch was moved in 2007, whereas Test262 bases the
+# reference value on 2000.
+# Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=293
+S15.9.3.1_A5_T1: PASS || FAIL_OK
+S15.9.3.1_A5_T2: PASS || FAIL_OK
+S15.9.3.1_A5_T3: PASS || FAIL_OK
+S15.9.3.1_A5_T4: PASS || FAIL_OK
+S15.9.3.1_A5_T5: PASS || FAIL_OK
+S15.9.3.1_A5_T6: PASS || FAIL_OK
+
+############################ SKIPPED TESTS #############################
+
+# These tests take a looong time to run in debug mode.
+S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug
+S15.1.3.1_A2.5_T1: PASS, SKIP if $mode == debug
+
+[ $arch == arm ]
+
+# BUG(3251225): Tests that timeout with --nocrankshaft.
+S15.1.3.1_A2.4_T1: SKIP
+S15.1.3.1_A2.5_T1: SKIP
+S15.1.3.2_A2.4_T1: SKIP
+S15.1.3.2_A2.5_T1: SKIP
+S15.1.3.3_A2.3_T1: SKIP
+S15.1.3.4_A2.3_T1: SKIP
+
+[ $arch == mips ]
+
+# BUG(3251225): Tests that timeout with --nocrankshaft.
+S15.1.3.1_A2.4_T1: SKIP
+S15.1.3.1_A2.5_T1: SKIP
+S15.1.3.2_A2.4_T1: SKIP
+S15.1.3.2_A2.5_T1: SKIP
+S15.1.3.3_A2.3_T1: SKIP
+S15.1.3.4_A2.3_T1: SKIP
diff --git a/test/test262/testcfg.py b/test/test262/testcfg.py
index 9482046..294b39c 100644
--- a/test/test262/testcfg.py
+++ b/test/test262/testcfg.py
@@ -29,8 +29,14 @@
 import test
 import os
 from os.path import join, exists
+import urllib
+import hashlib
+import tarfile
 
 
+TEST_262_ARCHIVE_REVISION = '3a890174343c'  # This is the r309 revision.
+TEST_262_ARCHIVE_MD5 = 'be5d4cfbe69cef70430907b8f3a92b50'
+TEST_262_URL = 'http://hg.ecmascript.org/tests/test262/archive/%s.tar.bz2'
 TEST_262_HARNESS = ['sta.js']
 
 
@@ -43,10 +49,10 @@
     self.root = root
 
   def IsNegative(self):
-    return self.filename.endswith('-n.js')
+    return '@negative' in self.GetSource()
 
   def GetLabel(self):
-    return "%s test262 %s %s" % (self.mode, self.GetGroup(), self.GetName())
+    return "%s test262 %s" % (self.mode, self.GetName())
 
   def IsFailureOutput(self, output):
     if output.exit_code != 0:
@@ -55,7 +61,6 @@
 
   def GetCommand(self):
     result = self.context.GetVmCommand(self, self.mode)
-    result += ['-e', 'var window = this']
     result += self.framework
     result.append(self.filename)
     return result
@@ -63,9 +68,6 @@
   def GetName(self):
     return self.path[-1]
 
-  def GetGroup(self):
-    return self.path[0]
-
   def GetSource(self):
     return open(self.filename).read()
 
@@ -75,13 +77,14 @@
   def __init__(self, context, root):
     super(Test262TestConfiguration, self).__init__(context, root)
 
-  def AddIETestCenter(self, tests, current_path, path, mode):
-    current_root = join(self.root, 'data', 'test', 'suite', 'ietestcenter')
+  def ListTests(self, current_path, path, mode, variant_flags):
+    testroot = join(self.root, 'data', 'test', 'suite')
     harness = [join(self.root, 'data', 'test', 'harness', f)
                    for f in TEST_262_HARNESS]
     harness += [join(self.root, 'harness-adapt.js')]
-    for root, dirs, files in os.walk(current_root):
-      for dotted in [x  for x in dirs if x.startswith('.')]:
+    tests = []
+    for root, dirs, files in os.walk(testroot):
+      for dotted in [x for x in dirs if x.startswith('.')]:
         dirs.remove(dotted)
       dirs.sort()
       root_path = root[len(self.root):].split(os.path.sep)
@@ -89,27 +92,35 @@
       files.sort()
       for file in files:
         if file.endswith('.js'):
-          if self.Contains(path, root_path):
-            test_path = ['ietestcenter', file[:-3]]
+          test_path = ['test262', file[:-3]]
+          if self.Contains(path, test_path):
             test = Test262TestCase(join(root, file), test_path, self.context,
                                    self.root, mode, harness)
             tests.append(test)
-
-  def AddSputnikConvertedTests(self, tests, current_path, path, mode):
-    # To be enabled
-    pass
-
-  def AddSputnikTests(self, tests, current_path, path, mode):
-    # To be enabled
-    pass
-
-  def ListTests(self, current_path, path, mode, variant_flags):
-    tests = []
-    self.AddIETestCenter(tests, current_path, path, mode)
-    self.AddSputnikConvertedTests(tests, current_path, path, mode)
-    self.AddSputnikTests(tests, current_path, path, mode)
     return tests
 
+  def DownloadData(self):
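+    # Fetch the pinned test262 archive if it is missing, verify its MD5
+    # sum before extracting, and expose the extracted tree as 'data'.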
+    revision = TEST_262_ARCHIVE_REVISION
+    archive_url = TEST_262_URL % revision
+    archive_name = join(self.root, 'test262-%s.tar.bz2' % revision)
+    directory_name = join(self.root, "test262-%s" % revision)
+    if not exists(directory_name) or not exists(archive_name):
+      if not exists(archive_name):
+        print "Downloading test data from %s ..." % archive_url
+        urllib.urlretrieve(archive_url, archive_name)
+      if not exists(directory_name):
+        print "Extracting test262-%s.tar.bz2 ..." % revision
+        md5 = hashlib.md5()
+        with open(archive_name, 'rb') as f:
+          for chunk in iter(lambda: f.read(8192), ''):
+            md5.update(chunk)
+        if md5.hexdigest() != TEST_262_ARCHIVE_MD5:
+          raise Exception("Hash mismatch of test data file")
+        archive = tarfile.open(archive_name, 'r:bz2')
+        archive.extractall(join(self.root))
+      if not exists(join(self.root, 'data')):
+        os.symlink(directory_name, join(self.root, 'data'))
+
   def GetBuildRequirements(self):
     return ['d8']
 
diff --git a/tools/bash-completion.sh b/tools/bash-completion.sh
new file mode 100644
index 0000000..9f65c67
--- /dev/null
+++ b/tools/bash-completion.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Inspired by and based on:
+# http://src.chromium.org/viewvc/chrome/trunk/src/tools/bash-completion
+
+# Flag completion rule for bash.
+# To load in your shell, "source path/to/this/file".
+
+v8_source=$(readlink -f $(dirname $BASH_SOURCE)/..)
+
+_v8_flag() {
+  local cur defines targets
+  cur="${COMP_WORDS[COMP_CWORD]}"
+  defines=$(cat "$v8_source/src/flag-definitions.h" \
+    | grep "^DEFINE" \
+    | grep -v "DEFINE_implication" \
+    | sed -e 's/_/-/g')
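+  # Emit --<flag> for every define, --no<flag> for boolean defines, and
+  # any extra --flags that d8.cc compares against argv directly.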
+  targets=$(echo "$defines" \
+    | sed -ne 's/^DEFINE-[^(]*(\([^,]*\).*/--\1/p'; \
+    echo "$defines" \
+    | sed -ne 's/^DEFINE-bool(\([^,]*\).*/--no\1/p'; \
+    cat "$v8_source/src/d8.cc" \
+    | grep "strcmp(argv\[i\]" \
+    | sed -ne 's/^[^"]*"--\([^"]*\)".*/--\1/p')
+  COMPREPLY=($(compgen -W "$targets" -- "$cur"))
+  return 0
+}
+
+complete -F _v8_flag -f d8
diff --git a/tools/check-static-initializers.sh b/tools/check-static-initializers.sh
new file mode 100644
index 0000000..e6da828
--- /dev/null
+++ b/tools/check-static-initializers.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Checks that the number of compilation units having at least one static
+# initializer in d8 does not exceed the expected count defined below.
+# Note that the project must be built with SCons before running this script.
+
+# Allow:
+#  - _GLOBAL__I__ZN2v810LineEditor6first_E
+#  - _GLOBAL__I__ZN2v88internal32AtomicOps_Internalx86CPUFeaturesE
+#  - _GLOBAL__I__ZN2v88internal8ThreadId18highest_thread_id_E
+expected_static_init_count=3
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+d8="${v8_root}/d8"
+
+if [ ! -f "$d8" ]; then
+  echo "Please build the project with SCons."
+  exit 1
+fi
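+
+# Static initializers appear in the symbol table as _GLOBAL__I_* entries,
+# e.g. (address and section letter vary):
+#   000000000129a3f0 t _GLOBAL__I__ZN2v810LineEditor6first_E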
+
+static_inits=$(nm "$d8" | grep _GLOBAL__I | awk '{ print $NF; }')
+
+static_init_count=$(echo "$static_inits" | wc -l)
+
+if [ $static_init_count -gt $expected_static_init_count ]; then
+  echo "Too many static initializers."
+  echo "$static_inits"
+  exit 1
+fi
diff --git a/tools/common-includes.sh b/tools/common-includes.sh
new file mode 100644
index 0000000..8f0e78b
--- /dev/null
+++ b/tools/common-includes.sh
@@ -0,0 +1,197 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This file contains common function definitions for various other shell
+# scripts in this directory. It is not meant to be executed by itself.
+
+# Important: before including this file, the following variables must be set:
+# - BRANCHNAME
+# - PERSISTFILE_BASENAME
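+#
+# A typical caller (e.g. tools/push-to-trunk.sh) sets something like
+#   BRANCHNAME=prepare-push
+#   PERSISTFILE_BASENAME=/tmp/v8-push-to-trunk-tempfile
+# before sourcing this file.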
+
+TEMP_BRANCH=$BRANCHNAME-temporary-branch-created-by-script
+VERSION_FILE="src/version.cc"
+CHANGELOG_ENTRY_FILE="$PERSISTFILE_BASENAME-changelog-entry"
+PATCH_FILE="$PERSISTFILE_BASENAME-patch"
+PATCH_OUTPUT_FILE="$PERSISTFILE_BASENAME-patch-output"
+COMMITMSG_FILE="$PERSISTFILE_BASENAME-commitmsg"
+TOUCHED_FILES_FILE="$PERSISTFILE_BASENAME-touched-files"
+TRUNK_REVISION_FILE="$PERSISTFILE_BASENAME-trunkrevision"
+START_STEP=0
+CURRENT_STEP=0
+
+die() {
+  [[ -n "$1" ]] && echo "Error: $1"
+  echo "Exiting."
+  exit 1
+}
+
+confirm() {
+  echo -n "$1 [Y/n] "
+  read ANSWER
+  if [[ -z "$ANSWER" || "$ANSWER" == "Y" || "$ANSWER" == "y" ]] ; then
+    return 0
+  else
+    return 1
+  fi
+}
+
+delete_branch() {
+  local MATCH=$(git branch | grep "$1" | awk '{print $NF}' | grep -x $1)
+  if [ "$MATCH" == "$1" ] ; then
+    confirm "Branch $1 exists, do you want to delete it?"
+    if [ $? -eq 0 ] ; then
+      git branch -D $1 || die "Deleting branch '$1' failed."
+      echo "Branch $1 deleted."
+    else
+      die "Can't continue. Please delete branch $1 and try again."
+    fi
+  fi
+}
+
+# Persist and restore variables to support canceling/resuming execution
+# of this script.
+persist() {
+  local VARNAME=$1
+  local FILE="$PERSISTFILE_BASENAME-$VARNAME"
+  echo "${!VARNAME}" > $FILE
+}
+
+restore() {
+  local VARNAME=$1
+  local FILE="$PERSISTFILE_BASENAME-$VARNAME"
+  local VALUE="$(cat $FILE)"
+  eval "$VARNAME=\"$VALUE\""
+}
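+
+# Example: persist "ANSWER" writes the value of $ANSWER to
+# "$PERSISTFILE_BASENAME-ANSWER"; restore "ANSWER" reads it back into
+# $ANSWER on a later (resumed) run.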
+
+restore_if_unset() {
+  local VARNAME=$1
+  [[ -z "${!VARNAME}" ]] && restore "$VARNAME"
+  [[ -z "${!VARNAME}" ]] && die "Variable '$VARNAME' could not be restored."
+}
+
+initial_environment_checks() {
+  # Cancel if this is not a git checkout.
+  [[ -d .git ]] \
+    || die "This is not a git checkout, this script won't work for you."
+
+  # Cancel if EDITOR is unset or not executable.
+  [[ -n "$EDITOR" && -x "$(which $EDITOR)" ]] \
+    || die "Please set your EDITOR environment variable, you'll need it."
+}
+
+common_prepare() {
+  # Check for a clean workdir.
+  [[ -z "$(git status -s -uno)" ]] \
+    || die "Workspace is not clean. Please commit or undo your changes."
+
+  # Persist current branch.
+  CURRENT_BRANCH=$(git status -s -b -uno | grep "^##" | awk '{print $2}')
+  persist "CURRENT_BRANCH"
+
+  # Fetch unfetched revisions.
+  git svn fetch || die "'git svn fetch' failed."
+
+  # Get ahold of a safe temporary branch and check it out.
+  if [ "$CURRENT_BRANCH" != "$TEMP_BRANCH" ] ; then
+    delete_branch $TEMP_BRANCH
+    git checkout -b $TEMP_BRANCH
+  fi
+
+  # Delete the branch that will be created later if it exists already.
+  delete_branch $BRANCHNAME
+}
+
+common_cleanup() {
+  restore_if_unset "CURRENT_BRANCH"
+  git checkout -f $CURRENT_BRANCH
+  [[ "$TEMP_BRANCH" != "$CURRENT_BRANCH" ]] && git branch -D $TEMP_BRANCH
+  [[ "$BRANCHNAME" != "$CURRENT_BRANCH" ]] && git branch -D $BRANCHNAME
+  # Clean up all temporary files.
+  rm -f "$PERSISTFILE_BASENAME"*
+}
+
+# These two functions take a prefix for the variable names as first argument.
+read_and_persist_version() {
+  for v in MAJOR_VERSION MINOR_VERSION BUILD_NUMBER PATCH_LEVEL; do
+    VARNAME="$1${v%%_*}"
+    VALUE=$(grep "#define $v" "$VERSION_FILE" | awk '{print $NF}')
+    eval "$VARNAME=\"$VALUE\""
+    persist "$VARNAME"
+  done
+}
+restore_version_if_unset() {
+  for v in MAJOR MINOR BUILD PATCH; do
+    restore_if_unset "$1$v"
+  done
+}
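+
+# Example: read_and_persist_version "LATEST_" defines and persists
+# LATEST_MAJOR, LATEST_MINOR, LATEST_BUILD and LATEST_PATCH from
+# src/version.cc; restore_version_if_unset "LATEST_" recovers them later.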
+
+upload_step() {
+  let CURRENT_STEP+=1
+  if [ $START_STEP -le $CURRENT_STEP ] ; then
+    echo ">>> Step $CURRENT_STEP: Upload for code review."
+    echo -n "Please enter the email address of a V8 reviewer for your patch: "
+    read REVIEWER
+    git cl upload -r "$REVIEWER" --send-mail \
+      || die "'git cl upload' failed, please try again."
+  fi
+}
+
+wait_for_lgtm() {
+  echo "Please wait for an LGTM, then type \"LGTM<Return>\" to commit your \
+change. (If you need to iterate on the patch or double-check that it's \
+sane, do so in another shell, but remember not to change the headline of \
+the uploaded CL.)"
+  unset ANSWER
+  while [ "$ANSWER" != "LGTM" ] ; do
+    [[ -n "$ANSWER" ]] && echo "That was not 'LGTM'."
+    echo -n "> "
+    read ANSWER
+  done
+}
+
+# Takes a file containing the patch to apply as first argument.
+apply_patch() {
+  patch -p1 < "$1" > "$PATCH_OUTPUT_FILE" || \
+    { cat "$PATCH_OUTPUT_FILE" && die "Applying the patch failed."; }
+  tee < "$PATCH_OUTPUT_FILE" >(awk '{print $NF}' >> "$TOUCHED_FILES_FILE")
+  rm "$PATCH_OUTPUT_FILE"
+}
+
+stage_files() {
+  # Stage added and modified files.
+  TOUCHED_FILES=$(cat "$TOUCHED_FILES_FILE")
+  for FILE in $TOUCHED_FILES ; do
+    git add "$FILE"
+  done
+  # Stage deleted files.
+  DELETED_FILES=$(git status -s -uno --porcelain | grep "^ D" \
+                                                 | awk '{print $NF}')
+  for FILE in $DELETED_FILES ; do
+    git rm "$FILE"
+  done
+  rm -f "$TOUCHED_FILES_FILE"
+}
diff --git a/tools/disasm.py b/tools/disasm.py
index c326382..681b425 100644
--- a/tools/disasm.py
+++ b/tools/disasm.py
@@ -48,7 +48,8 @@
 _ARCH_MAP = {
   "ia32": "-m i386",
   "x64": "-m i386 -M x86-64",
-  "arm": "-m arm"  # Not supported by our objdump build.
+  "arm": "-m arm",  # Not supported by our objdump build.
+  "mips": "-m mips"  # Not supported by our objdump build.
 }
 
 
diff --git a/tools/gc-nvp-trace-processor.py b/tools/gc-nvp-trace-processor.py
index 511ab2b..fe5a7f3 100755
--- a/tools/gc-nvp-trace-processor.py
+++ b/tools/gc-nvp-trace-processor.py
@@ -219,13 +219,17 @@
   if r['gc'] == 's':
     # there is no 'other' scope for scavenging collections.
     return 0
-  return r['pause'] - r['mark'] - r['sweep'] - r['compact'] - r['external']
+  return r['pause'] - r['mark'] - r['sweep'] - r['external']
 
 def scavenge_scope(r):
   if r['gc'] == 's':
     return r['pause'] - r['external']
   return 0
 
+
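+# Approximate time spent in user code by subtracting the time consumed by
+# incremental marking steps ('stepstook') from the reported mutator interval.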
+def real_mutator(r):
+  return r['mutator'] - r['stepstook']
+
 plots = [
   [
     Set('style fill solid 0.5 noborder'),
@@ -234,9 +238,24 @@
     Plot(Item('Scavenge', scavenge_scope, lc = 'green'),
          Item('Marking', 'mark', lc = 'purple'),
          Item('Sweep', 'sweep', lc = 'blue'),
-         Item('Compaction', 'compact', lc = 'red'),
          Item('External', 'external', lc = '#489D43'),
-         Item('Other', other_scope, lc = 'grey'))
+         Item('Other', other_scope, lc = 'grey'),
+         Item('IGC Steps', 'stepstook', lc = '#FF6347'))
+  ],
+  [
+    Set('style fill solid 0.5 noborder'),
+    Set('style histogram rowstacked'),
+    Set('style data histograms'),
+    Plot(Item('Scavenge', scavenge_scope, lc = 'green'),
+         Item('Marking', 'mark', lc = 'purple'),
+         Item('Sweep', 'sweep', lc = 'blue'),
+         Item('External', 'external', lc = '#489D43'),
+         Item('Other', other_scope, lc = '#ADD8E6'),
+         Item('External', 'external', lc = '#D3D3D3'))
+  ],
+
+  [
+    Plot(Item('Mutator', real_mutator, lc = 'black', style = 'lines'))
   ],
   [
     Set('style histogram rowstacked'),
@@ -275,7 +294,7 @@
   return reduce(lambda t,r: f(t, r[field]), trace, init)
 
 def calc_total(trace, field):
-  return freduce(lambda t,v: t + v, field, trace, 0)
+  return freduce(lambda t,v: t + long(v), field, trace, long(0))
 
 def calc_max(trace, field):
   return freduce(lambda t,r: max(t, r), field, trace, 0)
@@ -288,8 +307,9 @@
   trace = parse_gc_trace(filename)
 
   marksweeps = filter(lambda r: r['gc'] == 'ms', trace)
-  markcompacts = filter(lambda r: r['gc'] == 'mc', trace)
   scavenges = filter(lambda r: r['gc'] == 's', trace)
+  globalgcs = filter(lambda r: r['gc'] != 's', trace)
+
 
   charts = plot_all(plots, trace, filename)
 
@@ -302,7 +322,7 @@
     else:
       avg = 0
     if n > 1:
-      dev = math.sqrt(freduce(lambda t,r: (r - avg) ** 2, field, trace, 0) /
+      dev = math.sqrt(freduce(lambda t,r: t + (r - avg) ** 2, field, trace, 0) /
                       (n - 1))
     else:
       dev = 0
@@ -311,6 +331,31 @@
               '<td>%d</td><td>%d [dev %f]</td></tr>' %
               (prefix, n, total, max, avg, dev))
 
+  def HumanReadable(size):
+    suffixes = ['bytes', 'kB', 'MB', 'GB']
+    power = 1
+    for i in range(len(suffixes)):
+      if size < power*1024:
+        return "%.1f" % (float(size) / power) + " " + suffixes[i]
+      power *= 1024
+
+  def throughput(name, trace):
+    total_live_after = calc_total(trace, 'total_size_after')
+    total_live_before = calc_total(trace, 'total_size_before')
+    total_gc = calc_total(trace, 'pause')
+    if total_gc == 0:
+      return
+    out.write('GC %s Throughput (after): %s / %s ms = %s/ms<br/>' %
+              (name,
+               HumanReadable(total_live_after),
+               total_gc,
+               HumanReadable(total_live_after / total_gc)))
+    out.write('GC %s Throughput (before): %s / %s ms = %s/ms<br/>' %
+              (name,
+               HumanReadable(total_live_before),
+               total_gc,
+               HumanReadable(total_live_before / total_gc)))
+
 
   with open(filename + '.html', 'w') as out:
     out.write('<html><body>')
@@ -320,15 +365,17 @@
     stats(out, 'Total in GC', trace, 'pause')
     stats(out, 'Scavenge', scavenges, 'pause')
     stats(out, 'MarkSweep', marksweeps, 'pause')
-    stats(out, 'MarkCompact', markcompacts, 'pause')
     stats(out, 'Mark', filter(lambda r: r['mark'] != 0, trace), 'mark')
     stats(out, 'Sweep', filter(lambda r: r['sweep'] != 0, trace), 'sweep')
-    stats(out, 'Compact', filter(lambda r: r['compact'] != 0, trace), 'compact')
     stats(out,
           'External',
           filter(lambda r: r['external'] != 0, trace),
           'external')
     out.write('</table>')
+    throughput('TOTAL', trace)
+    throughput('MS', marksweeps)
+    throughput('OLDSPACE', globalgcs)
+    out.write('<br/>')
     for chart in charts:
       out.write('<img src="%s">' % chart)
       out.write('</body></html>')
diff --git a/tools/gcmole/gccause.lua b/tools/gcmole/gccause.lua
index a6fe542..b989176 100644
--- a/tools/gcmole/gccause.lua
+++ b/tools/gcmole/gccause.lua
@@ -48,6 +48,8 @@
 	    T[f] = true
 	    TrackCause(f, (lvl or 0) + 1)
 	 end
+
+	 if f == '<GC>' then break end
       end
    end
 end
diff --git a/tools/gcmole/gcmole.cc b/tools/gcmole/gcmole.cc
index 71ba24a..38ee6e0 100644
--- a/tools/gcmole/gcmole.cc
+++ b/tools/gcmole/gcmole.cc
@@ -69,6 +69,21 @@
 }
 
 
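+// The EXTERNAL value of v8::internal::StateTag marks transitions to code
+// outside V8; any function mentioning it is treated as a potential GC point.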
+static std::string EXTERNAL("EXTERNAL");
+static std::string STATE_TAG("enum v8::internal::StateTag");
+
+static bool IsExternalVMState(const clang::ValueDecl* var) {
+  const clang::EnumConstantDecl* enum_constant =
+      dyn_cast<clang::EnumConstantDecl>(var);
+  if (enum_constant != NULL && enum_constant->getNameAsString() == EXTERNAL) {
+    clang::QualType type = enum_constant->getType();
+    return (type.getAsString() == STATE_TAG);
+  }
+
+  return false;
+}
+
+
 struct Resolver {
   explicit Resolver(clang::ASTContext& ctx)
       : ctx_(ctx), decl_ctx_(ctx.getTranslationUnitDecl()) {
@@ -121,6 +136,13 @@
     return true;
   }
 
+  virtual bool VisitDeclRefExpr(clang::DeclRefExpr* expr) {
+    // If the function mentions the EXTERNAL VMState, add an artificial
+    // garbage collection mark.
+    if (IsExternalVMState(expr->getDecl())) AddCallee("CollectGarbage");
+    return true;
+  }
+
   void AnalyzeFunction(const clang::FunctionDecl* f) {
     MangledName name;
     if (InV8Namespace(f) && GetMangledName(ctx_, f, &name)) {
@@ -278,6 +300,10 @@
     return reinterpret_cast<Environment*>(effect_ & ~kAllEffects);
   }
 
+  static ExprEffect GC() {
+    return ExprEffect(kCausesGC, NULL);
+  }
+
  private:
   ExprEffect(int effect, Environment* env)
       : effect_((effect & kAllEffects) |
@@ -790,6 +816,9 @@
   ExprEffect Use(const clang::Expr* parent,
                  const clang::ValueDecl* var,
                  const Environment& env) {
+    if (IsExternalVMState(var)) {
+      return ExprEffect::GC();
+    }
     return Use(parent, var->getType(), var->getNameAsString(), env);
   }
 
diff --git a/tools/gcmole/gcmole.lua b/tools/gcmole/gcmole.lua
index f8d3b62..09db547 100644
--- a/tools/gcmole/gcmole.lua
+++ b/tools/gcmole/gcmole.lua
@@ -106,7 +106,6 @@
                                          cfg.plugin_args,
                                          cfg.triple,
                                          cfg.arch_define)
-
    for _, filename in ipairs(filenames) do
       log("-- %s", filename)
       local action = cmd_line .. " src/" .. filename .. " 2>&1"
@@ -218,7 +217,13 @@
   --      Callsites of such functions are safe as long as they properly
   --      check the return value and propagate the Failure to the caller.
    --      It should be possible to extend GCMole to understand this.
-   "Heap.*AllocateFunctionPrototype"
+   "Heap.*AllocateFunctionPrototype",
+
+   -- Ignore all StateTag methods.
+   "StateTag",
+
+   -- Ignore printing of elements transition.
+   "PrintElementsTransition"
 };
 
 local function AddCause(name, cause)
diff --git a/tools/gen-postmortem-metadata.py b/tools/gen-postmortem-metadata.py
new file mode 100644
index 0000000..b9b1625
--- /dev/null
+++ b/tools/gen-postmortem-metadata.py
@@ -0,0 +1,481 @@
+#!/usr/bin/env python
+
+#
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#
+# Emits a C++ file to be compiled and linked into libv8 to support postmortem
+# debugging tools.  Most importantly, this tool emits constants describing V8
+# internals:
+#
+#    v8dbg_type_CLASS__TYPE = VALUE             Describes class type values
+#    v8dbg_class_CLASS__FIELD__TYPE = OFFSET    Describes class fields
+#    v8dbg_parent_CLASS__PARENT                 Describes class hierarchy
+#    v8dbg_frametype_NAME = VALUE               Describes stack frame values
+#    v8dbg_off_fp_NAME = OFFSET                 Frame pointer offsets
+#    v8dbg_prop_NAME = OFFSET                   Object property offsets
+#    v8dbg_NAME = VALUE                         Miscellaneous values
+#
+# These constants are declared as global integers so that they'll be present in
+# the generated libv8 binary.
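+#
+# For example, the emitted file contains lines such as:
+#
+#    int v8dbg_type_JSFunction__JS_FUNCTION_TYPE = JS_FUNCTION_TYPE;
+#    int v8dbg_class_Map__instance_size__int = Map::kInstanceSizeOffset;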
+#
+
+import re
+import sys
+
+#
+# Miscellaneous constants, tags, and masks used for object identification.
+#
+consts_misc = [
+    { 'name': 'FirstNonstringType',     'value': 'FIRST_NONSTRING_TYPE' },
+
+    { 'name': 'IsNotStringMask',        'value': 'kIsNotStringMask' },
+    { 'name': 'StringTag',              'value': 'kStringTag' },
+    { 'name': 'NotStringTag',           'value': 'kNotStringTag' },
+
+    { 'name': 'StringEncodingMask',     'value': 'kStringEncodingMask' },
+    { 'name': 'TwoByteStringTag',       'value': 'kTwoByteStringTag' },
+    { 'name': 'AsciiStringTag',         'value': 'kAsciiStringTag' },
+
+    { 'name': 'StringRepresentationMask',
+        'value': 'kStringRepresentationMask' },
+    { 'name': 'SeqStringTag',           'value': 'kSeqStringTag' },
+    { 'name': 'ConsStringTag',          'value': 'kConsStringTag' },
+    { 'name': 'ExternalStringTag',      'value': 'kExternalStringTag' },
+
+    { 'name': 'FailureTag',             'value': 'kFailureTag' },
+    { 'name': 'FailureTagMask',         'value': 'kFailureTagMask' },
+    { 'name': 'HeapObjectTag',          'value': 'kHeapObjectTag' },
+    { 'name': 'HeapObjectTagMask',      'value': 'kHeapObjectTagMask' },
+    { 'name': 'SmiTag',                 'value': 'kSmiTag' },
+    { 'name': 'SmiTagMask',             'value': 'kSmiTagMask' },
+    { 'name': 'SmiValueShift',          'value': 'kSmiTagSize' },
+    { 'name': 'PointerSizeLog2',        'value': 'kPointerSizeLog2' },
+
+    { 'name': 'prop_idx_content',
+        'value': 'DescriptorArray::kContentArrayIndex' },
+    { 'name': 'prop_idx_first',
+        'value': 'DescriptorArray::kFirstIndex' },
+    { 'name': 'prop_type_field',
+        'value': 'FIELD' },
+    { 'name': 'prop_type_first_phantom',
+        'value': 'MAP_TRANSITION' },
+    { 'name': 'prop_type_mask',
+        'value': 'PropertyDetails::TypeField::kMask' },
+
+    { 'name': 'off_fp_context',
+        'value': 'StandardFrameConstants::kContextOffset' },
+    { 'name': 'off_fp_marker',
+        'value': 'StandardFrameConstants::kMarkerOffset' },
+    { 'name': 'off_fp_function',
+        'value': 'JavaScriptFrameConstants::kFunctionOffset' },
+    { 'name': 'off_fp_args',
+        'value': 'JavaScriptFrameConstants::kLastParameterOffset' },
+];
+
+#
+# The following useful fields are missing accessors, so we define fake ones.
+#
+extras_accessors = [
+    'HeapObject, map, Map, kMapOffset',
+    'JSObject, elements, Object, kElementsOffset',
+    'FixedArray, data, uintptr_t, kHeaderSize',
+    'Map, instance_attributes, int, kInstanceAttributesOffset',
+    'Map, instance_descriptors, int, kInstanceDescriptorsOrBitField3Offset',
+    'Map, inobject_properties, int, kInObjectPropertiesOffset',
+    'Map, instance_size, int, kInstanceSizeOffset',
+    'HeapNumber, value, double, kValueOffset',
+    'ConsString, first, String, kFirstOffset',
+    'ConsString, second, String, kSecondOffset',
+    'ExternalString, resource, Object, kResourceOffset',
+    'SeqAsciiString, chars, char, kHeaderSize',
+    'SharedFunctionInfo, code, Code, kCodeOffset',
+    'Code, instruction_start, uintptr_t, kHeaderSize',
+    'Code, instruction_size, int, kInstructionSizeOffset',
+];
+
+#
+# The following is a whitelist of classes we expect to find when scanning the
+# source code. This list is not exhaustive, but it's still useful to identify
+# when this script gets out of sync with the source. See load_objects().
+#
+expected_classes = [
+    'ConsString', 'FixedArray', 'HeapNumber', 'JSArray', 'JSFunction',
+    'JSObject', 'JSRegExp', 'JSValue', 'Map', 'Oddball', 'Script',
+    'SeqAsciiString', 'SharedFunctionInfo'
+];
+
+
+#
+# The following structures store high-level representations of the structures
+# for which we're going to emit descriptive constants.
+#
+types = {};             # set of all type names
+typeclasses = {};       # maps type names to corresponding class names
+klasses = {};           # known classes, including parents
+fields = [];            # field declarations
+
+header = '''
+/*
+ * This file is generated by %s.  Do not edit directly.
+ */
+
+#include "v8.h"
+#include "frames.h"
+#include "frames-inl.h" /* for architecture-specific frame constants */
+
+using namespace v8::internal;
+
+extern "C" {
+
+/* stack frame constants */
+#define FRAME_CONST(value, klass)       \
+    int v8dbg_frametype_##klass = StackFrame::value;
+
+STACK_FRAME_TYPE_LIST(FRAME_CONST)
+
+#undef FRAME_CONST
+
+''' % sys.argv[0];
+
+footer = '''
+}
+'''
+
+#
+# Loads class hierarchy and type information from "objects.h".
+#
+def load_objects():
+        objfilename = sys.argv[2];
+        objfile = open(objfilename, 'r');
+        in_insttype = False;
+
+        typestr = '';
+
+        #
+        # Construct a dictionary for the classes we're sure should be present.
+        #
+        checktypes = {};
+        for klass in expected_classes:
+                checktypes[klass] = True;
+
+        #
+        # Iterate objects.h line-by-line to collect type and class information.
+        # For types, we accumulate a string representing the entire InstanceType
+        # enum definition and parse it later because it's easier to do so
+        # without the embedded newlines.
+        #
+        for line in objfile:
+                if (line.startswith('enum InstanceType {')):
+                        in_insttype = True;
+                        continue;
+
+                if (in_insttype and line.startswith('};')):
+                        in_insttype = False;
+                        continue;
+
+                line = re.sub('//.*', '', line.rstrip().lstrip());
+
+                if (in_insttype):
+                        typestr += line;
+                        continue;
+
+                match = re.match('class (\w[^\s:]*)(: public (\w[^\s{]*))?\s*{',
+                    line);
+
+                if (match):
+                        klass = match.group(1);
+                        pklass = match.group(3);
+                        klasses[klass] = { 'parent': pklass };
+
+        #
+        # Process the instance type declaration.
+        #
+        entries = typestr.split(',');
+        for entry in entries:
+                types[re.sub('\s*=.*', '', entry).lstrip()] = True;
+
+        #
+        # Infer class names for each type based on a systematic transformation.
+        # For example, "JS_FUNCTION_TYPE" becomes "JSFunction".  We find the
+        # class for each type rather than the other way around because it is
+        # rarer for one type to map to more than one class than for one class
+        # to map to more than one type.
+        #
+        for type in types:
+                #
+                # Symbols and Strings are implemented using the same classes.
+                #
+                usetype = re.sub('SYMBOL_', 'STRING_', type);
+
+                #
+                # REGEXP behaves like REG_EXP, as in JS_REGEXP_TYPE => JSRegExp.
+                #
+                usetype = re.sub('_REGEXP_', '_REG_EXP_', usetype);
+
+                #
+                # Remove the "_TYPE" suffix and then convert to camel case,
+                # except that a "JS" prefix remains uppercase (as in
+                # "JS_FUNCTION_TYPE" => "JSFunction").
+                #
+                if (not usetype.endswith('_TYPE')):
+                        continue;
+
+                usetype = usetype[0:len(usetype) - len('_TYPE')];
+                parts = usetype.split('_');
+                cctype = '';
+
+                if (parts[0] == 'JS'):
+                        cctype = 'JS';
+                        start = 1;
+                else:
+                        cctype = '';
+                        start = 0;
+
+                for ii in range(start, len(parts)):
+                        part = parts[ii];
+                        cctype += part[0].upper() + part[1:].lower();
+
+                #
+                # Mapping string types is more complicated.  Both types and
+                # class names for Strings specify a representation (e.g., Seq,
+                # Cons, External, or Sliced) and an encoding (TwoByte or Ascii).
+                # In the simplest case, both of these are explicit in both
+                # names, as in:
+                #
+                #       EXTERNAL_ASCII_STRING_TYPE => ExternalAsciiString
+                #
+                # However, either the representation or encoding can be omitted
+                # from the type name, in which case "Seq" and "TwoByte" are
+                # assumed, as in:
+                #
+                #       STRING_TYPE => SeqTwoByteString
+                #
+                # Additionally, sometimes the type name has more information
+                # than the class, as in:
+                #
+                #       CONS_ASCII_STRING_TYPE => ConsString
+                #
+                # To figure this out dynamically, we first check for a
+                # representation and encoding and add them if they're not
+                # present.  If that doesn't yield a valid class name, then we
+                # strip out the representation.
+                #
+                if (cctype.endswith('String')):
+                        if (cctype.find('Cons') == -1 and
+                            cctype.find('External') == -1 and
+                            cctype.find('Sliced') == -1):
+                                if (cctype.find('Ascii') != -1):
+                                        cctype = re.sub('AsciiString$',
+                                            'SeqAsciiString', cctype);
+                                else:
+                                        cctype = re.sub('String$',
+                                            'SeqString', cctype);
+
+                        if (cctype.find('Ascii') == -1):
+                                cctype = re.sub('String$', 'TwoByteString',
+                                    cctype);
+
+                        if (not (cctype in klasses)):
+                                cctype = re.sub('Ascii', '', cctype);
+                                cctype = re.sub('TwoByte', '', cctype);
+
+                #
+                # Despite all that, some types have no corresponding class.
+                #
+                if (cctype in klasses):
+                        typeclasses[type] = cctype;
+                        if (cctype in checktypes):
+                                del checktypes[cctype];
+
+        if (len(checktypes) > 0):
+                for klass in checktypes:
+                        print('error: expected class \"%s\" not found' % klass);
+
+                sys.exit(1);
+
+
+#
+# For a given macro call, pick apart the arguments and return an object
+# describing the corresponding output constant.  See load_fields().
+#
+def parse_field(call):
+        # Replace newlines with spaces.
+        call = call.replace('\n', ' ');
+
+        idx = call.find('(');
+        kind = call[0:idx];
+        rest = call[idx + 1: len(call) - 1];
+        args = re.split('\s*,\s*', rest);
+
+        consts = [];
+
+        if (kind == 'ACCESSORS' or kind == 'ACCESSORS_GCSAFE'):
+                klass = args[0];
+                field = args[1];
+                dtype = args[2];
+                offset = args[3];
+
+                return ({
+                    'name': 'class_%s__%s__%s' % (klass, field, dtype),
+                    'value': '%s::%s' % (klass, offset)
+                });
+
+        assert(kind == 'SMI_ACCESSORS');
+        klass = args[0];
+        field = args[1];
+        offset = args[2];
+
+        return ({
+            'name': 'class_%s__%s__%s' % (klass, field, 'SMI'),
+            'value': '%s::%s' % (klass, offset)
+        });
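+
+# Example: parse_field('ACCESSORS(JSObject, elements, Object, kElementsOffset)')
+# returns {'name': 'class_JSObject__elements__Object',
+#          'value': 'JSObject::kElementsOffset'}.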
+
+#
+# Load field offset information from objects-inl.h.
+#
+def load_fields():
+        inlfilename = sys.argv[3];
+        inlfile = open(inlfilename, 'r');
+
+        #
+        # Each class's fields and the corresponding offsets are described in the
+        # source by calls to macros like "ACCESSORS" (and friends).  All we do
+        # here is extract these macro invocations, taking into account that they
+        # may span multiple lines and may contain nested parentheses.  We also
+        # call parse_field() to pick apart the invocation.
+        #
+        prefixes = [ 'ACCESSORS', 'ACCESSORS_GCSAFE', 'SMI_ACCESSORS' ];
+        current = '';
+        opens = 0;
+
+        for line in inlfile:
+                if (opens > 0):
+                        # Continuation line
+                        for ii in range(0, len(line)):
+                                if (line[ii] == '('):
+                                        opens += 1;
+                                elif (line[ii] == ')'):
+                                        opens -= 1;
+
+                                if (opens == 0):
+                                        break;
+
+                        current += line[0:ii + 1];
+                        continue;
+
+                for prefix in prefixes:
+                        if (not line.startswith(prefix + '(')):
+                                continue;
+
+                        if (len(current) > 0):
+                                fields.append(parse_field(current));
+                                current = '';
+
+                        for ii in range(len(prefix), len(line)):
+                                if (line[ii] == '('):
+                                        opens += 1;
+                                elif (line[ii] == ')'):
+                                        opens -= 1;
+
+                                if (opens == 0):
+                                        break;
+
+                        current += line[0:ii + 1];
+
+        if (len(current) > 0):
+                fields.append(parse_field(current));
+                current = '';
+
+        for body in extras_accessors:
+                fields.append(parse_field('ACCESSORS(%s)' % body));
+
+#
+# Emit a block of constants.
+#
+def emit_set(out, consts):
+        for ii in range(0, len(consts)):
+                out.write('int v8dbg_%s = %s;\n' %
+                    (consts[ii]['name'], consts[ii]['value']));
+        out.write('\n');
+
+#
+# Emit the whole output file.
+#
+def emit_config():
+        out = file(sys.argv[1], 'w');
+
+        out.write(header);
+
+        out.write('/* miscellaneous constants */\n');
+        emit_set(out, consts_misc);
+
+        out.write('/* class type information */\n');
+        consts = [];
+        keys = typeclasses.keys();
+        keys.sort();
+        for typename in keys:
+                klass = typeclasses[typename];
+                consts.append({
+                    'name': 'type_%s__%s' % (klass, typename),
+                    'value': typename
+                });
+
+        emit_set(out, consts);
+
+        out.write('/* class hierarchy information */\n');
+        consts = [];
+        keys = klasses.keys();
+        keys.sort();
+        for klassname in keys:
+                pklass = klasses[klassname]['parent'];
+                if (pklass == None):
+                        continue;
+
+                consts.append({
+                    'name': 'parent_%s__%s' % (klassname, pklass),
+                    'value': 0
+                });
+
+        emit_set(out, consts);
+
+        out.write('/* field information */\n');
+        emit_set(out, fields);
+
+        out.write(footer);
+
+if (len(sys.argv) < 4):
+        print('usage: %s output.cc objects.h objects-inl.h' % sys.argv[0]);
+        sys.exit(2);
+
+load_objects();
+load_fields();
+emit_config();
diff --git a/tools/grokdump.py b/tools/grokdump.py
index 6bc49c6..9977289 100755
--- a/tools/grokdump.py
+++ b/tools/grokdump.py
@@ -52,6 +52,7 @@
   $ %prog 12345678-1234-1234-1234-123456789abcd-full.dmp
 """
 
+
 DEBUG=False
 
 
@@ -233,6 +234,80 @@
                 MD_CONTEXT_X86_EXTENDED_REGISTERS))
 ])
 
+MD_CONTEXT_AMD64 = 0x00100000
+MD_CONTEXT_AMD64_CONTROL = (MD_CONTEXT_AMD64 | 0x00000001)
+MD_CONTEXT_AMD64_INTEGER = (MD_CONTEXT_AMD64 | 0x00000002)
+MD_CONTEXT_AMD64_SEGMENTS = (MD_CONTEXT_AMD64 | 0x00000004)
+MD_CONTEXT_AMD64_FLOATING_POINT = (MD_CONTEXT_AMD64 | 0x00000008)
+MD_CONTEXT_AMD64_DEBUG_REGISTERS = (MD_CONTEXT_AMD64 | 0x00000010)
+
+MINIDUMP_CONTEXT_AMD64 = Descriptor([
+  ("p1_home", ctypes.c_uint64),
+  ("p2_home", ctypes.c_uint64),
+  ("p3_home", ctypes.c_uint64),
+  ("p4_home", ctypes.c_uint64),
+  ("p5_home", ctypes.c_uint64),
+  ("p6_home", ctypes.c_uint64),
+  ("context_flags", ctypes.c_uint32),
+  ("mx_csr", ctypes.c_uint32),
+  # MD_CONTEXT_AMD64_CONTROL.
+  ("cs", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_CONTROL)),
+  # MD_CONTEXT_AMD64_SEGMENTS
+  ("ds", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_SEGMENTS)),
+  ("es", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_SEGMENTS)),
+  ("fs", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_SEGMENTS)),
+  ("gs", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_SEGMENTS)),
+  # MD_CONTEXT_AMD64_CONTROL.
+  ("ss", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_CONTROL)),
+  ("eflags", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_AMD64_CONTROL)),
+  # MD_CONTEXT_AMD64_DEBUG_REGISTERS.
+  ("dr0", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("dr1", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("dr2", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("dr3", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("dr6", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("dr7", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  # MD_CONTEXT_AMD64_INTEGER.
+  ("rax", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("rcx", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("rdx", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("rbx", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  # MD_CONTEXT_AMD64_CONTROL.
+  ("rsp", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_CONTROL)),
+  # MD_CONTEXT_AMD64_INTEGER.
+  ("rbp", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("rsi", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("rdi", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r8", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r9", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r10", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r11", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r12", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r13", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r14", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r15", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  # MD_CONTEXT_AMD64_CONTROL.
+  ("rip", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_CONTROL)),
+  # MD_CONTEXT_AMD64_FLOATING_POINT
+  ("sse_registers", EnableOnFlag(ctypes.c_uint8 * (16 * 26),
+                                 MD_CONTEXT_AMD64_FLOATING_POINT)),
+  ("vector_registers", EnableOnFlag(ctypes.c_uint8 * (16 * 26),
+                                    MD_CONTEXT_AMD64_FLOATING_POINT)),
+  ("vector_control", EnableOnFlag(ctypes.c_uint64,
+                                  MD_CONTEXT_AMD64_FLOATING_POINT)),
+  # MD_CONTEXT_AMD64_DEBUG_REGISTERS.
+  ("debug_control", EnableOnFlag(ctypes.c_uint64,
+                                 MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("last_branch_to_rip", EnableOnFlag(ctypes.c_uint64,
+                                      MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("last_branch_from_rip", EnableOnFlag(ctypes.c_uint64,
+                                        MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("last_exception_to_rip", EnableOnFlag(ctypes.c_uint64,
+                                         MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("last_exception_from_rip", EnableOnFlag(ctypes.c_uint64,
+                                           MD_CONTEXT_AMD64_DEBUG_REGISTERS))
+])
+
 MINIDUMP_MEMORY_DESCRIPTOR = Descriptor([
   ("start", ctypes.c_uint64),
   ("memory", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
@@ -269,6 +344,12 @@
   ("threads", lambda t: MINIDUMP_THREAD.ctype * t.thread_count)
 ])
 
+MINIDUMP_RAW_SYSTEM_INFO = Descriptor([
+  ("processor_architecture", ctypes.c_uint16)
+])
+
+MD_CPU_ARCHITECTURE_X86 = 0
+MD_CPU_ARCHITECTURE_AMD64 = 9
 
 class MinidumpReader(object):
   """Minidump (.dmp) reader."""
@@ -288,20 +369,34 @@
     for _ in xrange(self.header.stream_count):
       directories.append(MINIDUMP_DIRECTORY.Read(self.minidump, offset))
       offset += MINIDUMP_DIRECTORY.size
+    self.arch = None
     self.exception = None
     self.exception_context = None
     self.memory_list = None
     self.memory_list64 = None
     self.thread_map = {}
+
+    # Find MDRawSystemInfo stream and determine arch.
+    for d in directories:
+      if d.stream_type == MD_SYSTEM_INFO_STREAM:
+        system_info = MINIDUMP_RAW_SYSTEM_INFO.Read(
+            self.minidump, d.location.rva)
+        self.arch = system_info.processor_architecture
+        assert self.arch in [MD_CPU_ARCHITECTURE_AMD64, MD_CPU_ARCHITECTURE_X86]
+    assert self.arch is not None
+
     for d in directories:
       DebugPrint(d)
-      # TODO(vitalyr): extract system info including CPU features.
       if d.stream_type == MD_EXCEPTION_STREAM:
         self.exception = MINIDUMP_EXCEPTION_STREAM.Read(
           self.minidump, d.location.rva)
         DebugPrint(self.exception)
-        self.exception_context = MINIDUMP_CONTEXT_X86.Read(
-          self.minidump, self.exception.thread_context.rva)
+        if self.arch == MD_CPU_ARCHITECTURE_X86:
+          self.exception_context = MINIDUMP_CONTEXT_X86.Read(
+              self.minidump, self.exception.thread_context.rva)
+        elif self.arch == MD_CPU_ARCHITECTURE_AMD64:
+          self.exception_context = MINIDUMP_CONTEXT_AMD64.Read(
+              self.minidump, self.exception.thread_context.rva)
         DebugPrint(self.exception_context)
       elif d.stream_type == MD_THREAD_LIST_STREAM:
         thread_list = MINIDUMP_THREAD_LIST.Read(self.minidump, d.location.rva)
@@ -335,6 +430,16 @@
     location = self.FindLocation(address)
     return ctypes.c_uint32.from_buffer(self.minidump, location).value
 
+  def ReadU64(self, address):
+    location = self.FindLocation(address)
+    return ctypes.c_uint64.from_buffer(self.minidump, location).value
+
+  def ReadUIntPtr(self, address):
+    if self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return self.ReadU64(address)
+    elif self.arch == MD_CPU_ARCHITECTURE_X86:
+      return self.ReadU32(address)
+
   def ReadBytes(self, address, size):
     location = self.FindLocation(address)
     return self.minidump[location:location + size]
@@ -355,10 +460,15 @@
   def GetDisasmLines(self, address, size):
     location = self.FindLocation(address)
     if location is None: return []
+    arch = None
+    if self.arch == MD_CPU_ARCHITECTURE_X86:
+      arch = "ia32"
+    elif self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      arch = "x64"
     return disasm.GetDisasmLines(self.minidump_name,
                                  location,
                                  size,
-                                 "ia32",
+                                 arch,
                                  False)
 
 
@@ -366,13 +476,40 @@
     self.minidump.close()
     self.minidump_file.close()
 
+  def ExceptionIP(self):
+    if self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return self.exception_context.rip
+    elif self.arch == MD_CPU_ARCHITECTURE_X86:
+      return self.exception_context.eip
+
+  def ExceptionSP(self):
+    if self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return self.exception_context.rsp
+    elif self.arch == MD_CPU_ARCHITECTURE_X86:
+      return self.exception_context.esp
+
+  def FormatIntPtr(self, value):
+    if self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return "%016x" % value
+    elif self.arch == MD_CPU_ARCHITECTURE_X86:
+      return "%08x" % value
+
+  def PointerSize(self):
+    if self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return 8
+    elif self.arch == MD_CPU_ARCHITECTURE_X86:
+      return 4
+
+  def Register(self, name):
+    return self.exception_context.__getattribute__(name)
+
 
 # List of V8 instance types. Obtained by adding the code below to any .cc file.
 #
-# #define DUMP_TYPE(T) printf("%d: \"%s\",\n", T, #T);
+# #define DUMP_TYPE(T) printf("  %d: \"%s\",\n", T, #T);
 # struct P {
 #   P() {
-#     printf("{\n");
+#     printf("INSTANCE_TYPES = {\n");
 #     INSTANCE_TYPE_LIST(DUMP_TYPE)
 #     printf("}\n");
 #   }
@@ -386,13 +523,20 @@
   66: "EXTERNAL_SYMBOL_TYPE",
   74: "EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE",
   70: "EXTERNAL_ASCII_SYMBOL_TYPE",
+  82: "SHORT_EXTERNAL_SYMBOL_TYPE",
+  90: "SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE",
+  86: "SHORT_EXTERNAL_ASCII_SYMBOL_TYPE",
   0: "STRING_TYPE",
   4: "ASCII_STRING_TYPE",
   1: "CONS_STRING_TYPE",
   5: "CONS_ASCII_STRING_TYPE",
+  3: "SLICED_STRING_TYPE",
   2: "EXTERNAL_STRING_TYPE",
   10: "EXTERNAL_STRING_WITH_ASCII_DATA_TYPE",
   6: "EXTERNAL_ASCII_STRING_TYPE",
+  18: "SHORT_EXTERNAL_STRING_TYPE",
+  26: "SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE",
+  22: "SHORT_EXTERNAL_ASCII_STRING_TYPE",
   6: "PRIVATE_EXTERNAL_ASCII_STRING_TYPE",
   128: "MAP_TYPE",
   129: "CODE_TYPE",
@@ -401,43 +545,46 @@
   132: "HEAP_NUMBER_TYPE",
   133: "FOREIGN_TYPE",
   134: "BYTE_ARRAY_TYPE",
-  135: "EXTERNAL_BYTE_ARRAY_TYPE",
-  136: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE",
-  137: "EXTERNAL_SHORT_ARRAY_TYPE",
-  138: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE",
-  139: "EXTERNAL_INT_ARRAY_TYPE",
-  140: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE",
-  141: "EXTERNAL_FLOAT_ARRAY_TYPE",
-  143: "EXTERNAL_PIXEL_ARRAY_TYPE",
-  145: "FILLER_TYPE",
-  146: "ACCESSOR_INFO_TYPE",
-  147: "ACCESS_CHECK_INFO_TYPE",
-  148: "INTERCEPTOR_INFO_TYPE",
-  149: "CALL_HANDLER_INFO_TYPE",
-  150: "FUNCTION_TEMPLATE_INFO_TYPE",
-  151: "OBJECT_TEMPLATE_INFO_TYPE",
-  152: "SIGNATURE_INFO_TYPE",
-  153: "TYPE_SWITCH_INFO_TYPE",
-  154: "SCRIPT_TYPE",
-  155: "CODE_CACHE_TYPE",
-  156: "POLYMORPHIC_CODE_CACHE_TYPE",
-  159: "FIXED_ARRAY_TYPE",
-  160: "SHARED_FUNCTION_INFO_TYPE",
-  161: "JS_MESSAGE_OBJECT_TYPE",
-  162: "JS_VALUE_TYPE",
-  163: "JS_OBJECT_TYPE",
-  164: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
-  165: "JS_GLOBAL_OBJECT_TYPE",
-  166: "JS_BUILTINS_OBJECT_TYPE",
-  167: "JS_GLOBAL_PROXY_TYPE",
-  168: "JS_ARRAY_TYPE",
-  169: "JS_PROXY_TYPE",
-  170: "JS_WEAK_MAP_TYPE",
-  171: "JS_REGEXP_TYPE",
-  172: "JS_FUNCTION_TYPE",
-  173: "JS_FUNCTION_PROXY_TYPE",
-  157: "DEBUG_INFO_TYPE",
-  158: "BREAK_POINT_INFO_TYPE",
+  135: "FREE_SPACE_TYPE",
+  136: "EXTERNAL_BYTE_ARRAY_TYPE",
+  137: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE",
+  138: "EXTERNAL_SHORT_ARRAY_TYPE",
+  139: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE",
+  140: "EXTERNAL_INT_ARRAY_TYPE",
+  141: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE",
+  142: "EXTERNAL_FLOAT_ARRAY_TYPE",
+  144: "EXTERNAL_PIXEL_ARRAY_TYPE",
+  146: "FILLER_TYPE",
+  147: "ACCESSOR_INFO_TYPE",
+  148: "ACCESSOR_PAIR_TYPE",
+  149: "ACCESS_CHECK_INFO_TYPE",
+  150: "INTERCEPTOR_INFO_TYPE",
+  151: "CALL_HANDLER_INFO_TYPE",
+  152: "FUNCTION_TEMPLATE_INFO_TYPE",
+  153: "OBJECT_TEMPLATE_INFO_TYPE",
+  154: "SIGNATURE_INFO_TYPE",
+  155: "TYPE_SWITCH_INFO_TYPE",
+  156: "SCRIPT_TYPE",
+  157: "CODE_CACHE_TYPE",
+  158: "POLYMORPHIC_CODE_CACHE_TYPE",
+  161: "FIXED_ARRAY_TYPE",
+  145: "FIXED_DOUBLE_ARRAY_TYPE",
+  162: "SHARED_FUNCTION_INFO_TYPE",
+  163: "JS_MESSAGE_OBJECT_TYPE",
+  166: "JS_VALUE_TYPE",
+  167: "JS_OBJECT_TYPE",
+  168: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+  169: "JS_GLOBAL_OBJECT_TYPE",
+  170: "JS_BUILTINS_OBJECT_TYPE",
+  171: "JS_GLOBAL_PROXY_TYPE",
+  172: "JS_ARRAY_TYPE",
+  165: "JS_PROXY_TYPE",
+  175: "JS_WEAK_MAP_TYPE",
+  176: "JS_REGEXP_TYPE",
+  177: "JS_FUNCTION_TYPE",
+  164: "JS_FUNCTION_PROXY_TYPE",
+  159: "DEBUG_INFO_TYPE",
+  160: "BREAK_POINT_INFO_TYPE",
 }
 
 
@@ -501,34 +648,36 @@
     p.Print(str(self))
 
   def __str__(self):
-    return "HeapObject(%08x, %s)" % (self.address,
-                                     INSTANCE_TYPES[self.map.instance_type])
+    return "HeapObject(%s, %s)" % (self.heap.reader.FormatIntPtr(self.address),
+                                   INSTANCE_TYPES[self.map.instance_type])
 
   def ObjectField(self, offset):
-    field_value = self.heap.reader.ReadU32(self.address + offset)
+    field_value = self.heap.reader.ReadUIntPtr(self.address + offset)
     return self.heap.FindObjectOrSmi(field_value)
 
   def SmiField(self, offset):
-    field_value = self.heap.reader.ReadU32(self.address + offset)
+    field_value = self.heap.reader.ReadUIntPtr(self.address + offset)
     assert (field_value & 1) == 0
     return field_value / 2
 
 
 class Map(HeapObject):
-  INSTANCE_TYPE_OFFSET = 8
+  def InstanceTypeOffset(self):
+    return self.heap.PointerSize() + self.heap.IntSize()
 
   def __init__(self, heap, map, address):
     HeapObject.__init__(self, heap, map, address)
     self.instance_type = \
-        heap.reader.ReadU8(self.address + Map.INSTANCE_TYPE_OFFSET)
+        heap.reader.ReadU8(self.address + self.InstanceTypeOffset())
 
 
 class String(HeapObject):
-  LENGTH_OFFSET = 4
+  def LengthOffset(self):
+    return self.heap.PointerSize()
 
   def __init__(self, heap, map, address):
     HeapObject.__init__(self, heap, map, address)
-    self.length = self.SmiField(String.LENGTH_OFFSET)
+    self.length = self.SmiField(self.LengthOffset())
 
   def GetChars(self):
     return "?string?"
@@ -541,11 +690,12 @@
 
 
 class SeqString(String):
-  CHARS_OFFSET = 12
+  def CharsOffset(self):
+    return self.heap.PointerSize() * 3
 
   def __init__(self, heap, map, address):
     String.__init__(self, heap, map, address)
-    self.chars = heap.reader.ReadBytes(self.address + SeqString.CHARS_OFFSET,
+    self.chars = heap.reader.ReadBytes(self.address + self.CharsOffset(),
                                        self.length)
 
   def GetChars(self):
@@ -553,6 +703,7 @@
 
 
 class ExternalString(String):
+  # TODO(vegorov) fix ExternalString for X64 architecture
   RESOURCE_OFFSET = 12
 
   WEBKIT_RESOUCE_STRING_IMPL_OFFSET = 4
@@ -582,24 +733,28 @@
 
 
 class ConsString(String):
-  LEFT_OFFSET = 12
-  RIGHT_OFFSET = 16
+  def LeftOffset(self):
+    return self.heap.PointerSize() * 3
+
+  def RightOffset(self):
+    return self.heap.PointerSize() * 4
 
   def __init__(self, heap, map, address):
     String.__init__(self, heap, map, address)
-    self.left = self.ObjectField(ConsString.LEFT_OFFSET)
-    self.right = self.ObjectField(ConsString.RIGHT_OFFSET)
+    self.left = self.ObjectField(self.LeftOffset())
+    self.right = self.ObjectField(self.RightOffset())
 
   def GetChars(self):
     return self.left.GetChars() + self.right.GetChars()
 
 
 class Oddball(HeapObject):
-  TO_STRING_OFFSET = 4
+  def ToStringOffset(self):
+    return self.heap.PointerSize()
 
   def __init__(self, heap, map, address):
     HeapObject.__init__(self, heap, map, address)
-    self.to_string = self.ObjectField(Oddball.TO_STRING_OFFSET)
+    self.to_string = self.ObjectField(self.ToStringOffset())
 
   def Print(self, p):
     p.Print(str(self))
@@ -609,19 +764,23 @@
 
 
 class FixedArray(HeapObject):
-  LENGTH_OFFSET = 4
-  ELEMENTS_OFFSET = 8
+  def LengthOffset(self):
+    return self.heap.PointerSize()
+
+  def ElementsOffset(self):
+    return self.heap.PointerSize() * 2
 
   def __init__(self, heap, map, address):
     HeapObject.__init__(self, heap, map, address)
-    self.length = self.SmiField(FixedArray.LENGTH_OFFSET)
+    self.length = self.SmiField(self.LengthOffset())
 
   def Print(self, p):
-    p.Print("FixedArray(%08x) {" % self.address)
+    p.Print("FixedArray(%s) {" % self.heap.reader.FormatIntPtr(self.address))
     p.Indent()
     p.Print("length: %d" % self.length)
+    base_offset = self.ElementsOffset()
     for i in xrange(self.length):
-      offset = FixedArray.ELEMENTS_OFFSET + 4 * i
+      offset = base_offset + self.heap.PointerSize() * i
       p.Print("[%08d] = %s" % (i, self.ObjectField(offset)))
     p.Dedent()
     p.Print("}")
@@ -631,19 +790,22 @@
 
 
 class JSFunction(HeapObject):
-  CODE_ENTRY_OFFSET = 12
-  SHARED_OFFSET = 20
+  def CodeEntryOffset(self):
+    return 3 * self.heap.PointerSize()
+
+  def SharedOffset(self):
+    return 5 * self.heap.PointerSize()
 
   def __init__(self, heap, map, address):
     HeapObject.__init__(self, heap, map, address)
     code_entry = \
-        heap.reader.ReadU32(self.address + JSFunction.CODE_ENTRY_OFFSET)
-    self.code = heap.FindObject(code_entry - Code.ENTRY_OFFSET + 1)
-    self.shared = self.ObjectField(JSFunction.SHARED_OFFSET)
+        heap.reader.ReadUIntPtr(self.address + self.CodeEntryOffset())
+    self.code = heap.FindObject(code_entry - Code.HeaderSize(heap) + 1)
+    self.shared = self.ObjectField(self.SharedOffset())
 
   def Print(self, p):
     source = "\n".join("  %s" % line for line in self._GetSource().split("\n"))
-    p.Print("JSFunction(%08x) {" % self.address)
+    p.Print("JSFunction(%s) {" % self.heap.reader.FormatIntPtr(self.address))
     p.Indent()
     p.Print("inferred name: %s" % self.shared.inferred_name)
     if self.shared.script.Is(Script) and self.shared.script.name.Is(String):
@@ -662,7 +824,8 @@
     inferred_name = ""
     if self.shared.Is(SharedFunctionInfo):
       inferred_name = self.shared.inferred_name
-    return "JSFunction(%08x, %s)" % (self.address, inferred_name)
+    return "JSFunction(%s, %s)" % \
+          (self.heap.reader.FormatIntPtr(self.address), inferred_name)
 
   def _GetSource(self):
     source = "?source?"
@@ -675,47 +838,75 @@
 
 
 class SharedFunctionInfo(HeapObject):
-  CODE_OFFSET = 2 * 4
-  SCRIPT_OFFSET = 7 * 4
-  INFERRED_NAME_OFFSET = 9 * 4
-  START_POSITION_AND_TYPE_OFFSET = 17 * 4
-  END_POSITION_OFFSET = 18 * 4
+  def CodeOffset(self):
+    return 2 * self.heap.PointerSize()
+
+  def ScriptOffset(self):
+    return 7 * self.heap.PointerSize()
+
+  def InferredNameOffset(self):
+    return 9 * self.heap.PointerSize()
+
+  def EndPositionOffset(self):
+    return 12 * self.heap.PointerSize() + 4 * self.heap.IntSize()
+
+  def StartPositionAndTypeOffset(self):
+    return 12 * self.heap.PointerSize() + 5 * self.heap.IntSize()
 
   def __init__(self, heap, map, address):
     HeapObject.__init__(self, heap, map, address)
-    self.code = self.ObjectField(SharedFunctionInfo.CODE_OFFSET)
-    self.script = self.ObjectField(SharedFunctionInfo.SCRIPT_OFFSET)
-    self.inferred_name = \
-        self.ObjectField(SharedFunctionInfo.INFERRED_NAME_OFFSET)
-    start_position_and_type = \
-        self.SmiField(SharedFunctionInfo.START_POSITION_AND_TYPE_OFFSET)
-    self.start_position = start_position_and_type >> 2
-    self.end_position = self.SmiField(SharedFunctionInfo.END_POSITION_OFFSET)
+    self.code = self.ObjectField(self.CodeOffset())
+    self.script = self.ObjectField(self.ScriptOffset())
+    self.inferred_name = self.ObjectField(self.InferredNameOffset())
+    if heap.PointerSize() == 8:
+      start_position_and_type = \
+          heap.reader.ReadU32(self.address + self.StartPositionAndTypeOffset())
+      self.start_position = start_position_and_type >> 2
+      pseudo_smi_end_position = \
+          heap.reader.ReadU32(self.address + self.EndPositionOffset())
+      self.end_position = pseudo_smi_end_position >> 2
+    else:
+      start_position_and_type = \
+          self.SmiField(self.StartPositionAndTypeOffset())
+      self.start_position = start_position_and_type >> 2
+      self.end_position = \
+          self.SmiField(self.EndPositionOffset())
 
 
 class Script(HeapObject):
-  SOURCE_OFFSET = 4
-  NAME_OFFSET = 8
+  def SourceOffset(self):
+    return self.heap.PointerSize()
+
+  def NameOffset(self):
+    return self.SourceOffset() + self.heap.PointerSize()
 
   def __init__(self, heap, map, address):
     HeapObject.__init__(self, heap, map, address)
-    self.source = self.ObjectField(Script.SOURCE_OFFSET)
-    self.name = self.ObjectField(Script.NAME_OFFSET)
+    self.source = self.ObjectField(self.SourceOffset())
+    self.name = self.ObjectField(self.NameOffset())
 
 
 class Code(HeapObject):
-  INSTRUCTION_SIZE_OFFSET = 4
-  ENTRY_OFFSET = 32
+  CODE_ALIGNMENT_MASK = (1 << 5) - 1
+
+  def InstructionSizeOffset(self):
+    return self.heap.PointerSize()
+
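+  # Code bodies are 32-byte aligned, so the fixed header size is rounded up
+  # to the next 32-byte boundary (CODE_ALIGNMENT_MASK + 1).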
+  @staticmethod
+  def HeaderSize(heap):
+    return (heap.PointerSize() + heap.IntSize() + \
+        4 * heap.PointerSize() + 3 * heap.IntSize() + \
+        Code.CODE_ALIGNMENT_MASK) & ~Code.CODE_ALIGNMENT_MASK
 
   def __init__(self, heap, map, address):
     HeapObject.__init__(self, heap, map, address)
-    self.entry = self.address + Code.ENTRY_OFFSET
+    self.entry = self.address + Code.HeaderSize(heap)
     self.instruction_size = \
-        heap.reader.ReadU32(self.address + Code.INSTRUCTION_SIZE_OFFSET)
+        heap.reader.ReadU32(self.address + self.InstructionSizeOffset())
 
   def Print(self, p):
     lines = self.heap.reader.GetDisasmLines(self.entry, self.instruction_size)
-    p.Print("Code(%08x) {" % self.address)
+    p.Print("Code(%s) {" % self.heap.reader.FormatIntPtr(self.address))
     p.Indent()
     p.Print("instruction_size: %d" % self.instruction_size)
     p.PrintLines(self._FormatLine(line) for line in lines)
@@ -735,6 +926,9 @@
     "EXTERNAL_SYMBOL_TYPE": ExternalString,
     "EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE": ExternalString,
     "EXTERNAL_ASCII_SYMBOL_TYPE": ExternalString,
+    "SHORT_EXTERNAL_SYMBOL_TYPE": ExternalString,
+    "SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE": ExternalString,
+    "SHORT_EXTERNAL_ASCII_SYMBOL_TYPE": ExternalString,
     "STRING_TYPE": SeqString,
     "ASCII_STRING_TYPE": SeqString,
     "CONS_STRING_TYPE": ConsString,
@@ -764,10 +958,10 @@
   def FindObject(self, tagged_address):
     if tagged_address in self.objects:
       return self.objects[tagged_address]
-    if (tagged_address & 1) != 1: return None
+    if (tagged_address & self.ObjectAlignmentMask()) != 1: return None
     address = tagged_address - 1
     if not self.reader.IsValidAddress(address): return None
-    map_tagged_address = self.reader.ReadU32(address)
+    map_tagged_address = self.reader.ReadUIntPtr(address)
     if tagged_address == map_tagged_address:
       # Meta map?
       meta_map = Map(self, None, address)
@@ -776,7 +970,7 @@
       meta_map.map = meta_map
       object = meta_map
     else:
-      map = self.FindObject(map_tagged_address)
+      map = self.FindMap(map_tagged_address)
       if map is None: return None
       instance_type_name = INSTANCE_TYPES.get(map.instance_type)
       if instance_type_name is None: return None
@@ -785,9 +979,37 @@
     self.objects[tagged_address] = object
     return object
 
+  def FindMap(self, tagged_address):
+    if (tagged_address & self.MapAlignmentMask()) != 1: return None
+    address = tagged_address - 1
+    if not self.reader.IsValidAddress(address): return None
+    object = Map(self, None, address)
+    return object
+
+  def IntSize(self):
+    return 4
+
+  def PointerSize(self):
+    return self.reader.PointerSize()
+
+  def ObjectAlignmentMask(self):
+    return self.PointerSize() - 1
+
+  def MapAlignmentMask(self):
+    if self.reader.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return (1 << 4) - 1
+    elif self.reader.arch == MD_CPU_ARCHITECTURE_X86:
+      return (1 << 5) - 1
+
 
 EIP_PROXIMITY = 64
 
+CONTEXT_FOR_ARCH = {
+    MD_CPU_ARCHITECTURE_AMD64:
+      ['rax', 'rbx', 'rcx', 'rdx', 'rdi', 'rsi', 'rbp', 'rsp', 'rip'],
+    MD_CPU_ARCHITECTURE_X86:
+      ['eax', 'ebx', 'ecx', 'edx', 'edi', 'esi', 'ebp', 'esp', 'eip']
+}
 
 def AnalyzeMinidump(options, minidump_name):
   reader = MinidumpReader(options, minidump_name)
@@ -800,40 +1022,35 @@
   print "  thread id: %d" % exception_thread.id
   print "  code: %08X" % reader.exception.exception.code
   print "  context:"
-  print "    eax: %08x" % reader.exception_context.eax
-  print "    ebx: %08x" % reader.exception_context.ebx
-  print "    ecx: %08x" % reader.exception_context.ecx
-  print "    edx: %08x" % reader.exception_context.edx
-  print "    edi: %08x" % reader.exception_context.edi
-  print "    esi: %08x" % reader.exception_context.esi
-  print "    ebp: %08x" % reader.exception_context.ebp
-  print "    esp: %08x" % reader.exception_context.esp
-  print "    eip: %08x" % reader.exception_context.eip
+  for r in CONTEXT_FOR_ARCH[reader.arch]:
+    print "    %s: %s" % (r, reader.FormatIntPtr(reader.Register(r)))
   # TODO(vitalyr): decode eflags.
   print "    eflags: %s" % bin(reader.exception_context.eflags)[2:]
   print
 
+  stack_top = reader.ExceptionSP()
   stack_bottom = exception_thread.stack.start + \
       exception_thread.stack.memory.data_size
-  stack_map = {reader.exception_context.eip: -1}
-  for slot in xrange(reader.exception_context.esp, stack_bottom, 4):
-    maybe_address = reader.ReadU32(slot)
+  stack_map = {reader.ExceptionIP(): -1}
+  for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
+    maybe_address = reader.ReadUIntPtr(slot)
     if not maybe_address in stack_map:
       stack_map[maybe_address] = slot
   heap = V8Heap(reader, stack_map)
 
   print "Disassembly around exception.eip:"
-  start = reader.exception_context.eip - EIP_PROXIMITY
+  start = reader.ExceptionIP() - EIP_PROXIMITY
   lines = reader.GetDisasmLines(start, 2 * EIP_PROXIMITY)
   for line in lines:
     print FormatDisasmLine(start, heap, line)
   print
 
   print "Annotated stack (from exception.esp to bottom):"
-  for slot in xrange(reader.exception_context.esp, stack_bottom, 4):
-    maybe_address = reader.ReadU32(slot)
+  for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
+    maybe_address = reader.ReadUIntPtr(slot)
     heap_object = heap.FindObject(maybe_address)
-    print "%08x: %08x" % (slot, maybe_address)
+    print "%s: %s" % (reader.FormatIntPtr(slot),
+                      reader.FormatIntPtr(maybe_address))
     if heap_object:
       heap_object.Print(Printer())
       print
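
The grokdump changes above replace hard-coded 32-bit field offsets and the
fixed eax..eip register set with pointer-size-derived offsets and a
per-architecture register list, so one script handles both ia32 and x64
minidumps. A minimal Python sketch of the offset idea (names here are
illustrative, not the grokdump API):

    class HeapLayout(object):
      def __init__(self, pointer_size):
        self.pointer_size = pointer_size  # 4 on ia32, 8 on x64

      def PointerSize(self):
        return self.pointer_size

      def ObjectAlignmentMask(self):
        # A tagged pointer keeps its low bit set; the rest must be aligned.
        return self.PointerSize() - 1

    def script_name_offset(heap):
      # Mirrors Script.NameOffset(): one slot for 'source', then 'name'.
      return 2 * heap.PointerSize()

    print script_name_offset(HeapLayout(4))  # 8, the old ia32 NAME_OFFSET
    print script_name_offset(HeapLayout(8))  # 16 on x64
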
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 5014417..538b7ef 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -32,6 +32,7 @@
       'targets': [
         {
           'target_name': 'v8',
+          'dependencies_traverse': 1,
           'conditions': [
             ['want_separate_host_toolset==1', {
               'toolsets': ['host', 'target'],
@@ -39,10 +40,16 @@
               'toolsets': ['target'],
             }],
             ['v8_use_snapshot=="true"', {
-              'dependencies': ['v8_snapshot'],
+              # The dependency on v8_base should come from a transitive
+              # dependency; however, the Android toolchain requires
+              # libv8_base.a to appear before libv8_snapshot.a, so it is
+              # listed explicitly.
+              'dependencies': ['v8_base', 'v8_snapshot'],
             },
             {
-              'dependencies': ['v8_nosnapshot'],
+              # The dependency on v8_base should come from a transitive
+              # dependency; however, the Android toolchain requires
+              # libv8_base.a to appear before libv8_nosnapshot.a, so it is
+              # listed explicitly.
+              'dependencies': ['v8_base', 'v8_nosnapshot'],
             }],
             ['component=="shared_library"', {
               'type': '<(component)',
@@ -72,11 +79,7 @@
                   },
                 }],
                 ['soname_version!=""', {
-                  # Ideally, we'd like to specify the full filename for the
-                  # library and set it to "libv8.so.<(soname_version)",
-                  # but currently the best we can do is use 'product_name' and
-                  # get "libv8-<(soname_version).so".
-                  'product_name': 'v8-<(soname_version)',
+                  'product_extension': 'so.<(soname_version)',
                 }],
               ],
             },
@@ -225,6 +228,9 @@
         {
           'target_name': 'v8_base',
           'type': '<(library)',
+          'variables': {
+            'optimize': 'max',
+          },
           'include_dirs+': [
             '../../src',
           ],
@@ -240,8 +246,8 @@
             '../../src/assembler.cc',
             '../../src/assembler.h',
             '../../src/ast.cc',
-            '../../src/ast-inl.h',
             '../../src/ast.h',
+            '../../src/atomicops.h',
             '../../src/atomicops_internals_x86_gcc.cc',
             '../../src/bignum.cc',
             '../../src/bignum.h',
@@ -283,6 +289,8 @@
             '../../src/cpu-profiler.h',
             '../../src/data-flow.cc',
             '../../src/data-flow.h',
+            '../../src/date.cc',
+            '../../src/date.h',
             '../../src/dateparser.cc',
             '../../src/dateparser.h',
             '../../src/dateparser-inl.h',
@@ -326,7 +334,6 @@
             '../../src/handles-inl.h',
             '../../src/handles.cc',
             '../../src/handles.h',
-            '../../src/hashmap.cc',
             '../../src/hashmap.h',
             '../../src/heap-inl.h',
             '../../src/heap.cc',
@@ -340,8 +347,12 @@
             '../../src/ic-inl.h',
             '../../src/ic.cc',
             '../../src/ic.h',
+            '../../src/incremental-marking.cc',
+            '../../src/incremental-marking.h',
             '../../src/inspector.cc',
             '../../src/inspector.h',
+            '../../src/interface.cc',
+            '../../src/interface.h',
             '../../src/interpreter-irregexp.cc',
             '../../src/interpreter-irregexp.h',
             '../../src/json-parser.h',
@@ -349,6 +360,7 @@
             '../../src/jsregexp.h',
             '../../src/isolate.cc',
             '../../src/isolate.h',
+            '../../src/lazy-instance.h',
             '../../src/list-inl.h',
             '../../src/list.h',
             '../../src/lithium.cc',
@@ -379,8 +391,11 @@
             '../../src/objects-visiting.h',
             '../../src/objects.cc',
             '../../src/objects.h',
+            '../../src/once.cc',
+            '../../src/once.h',
             '../../src/parser.cc',
             '../../src/parser.h',
+            '../../src/platform-posix.h',
             '../../src/platform-tls-mac.h',
             '../../src/platform-tls-win32.h',
             '../../src/platform-tls.h',
@@ -394,6 +409,7 @@
             '../../src/prettyprinter.h',
             '../../src/property.cc',
             '../../src/property.h',
+            '../../src/property-details.h',
             '../../src/profile-generator-inl.h',
             '../../src/profile-generator.cc',
             '../../src/profile-generator.h',
@@ -431,6 +447,9 @@
             '../../src/spaces-inl.h',
             '../../src/spaces.cc',
             '../../src/spaces.h',
+            '../../src/store-buffer-inl.h',
+            '../../src/store-buffer.cc',
+            '../../src/store-buffer.h',
             '../../src/string-search.cc',
             '../../src/string-search.h',
             '../../src/string-stream.cc',
@@ -549,6 +568,40 @@
                 '../../src/ia32/stub-cache-ia32.cc',
               ],
             }],
+            ['v8_target_arch=="mips"', {
+              'sources': [
+                '../../src/mips/assembler-mips.cc',
+                '../../src/mips/assembler-mips.h',
+                '../../src/mips/assembler-mips-inl.h',
+                '../../src/mips/builtins-mips.cc',
+                '../../src/mips/codegen-mips.cc',
+                '../../src/mips/codegen-mips.h',
+                '../../src/mips/code-stubs-mips.cc',
+                '../../src/mips/code-stubs-mips.h',
+                '../../src/mips/constants-mips.cc',
+                '../../src/mips/constants-mips.h',
+                '../../src/mips/cpu-mips.cc',
+                '../../src/mips/debug-mips.cc',
+                '../../src/mips/deoptimizer-mips.cc',
+                '../../src/mips/disasm-mips.cc',
+                '../../src/mips/frames-mips.cc',
+                '../../src/mips/frames-mips.h',
+                '../../src/mips/full-codegen-mips.cc',
+                '../../src/mips/ic-mips.cc',
+                '../../src/mips/lithium-codegen-mips.cc',
+                '../../src/mips/lithium-codegen-mips.h',
+                '../../src/mips/lithium-gap-resolver-mips.cc',
+                '../../src/mips/lithium-gap-resolver-mips.h',
+                '../../src/mips/lithium-mips.cc',
+                '../../src/mips/lithium-mips.h',
+                '../../src/mips/macro-assembler-mips.cc',
+                '../../src/mips/macro-assembler-mips.h',
+                '../../src/mips/regexp-macro-assembler-mips.cc',
+                '../../src/mips/regexp-macro-assembler-mips.h',
+                '../../src/mips/simulator-mips.cc',
+                '../../src/mips/stub-cache-mips.cc',
+              ],
+            }],
             ['v8_target_arch=="x64" or v8_target_arch=="mac" or OS=="mac"', {
               'sources': [
                 '../../src/x64/assembler-x64-inl.h',
@@ -586,7 +639,8 @@
                     ['v8_compress_startup_data=="bz2"', {
                       'libraries': [
                         '-lbz2',
-                    ]}],
+                      ]
+                    }],
                   ],
                 },
                 'sources': [
@@ -596,26 +650,30 @@
               }
             ],
             ['OS=="android"', {
+                'defines': [
+                  'CAN_USE_VFP_INSTRUCTIONS',
+                ],
                 'sources': [
                   '../../src/platform-posix.cc',
                 ],
                 'conditions': [
-                  ['host_os=="mac" and _toolset!="target"', {
-                    'sources': [
-                      '../../src/platform-macos.cc'
-                    ]
+                  ['host_os=="mac"', {
+                    'target_conditions': [
+                      ['_toolset=="host"', {
+                        'sources': [
+                          '../../src/platform-macos.cc'
+                        ]
+                      }, {
+                        'sources': [
+                          '../../src/platform-linux.cc'
+                        ]
+                      }],
+                    ],
                   }, {
                     'sources': [
                       '../../src/platform-linux.cc'
                     ]
                   }],
-                  ['_toolset=="target"', {
-                    'link_settings': {
-                      'libraries': [
-                        '-llog',
-                       ],
-                     }
-                  }],
                 ],
               },
             ],
@@ -641,6 +699,28 @@
                 ],
               }
             ],
+            ['OS=="netbsd"', {
+                'link_settings': {
+                  'libraries': [
+                    '-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
+                ]},
+                'sources': [
+                  '../../src/platform-openbsd.cc',
+                  '../../src/platform-posix.cc'
+                ],
+              }
+            ],
+            ['OS=="solaris"', {
+                'link_settings': {
+                  'libraries': [
+                    '-lsocket -lnsl',
+                ]},
+                'sources': [
+                  '../../src/platform-solaris.cc',
+                  '../../src/platform-posix.cc',
+                ],
+              }
+            ],
             ['OS=="mac"', {
               'sources': [
                 '../../src/platform-macos.cc',
@@ -664,6 +744,11 @@
                 'V8_SHARED',
               ],
             }],
+            ['v8_postmortem_support=="true"', {
+              'sources': [
+                '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
+              ]
+            }],
           ],
         },
         {
@@ -697,7 +782,7 @@
             'experimental_library_files': [
               '../../src/macros.py',
               '../../src/proxy.js',
-              '../../src/weakmap.js',
+              '../../src/collection.js',
             ],
           },
           'actions': [
@@ -740,9 +825,38 @@
           ],
         },
         {
+          'target_name': 'postmortem-metadata',
+          'type': 'none',
+          'variables': {
+            'heapobject_files': [
+                '../../src/objects.h',
+                '../../src/objects-inl.h',
+            ],
+          },
+          'actions': [
+              {
+                'action_name': 'gen-postmortem-metadata',
+                'inputs': [
+                  '../../tools/gen-postmortem-metadata.py',
+                  '<@(heapobject_files)',
+                ],
+                'outputs': [
+                  '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
+                ],
+                'action': [
+                  'python',
+                  '../../tools/gen-postmortem-metadata.py',
+                  '<@(_outputs)',
+                  '<@(heapobject_files)'
+                ]
+              }
+           ]
+        },
+        {
           'target_name': 'mksnapshot',
           'type': 'executable',
           'dependencies': [
+            'v8_base',
             'v8_nosnapshot',
           ],
           'include_dirs+': [
@@ -760,8 +874,8 @@
             ['v8_compress_startup_data=="bz2"', {
               'libraries': [
                 '-lbz2',
-              ]}
-            ],
+              ]
+            }],
           ],
         },
         {
@@ -786,7 +900,8 @@
             ['v8_compress_startup_data=="bz2"', {
               'libraries': [
                 '-lbz2',
-              ]}],
+              ]
+            }],
           ],
         },
         {
@@ -800,6 +915,8 @@
             '../../include/v8stdint.h',
             '../../src/allocation.cc',
             '../../src/allocation.h',
+            '../../src/atomicops.h',
+            '../../src/atomicops_internals_x86_gcc.cc',
             '../../src/bignum.cc',
             '../../src/bignum.h',
             '../../src/bignum-dtoa.cc',
@@ -822,10 +939,11 @@
             '../../src/fixed-dtoa.cc',
             '../../src/fixed-dtoa.h',
             '../../src/globals.h',
-            '../../src/hashmap.cc',
             '../../src/hashmap.h',
             '../../src/list-inl.h',
             '../../src/list.h',
+            '../../src/once.cc',
+            '../../src/once.h',
             '../../src/preparse-data-format.h',
             '../../src/preparse-data.cc',
             '../../src/preparse-data.h',
@@ -858,7 +976,7 @@
       'targets': [
         {
           'target_name': 'v8',
-          'type': 'settings',
+          'type': 'none',
           'conditions': [
             ['want_separate_host_toolset==1', {
               'toolsets': ['host', 'target'],
diff --git a/tools/js2c.py b/tools/js2c.py
index a2ea8ea..fa559f3 100644
--- a/tools/js2c.py
+++ b/tools/js2c.py
@@ -128,12 +128,13 @@
       end = pattern_match.end()
       assert lines[end - 1] == '('
       last_match = end
-      arg_index = 0
+      arg_index = [0]  # Wrap state into array, to work around Python "scoping"
       mapping = { }
       def add_arg(str):
         # Remember to expand recursively in the arguments
         replacement = ExpandMacros(str.strip(), macros)
-        mapping[macro.args[arg_index]] = replacement
+        mapping[macro.args[arg_index[0]]] = replacement
+        arg_index[0] += 1
       while end < len(lines) and height > 0:
         # We don't count commas at higher nesting levels.
         if lines[end] == ',' and height == 1:
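
The arg_index change above works around Python 2 closures being able to read,
but not rebind, a variable of the enclosing function (there is no nonlocal).
Mutating an element of a shared list sidesteps the restriction; a standalone
illustration of the same trick:

    def make_counter():
      counter = [0]  # wrap state in a list, exactly as arg_index does
      def bump():
        counter[0] += 1  # mutation, not rebinding, so this is legal
        return counter[0]
      return bump

    bump = make_counter()
    print bump(), bump()  # 1 2
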
diff --git a/tools/jsmin.py b/tools/jsmin.py
index 646bf14..e82f3d0 100644
--- a/tools/jsmin.py
+++ b/tools/jsmin.py
@@ -232,7 +232,9 @@
       # A regexp that matches a regexp literal surrounded by /slashes/.
       # Don't allow a regexp to have a ) before the first ( since that's a
       # syntax error and it's probably just two unrelated slashes.
-      slash_quoted_regexp = r"/(?:(?=\()|(?:[^()/\\]|\\.)+)(?:\([^/\\]|\\.)*/"
+      # Also don't allow it to come after anything that can only be the
+      # end of a primary expression.
+      slash_quoted_regexp = r"(?<![\w$'\")\]])/(?:(?=\()|(?:[^()/\\]|\\.)+)(?:\([^/\\]|\\.)*/"
       # Replace multiple spaces with a single space.
       line = re.sub("|".join([double_quoted_string,
                               single_quoted_string,
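
The added negative lookbehind stops the minifier from mistaking a division
operator for the start of a regexp literal whenever the slash directly
follows something that can only end a primary expression (an identifier
character, $, a closing quote, ) or ]). A quick check of the old pattern
against the new one:

    import re

    OLD = r"/(?:(?=\()|(?:[^()/\\]|\\.)+)(?:\([^/\\]|\\.)*/"
    NEW = r"(?<![\w$'\")\]])" + OLD

    print re.search(OLD, "x=a/b/c;").group()  # '/b/' -- division misread
    print re.search(NEW, "x=a/b/c;")          # None -- '/' follows 'a'
    print re.search(NEW, "x=/b/;").group()    # '/b/' -- real literal kept
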
diff --git a/tools/linux-tick-processor b/tools/linux-tick-processor
index 0b0a1fb..7070ce6 100755
--- a/tools/linux-tick-processor
+++ b/tools/linux-tick-processor
@@ -1,20 +1,5 @@
 #!/bin/sh
 
-tools_path=`cd $(dirname "$0");pwd`
-if [ ! "$D8_PATH" ]; then
-  d8_public=`which d8`
-  if [ -x $d8_public ]; then D8_PATH=$(dirname "$d8_public"); fi
-fi
-[ "$D8_PATH" ] || D8_PATH=$tools_path/..
-d8_exec=$D8_PATH/d8
-
-if [ ! -x $d8_exec ]; then
-  echo "d8 shell not found in $D8_PATH"
-  echo "To build, execute 'scons <flags> d8' from the V8 directory"
-  exit 1
-fi
-
-
 # find the name of the log file to process, it must not start with a dash.
 log_file="v8.log"
 for arg in "$@"
@@ -24,6 +9,28 @@
   fi
 done
 
+tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+  d8_public=`which d8`
+  if [ -x $d8_public ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
+[ "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
+
+if [ ! -x $d8_exec ]; then
+  D8_PATH=`pwd`/out/native
+  d8_exec=$D8_PATH/d8
+fi
+
+if [ ! -x $d8_exec ]; then
+  d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
+fi
+
+if [ ! -x $d8_exec ]; then
+  echo "d8 shell not found in $D8_PATH"
+  echo "To build, execute 'make native' from the V8 directory"
+  exit 1
+fi
 
 # nm spits out 'no symbols found' messages to stderr.
 cat $log_file | $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
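
Parsing the log-file argument first lets the new fallbacks use it: when d8 is
neither at $D8_PATH nor on the $PATH, the script now tries out/native and
finally greps the log itself for the shell binary that produced it. A fresh
checkout therefore usually needs only (log name assumed to be the default):

    make native
    tools/linux-tick-processor v8.log
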
diff --git a/tools/ll_prof.py b/tools/ll_prof.py
index 58cbb95..51ba672 100755
--- a/tools/ll_prof.py
+++ b/tools/ll_prof.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -334,6 +334,7 @@
   _ARCH_TO_POINTER_TYPE_MAP = {
     "ia32": ctypes.c_uint32,
     "arm": ctypes.c_uint32,
+    "mips": ctypes.c_uint32,
     "x64": ctypes.c_uint64
   }
 
@@ -399,12 +400,16 @@
         code = Code(name, start_address, end_address, origin, origin_offset)
         conflicting_code = self.code_map.Find(start_address)
         if conflicting_code:
-          LogReader._HandleCodeConflict(conflicting_code, code)
-          # TODO(vitalyr): this warning is too noisy because of our
-          # attempts to reconstruct code log from the snapshot.
-          # print >>sys.stderr, \
-          #     "Warning: Skipping duplicate code log entry %s" % code
-          continue
+          if not (conflicting_code.start_address == code.start_address and
+                  conflicting_code.end_address == code.end_address):
+            self.code_map.Remove(conflicting_code)
+          else:
+            LogReader._HandleCodeConflict(conflicting_code, code)
+            # TODO(vitalyr): this warning is too noisy because of our
+            # attempts to reconstruct code log from the snapshot.
+            # print >>sys.stderr, \
+            #     "Warning: Skipping duplicate code log entry %s" % code
+            continue
         self.code_map.Add(code)
         continue
 
@@ -668,7 +673,9 @@
 OBJDUMP_SYMBOL_LINE_RE = re.compile(
   r"^([a-f0-9]+)\s(.{7})\s(\S+)\s+([a-f0-9]+)\s+(?:\.hidden\s+)?(.*)$")
 OBJDUMP_DYNAMIC_SYMBOLS_START_RE = re.compile(
-   r"^DYNAMIC SYMBOL TABLE")
+  r"^DYNAMIC SYMBOL TABLE")
+OBJDUMP_SKIP_RE = re.compile(
+  r"^.*ld\.so\.cache$")
 KERNEL_ALLSYMS_FILE = "/proc/kallsyms"
 PERF_KERNEL_ALLSYMS_RE = re.compile(
   r".*kallsyms.*")
@@ -687,6 +694,8 @@
     # is 0.
     if mmap_info.tid == 0 and not options.kernel:
       return True
+    if OBJDUMP_SKIP_RE.match(mmap_info.filename):
+      return True
     if PERF_KERNEL_ALLSYMS_RE.match(mmap_info.filename):
       return self._LoadKernelSymbols(code_map)
     self.infos.append(mmap_info)
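
Instead of unconditionally skipping a code entry whose start address is
already mapped, the reader now evicts the old entry when the address ranges
differ (the region was reused by new code) and only treats an exact range
match as a duplicate log entry. The rule, restated as a sketch with a
simplified map API:

    def on_new_code(code_map, existing, incoming):
      same_range = (existing.start_address == incoming.start_address and
                    existing.end_address == incoming.end_address)
      if not same_range:
        code_map.Remove(existing)  # stale entry from a reused region
        code_map.Add(incoming)
      # else: exact duplicate -- keep the existing entry, as before
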
diff --git a/tools/logreader.js b/tools/logreader.js
index 315e721..a8141da 100644
--- a/tools/logreader.js
+++ b/tools/logreader.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -134,9 +134,8 @@
 LogReader.prototype.dispatchLogRow_ = function(fields) {
   // Obtain the dispatch.
   var command = fields[0];
-  if (!(command in this.dispatchTable_)) {
-    throw new Error('unknown command: ' + command);
-  }
+  if (!(command in this.dispatchTable_)) return;
+
   var dispatch = this.dispatchTable_[command];
 
   if (dispatch === null || this.skipDispatch(dispatch)) {
diff --git a/tools/merge-to-branch.sh b/tools/merge-to-branch.sh
new file mode 100644
index 0000000..49bf3e4
--- /dev/null
+++ b/tools/merge-to-branch.sh
@@ -0,0 +1,267 @@
+#!/bin/bash
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+########## Global variable definitions
+
+BRANCHNAME=prepare-merge
+PERSISTFILE_BASENAME=/tmp/v8-merge-to-branch-tempfile
+ALREADY_MERGING_SENTINEL_FILE="$PERSISTFILE_BASENAME-already-merging"
+COMMIT_HASHES_FILE="$PERSISTFILE_BASENAME-PATCH_COMMIT_HASHES"
+TEMPORARY_PATCH_FILE="$PERSISTFILE_BASENAME-temporary-patch"
+
+########## Function definitions
+
+source $(dirname $BASH_SOURCE)/common-includes.sh
+
+usage() {
+cat << EOF
+usage: $0 [OPTIONS]... [BRANCH] [REVISION]...
+
+Performs the necessary steps to merge revisions from bleeding_edge
+to other branches, including trunk.
+
+OPTIONS:
+  -h    Show this message
+  -s    Specify the step where to start work. Default: 0.
+  -p    Specify a patch file to apply as part of the merge
+EOF
+}
+
+persist_patch_commit_hashes() {
+  echo "PATCH_COMMIT_HASHES=( ${PATCH_COMMIT_HASHES[@]} )" > $COMMIT_HASHES_FILE
+}
+
+restore_patch_commit_hashes() {
+  source $COMMIT_HASHES_FILE
+}
+
+restore_patch_commit_hashes_if_unset() {
+  [[ "${#PATCH_COMMIT_HASHES[@]}" == 0 ]] && restore_patch_commit_hashes
+  [[ "${#PATCH_COMMIT_HASHES[@]}" == 0 ]] && [[ -z "$EXTRA_PATCH" ]] && \
+      die "Variable PATCH_COMMIT_HASHES could not be restored."
+}
+
+########## Option parsing
+
+while getopts ":hs:fp:" OPTION ; do
+  case $OPTION in
+    h)  usage
+        exit 0
+        ;;
+    p)  EXTRA_PATCH=$OPTARG
+        ;;
+    f)  rm -f "$ALREADY_MERGING_SENTINEL_FILE"
+        ;;
+    s)  START_STEP=$OPTARG
+        ;;
+    ?)  echo "Illegal option: -$OPTARG"
+        usage
+        exit 1
+        ;;
+  esac
+done
+let OPTION_COUNT=$OPTIND-1
+shift $OPTION_COUNT
+
+########## Regular workflow
+
+# If there is a merge in progress, abort.
+[[ -e "$ALREADY_MERGING_SENTINEL_FILE" ]] && [[ $START_STEP -eq 0 ]] \
+   && die "A merge is already in progress"
+touch "$ALREADY_MERGING_SENTINEL_FILE"
+
+initial_environment_checks
+
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  if [ ${#@} -lt 2 ] && [ -z "$EXTRA_PATCH" ] ; then
+    die "Either a patch file or revision numbers must be specified"
+  fi
+  echo ">>> Step $CURRENT_STEP: Preparation"
+  MERGE_TO_BRANCH=$1
+  [[ -n "$MERGE_TO_BRANCH" ]] || die "Please specify a branch to merge to"
+  shift
+  persist "MERGE_TO_BRANCH"
+  common_prepare
+fi
+
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Create a fresh branch for the patch."
+  restore_if_unset "MERGE_TO_BRANCH"
+  git checkout -b $BRANCHNAME svn/$MERGE_TO_BRANCH \
+    || die "Creating branch $BRANCHNAME failed."
+fi
+
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Find the git \
+revisions associated with the patches."
+  current=0
+  for REVISION in "$@" ; do
+    NEXT_HASH=$(git svn find-rev "r$REVISION" svn/bleeding_edge)
+    [[ -n "$NEXT_HASH" ]] \
+      || die "Cannot determine git hash for r$REVISION"
+    PATCH_COMMIT_HASHES[$current]="$NEXT_HASH"
+    [[ -n "$REVISION_LIST" ]] && REVISION_LIST="$REVISION_LIST,"
+    REVISION_LIST="$REVISION_LIST r$REVISION"
+    let current+=1
+  done
+  if [ -z "$REVISION_LIST" ] ; then
+    NEW_COMMIT_MSG="Applied patch to $MERGE_TO_BRANCH branch."
+  else
+    NEW_COMMIT_MSG="Merged$REVISION_LIST into $MERGE_TO_BRANCH branch."
+  fi
+
+  echo "$NEW_COMMIT_MSG" > $COMMITMSG_FILE
+  echo "" >> $COMMITMSG_FILE
+  for HASH in ${PATCH_COMMIT_HASHES[@]} ; do
+    PATCH_MERGE_DESCRIPTION=$(git log -1 --format=%s $HASH)
+    echo "$PATCH_MERGE_DESCRIPTION" >> $COMMITMSG_FILE
+    echo "" >> $COMMITMSG_FILE
+  done
+  for HASH in ${PATCH_COMMIT_HASHES[@]} ; do
+    BUG=$(git log -1 $HASH | grep "BUG=" | awk -F '=' '{print $NF}')
+    if [ -n "$BUG" ] ; then
+      [[ -n "$BUG_AGGREGATE" ]] && BUG_AGGREGATE="$BUG_AGGREGATE,"
+      BUG_AGGREGATE="$BUG_AGGREGATE$BUG"
+    fi
+  done
+  if [ -n "$BUG_AGGREGATE" ] ; then
+    echo "BUG=$BUG_AGGREGATE" >> $COMMITMSG_FILE
+  fi
+  persist "NEW_COMMIT_MSG"
+  persist "REVISION_LIST"
+  persist_patch_commit_hashes
+fi
+
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Apply patches for selected revisions."
+  restore_if_unset "MERGE_TO_BRANCH"
+  restore_patch_commit_hashes_if_unset "PATCH_COMMIT_HASHES"
+  rm -f "$TOUCHED_FILES_FILE"
+  for HASH in ${PATCH_COMMIT_HASHES[@]} ; do
+    echo "Applying patch for $HASH to $MERGE_TO_BRANCH..."
+    git log -1 -p $HASH > "$TEMPORARY_PATCH_FILE"
+    apply_patch "$TEMPORARY_PATCH_FILE"
+  done
+  if [ -n "$EXTRA_PATCH" ] ; then
+    apply_patch "$EXTRA_PATCH"
+  fi
+  stage_files
+fi
+
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Prepare $VERSION_FILE."
+  # These version numbers are used again for creating the tag
+  read_and_persist_version
+fi
+
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Increment version number."
+  restore_if_unset "PATCH"
+  NEWPATCH=$(($PATCH + 1))
+  confirm "Automatically increment PATCH_LEVEL? (Saying 'n' will fire up \
+your EDITOR on $VERSION_FILE so you can make arbitrary changes. When \
+you're done, save the file and exit your EDITOR.)"
+  if [ $? -eq 0 ] ; then
+    sed -e "/#define PATCH_LEVEL/s/[0-9]*$/$NEWPATCH/" \
+        -i "$VERSION_FILE"
+  else
+    $EDITOR "$VERSION_FILE"
+  fi
+  read_and_persist_version "NEW"
+fi
+
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Commit to local branch."
+  git commit -a -F "$COMMITMSG_FILE" \
+    || die "'git commit -a' failed."
+fi
+
+upload_step
+
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Commit to the repository."
+  restore_if_unset "MERGE_TO_BRANCH"
+  git checkout $BRANCHNAME \
+    || die "cannot ensure that the current branch is $BRANCHNAME"
+  wait_for_lgtm
+  git cl dcommit || die "failed to commit to $MERGE_TO_BRANCH"
+fi
+
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Determine svn commit revision"
+  restore_if_unset "NEW_COMMIT_MSG"
+  restore_if_unset "MERGE_TO_BRANCH"
+  git svn fetch || die "'git svn fetch' failed."
+  COMMIT_HASH=$(git log -1 --format=%H --grep="$NEW_COMMIT_MSG" \
+    svn/$MERGE_TO_BRANCH)
+  [[ -z "$COMMIT_HASH" ]] && die "Unable to map git commit to svn revision"
+  SVN_REVISION=$(git svn find-rev $COMMIT_HASH)
+  echo "subversion revision number is r$SVN_REVISION"
+  persist "SVN_REVISION"
+fi
+
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Create the tag."
+  restore_if_unset "SVN_REVISION"
+  restore_version_if_unset "NEW"
+  echo "Creating tag svn/tags/$NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH"
+  if [ "$MERGE_TO_BRANCH" == "trunk" ] ; then
+    TO_URL="$MERGE_TO_BRANCH"
+  else
+    TO_URL="branches/$MERGE_TO_BRANCH"
+  fi
+  svn copy -r $SVN_REVISION \
+    https://v8.googlecode.com/svn/$TO_URL \
+    https://v8.googlecode.com/svn/tags/$NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH \
+    -m "Tagging version $NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH"
+  persist "TO_URL"
+fi
+
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Cleanup."
+  restore_if_unset "SVN_REVISION"
+  restore_if_unset "TO_URL"
+  restore_if_unset "REVISION_LIST"
+  restore_version_if_unset "NEW"
+  common_cleanup
+  echo "*** SUMMARY ***"
+  echo "version: $NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH"
+  echo "branch: $TO_URL"
+  echo "svn revision: $SVN_REVISION"
+  [[ -n "$REVISION_LIST" ]] && echo "patches:$REVISION_LIST"
+fi
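
The new merge-to-branch.sh drives a whole back-merge: it creates a scratch
branch off the target branch, applies each listed bleeding_edge revision as a
patch, aggregates the commits' BUG= lines into one commit message, bumps
PATCH_LEVEL, commits, and tags the result. A typical invocation (branch and
revision numbers hypothetical):

    tools/merge-to-branch.sh 3.7 10445 10453

If a step fails, rerun with -s <step> to resume where it left off, or use
-p <file> to merge a hand-made patch instead of svn revisions.
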
diff --git a/tools/presubmit.py b/tools/presubmit.py
index fda7ba9..a5f4c61 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -42,6 +42,7 @@
 import re
 import sys
 import subprocess
+import multiprocessing
 from subprocess import PIPE
 
 # Disabled LINT rules and reason.
@@ -101,6 +102,33 @@
 """.split()
 
 
+LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
+
+
+def CppLintWorker(command):
+  try:
+    process = subprocess.Popen(command, stderr=subprocess.PIPE)
+    process.wait()
+    out_lines = ""
+    error_count = -1
+    while True:
+      out_line = process.stderr.readline()
+      if out_line == '' and process.poll() != None:
+        break
+      m = LINT_OUTPUT_PATTERN.match(out_line)
+      if m:
+        out_lines += out_line
+        error_count += 1
+    sys.stderr.write(out_lines)
+    return error_count
+  except KeyboardInterrupt:
+    process.kill()
+  except:
+    print('Error running cpplint.py. Please make sure you have depot_tools' +
+          ' in your $PATH. Lint check skipped.')
+    process.kill()
+
+
 class FileContentsCache(object):
 
   def __init__(self, sums_file_name):
@@ -206,24 +234,28 @@
       return True
 
     filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
-    command = ['cpplint.py', '--filter', filt] + join(files)
+    command = ['cpplint.py', '--filter', filt]
     local_cpplint = join(path, "tools", "cpplint.py")
     if exists(local_cpplint):
-      command = ['python', local_cpplint, '--filter', filt] + join(files)
+      command = ['python', local_cpplint, '--filter', filt]
 
-    process = subprocess.Popen(command, stderr=subprocess.PIPE)
-    LINT_ERROR_PATTERN = re.compile(r'^(.+)[:(]\d+[:)]')
-    while True:
-      out_line = process.stderr.readline()
-      if out_line == '' and process.poll() != None:
-        break
-      sys.stderr.write(out_line)
-      m = LINT_ERROR_PATTERN.match(out_line)
-      if m:
-        good_files_cache.RemoveFile(m.group(1))
+    commands = join([command + [file] for file in files])
+    count = multiprocessing.cpu_count()
+    pool = multiprocessing.Pool(count)
+    try:
+      results = pool.map_async(CppLintWorker, commands).get(999999)
+    except KeyboardInterrupt:
+      print "\nCaught KeyboardInterrupt, terminating workers."
+      sys.exit(1)
 
+    for i in range(len(files)):
+      if results[i] > 0:
+        good_files_cache.RemoveFile(files[i])
+
+    total_errors = sum(results)
+    print "Total errors found: %d" % total_errors
     good_files_cache.Save()
-    return process.returncode == 0
+    return total_errors == 0
 
 
 COPYRIGHT_HEADER_PATTERN = re.compile(
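
Lint now fans out one cpplint process per file across all cores. Using
map_async().get() with a huge timeout instead of a plain map() keeps
KeyboardInterrupt deliverable to the parent process under Python 2. The
pattern in isolation (file list and cpplint path hypothetical):

    import multiprocessing
    import subprocess

    def RunLint(command):
      # One worker per file; the exit status stands in for the error count.
      return subprocess.call(command)

    if __name__ == '__main__':
      files = ['src/a.cc', 'src/b.cc']
      commands = [['python', 'cpplint.py', f] for f in files]
      pool = multiprocessing.Pool(multiprocessing.cpu_count())
      try:
        results = pool.map_async(RunLint, commands).get(999999)
      except KeyboardInterrupt:
        raise SystemExit("\nCaught KeyboardInterrupt, terminating workers.")
      print "Files with lint errors: %d" % sum(1 for r in results if r)
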
diff --git a/tools/push-to-trunk.sh b/tools/push-to-trunk.sh
index 761b733..3fb5b34 100755
--- a/tools/push-to-trunk.sh
+++ b/tools/push-to-trunk.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -31,18 +31,13 @@
 
 BRANCHNAME=prepare-push
 TRUNKBRANCH=trunk-push
-TEMP_BRANCH=v8-push-to-trunk-script-temporary-branch
-VERSION_FILE="src/version.cc"
 PERSISTFILE_BASENAME=/tmp/v8-push-to-trunk-tempfile
-CHANGELOG_ENTRY_FILE="$PERSISTFILE_BASENAME-changelog-entry"
-PATCH_FILE="$PERSISTFILE_BASENAME-patch"
-COMMITMSG_FILE="$PERSISTFILE_BASENAME-commitmsg"
-TOUCHED_FILES_FILE="$PERSISTFILE_BASENAME-touched-files"
-STEP=0
-
+CHROME_PATH=
 
 ########## Function definitions
 
+source $(dirname $BASH_SOURCE)/common-includes.sh
+
 usage() {
 cat << EOF
 usage: $0 OPTIONS
@@ -54,71 +49,24 @@
   -h    Show this message
   -s    Specify the step where to start work. Default: 0.
   -l    Manually specify the git commit ID of the last push to trunk.
+  -c    Specify the path to your Chromium src/ directory to automate the
+        V8 roll.
 EOF
 }
 
-die() {
-  [[ -n "$1" ]] && echo "Error: $1"
-  echo "Exiting."
-  exit 1
-}
-
-confirm() {
-  echo -n "$1 [Y/n] "
-  read ANSWER
-  if [[ -z "$ANSWER" || "$ANSWER" == "Y" || "$ANSWER" == "y" ]] ; then
-    return 0
-  else
-    return 1
-  fi
-}
-
-delete_branch() {
-  local MATCH=$(git branch | grep $1 | awk '{print $NF}' )
-  if [ "$MATCH" == "$1" ] ; then
-    confirm "Branch $1 exists, do you want to delete it?"
-    if [ $? -eq 0 ] ; then
-      git branch -D $1 || die "Deleting branch '$1' failed."
-      echo "Branch $1 deleted."
-    else
-      die "Can't continue. Please delete branch $1 and try again."
-    fi
-  fi
-}
-
-# Persist and restore variables to support canceling/resuming execution
-# of this script.
-persist() {
-  local VARNAME=$1
-  local FILE="$PERSISTFILE_BASENAME-$VARNAME"
-  echo "${!VARNAME}" > $FILE
-}
-
-restore() {
-  local VARNAME=$1
-  local FILE="$PERSISTFILE_BASENAME-$VARNAME"
-  local VALUE="$(cat $FILE)"
-  eval "$VARNAME=\"$VALUE\""
-}
-
-restore_if_unset() {
-  local VARNAME=$1
-  [[ -z "${!VARNAME}" ]] && restore "$VARNAME"
-  [[ -z "${!VARNAME}" ]] && die "Variable '$VARNAME' could not be restored."
-}
-
-
 ########## Option parsing
 
-while getopts ":hs:l:" OPTION ; do
+while getopts ":hs:l:c:" OPTION ; do
   case $OPTION in
     h)  usage
         exit 0
         ;;
-    s)  STEP=$OPTARG
+    s)  START_STEP=$OPTARG
         ;;
     l)  LASTPUSH=$OPTARG
         ;;
+    c)  CHROME_PATH=$OPTARG
+        ;;
     ?)  echo "Illegal option: -$OPTARG"
         usage
         exit 1
@@ -129,46 +77,24 @@
 
 ########## Regular workflow
 
-# Cancel if this is not a git checkout.
-[[ -d .git ]] \
-  || die "This is not a git checkout, this script won't work for you."
+initial_environment_checks
 
-# Cancel if EDITOR is unset or not executable.
-[[ -n "$EDITOR" && -x "$(which $EDITOR)" ]] \
-  || die "Please set your EDITOR environment variable, you'll need it."
-
-if [ $STEP -le 0 ] ; then
-  echo ">>> Step 0: Preparation"
-  # Check for a clean workdir.
-  [[ -z "$(git status -s -uno)" ]] \
-    || die "Workspace is not clean. Please commit or undo your changes."
-
-  # Persist current branch.
-  CURRENT_BRANCH=$(git status -s -b -uno | grep "^##" | awk '{print $2}')
-  persist "CURRENT_BRANCH"
-  # Get ahold of a safe temporary branch and check it out.
-  if [ "$CURRENT_BRANCH" != "$TEMP_BRANCH" ] ; then
-    delete_branch $TEMP_BRANCH
-    git checkout -b $TEMP_BRANCH
-  fi
-  # Delete branches if they exist.
-  delete_branch $BRANCHNAME
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Preparation"
+  common_prepare
   delete_branch $TRUNKBRANCH
 fi
 
-if [ $STEP -le 1 ] ; then
-  echo ">>> Step 1: Fetch unfetched revisions."
-  git svn fetch || die "'git svn fetch' failed."
-fi
-
-if [ $STEP -le 2 ] ; then
-  echo ">>> Step 2: Create a fresh branch."
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Create a fresh branch."
   git checkout -b $BRANCHNAME svn/bleeding_edge \
     || die "Creating branch $BRANCHNAME failed."
 fi
 
-if [ $STEP -le 3 ] ; then
-  echo ">>> Step 3: Detect commit ID of last push to trunk."
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Detect commit ID of last push to trunk."
   [[ -n "$LASTPUSH" ]] || LASTPUSH=$(git log -1 --format=%H ChangeLog)
   LOOP=1
   while [ $LOOP -eq 1 ] ; do
@@ -184,15 +110,11 @@
   persist "LASTPUSH"
 fi
 
-if [ $STEP -le 4 ] ; then
-  echo ">>> Step 4: Prepare raw ChangeLog entry."
-# These version numbers are used again later for the trunk commit.
-  MAJOR=$(grep "#define MAJOR_VERSION" "$VERSION_FILE" | awk '{print $NF}')
-  persist "MAJOR"
-  MINOR=$(grep "#define MINOR_VERSION" "$VERSION_FILE" | awk '{print $NF}')
-  persist "MINOR"
-  BUILD=$(grep "#define BUILD_NUMBER" "$VERSION_FILE" | awk '{print $NF}')
-  persist "BUILD"
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Prepare raw ChangeLog entry."
+  # These version numbers are used again later for the trunk commit.
+  read_and_persist_version
 
   DATE=$(date +%Y-%m-%d)
   persist "DATE"
@@ -202,18 +124,25 @@
   for commit in $COMMITS ; do
     # Get the commit's title line.
     git log -1 $commit --format="%w(80,8,8)%s" >> "$CHANGELOG_ENTRY_FILE"
-    # Grep for "BUG=xxxx" lines in the commit message.
-    git log -1 $commit --format="%b" | grep BUG= | grep -v "BUG=$" \
-                                     | sed -e 's/^/        /' \
-                                     >> "$CHANGELOG_ENTRY_FILE"
+    # Grep for "BUG=xxxx" lines in the commit message and convert them to
+    # "(issue xxxx)".
+    git log -1 $commit --format="%B" \
+        | grep "^BUG=" | grep -v "BUG=$" | grep -v "BUG=none$" \
+        | sed -e 's/^/        /' \
+        | sed -e 's/BUG=v8:\(.*\)$/(issue \1)/' \
+        | sed -e 's/BUG=\(.*\)$/(Chromium issue \1)/' \
+        >> "$CHANGELOG_ENTRY_FILE"
     # Append the commit's author for reference.
     git log -1 $commit --format="%w(80,8,8)(%an)" >> "$CHANGELOG_ENTRY_FILE"
     echo "" >> "$CHANGELOG_ENTRY_FILE"
   done
+  echo "        Performance and stability improvements on all platforms." \
+    >> "$CHANGELOG_ENTRY_FILE"
 fi
 
-if [ $STEP -le 5 ] ; then
-  echo ">>> Step 5: Edit ChangeLog entry."
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Edit ChangeLog entry."
   echo -n "Please press <Return> to have your EDITOR open the ChangeLog entry, \
 then edit its contents to your liking. When you're done, save the file and \
 exit your EDITOR. "
@@ -221,7 +150,13 @@
   $EDITOR "$CHANGELOG_ENTRY_FILE"
   NEWCHANGELOG=$(mktemp)
   # Eliminate any trailing newlines by going through a shell variable.
-  CHANGELOGENTRY=$(cat "$CHANGELOG_ENTRY_FILE")
+  # Also (1) eliminate tabs, (2) fix too little indentation, (3) fix too
+  # much indentation, and (4) eliminate trailing whitespace.
+  CHANGELOGENTRY=$(cat "$CHANGELOG_ENTRY_FILE" \
+                   | sed -e 's/\t/        /g' \
+                   | sed -e 's/^ \{1,7\}\([^ ]\)/        \1/g' \
+                   | sed -e 's/^ \{9,80\}\([^ ]\)/        \1/g' \
+                   | sed -e 's/ \+$//')
   [[ -n "$CHANGELOGENTRY" ]] || die "Empty ChangeLog entry."
   echo "$CHANGELOGENTRY" > "$NEWCHANGELOG"
   echo "" >> "$NEWCHANGELOG" # Explicitly insert two empty lines.
@@ -230,8 +165,9 @@
   mv "$NEWCHANGELOG" ChangeLog
 fi
 
-if [ $STEP -le 6 ] ; then
-  echo ">>> Step 6: Increment version number."
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Increment version number."
   restore_if_unset "BUILD"
   NEWBUILD=$(($BUILD + 1))
   confirm "Automatically increment BUILD_NUMBER? (Saying 'n' will fire up \
@@ -243,42 +179,26 @@
   else
     $EDITOR "$VERSION_FILE"
   fi
-  NEWMAJOR=$(grep "#define MAJOR_VERSION" "$VERSION_FILE" | awk '{print $NF}')
-  persist "NEWMAJOR"
-  NEWMINOR=$(grep "#define MINOR_VERSION" "$VERSION_FILE" | awk '{print $NF}')
-  persist "NEWMINOR"
-  NEWBUILD=$(grep "#define BUILD_NUMBER" "$VERSION_FILE" | awk '{print $NF}')
-  persist "NEWBUILD"
+  read_and_persist_version "NEW"
 fi
 
-if [ $STEP -le 7 ] ; then
-  echo ">>> Step 7: Commit to local branch."
-  restore_if_unset "NEWMAJOR"
-  restore_if_unset "NEWMINOR"
-  restore_if_unset "NEWBUILD"
-  git commit -a -m "Prepare push to trunk.  \
-Now working on version $NEWMAJOR.$NEWMINOR.$NEWBUILD." \
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Commit to local branch."
+  restore_version_if_unset "NEW"
+  PREPARE_COMMIT_MSG="Prepare push to trunk.  \
+Now working on version $NEWMAJOR.$NEWMINOR.$NEWBUILD."
+  persist "PREPARE_COMMIT_MSG"
+  git commit -a -m "$PREPARE_COMMIT_MSG" \
     || die "'git commit -a' failed."
 fi
 
-if [ $STEP -le 8 ] ; then
-  echo ">>> Step 8: Upload for code review."
-  echo -n "Please enter the email address of a V8 reviewer for your patch: "
-  read REVIEWER
-  git cl upload -r $REVIEWER --send-mail \
-    || die "'git cl upload' failed, please try again."
-fi
+upload_step
 
-if [ $STEP -le 9 ] ; then
-  echo ">>> Step 9: Commit to the repository."
-  echo "Please wait for an LGTM, then type \"LGTM<Return>\" to commit your \
-change. (If you need to iterate on the patch, do so in another shell.)"
-  unset ANSWER
-  while [ "$ANSWER" != "LGTM" ] ; do
-    [[ -n "$ANSWER" ]] && echo "That was not 'LGTM'."
-    echo -n "> "
-    read ANSWER
-  done
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Commit to the repository."
+  wait_for_lgtm
   # Re-read the ChangeLog entry (to pick up possible changes).
   cat ChangeLog | awk --posix '{
     if ($0 ~ /^[0-9]{4}-[0-9]{2}-[0-9]{2}:/) {
@@ -293,16 +213,24 @@
   git cl dcommit || die "'git cl dcommit' failed, please try again."
 fi
 
-if [ $STEP -le 10 ] ; then
-  echo ">>> Step 10: NOP"
-  # Present in the manual guide, not necessary (even harmful!) for this script.
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Fetch straggler commits that sneaked in \
+since this script was started."
+  git svn fetch || die "'git svn fetch' failed."
+  git checkout svn/bleeding_edge
+  restore_if_unset "PREPARE_COMMIT_MSG"
+  PREPARE_COMMIT_HASH=$(git log -1 --format=%H --grep="$PREPARE_COMMIT_MSG")
+  persist "PREPARE_COMMIT_HASH"
 fi
 
-if [ $STEP -le 11 ] ; then
-  echo ">>> Step 11: Squash commits into one."
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Squash commits into one."
   # Instead of relying on "git rebase -i", we'll just create a diff, because
   # that's easier to automate.
-  git diff svn/trunk > "$PATCH_FILE"
+  restore_if_unset "PREPARE_COMMIT_HASH"
+  git diff svn/trunk $PREPARE_COMMIT_HASH > "$PATCH_FILE"
   # Convert the ChangeLog entry to commit message format:
   # - remove date
   # - remove indentation
@@ -324,54 +252,29 @@
           need_space = 1;
         }
       }' > "$COMMITMSG_FILE" || die "Commit message editing failed."
-  LOOP=1
-  while [ $LOOP -eq 1 ] ; do
-    echo "This is the trunk commit message:"
-    echo "--------------------"
-    cat "$COMMITMSG_FILE"
-    echo -e "\n--------------------"
-    confirm "Does this look good to you? (Saying 'n' will fire up your \
-EDITOR so you can change the commit message. When you're done, save the \
-file and exit your EDITOR.)"
-    if [ $? -eq 0 ] ; then
-      LOOP=0
-    else
-      $EDITOR "$COMMITMSG_FILE"
-    fi
-  done
   rm -f "$CHANGELOG_ENTRY_FILE"
 fi
 
-if [ $STEP -le 12 ] ; then
-  echo ">>> Step 12: Create a new branch from trunk."
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Create a new branch from trunk."
   git checkout -b $TRUNKBRANCH svn/trunk \
     || die "Checking out a new branch '$TRUNKBRANCH' failed."
 fi
 
-if [ $STEP -le 13 ] ; then
-  echo ">>> Step 13: Apply squashed changes."
-  patch -p1 < "$PATCH_FILE" | tee >(awk '{print $NF}' >> "$TOUCHED_FILES_FILE")
-  [[ $? -eq 0 ]] || die "Applying the patch to trunk failed."
-  # Stage added and modified files.
-  TOUCHED_FILES=$(cat "$TOUCHED_FILES_FILE")
-  for FILE in $TOUCHED_FILES ; do
-    git add "$FILE"
-  done
-  # Stage deleted files.
-  DELETED_FILES=$(git status -s -uno --porcelain | grep "^ D" \
-                                                 | awk '{print $NF}')
-  for FILE in $DELETED_FILES ; do
-    git rm "$FILE"
-  done
-  rm -f "$PATCH_FILE"
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Apply squashed changes."
   rm -f "$TOUCHED_FILES_FILE"
+  apply_patch "$PATCH_FILE"
+  stage_files
+  rm -f "$PATCH_FILE"
 fi
 
-if [ $STEP -le 14 ] ; then
-  echo ">>> Step 14: Set correct version for trunk."
-  restore_if_unset "MAJOR"
-  restore_if_unset "MINOR"
-  restore_if_unset "BUILD"
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Set correct version for trunk."
+  restore_version_if_unset
   sed -e "/#define MAJOR_VERSION/s/[0-9]*$/$MAJOR/" \
       -e "/#define MINOR_VERSION/s/[0-9]*$/$MINOR/" \
       -e "/#define BUILD_NUMBER/s/[0-9]*$/$BUILD/" \
@@ -380,52 +283,110 @@
       -i "$VERSION_FILE" || die "Patching $VERSION_FILE failed."
 fi
 
-if [ $STEP -le 15 ] ; then
-  echo ">>> Step 15: Commit to local trunk branch."
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Commit to local trunk branch."
   git add "$VERSION_FILE"
   git commit -F "$COMMITMSG_FILE" || die "'git commit' failed."
   rm -f "$COMMITMSG_FILE"
 fi
 
-if [ $STEP -le 16 ] ; then
-  echo ">>> Step 16: Sanity check."
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Sanity check."
   confirm "Please check if your local checkout is sane: Inspect $VERSION_FILE, \
 compile, run tests. Do you want to commit this new trunk revision to the \
 repository?"
   [[ $? -eq 0 ]] || die "Execution canceled."
 fi
 
-if [ $STEP -le 17 ] ; then
-  echo ">>> Step 17. Commit to SVN."
-  git svn dcommit || die "'git svn dcommit' failed."
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Commit to SVN."
+  git svn dcommit | tee >(grep -E "^Committed r[0-9]+" \
+                          | sed -e 's/^Committed r\([0-9]\+\)/\1/' \
+                          > "$TRUNK_REVISION_FILE") \
+    || die "'git svn dcommit' failed."
+  TRUNK_REVISION=$(cat "$TRUNK_REVISION_FILE")
+  persist "TRUNK_REVISION"
+  rm -f "$TRUNK_REVISION_FILE"
 fi
 
-if [ $STEP -le 18 ] ; then
-  echo ">>> Step 18: Tag the new revision."
-  restore_if_unset "MAJOR"
-  restore_if_unset "MINOR"
-  restore_if_unset "BUILD"
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Tag the new revision."
+  restore_version_if_unset
   git svn tag $MAJOR.$MINOR.$BUILD -m "Tagging version $MAJOR.$MINOR.$BUILD" \
     || die "'git svn tag' failed."
 fi
 
-if [ $STEP -le 19 ] ; then
-  echo ">>> Step 19: Cleanup."
-  restore_if_unset "CURRENT_BRANCH"
-  git checkout -f $CURRENT_BRANCH
-  [[ "$TEMP_BRANCH" != "$CURRENT_BRANCH" ]] && git branch -D $TEMP_BRANCH
-  [[ "$BRANCHNAME" != "$CURRENT_BRANCH" ]] && git branch -D $BRANCHNAME
-  [[ "$TRUNKBRANCH" != "$CURRENT_BRANCH" ]] && git branch -D $TRUNKBRANCH
-fi
+if [ -n "$CHROME_PATH" ] ; then
 
-if [ $STEP -le 20 ] ; then
-  echo ">>> Step 20: Done!"
-  restore_if_unset "MAJOR"
-  restore_if_unset "MINOR"
-  restore_if_unset "BUILD"
-  echo "Congratulations, you have successfully created the trunk revision \
-$MAJOR.$MINOR.$BUILD. Please don't forget to update the v8rel spreadsheet, \
-and to roll this new version into Chromium."
-  # Clean up all temporary files.
-  rm -f "$PERSISTFILE_BASENAME"*
+  let CURRENT_STEP+=1
+  if [ $START_STEP -le $CURRENT_STEP ] ; then
+    echo ">>> Step $CURRENT_STEP: Switch to Chromium checkout."
+    V8_PATH=$(pwd)
+    persist "V8_PATH"
+    cd "$CHROME_PATH"
+    initial_environment_checks
+    # Check for a clean workdir.
+    [[ -z "$(git status -s -uno)" ]] \
+      || die "Workspace is not clean. Please commit or undo your changes."
+    # Assert that the DEPS file is there.
+    [[ -w "DEPS" ]] || die "DEPS file not present or not writable; \
+current directory is: $(pwd)."
+  fi
+
+  let CURRENT_STEP+=1
+  if [ $START_STEP -le $CURRENT_STEP ] ; then
+    echo ">>> Step $CURRENT_STEP: Update the checkout and create a new branch."
+    git checkout master || die "'git checkout master' failed."
+    git pull || die "'git pull' failed, please try again."
+    restore_if_unset "TRUNK_REVISION"
+    git checkout -b "v8-roll-$TRUNK_REVISION" \
+      || die "Failed to checkout a new branch."
+  fi
+
+  let CURRENT_STEP+=1
+  if [ $START_STEP -le $CURRENT_STEP ] ; then
+    echo ">>> Step $CURRENT_STEP: Create and upload CL."
+    # Patch DEPS file.
+    sed -r -e "/\"v8_revision\": /s/\"[0-9]+\"/\"$TRUNK_REVISION\"/" \
+        -i DEPS
+    restore_version_if_unset
+    echo -n "Please enter the email address of a reviewer for the roll CL: "
+    read REVIEWER
+    git commit -am "Update V8 to version $MAJOR.$MINOR.$BUILD.
+
+TBR=$REVIEWER" || die "'git commit' failed."
+    git cl upload --send-mail \
+      || die "'git cl upload' failed, please try again."
+    echo "CL uploaded."
+  fi
+
+  let CURRENT_STEP+=1
+  if [ $START_STEP -le $CURRENT_STEP ] ; then
+    echo ">>> Step $CURRENT_STEP: Returning to V8 checkout."
+    restore_if_unset "V8_PATH"
+    cd "$V8_PATH"
+  fi
+fi  # if [ -n "$CHROME_PATH" ]
+
+let CURRENT_STEP+=1
+if [ $START_STEP -le $CURRENT_STEP ] ; then
+  echo ">>> Step $CURRENT_STEP: Done!"
+  restore_version_if_unset
+  restore_if_unset "TRUNK_REVISION"
+  if [ -n "$CHROME_PATH" ] ; then
+    echo "Congratulations, you have successfully created the trunk revision \
+$MAJOR.$MINOR.$BUILD and rolled it into Chromium. Please don't forget to \
+update the v8rel spreadsheet:"
+  else
+    echo "Congratulations, you have successfully created the trunk revision \
+$MAJOR.$MINOR.$BUILD. Please don't forget to roll this new version into \
+Chromium, and to update the v8rel spreadsheet:"
+  fi
+  echo -e "$MAJOR.$MINOR.$BUILD\ttrunk\t$TRUNK_REVISION"
+  common_cleanup
+  [[ "$TRUNKBRANCH" != "$CURRENT_BRANCH" ]] && git branch -D $TRUNKBRANCH
 fi
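
With -c the script now continues past the V8 tag: it switches to the given
Chromium checkout, branches off master, rewrites the v8_revision entry in
DEPS to the just-committed trunk revision, and uploads the roll CL with the
reviewer you name as TBR. For example (path hypothetical):

    tools/push-to-trunk.sh -c ~/chromium/src
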
diff --git a/tools/test-wrapper-gypbuild.py b/tools/test-wrapper-gypbuild.py
index ad5449a..fda4105 100755
--- a/tools/test-wrapper-gypbuild.py
+++ b/tools/test-wrapper-gypbuild.py
@@ -73,6 +73,8 @@
       choices=PROGRESS_INDICATORS, default="mono")
   result.add_option("--report", help="Print a summary of the tests to be run",
       default=False, action="store_true")
+  result.add_option("--download-data", help="Download missing test suite data",
+      default=False, action="store_true")
   result.add_option("-s", "--suite", help="A test suite",
       default=[], action="append")
   result.add_option("-t", "--timeout", help="Timeout in seconds",
@@ -131,18 +133,22 @@
 
 
 def ProcessOptions(options):
-  if options.arch_and_mode != None and options.arch_and_mode != "":
-    tokens = options.arch_and_mode.split(".")
-    options.arch = tokens[0]
-    options.mode = tokens[1]
-  options.mode = options.mode.split(',')
+  if options.arch_and_mode == ".":
+    options.arch = []
+    options.mode = []
+  else:
+    if options.arch_and_mode != None and options.arch_and_mode != "":
+      tokens = options.arch_and_mode.split(".")
+      options.arch = tokens[0]
+      options.mode = tokens[1]
+    options.mode = options.mode.split(',')
+    options.arch = options.arch.split(',')
   for mode in options.mode:
     if not mode in ['debug', 'release']:
       print "Unknown mode %s" % mode
       return False
-  options.arch = options.arch.split(',')
   for arch in options.arch:
-    if not arch in ['ia32', 'x64', 'arm']:
+    if not arch in ['ia32', 'x64', 'arm', 'mips']:
       print "Unknown architecture %s" % arch
       return False
 
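
The reworked option handling above treats a bare "." as "no architectures, no
modes" (which later selects the single-shell code path) and otherwise splits
the arch.mode pair before splitting each side on commas, with mips newly
accepted. A sketch of that parsing, assuming the wrapper's usual defaults for
the standalone --arch/--mode options:

    def parse_arch_and_mode(arch_and_mode, arch='ia32', mode='release'):
        # "." means: no per-arch/per-mode shells (handled separately).
        if arch_and_mode == '.':
            return [], []
        if arch_and_mode:
            arch, mode = arch_and_mode.split('.')
        archs, modes = arch.split(','), mode.split(',')
        for m in modes:
            if m not in ['debug', 'release']:
                raise ValueError('Unknown mode %s' % m)
        for a in archs:
            if a not in ['ia32', 'x64', 'arm', 'mips']:
                raise ValueError('Unknown architecture %s' % a)
        return archs, modes

    # parse_arch_and_mode('ia32.release,debug')
    #   -> (['ia32'], ['release', 'debug'])
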
@@ -157,6 +163,8 @@
     result += ['--progress=' + options.progress]
   if options.report:
     result += ['--report']
+  if options.download_data:
+    result += ['--download-data']
   if options.suite != []:
     for suite in options.suite:
       result += ['--suite=../../test/' + suite]
@@ -165,7 +173,7 @@
   if options.snapshot:
     result += ['--snapshot']
   if options.special_command:
-    result += ['--special-command=' + options.special_command]
+    result += ['--special-command="%s"' % options.special_command]
   if options.valgrind:
     result += ['--valgrind']
   if options.cat:
@@ -189,9 +197,9 @@
   if options.crankshaft:
     result += ['--crankshaft']
   if options.shard_count != 1:
-    result += ['--shard_count=%s' % options.shard_count]
+    result += ['--shard-count=%s' % options.shard_count]
   if options.shard_run != 1:
-    result += ['--shard_run=%s' % options.shard_run]
+    result += ['--shard-run=%s' % options.shard_run]
   if options.noprof:
     result += ['--noprof']
   return result
@@ -232,6 +240,18 @@
                                env=env)
       returncodes += child.wait()
 
+  if len(options.mode) == 0 and len(options.arch) == 0:
+    print ">>> running tests"
+    shellpath = workspace + '/' + options.outdir
+    env['LD_LIBRARY_PATH'] = shellpath + '/lib.target'
+    shell = shellpath + '/d8'
+    child = subprocess.Popen(' '.join(args_for_children +
+                                      ['--shell=' + shell]),
+                             shell=True,
+                             cwd=workspace,
+                             env=env)
+    returncodes = child.wait()
+
   return returncodes
 
 
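
With both lists empty (the "." case), the new branch above runs the top-level
gyp-built d8 once instead of iterating over per-arch/per-mode output
directories, and points LD_LIBRARY_PATH at the lib.target directory the gyp
build places next to the binary. A condensed sketch of that branch; the
function boundary and parameter names are illustrative:

    import os
    import subprocess

    def run_single_shell(workspace, outdir, args_for_children):
        shellpath = os.path.join(workspace, outdir)
        env = os.environ.copy()
        # Shared libraries from the gyp build live in lib.target.
        env['LD_LIBRARY_PATH'] = os.path.join(shellpath, 'lib.target')
        shell = os.path.join(shellpath, 'd8')
        child = subprocess.Popen(
            ' '.join(args_for_children + ['--shell=' + shell]),
            shell=True, cwd=workspace, env=env)
        return child.wait()
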
diff --git a/tools/test.py b/tools/test.py
index ecc0062..0aacd99 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #
-# Copyright 2008 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -472,7 +472,7 @@
   popen_args = args
   prev_error_mode = SEM_INVALID_VALUE
   if utils.IsWindows():
-    popen_args = '"' + subprocess.list2cmdline(args) + '"'
+    popen_args = subprocess.list2cmdline(args)
     if context.suppress_dialogs:
       # Try to change the error mode to avoid dialogs on fatal errors. Don't
       # touch any existing error mode flags by merging the existing error mode.
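
The Windows fix above removes a pair of hand-added quotes around
subprocess.list2cmdline's output: list2cmdline already quotes each argument
that needs it, so wrapping the whole command line again produces one
mis-parsed token. For example:

    import subprocess

    args = ['d8', 'test file.js', '--flag']
    print(subprocess.list2cmdline(args))
    # d8 "test file.js" --flag         <- arguments individually quoted
    print('"' + subprocess.list2cmdline(args) + '"')
    # "d8 "test file.js" --flag"       <- the old, broken outer quoting
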
@@ -631,9 +631,15 @@
   def GetBuildRequirements(self, path, context):
     return self.GetConfiguration(context).GetBuildRequirements()
 
+  def DownloadData(self, context):
+    config = self.GetConfiguration(context)
+    if 'DownloadData' in dir(config):
+      config.DownloadData()
+
   def AddTestsToList(self, result, current_path, path, context, mode):
-    for v in self.GetConfiguration(context).VariantFlags():
-      tests = self.GetConfiguration(context).ListTests(current_path, path, mode, v)
+    config = self.GetConfiguration(context)
+    for v in config.VariantFlags():
+      tests = config.ListTests(current_path, path, mode, v)
       for t in tests: t.variant_flags = v
       result += tests
 
@@ -655,6 +661,12 @@
         result += test.GetBuildRequirements(rest, context)
     return result
 
+  def DownloadData(self, path, context):
+    (name, rest) = CarCdr(path)
+    for test in self.tests:
+      if not name or name.match(test.GetName()):
+        test.DownloadData(context)
+
   def ListTests(self, current_path, path, context, mode, variant_flags):
     (name, rest) = CarCdr(path)
     result = [ ]
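
DownloadData above is dispatched only when a suite's configuration object
actually defines the method ('DownloadData' in dir(config)), so suites without
external data need no stub. The same optional-method dispatch, sketched with
hasattr and made-up configuration classes:

    class SputnikConfig(object):
        def DownloadData(self):
            print('fetching sputnik test data...')

    class MjsunitConfig(object):
        pass  # no external data; no stub method required

    def download_data(config):
        # Equivalent to: if 'DownloadData' in dir(config): ...
        if hasattr(config, 'DownloadData'):
            config.DownloadData()

    download_data(SputnikConfig())  # fetches
    download_data(MjsunitConfig())  # silently does nothing
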
@@ -674,8 +686,8 @@
     'debug'   : '_g',
     'release' : '' }
 FLAGS = {
-    'debug'   : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
-    'release' : []}
+    'debug'   : ['--nobreak-on-abort', '--enable-slow-asserts', '--debug-code', '--verify-heap'],
+    'release' : ['--nobreak-on-abort']}
 TIMEOUT_SCALEFACTOR = {
     'debug'   : 4,
     'release' : 1 }
@@ -711,7 +723,7 @@
   def GetTimeout(self, testcase, mode):
     result = self.timeout * TIMEOUT_SCALEFACTOR[mode]
     if '--stress-opt' in self.GetVmFlags(testcase, mode):
-      return result * 2
+      return result * 4
     else:
       return result
 
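
The timeouts above compound: GetTimeout multiplies the base timeout by the
per-mode scale factor, and this change raises the extra --stress-opt
allowance from 2x to 4x. With the values shown, a 60-second base timeout in
debug mode with --stress-opt now permits 60 * 4 * 4 = 960 seconds:

    TIMEOUT_SCALEFACTOR = {'debug': 4, 'release': 1}

    def get_timeout(base, mode, vm_flags):
        result = base * TIMEOUT_SCALEFACTOR[mode]
        if '--stress-opt' in vm_flags:
            return result * 4  # was result * 2 before this change
        return result

    print(get_timeout(60, 'debug', ['--stress-opt']))  # 960
    print(get_timeout(60, 'release', []))              # 60
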
@@ -850,6 +862,9 @@
     elif self.op == '==':
       inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
       return not inter.IsEmpty()
+    elif self.op == '!=':
+      inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
+      return inter.IsEmpty()
     else:
       assert self.op == '&&'
       return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
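
The new '!=' case above mirrors '==' exactly: both intersect the outcome sets
of their operands, with '==' succeeding when the intersection is non-empty
and '!=' when it is empty. Sketched with plain sets standing in for the
framework's outcome objects:

    def evaluate(op, left_outcomes, right_outcomes):
        inter = left_outcomes & right_outcomes
        if op == '==':
            return len(inter) != 0  # some shared outcome
        if op == '!=':
            return len(inter) == 0  # no shared outcome
        raise ValueError('unknown operator %s' % op)

    print(evaluate('==', {'PASS', 'FAIL'}, {'FAIL'}))  # True
    print(evaluate('!=', {'PASS'}, {'FAIL'}))          # True
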
@@ -932,6 +947,9 @@
       elif self.Current(2) == '==':
         self.AddToken('==')
         self.Advance(2)
+      elif self.Current(2) == '!=':
+        self.AddToken('!=')
+        self.Advance(2)
       else:
         return None
     return self.tokens
@@ -984,7 +1002,7 @@
     return None
 
 
-BINARIES = ['==']
+BINARIES = ['==', '!=']
 def ParseOperatorExpression(scan):
   left = ParseAtomicExpression(scan)
   if not left: return None
@@ -1006,7 +1024,7 @@
     right = ParseOperatorExpression(scan)
     if not right:
       return None
-    left=  Operation(left, 'if', right)
+    left = Operation(left, 'if', right)
   return left
 
 
@@ -1186,6 +1204,8 @@
       default='scons')
   result.add_option("--report", help="Print a summary of the tests to be run",
       default=False, action="store_true")
+  result.add_option("--download-data", help="Download missing test suite data",
+      default=False, action="store_true")
   result.add_option("-s", "--suite", help="A test suite",
       default=[], action="append")
   result.add_option("-t", "--timeout", help="Timeout in seconds",
@@ -1211,6 +1231,7 @@
         dest="suppress_dialogs", default=True, action="store_true")
   result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
         dest="suppress_dialogs", action="store_false")
+  result.add_option("--mips-arch-variant", help="mips architecture variant: mips32r1/mips32r2", default="mips32r2");
   result.add_option("--shell", help="Path to V8 shell", default="d8")
   result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true")
   result.add_option("--store-unexpected-output",
@@ -1272,6 +1293,9 @@
   if options.snapshot:
     options.scons_flags.append("snapshot=on")
   global VARIANT_FLAGS
+  if options.mips_arch_variant:
+    options.scons_flags.append("mips_arch_variant=" + options.mips_arch_variant)
+
   if options.stress_only:
     VARIANT_FLAGS = [['--stress-opt', '--always-opt']]
   if options.nostress:
@@ -1452,6 +1476,11 @@
   root.GetTestStatus(context, sections, defs)
   config = Configuration(sections, defs)
 
+  # Download missing test suite data if requested.
+  if options.download_data:
+    for path in paths:
+      root.DownloadData(path, context)
+
   # List the tests
   all_cases = [ ]
   all_unused = [ ]
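
The --download-data wiring above walks each requested test path down the
suite tree (the same CarCdr-style traversal ListTests uses) before any tests
are listed. A simplified sketch of that traversal, using exact-name matching
where the framework matches pattern objects:

    def car_cdr(path):
        # ['sputnik', 'S8'] -> ('sputnik', ['S8']); empty matches everything.
        return (None, []) if len(path) == 0 else (path[0], path[1:])

    def download_data(suites, path, context):
        (name, rest) = car_cdr(path)
        for suite in suites:
            if not name or name == suite.GetName():
                suite.DownloadData(context)
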
diff --git a/tools/tickprocessor-driver.js b/tools/tickprocessor-driver.js
index 4201e43..9af5ab6 100644
--- a/tools/tickprocessor-driver.js
+++ b/tools/tickprocessor-driver.js
@@ -52,6 +52,7 @@
 var tickProcessor = new TickProcessor(
   new (entriesProviders[params.platform])(params.nm),
   params.separateIc,
+  params.callGraphSize,
   params.ignoreUnknown,
   params.stateFilter,
   snapshotLogProcessor);
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 5f57835..05a3369 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -146,7 +146,12 @@
 
 
 function TickProcessor(
-    cppEntriesProvider, separateIc, ignoreUnknown, stateFilter, snapshotLogProcessor) {
+    cppEntriesProvider,
+    separateIc,
+    callGraphSize,
+    ignoreUnknown,
+    stateFilter,
+    snapshotLogProcessor) {
   LogReader.call(this, {
       'shared-library': { parsers: [null, parseInt, parseInt],
           processor: this.processSharedLibrary },
@@ -181,6 +186,7 @@
       'end-code-region': null });
 
   this.cppEntriesProvider_ = cppEntriesProvider;
+  this.callGraphSize_ = callGraphSize;
   this.ignoreUnknown_ = ignoreUnknown;
   this.stateFilter_ = stateFilter;
   this.snapshotLogProcessor_ = snapshotLogProcessor;
@@ -240,6 +246,7 @@
 
 TickProcessor.CALL_PROFILE_CUTOFF_PCT = 2.0;
 
+TickProcessor.CALL_GRAPH_SIZE = 5;
 
 /**
  * @override
@@ -535,7 +542,7 @@
           padLeft(rec.parentTotalPercent.toFixed(1), 5) + '%  ' +
           indentStr + rec.internalFuncName);
     // Limit backtrace depth.
-    if (indent < 10) {
+    if (indent < 2 * self.callGraphSize_) {
       self.printHeavyProfile(rec.children, indent + 2);
     }
     // Delimit top-level functions.
@@ -764,6 +771,8 @@
         'Show only ticks from OTHER VM state'],
     '-e': ['stateFilter', TickProcessor.VmStates.EXTERNAL,
         'Show only ticks from EXTERNAL VM state'],
+    '--call-graph-size': ['callGraphSize', TickProcessor.CALL_GRAPH_SIZE,
+        'Set the call graph size'],
     '--ignore-unknown': ['ignoreUnknown', true,
         'Exclude ticks of unknown code entries from processing'],
     '--separate-ic': ['separateIc', true,
@@ -792,6 +801,7 @@
   snapshotLogFileName: null,
   platform: 'unix',
   stateFilter: null,
+  callGraphSize: 5,
   ignoreUnknown: false,
   separateIc: false,
   nm: 'nm'
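
The tickprocessor change threads a configurable callGraphSize (default 5,
settable via --call-graph-size) through to the heavy-profile printer,
replacing the hard-coded depth limit of 10. Each recursion level adds two to
the indent, so recursion stops once indent reaches 2 * callGraphSize. A
Python rendering of that depth cap, with a made-up node shape:

    CALL_GRAPH_SIZE = 5  # default, as in the patch

    def print_heavy_profile(nodes, call_graph_size=CALL_GRAPH_SIZE, indent=0):
        for rec in nodes:
            print('%s%s' % (' ' * indent, rec['name']))
            # Limit backtrace depth, as in printHeavyProfile.
            if indent < 2 * call_graph_size:
                print_heavy_profile(rec['children'], call_graph_size,
                                    indent + 2)

    tree = [{'name': 'outer',
             'children': [{'name': 'inner', 'children': []}]}]
    print_heavy_profile(tree, call_graph_size=1)
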
diff --git a/tools/utils.py b/tools/utils.py
index fb94d14..232314c 100644
--- a/tools/utils.py
+++ b/tools/utils.py
@@ -61,6 +61,8 @@
     return 'openbsd'
   elif id == 'SunOS':
     return 'solaris'
+  elif id == 'NetBSD':
+    return 'netbsd'
   else:
     return None
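
The utils.py hunk extends the platform-name mapping with NetBSD. A hedged
reconstruction of the lookup as a table; only the OpenBSD, SunOS and NetBSD
entries are visible in the hunk, and the rest of the function is assumed to
follow the same shape:

    import platform

    CANONICAL_OS = {
        'OpenBSD': 'openbsd',
        'SunOS': 'solaris',
        'NetBSD': 'netbsd',  # the entry this change adds
    }

    def guess_os():
        # platform.system() plays the role of the function's 'id' value.
        return CANONICAL_OS.get(platform.system())  # None if unrecognized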